clickhouse client is going down because of query on Materialized Views - clickhouse

when creating mv with the target table
target table:
CREATE TABLE target_test
(
day date,
hour UInt32,
test_sum SimpleAggregateFunction(sum, Float64)
)
ENGINE = SummingMergeTree()
PARTITION BY toRelativeWeekNum(toDateTime(day))
ORDER BY (day, hour)
CREATE MATERIALIZED VIEW target_test_mv
TO target_test
as select
toDate(session_ts) as day,
toHour(toDateTime(session_ts)) as hour,
sum(if (test is null, 0,test)) as test_sum
from events
where session_ts>= now()-1000
group by day, hour
The data is being inserted into the target table correctly, but when I run the following query,
the clickhouse server crashes. Why does this query cause a crash?
select sum(revenue_sum) from target_test_mv
this is from the log :
2020.12.07 12:03:48.059559 [ 19243 ] {} <Fatal> BaseDaemon: ########################################
2020.12.07 12:03:48.059595 [ 19244 ] {} <Fatal> BaseDaemon: ########################################
2020.12.07 12:03:48.059642 [ 19244 ] {} <Fatal> BaseDaemon: (version 20.4.2.9 (official build)) (from thread 19242) (query_id: 6651689c-484a-4b53-a174-06fd01848208) Received signal Segmentation fault (11).
2020.12.07 12:03:48.059644 [ 19243 ] {} <Fatal> BaseDaemon: (version 20.4.2.9 (official build)) (from thread 19241) (query_id: 6651689c-484a-4b53-a174-06fd01848208) Received signal Segmentation fault (11).
2020.12.07 12:03:48.059685 [ 19244 ] {} <Fatal> BaseDaemon: Address: NULL pointer. Access: read. Address not mapped to object.
2020.12.07 12:03:48.059691 [ 19243 ] {} <Fatal> BaseDaemon: Address: NULL pointer. Access: read. Address not mapped to object.
2020.12.07 12:03:48.059710 [ 19244 ] {} <Fatal> BaseDaemon: Stack trace: 0xcad040b 0xd26b20d 0xd281202 0xdcbb2f3 0xdcbf909 0xdb0b9a1 0xdb0f95d 0xdb10324 0x901e26b 0x901c753 0x7fec413f0dd5 0x7fec41a04ead
2020.12.07 12:03:48.059712 [ 19243 ] {} <Fatal> BaseDaemon: Stack trace: 0xcad040b 0xd26b20d 0xd281202 0xdcbb2f3 0xdcbf909 0xdb0b9a1 0xdb0f95d 0xdb10324 0x901e26b 0x901c753 0x7fec413f0dd5 0x7fec41a04ead
2020.12.07 12:03:48.059781 [ 19244 ] {} <Fatal> BaseDaemon: 3. DB::IAggregateFunctionHelper<DB::AggregateFunctionNullUnary<true> >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const # 0xcad040b in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse
2020.12.07 12:03:48.059781 [ 19243 ] {} <Fatal> BaseDaemon: 3. DB::IAggregateFunctionHelper<DB::AggregateFunctionNullUnary<true> >::addBatchSinglePlace(unsigned long, char*, DB::IColumn const**, DB::Arena*) const # 0xcad040b in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse
2020.12.07 12:03:48.059856 [ 19243 ] {} <Fatal> BaseDaemon: 4. DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) # 0xd26b20d in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse
2020.12.07 12:03:48.059858 [ 19244 ] {} <Fatal> BaseDaemon: 4. DB::Aggregator::executeWithoutKeyImpl(char*&, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, DB::Arena*) # 0xd26b20d in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse
2020.12.07 12:03:48.059902 [ 19243 ] {} <Fatal> BaseDaemon: 5. DB::Aggregator::executeOnBlock(std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn> > >, unsigned long, DB::AggregatedDataVariants&, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> >&, std::__1::vector<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> >, std::__1::allocator<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> > > >&, bool&) # 0xd281202 in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse
2020.12.07 12:03:48.059917 [ 19244 ] {} <Fatal> BaseDaemon: 5. DB::Aggregator::executeOnBlock(std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn> > >, unsigned long, DB::AggregatedDataVariants&, std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> >&, std::__1::vector<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> >, std::__1::allocator<std::__1::vector<DB::IColumn const*, std::__1::allocator<DB::IColumn const*> > > >&, bool&) # 0xd281202 in /opt/clickhouse/engine/clickhouse-common-static-20.4.2.9/usr/bin/clickhouse

I've tried a scenario similar to yours in a more recent version of ClickHouse and it works for me:
CREATE TABLE target_test
( day date,
hour UInt32,
test_sum Float64 )
ENGINE = SummingMergeTree() PARTITION BY toRelativeWeekNum(toDateTime(day)) ORDER BY (day, hour)
CREATE MATERIALIZED VIEW target_test_mv TO target_test as
select
toDate(event_time) as day,
toHour(toDateTime(event_time)) as hour,
sum(if (read_rows is null, 0,read_rows)) as test_sum
from system.query_log
group by day, hour
limit 10000
:) select sum(test_sum) from target_test;
SELECT sum(test_sum)
FROM target_test
┌─sum(test_sum)─┐
│ 20661 │
└───────────────┘
Note I'm using the query_log for the test and I've removed the SimpleAggregateFunction since the SummingMergeTree takes care of that.
You could try several things:
Try to reproduce the scenario above with the system.query_log instead of your events table, to discard there's any problem with your table.
Try on a more recent version of ClickHouse (20.4.2.9 is from May)
If your scenario still fails and given there's a segmentation fault in the logs, you can open a case in the ClickHouse GitHub repo.

Related

Thread safety of boost::regex_match

ThreadSanitizer gives me an alleged race condition in boost::regex_match. Is this a false positive? I cannot find synchronization primitives that depend on BOOST_HAS_THREADS in the callstacks. All input parameters are on the stack of the respective thread and not shared.
==================
WARNING: ThreadSanitizer: data race (pid=1893)
Write of size 4 at 0x007e19fa8ff0 by thread T36:
#0 boost::re_detail_106700::saved_state::saved_state(unsigned int) include/boost/regex/v4/perl_matcher_non_recursive.hpp:59
#1 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::push_recursion_stopper() include/boost/regex/v4/perl_matcher_non_recursive.hpp:288
#2 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match_all_states() include/boost/regex/v4/perl_matcher_non_recursive.hpp:202
#3 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match_prefix() include/boost/regex/v4/perl_matcher_common.hpp:336
#4 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match_imp() include/boost/regex/v4/perl_matcher_common.hpp:220
#5 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match() include/boost/regex/v4/perl_matcher_common.hpp:193
#6 bool boost::regex_match<char const*, std::allocator<boost::sub_match<char const*> >, char, boost::regex_traits<char, boost::cpp_regex_traits<char> > >(char const*, char const*, boost::match_results<char const*, std::allocator<boost::sub_match<char const*> > >&, boost::basic_regex<char, boost::regex_traits<char, boost::cpp_regex_traits<char> > > const&, boost::regex_constants::_match_flags) include/boost/regex/v4/regex_match.hpp:50
#7 bool boost::regex_match<char, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >(char const*, boost::match_results<char const*, std::allocator<boost::sub_match<char const*> > >&, boost::basic_regex<char, boost::regex_traits<char, boost::cpp_regex_traits<char> > > const&, boost::regex_constants::_match_flags) /var/lib/jenkins/workspace/nightly-jnd-navigation__tsd-nav-rsi-viwi-dev/system/ext-boost-dev/dist/17-89ad-bc06/usr/include/boost/regex/v4/regex_match.hpp:73 (tsd.nav.mainapp.mib3+0x3dd0610)
<...>
Previous write of size 4 at 0x007e19fa8ff0 by thread T105:
[failed to restore the stack]
Location is heap block of size 4096 at 0x007e19fa8000 allocated by thread T105:
#0 operator new(unsigned long) <null> (libtsan.so.0+0x79f54)
#1 boost::re_detail_106700::save_state_init::save_state_init(boost::re_detail_106700::saved_state**, boost::re_detail_106700::saved_state**) include/boost/regex/v4/perl_matcher_non_recursive.hpp:107
#2 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match_imp() include/boost/regex/v4/perl_matcher_common.hpp:202
#3 boost::re_detail_106700::perl_matcher<char const*, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >::match() include/boost/regex/v4/perl_matcher_common.hpp:193
#4 bool boost::regex_match<char const*, std::allocator<boost::sub_match<char const*> >, char, boost::regex_traits<char, boost::cpp_regex_traits<char> > >(char const*, char const*, boost::match_results<char const*, std::allocator<boost::sub_match<char const*> > >&, boost::basic_regex<char, boost::regex_traits<char, boost::cpp_regex_traits<char> > > const&, boost::regex_constants::_match_flags) include/boost/regex/v4/regex_match.hpp:50
#5 bool boost::regex_match<char, std::allocator<boost::sub_match<char const*> >, boost::regex_traits<char, boost::cpp_regex_traits<char> > >(char const*, boost::match_results<char const*, std::allocator<boost::sub_match<char const*> > >&, boost::basic_regex<char, boost::regex_traits<char, boost::cpp_regex_traits<char> > > const&, boost::regex_constants::_match_flags) include/boost/regex/v4/regex_match.hpp:73
<...>
Regards
I think the documentation is pretty definitive:
Thread Safety
The Boost.Regex library is thread safe when Boost is: you can verify
that Boost is in thread safe mode by checking to see if
BOOST_HAS_THREADS is defined: this macro is set automatically by the
config system when threading support is turned on in your compiler.
Class basic_regex and its typedefs regex and wregex are thread safe,
in that compiled regular expressions can safely be shared between
threads. The matching algorithms regex_match, regex_search, and
regex_replace are all re-entrant and thread safe. Class match_results
is now thread safe, in that the results of a match can be safely
copied from one thread to another (for example one thread may find
matches and push match_results instances onto a queue, while another
thread pops them off the other end), otherwise use a separate instance
of match_results per thread.
The POSIX API functions are all re-entrant and thread safe, regular
expressions compiled with regcomp can also be shared between threads.
The class RegEx is only thread safe if each thread gets its own RegEx
instance (apartment threading) - this is a consequence of RegEx
handling both compiling and matching regular expressions.
Finally note that changing the global locale invalidates all compiled
regular expressions, therefore calling set_locale from one thread
while another uses regular expressions will produce unpredictable
results.
There is also a requirement that there is only one thread executing
prior to the start of main().
So, you need to make sure:
you are not sharing the match_results object (your description doesn't say, because that's not an input argument depending on your definition)
the regex is pre-compiled:
[are] thread safe,
in that compiled regular expressions can safely be shared between
threads

Materialize index throws DB::Exception: Missing columns

I ran into a problem with MATERIALIZE INDEX on version 21.3.
I created a table with 3 columns: u64, i32 and s.
CREATE TABLE test_idx (`u64` UInt64, `i32` Int32, `s` String) ENGINE = MergeTree() ORDER BY u64;
Then I inserted 3 million rows into it and ran OPTIMIZE TABLE test_idx FINAL (to make the part Wide)
select name, part_type from system.parts where table='test_idx' and active=1;
┌─name──────────────────┬─part_type─┐
│ all_1_21762_111_21773 │ Wide │
└───────────────────────┴───────────┘
Then I added two indexes to the table
alter table test_idx add INDEX a (u64, s) TYPE minmax GRANULARITY 3;
alter table test_idx add INDEX b (i32 * length(s)) TYPE set(1000) GRANULARITY 4;
Then I materialize the index a to make the index useful for old data.
alter table test_idx materialize index a;
Here's the Exception and stack trace.
2022.07.14 04:06:38.192403 [ 11633 ] {} <Error> DB::IBackgroundJobExecutor::jobExecutingTask()::<lambda()>: Code: 47, e.displayText() = DB::Exception: Missing columns: 'i32' while processing query: 'u64, s, i32 * length(s)', required columns: 'u64' 's' 'i32' 'u64' 's' 'i32', Stack trace (when copying this message, always include the lines below):
0. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/poco/Foundation/src/Exception.cpp:27: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) # 0xe16fb61 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
1. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../src/Common/Exception.cpp:55: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) # 0x476f358 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
2. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../src/Interpreters/TreeRewriter.cpp:752: DB::TreeRewriterResult::collectUsedColumns(std::__1::shared_ptr<DB::IAST> const&, bool) (.cold) # 0x416fba2 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
3. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/new:237: DB::TreeRewriter::analyze(std::__1::shared_ptr<DB::IAST>&, DB::NamesAndTypesList const&, std::__1::shared_ptr<DB::IStorage const>, std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, bool, bool) const # 0xa8a1228 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
4. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/list:753: DB::MergeTreeDataMergerMutator::getIndicesToRecalculate(std::__1::shared_ptr<DB::IBlockInputStream>&, DB::NamesAndTypesList const&, std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, DB::Context const&) # 0xac80bbb in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
5. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/list:753: DB::MergeTreeDataMergerMutator::mutatePartToTemporaryPart(DB::FutureMergedMutatedPart const&, std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, DB::MutationCommands const&, DB::BackgroundProcessListEntry<DB::MergeListElement, DB::MergeInfo>&, long, DB::Context const&, std::__1::unique_ptr<DB::IReservation, std::__1::default_delete<DB::IReservation> > const&, std::__1::shared_ptr<DB::RWLockImpl::LockHolderImpl>&) # 0xac87d74 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
6. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/type_traits:3934: DB::StorageMergeTree::mutateSelectedPart(std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, DB::StorageMergeTree::MergeMutateSelectedEntry&, std::__1::shared_ptr<DB::RWLockImpl::LockHolderImpl>&) # 0xaaf12d2 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
7. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../src/Storages/StorageMergeTree.cpp:967: bool std::__1::__function::__policy_invoker<bool ()>::__call_impl<std::__1::__function::__default_alloc_func<DB::StorageMergeTree::getDataProcessingJob()::'lambda'(), bool ()> >(std::__1::__function::__policy_storage const*) # 0xaaf15dc in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
8. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../src/Storages/MergeTree/BackgroundJobsExecutor.cpp:103: void std::__1::__function::__policy_invoker<void ()>::__call_impl<std::__1::__function::__default_alloc_func<DB::IBackgroundJobExecutor::jobExecutingTask()::'lambda'(), void ()> >(std::__1::__function::__policy_storage const*) # 0xabefd13 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
9. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/functional:2212: ThreadPoolImpl<ThreadFromGlobalPool>::worker(std::__1::__list_iterator<ThreadFromGlobalPool, void*>) # 0x47cd3a2 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
10. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../src/Common/ThreadPool.h:181: ThreadFromGlobalPool::ThreadFromGlobalPool<void ThreadPoolImpl<ThreadFromGlobalPool>::scheduleImpl<void>(std::__1::function<void ()>, int, std::__1::optional<unsigned long>)::'lambda1'()>(void&&, void ThreadPoolImpl<ThreadFromGlobalPool>::scheduleImpl<void>(std::__1::function<void ()>, int, std::__1::optional<unsigned long>)::'lambda1'()&&...)::'lambda'()::operator()() # 0x47cd84e in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
11. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/functional:2212: ThreadPoolImpl<std::__1::thread>::worker(std::__1::__list_iterator<std::__1::thread, void*>) # 0x47ccb42 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
12. /root/zyf/workspace/clickhouse/build/RelWithDebInfo/../../contrib/libcxx/include/memory:1655: void* std::__1::__thread_proxy<std::__1::tuple<std::__1::unique_ptr<std::__1::__thread_struct, std::__1::default_delete<std::__1::__thread_struct> >, void ThreadPoolImpl<std::__1::thread>::scheduleImpl<void>(std::__1::function<void ()>, int, std::__1::optional<unsigned long>)::'lambda1'()> >(void*) # 0x47cb4f3 in /root/zyf/workspace/clickhouse/build/RelWithDebInfo/bin/clickhouse
13. start_thread # 0x8609 in /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
14. __clone # 0x11f133 in /usr/lib/x86_64-linux-gnu/libc-2.31.so
(version 21.3.14.1.7)
Column i32 is not used for index a, but why here shows Missing columns? Or I can't use skipping index in this way?
Upgrading to the latest version of ClickHouse should resolve your issue. Is there any specific reason for you to use 21.3?
Try detach all the data parts that causes the error (assuming that the MATERIALIZE mutation had succeeded on most data parts), and attach them to a new table with same structure, do MATERIALIZE on the new table, and hopefully it will complete with no error. Finally ALTER TABLE ... MOVE PARTITION ... to move all partitions back to the old table.

ClickHouse ALTER TABLE DELETE Exception Code 241

I'm having a problem on ALTER TABLE DELETE. I tried to execute ALTER DELETE with various max_block_size (8192, 4096, 2048, 1024). Also tried to change merge_max_block_size of the table (8192, 4096, 2048, 1024). No luck.
Full exception text:
SQL Error [341]: ClickHouse exception, code: 341, host: 127.0.0.1, port: 40849; Code: 341, e.displayText() = DB::Exception: Exception happened during execution of mutation 'mutation_94378.txt' with part '1_7146_15644_5_94355' reason: 'Code: 241, e.displayText() = DB::Exception: Memory limit (total) exceeded: would use 14.05 GiB (attempt to allocate chunk of 4425496 bytes), maximum: 14.05 GiB: (avg_value_size_hint = 35.49267578125, avg_chars_size = 32.9912109375, limit = 8192): (while reading column market_code): (while reading from part /mnt/store/clickhouse/store/04d/04d43c3a-9822-4168-a12a-664806ec3b67/1_7146_15644_5_94355/ from mark 0 with max_rows_to_read = 8192): While executing MergeTree (version 21.1.3.32 (official build))'. This error maybe retryable or not. In case of unretryable error, mutation can be killed with KILL MUTATION query (version 21.1.3.32 (official build))
clickhouse-server.log:
2021.03.23 08:30:04.621721 [ 268131 ] {} AggregatingTransform: Aggregating
2021.03.23 08:30:04.621756 [ 268131 ] {} Aggregator: Aggregation method: without_key
2021.03.23 08:30:05.077471 [ 268131 ] {} AggregatingTransform: Aggregated. 20 to 1 rows (from 0.00 B) in 0.683282494 sec. (29.27047037736635 rows/sec., 0.00 B/sec.)
2021.03.23 08:30:05.077501 [ 268131 ] {} Aggregator: Merging aggregated data
2021.03.23 08:30:05.077675 [ 268131 ] {} test.dt_customer (04d43c3a-9822-4168-a12a-664806ec3b67) (MergerMutator): Mutating part 1_7146_15644_5_94355 to mutation version 94378
2021.03.23 08:30:05.078565 [ 268131 ] {} test.dt_customer (04d43c3a-9822-4168-a12a-664806ec3b67) (SelectExecutor): Key condition: unknown
2021.03.23 08:30:05.078581 [ 268131 ] {} test.dt_customer (04d43c3a-9822-4168-a12a-664806ec3b67) (SelectExecutor): MinMax index condition: unknown
2021.03.23 08:30:05.078589 [ 268131 ] {} test.dt_customer (04d43c3a-9822-4168-a12a-664806ec3b67) (SelectExecutor): Not using primary index on part 1_7146_15644_5_94355
2021.03.23 08:30:05.078597 [ 268131 ] {} test.dt_customer (04d43c3a-9822-4168-a12a-664806ec3b67) (SelectExecutor): Selected 1 parts by partition key, 1 parts by primary key, 15147 marks by primary key, 15147 marks to read from 1 ranges
2021.03.23 08:30:05.078658 [ 268131 ] {} MergeTreeSelectProcessor: Reading 1 ranges from part 1_7146_15644_5_94355, approx. 124080000 rows starting from 0
2021.03.23 08:30:05.078722 [ 268131 ] {} InterpreterSelectQuery: FetchColumns -> Complete
2021.03.23 08:30:05.162644 [ 268131 ] {} MemoryTracker: Peak memory usage: 147.51 MiB.
2021.03.23 08:30:05.162743 [ 268131 ] {} auto DB::IBackgroundJobExecutor::jobExecutingTask()::(anonymous class)::operator()() const: Code: 241, e.displayText() = DB::Exception: Memory limit (total) exceeded: would use 14.05 GiB (attempt to allocate chunk of 4426357 bytes), maximum: 14.05 GiB: (avg_value_size_hint = 35.49267578125, avg_chars_size = 32.9912109375, limit = 8192): (while reading column market_code): (while reading from part /mnt/store/clickhouse/store/04d/04d43c3a-9822-4168-a12a-664806ec3b67/1_7146_15644_5_94355/ from mark 0 with max_rows_to_read = 8192): While executing MergeTree, Stack trace (when copying this message, always include the lines below):
DB::Exception::Exception<char const*, char const*, std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator >, long&, std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator > >(int, std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator > const&, char const*&&, char const*&&, std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator >&&, long&, std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator >&&) # 0x8625620 in /usr/bin/clickhouse
MemoryTracker::alloc(long) # 0x8624f7d in /usr/bin/clickhouse
MemoryTracker::alloc(long) # 0x8624ce4 in /usr/bin/clickhouse
MemoryTracker::alloc(long) # 0x8624ce4 in /usr/bin/clickhouse
DB::DataTypeString::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const # 0xea36a74 in /usr/bin/clickhouse
DB::MergeTreeReaderWide::readData(std::__1::basic_string<char, std::__1::char_traits, std::__1::allocator > const&, DB::IDataType const&, DB::IColumn&, unsigned long, bool, unsigned long, bool) # 0xf6f8d19 in /usr/bin/clickhouse
DB::MergeTreeReaderWide::readRows(unsigned long, bool, unsigned long, std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn> > >&) # 0xf6f8030 in /usr/bin/clickhouse
DB::MergeTreeRangeReader::DelayedStream::finalize(std::__1::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn> > >&) # 0xf700b9a in /usr/bin/clickhouse
DB::MergeTreeRangeReader::startReadingChain(unsigned long, std::__1::deque<DB::MarkRange, std::__1::allocator<DB::MarkRange> >&) # 0xf705066 in /usr/bin/clickhouse
DB::MergeTreeRangeReader::read(unsigned long, std::__1::deque<DB::MarkRange, std::__1::allocator<DB::MarkRange> >&) # 0xf703c24 in /usr/bin/clickhouse
DB::MergeTreeBaseSelectProcessor::readFromPartImpl() # 0xf6fe2b3 in /usr/bin/clickhouse
DB::MergeTreeBaseSelectProcessor::readFromPart() # 0xf6fef0d in /usr/bin/clickhouse
DB::MergeTreeBaseSelectProcessor::generate() # 0xf6fd7ab in /usr/bin/clickhouse
DB::ISource::tryGenerate() # 0xf8d8df5 in /usr/bin/clickhouse
DB::ISource::work() # 0xf8d8b1a in /usr/bin/clickhouse
DB::SourceWithProgress::work() # 0xfa4045a in /usr/bin/clickhouse
? # 0xf9129bc in /usr/bin/clickhouse
DB::PipelineExecutor::executeStepImpl(unsigned long, unsigned long, std::__1::atomic*) # 0xf90f766 in /usr/bin/clickhouse
DB::PipelineExecutor::executeStep(std::__1::atomic*) # 0xf90e0ec in /usr/bin/clickhouse
DB::PullingPipelineExecutor::pull(DB::Chunk&) # 0xf91c008 in /usr/bin/clickhouse
DB::PullingPipelineExecutor::pull(DB::Block&) # 0xf91c250 in /usr/bin/clickhouse
DB::PipelineExecutingBlockInputStream::readImpl() # 0xf909fd4 in /usr/bin/clickhouse
DB::IBlockInputStream::read() # 0xe977455 in /usr/bin/clickhouse
DB::CheckSortedBlockInputStream::readImpl() # 0xf063fc1 in /usr/bin/clickhouse
DB::IBlockInputStream::read() # 0xe977455 in /usr/bin/clickhouse
DB::ExpressionBlockInputStream::readImpl() # 0xf64d537 in /usr/bin/clickhouse
DB::IBlockInputStream::read() # 0xe977455 in /usr/bin/clickhouse
DB::MaterializingBlockInputStream::readImpl() # 0xee33eed in /usr/bin/clickhouse
DB::IBlockInputStream::read() # 0xe977455 in /usr/bin/clickhouse
DB::MergeTreeDataMergerMutator::mutateAllPartColumns(std::__1::shared_ptr<DB::IMergeTreeDataPart>, std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, std::__1::vector<std::__1::shared_ptr<DB::IMergeTreeIndex const>, std::__1::allocator<std::__1::shared_ptr<DB::IMergeTreeIndex const> > > const&, std::__1::shared_ptr<DB::IBlockInputStream>, long, std::__1::shared_ptr<DB::ICompressionCodec> const&, DB::BackgroundProcessListEntry<DB::MergeListElement, DB::MergeInfo>&, bool, bool) const # 0xf62ff49 in /usr/bin/clickhouse
DB::MergeTreeDataMergerMutator::mutatePartToTemporaryPart(DB::FutureMergedMutatedPart const&, std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, DB::MutationCommands const&, DB::BackgroundProcessListEntry<DB::MergeListElement, DB::MergeInfo>&, long, DB::Context const&, std::__1::unique_ptr<DB::IReservation, std::__1::default_delete<DB::IReservation> > const&, std::__1::shared_ptr<DB::RWLockImpl::LockHolderImpl>&) # 0xf62c06e in /usr/bin/clickhouse
DB::StorageMergeTree::mutateSelectedPart(std::__1::shared_ptr<DB::StorageInMemoryMetadata const> const&, DB::StorageMergeTree::MergeMutateSelectedEntry&, std::__1::shared_ptr<DB::RWLockImpl::LockHolderImpl>&) # 0xf3c6fc7 in /usr/bin/clickhouse
(version 21.1.3.32 (official build))

Clickhouse 1.1.54343 Data ingestion in distributed ReplicatedMergeTree table error

I am facing an issue with data loading and merging of the table in Clickhouse 1.1.54343 and am not able to insert any data into Clickhouse.
We have 3 node cluster and we add 300 columns to the tables in data ingestion and ingesting data from JSON files.
We were able to save data in the tables
Create Table
*-- Each Node*
CREATE TABLE IF NOT EXISTS AudiencePlanner.reached_pod
(
date Date,
p_id String,
language String,
city String,
state String,
platform String,
manufacturer String,
model String,
content_id String
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard1/reached_pod', 'clickhouse1')
PARTITION BY date
ORDER BY (date, platform, language, city, state, manufacturer, model, content_id, p_id);
CREATE TABLE IF NOT EXISTS AudiencePlanner2.reached_pod
(
date Date,
p_id String,
language String,
city String,
state String,
platform String,
manufacturer String,
model String,
content_id String
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard3/reached_pod', 'clickhouse1')
PARTITION BY date
ORDER BY (date, platform, language, city, state, manufacturer, model, content_id, p_id);
--- All Nodes
CREATE TABLE AudiencePlanner.reached_pod_all AS AudiencePlanner.reached_pod ENGINE = Distributed(test, '', reached_pod, rand());
Config.xml
<remote_servers>
<test>
<shard>
<weight>1</weight>
<internal_replication>false</internal_replication>
<replica>
<host>ip1</host>
<port>9000</port>
<default_database>AudiencePlanner</default_database>
<user>test</user>
<password>testpass</password>
</replica>
<replica>
<host>ip2</host>
<port>9000</port>
<default_database>AudiencePlanner2</default_database>
<user>test</user>
<password>testpass</password>
</replica>
</shard>
<shard>
<weight>1</weight>
<internal_replication>false</internal_replication>
<replica>
<host>ip2</host>
<port>9000</port>
<default_database>AudiencePlanner</default_database>
<user>test</user>
<password>testpass</password>
</replica>
<replica>
<host>ip3</host>
<port>9000</port>
<default_database>AudiencePlanner2</default_database>
<user>test</user>
<password>testpass</password>
</replica>
</shard>
<shard>
<weight>1</weight>
<internal_replication>false</internal_replication>
<replica>
<host>ip3</host>
<port>9000</port>
<default_database>AudiencePlanner</default_database>
<user>test</user>
<password>testpass</password>
</replica>
<replica>
<host>ip1</host>
<port>9000</port>
<default_database>AudiencePlanner2</default_database>
<user>test</user>
<password>testpass</password>
</replica>
</shard>
</test>
</remote_servers>
Error log
2018.03.10 07:50:59.990953 [ 31 ] <Trace> AudiencePlanner.reached_pod (StorageReplicatedMergeTree): Executing log entry to merge parts 20180203_111_111_0, 20180203_112_112_0, 20180203_113_113_0 to 20180203_111_113_1
2018.03.10 07:50:59.991204 [ 31 ] <Debug> AudiencePlanner.reached_pod (Merger): Merging 3 parts: from 20180203_111_111_0 to 20180203_113_113_0 into tmp_merge_20180203_111_113_1
2018.03.10 07:50:59.996659 [ 31 ] <Debug> AudiencePlanner.reached_pod (Merger): Selected MergeAlgorithm: Horizontal
2018.03.10 07:50:59.997347 [ 31 ] <Trace> MergeTreeBlockInputStream: Reading 1 ranges from part 20180203_111_111_0, approx. 24576 rows starting from 0
2018.03.10 07:50:59.997417 [ 31 ] <Trace> MergeTreeBlockInputStream: Reading 1 ranges from part 20180203_112_112_0, approx. 8192 rows starting from 0
2018.03.10 07:50:59.997476 [ 31 ] <Trace> MergeTreeBlockInputStream: Reading 1 ranges from part 20180203_113_113_0, approx. 8192 rows starting from 0
2018.03.10 07:51:00.016479 [ 31 ] <Debug> MemoryTracker: Peak memory usage: 1.16 GiB.
2018.03.10 07:51:00.044547 [ 31 ] <Error> DB::StorageReplicatedMergeTree::queueTask()::<lambda(DB::StorageReplicatedMergeTree::LogEntryPtr&)>: Code: 76, e.displayText() =
DB::Exception: Cannot open file /data/clickhouse//data/AudiencePlanner/reached_pod/tmp_merge_20180203_111_113_1/%1F%EF%BF%BD%08%00%00%00%00%00%00%00%EF%BF%BDVrs%EF%BF%BDws%EF%BF%BDu%EF%BF%BDq%EF%BF%BD%0F%09%EF%BF%BD76%EF%BF%BD51P%EF%BF%BDQ%0A%0A%EF%BF%BDw%EF%BF%BDu%EF%BF%BD%EF%BF%BD%EF%BF%BD%EF%BF%BDw%042%0D%EF%BF%BD%0D%0C%0D%2D%EF%BF%BD%EF%BF%BD%08%EF%BF%BD%C6%A6%EF%BF%BD%26%26J%EF%BF%BD%00%EF%BF%BD%1C%EF%BF%BD%1E%3F%00%00%00.bin, errno: 36, strerror: File name too long, e.what() = DB::Exception, Stack trace:
0. /usr/bin/clickhouse-server(StackTrace::StackTrace()+0x15) [0x7317e35]
1. /usr/bin/clickhouse-server(DB::Exception::Exception(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, int)+0x1e) [0x19caa8e]
2. /usr/bin/clickhouse-server(DB::throwFromErrno(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, int, int)+0x1a4) [0x72ffea4]
3. /usr/bin/clickhouse-server(DB::WriteBufferFromFile::WriteBufferFromFile(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, int, unsigned int, char*, unsigned long)+0x1c5) [0x733d4a5]
4. /usr/bin/clickhouse-server(DB::createWriteBufferFromFileBase(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, unsigned long, unsigned long, int, unsigned int, char*, unsigned long)+0xac) [0x7345c9c]
5. /usr/bin/clickhouse-server(DB::IMergedBlockOutputStream::ColumnStream::ColumnStream(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, DB::CompressionSettings, unsigned long, unsigned long)+0x
I tried detaching the partition, but I am still not able to insert any other data; after detaching, the error looks like this:
2018.03.10 09:00:27.299291 [ 9 ] <Error> reached_pod_all.Distributed.DirectoryMonitor: Code: 76, e.displayText() =
DB::Exception: Received from ip1:9000. DB::Exception: Cannot open file /data/clickhouse//data/AudiencePlanner/reached_pod/tmp_insert_20180207_21_21_0/%1F%EF%BF%BD%08%00%00%00%00%00%00%00%EF%BF%BDVrs%EF%BF%BDws%EF%BF%BDu%EF%BF%BDq%EF%BF%BD%0F%09%EF%BF%BD76%EF%BF%BD51P%EF%BF%BDQ%0A%0A%EF%BF%BDw%EF%BF%BDu%EF%BF%BD%EF%BF%BD%EF%BF%BD%EF%BF%BDw%042%0D%EF%BF%BD%0D%0C%0D%2D%EF%BF%BD%EF%BF%BD%08%EF%BF%BD%C6%A6%EF%BF%BD%26%26J%EF%BF%BD%00%EF%BF%BD%1C%EF%BF%BD%1E%3F%00%00%00.bin, errno: 36, strerror: File name too long. Stack trace:
0. /usr/bin/clickhouse-server(StackTrace::StackTrace()+0x15) [0x7317e35]
1. /usr/bin/clickhouse-server(DB::Exception::Exception(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, int)+0x1e) [0x19caa8e]
2. /usr/bin/clickhouse-server(DB::throwFromErrno(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, int, int)+0x1a4) [0x72ffea4]
3. /usr/bin/clickhouse-server(DB::WriteBufferFromFile::WriteBufferFromFile(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, int, unsigned int, char*, unsigned long)+0x1c5) [0x733d4a5]
4. /usr/bin/clickhouse-server(DB::createWriteBufferFromFileBase(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, unsigned long, unsigned long, int, unsigned int, char*, unsigned long)+0xac) [0x7345c9c]
5. /usr/bin/clickhouse-server(DB::IMergedBlockOutputStream::ColumnStream::ColumnStream(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, unsigned long, DB::CompressionSettings, unsigned long, unsigned long)+0xcc) [0x68cd7ac]
6. /usr/bin/clickhouse-server() [0x68ce674]
Please help me in identifying and resolving the issue.
I found a solution to my problem: it was caused by the number of columns we were adding dynamically — around 1000 columns.
Because of the large number of columns, the generated filename became a very long string, which caused the "File name too long" error when writing the file.
I reduced the number of columns being inserted, and I was able to write the data.

RethinkDB startup error

After I fixed some weird HD problems at my server I noticed that all the rethinkdb data had been lost, that wasn't a problem because the data wasn't important, but now when I start the rethinkdb service using sudo /etc/init.d/rethinkdb restart it runs into this error:
2017-06-19T00:58:21.734419185 0.174516s notice: Running rethinkdb 2.3.5~0xenial (GCC 5.3.1)...
2017-06-19T00:58:21.783052748 0.223140s notice: Running on Linux 4.4.0-79-generic i686
2017-06-19T00:58:21.783317738 0.223398s notice: Loading data from directory /var/lib/rethinkdb/instance1/data
2017-06-19T00:58:22.008209401 0.448290s info: Automatically using cache size of 100 MB
2017-06-19T00:58:22.009815969 0.449896s warn: Cache size does not leave much memory for server and query overhead (available memory: 877 MB).
2017-06-19T00:58:22.010366140 0.450446s warn: Cache size is very low and may impact performance.
2017-06-19T00:58:22.042696622 0.482776s notice: Listening for intracluster connections on port 29015
2017-06-19T00:58:22.128827940 0.568909s error: Error in src/btree/reql_specific.cc at line 256:
2017-06-19T00:58:22.129000921 0.569083s error: Unrecognized reql_btree_superblock_t::magic found.
2017-06-19T00:58:22.129123530 0.569204s error: Backtrace:
2017-06-19T00:58:22.703288073 1.143377s error: Mon Jun 19 00:58:22 2017\n\n1 [0x8747198]: backtrace_t::backtrace_t() at ??:?\n2 [0x8747678]: format_backtrace[abi:cxx11](bool) at ??:?\n3 [0x8a208f4]: report_fatal_error(char const*, int, char const*, ...) at ??:?\n4 [0x86deb12]: get_superblock_metainfo(real_superblock_t*, std::vector<std::pair<std::vector<char, std::allocator<char> >, std::vector<char, std::allocator<char> > >, std::allocator<std::pair<std::vector<char, std::allocator<char> >, std::vector<char, std::allocator<char> > > > >*, cluster_version_t*) at ??:?\n5 [0x85df957]: store_metainfo_manager_t::store_metainfo_manager_t(real_superblock_t*) at ??:?\n6 [0x846edc9]: store_t::store_t(hash_region_t<key_range_t> const&, serializer_t*, cache_balancer_t*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, bool, perfmon_collection_t*, rdb_context_t*, io_backender_t*, base_path_t const&, uuid_u, update_sindexes_t) at ??:?\n7 [0x894745b]: real_multistore_ptr_t::real_multistore_ptr_t(uuid_u const&, serializer_filepath_t const&, scoped_ptr_t<real_branch_history_manager_t>&&, base_path_t const&, io_backender_t*, cache_balancer_t*, rdb_context_t*, perfmon_collection_t*, scoped_ptr_t<thread_allocation_t>&&, std::vector<scoped_ptr_t<thread_allocation_t>, std::allocator<scoped_ptr_t<thread_allocation_t> > >&&, std::map<uuid_u, std::pair<real_multistore_ptr_t*, auto_drainer_t::lock_t>, std::less<uuid_u>, std::allocator<std::pair<uuid_u const, std::pair<real_multistore_ptr_t*, auto_drainer_t::lock_t> > > >*)::{lambda(int)#1}::operator()(int) const at ??:?\n8 [0x89487d4]: callable_action_instance_t<pmap_runner_one_arg_t<real_multistore_ptr_t::real_multistore_ptr_t(uuid_u const&, serializer_filepath_t const&, scoped_ptr_t<real_branch_history_manager_t>&&, base_path_t const&, io_backender_t*, cache_balancer_t*, rdb_context_t*, perfmon_collection_t*, scoped_ptr_t<thread_allocation_t>&&, std::vector<scoped_ptr_t<thread_allocation_t>, 
std::allocator<scoped_ptr_t<thread_allocation_t> > >&&, std::map<uuid_u, std::pair<real_multistore_ptr_t*, auto_drainer_t::lock_t>, std::less<uuid_u>, std::allocator<std::pair<uuid_u const, std::pair<real_multistore_ptr_t*, auto_drainer_t::lock_t> > > >*)::{lambda(int)#1}, long long> >::run_action() at ??:?\n9 [0x864f4bd]: coro_t::run() at ??:?
2017-06-19T00:58:22.703737641 1.143820s error: Exiting.
Also, it works if I start it using the normal command (rethinkdb --bind all); the error only happens when it is started via the init.d service process.

Resources