
Author: 愛在花開的季節    Time: 2023-2-13 00:43
Title: [DuckDB] Source Code Analysis of Multi-Core Operator Parallelism
DuckDB is an OLAP database that has attracted a lot of attention in recent years. Often called the SQLite of the OLAP world, it is known for being compact, simple, and fast. While researching the operator-parallelism design of Doris's Pipeline execution engine, I noticed that DuckDB implements an efficient parallel pipeline execution engine based on the paper "Morsel-Driven Parallelism: A NUMA-Aware Query Evaluation Framework for the Many-Core Age", so I spent some time studying and summarizing it. This post combines Mark Raasveldt's talk with the original source code to walk through how DuckDB parallelizes its execution operators.
1. Background

Question 1: What determines the number of parallel tasks?

The core of the pipeline model is morsel-driven execution: the data is split into small pieces (morsels). A parallel task exists so that multiple threads can each process one of these pieces, which means the number of parallel tasks is determined by the Source.
DuckDB exposes a virtual function, MaxThreads(), on the GlobalSourceState to decide the task count:

Each operator's GlobalSourceState thus defines its own degree of parallelism; the sketch below illustrates the idea:
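A minimal sketch of what such an override can look like (illustrative only, not taken from DuckDB's sources; MyScanGlobalSourceState and its ROW_GROUP_SIZE constant are invented for this example):

class MyScanGlobalSourceState : public GlobalSourceState {
public:
	explicit MyScanGlobalSourceState(idx_t total_rows) : total_rows(total_rows) {
	}
	//! Upper bound on how many tasks can usefully scan this source, e.g. one per row group;
	//! the scheduler will not launch more pipeline tasks than this for the source
	idx_t MaxThreads() override {
		return MaxValue<idx_t>(total_rows / ROW_GROUP_SIZE, 1);
	}

private:
	static constexpr idx_t ROW_GROUP_SIZE = 122880; // assumed row-group size for this example
	idx_t total_rows;
};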

Question 2: How do parallel tasks synchronize across threads?
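The operator walkthroughs below show the recurring answer: each task writes only to its own LocalSinkState, and the shared GlobalSinkState is touched only inside short critical sections guarded by a mutex, typically in Combine. A minimal sketch of that pattern, using hypothetical MyOperator / MyGlobalSinkState / MyLocalSinkState names rather than real DuckDB classes:

class MyGlobalSinkState : public GlobalSinkState {
public:
	mutex lock; // guards merged_chunks
	vector<unique_ptr<DataChunk>> merged_chunks;
};

class MyLocalSinkState : public LocalSinkState {
public:
	vector<unique_ptr<DataChunk>> local_chunks; // owned by a single task, no locking needed
};

void MyOperator::Combine(ExecutionContext &context, GlobalSinkState &gstate_p, LocalSinkState &lstate_p) const {
	auto &gstate = (MyGlobalSinkState &)gstate_p;
	auto &lstate = (MyLocalSinkState &)lstate_p;
	// short, mutex-protected critical section: hand the thread-local results to the global state
	lock_guard<mutex> guard(gstate.lock);
	for (auto &chunk : lstate.local_chunks) {
		gstate.merged_chunks.push_back(move(chunk));
	}
}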

Question 3: How does DuckDB abstract these interfaces?
A Sink operator defines two kinds of state: GlobalState and LocalState.
class PhysicalOperator {
public:
	//! The global sink state of this operator, shared by all threads
	unique_ptr<GlobalSinkState> sink_state;
	// ...
};
//! The PipelineExecutor executes a Pipeline within a single thread
class PipelineExecutor {
private:
	//! The local sink state (if any)
	unique_ptr<LocalSinkState> local_sink_state;
	// ...
};
Later sections look in detail at how the LocalState and GlobalState of the different sinks cooperate. The core pieces, sketched in code right after this list, are:

Sink: processes data into the LocalState
Combine: merges the LocalState into the GlobalState
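For orientation, the sink-side interface on PhysicalOperator looks roughly like the following in the DuckDB version quoted in this post (abridged and lightly paraphrased; treat it as a sketch rather than the exact declaration):

class PhysicalOperator {
public:
	//! Called by every thread for each DataChunk it produces; writes only into that thread's LocalSinkState
	virtual SinkResultType Sink(ExecutionContext &context, GlobalSinkState &gstate,
	                            LocalSinkState &lstate, DataChunk &input) const;
	//! Called once per thread when its input is exhausted; merges the LocalSinkState
	//! into the shared GlobalSinkState (usually under a lock)
	virtual void Combine(ExecutionContext &context, GlobalSinkState &gstate, LocalSinkState &lstate) const;
	//! Called once after all threads have combined; may schedule additional tasks
	//! (such as the parallel merge/finalize events shown below)
	virtual SinkFinalizeType Finalize(Pipeline &pipeline, Event &event, ClientContext &context,
	                                  GlobalSinkState &gstate) const;
	//! Factories for the per-thread and shared sink state
	virtual unique_ptr<LocalSinkState> GetLocalSinkState(ExecutionContext &context) const;
	virtual unique_ptr<GlobalSinkState> GetGlobalSinkState(ClientContext &context) const;
};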
2. Parallelism in the Core Operators

This part dissects the source code of each operator; I have added comments at the key points to make the code easier to follow.
The Sort operator

SinkResultType PhysicalOrder::Sink(ExecutionContext &context, GlobalSinkState &gstate_p, LocalSinkState &lstate_p,
                                   DataChunk &input) const {
	auto &gstate = (OrderGlobalSinkState &)gstate_p;
	auto &lstate = (OrderLocalSinkState &)lstate_p;
	auto &global_sort_state = gstate.global_sort_state;
	auto &local_sort_state = lstate.local_sort_state;
	// ... (preparation of the key and payload chunks elided) ...
	// keys holds the sort-key columns and payload the rows to output; SinkChunk converts
	// them into the LocalState's row-wise sort blocks
	local_sort_state.SinkChunk(keys, payload);
	// once the locally accumulated data exceeds the per-thread memory threshold, radix-sort it;
	// the sorted result is kept in the LocalState's own SortedBlocks
	if (local_sort_state.SizeInBytes() >= gstate.memory_per_thread) {
		local_sort_state.Sort(global_sort_state, true);
	}
	return SinkResultType::NEED_MORE_INPUT;
}
void PhysicalOrder::Combine(ExecutionContext &context, GlobalSinkState &gstate_p, LocalSinkState &lstate_p) const {
	auto &gstate = (OrderGlobalSinkState &)gstate_p;
	auto &lstate = (OrderLocalSinkState &)lstate_p;
	// hand this thread's sorted blocks over to the global sort state
	gstate.global_sort_state.AddLocalState(lstate.local_sort_state);
}

// GlobalSortState::AddLocalState (abridged), called from Combine above:
void GlobalSortState::AddLocalState(LocalSortState &local_sort_state) {
	// sort whatever local data is still sitting unsorted in memory
	local_sort_state.Sort(*this, external || !local_sort_state.sorted_blocks.empty());
	// Append local state sorted data to this global state
	lock_guard<mutex> append_guard(lock);
	for (auto &sb : local_sort_state.sorted_blocks) {
		sorted_blocks.push_back(move(sb));
	}
}
// Schedule() of the merge event created in PhysicalOrder::Finalize: one merge task per worker thread
void Schedule() override {
	auto &context = pipeline->GetClientContext();
	auto &ts = TaskScheduler::GetScheduler(context);
	idx_t num_threads = ts.NumberOfThreads();
	vector<unique_ptr<Task>> merge_tasks;
	for (idx_t tnum = 0; tnum < num_threads; tnum++) {
		merge_tasks.push_back(make_unique<PhysicalOrderMergeTask>(shared_from_this(), context, gstate));
	}
	SetTasks(move(merge_tasks));
}

class PhysicalOrderMergeTask : public ExecutorTask {
public:
	TaskExecutionResult ExecuteTask(TaskExecutionMode mode) override {
		// Initialize merge sorted and iterate until done
		auto &global_sort_state = state.global_sort_state;
		MergeSorter merge_sorter(global_sort_state, BufferManager::GetBufferManager(context));

		// under the global lock, claim the next pair of sorted runs; merge that pair outside
		// the lock; repeat until every pair is merged, which eventually yields the global order
		while (true) {
			{
				lock_guard<mutex> pair_guard(state.lock);
				if (state.pair_idx == state.num_pairs) {
					break;
				}
				GetNextPartition();
			}
			MergePartition();
		}
		event->FinishTask();
		return TaskExecutionResult::TASK_FINISHED;
	}
};
The Aggregate operator (the analysis here covers the perfect hash aggregate, PhysicalPerfectHashAggregate)

SinkResultType PhysicalPerfectHashAggregate::Sink(ExecutionContext &context, GlobalSinkState &state,
                                                  LocalSinkState &lstate_p, DataChunk &input) const {
	auto &lstate = (PerfectHashAggregateLocalState &)lstate_p;
	// ... (splitting the input into group_chunk and aggregate_input_chunk elided) ...
	lstate.ht->AddChunk(group_chunk, aggregate_input_chunk);
	return SinkResultType::NEED_MORE_INPUT;
}

void PerfectAggregateHashTable::AddChunk(DataChunk &groups, DataChunk &payload) {
	auto address_data = FlatVector::GetData<uintptr_t>(addresses);
	memset(address_data, 0, groups.size() * sizeof(uintptr_t));
	D_ASSERT(groups.ColumnCount() == group_minima.size());
	// compute each row's entry index from its group-key columns by packing
	// (value - column minimum) into that column's bit range
	idx_t current_shift = total_required_bits;
	for (idx_t i = 0; i < groups.ColumnCount(); i++) {
		current_shift -= required_bits[i];
		ComputeGroupLocation(groups.data[i], group_minima[i], address_data, current_shift, groups.size());
	}
	// turn each entry index into a memory address (base pointer + index * tuple_size)
	// and initialize the aggregate states of groups seen for the first time
	idx_t needs_init = 0;
	for (idx_t i = 0; i < groups.size(); i++) {
		D_ASSERT(address_data[i] < total_groups);
		const auto group = address_data[i];
		address_data[i] = uintptr_t(data) + address_data[i] * tuple_size;
		// ... (tracking of not-yet-initialized groups via needs_init/sel elided) ...
	}
	RowOperations::InitializeStates(layout, addresses, sel, needs_init);
	// after finding the group location we update the aggregates
	idx_t payload_idx = 0;
	auto &aggregates = layout.GetAggregates();
	for (idx_t aggr_idx = 0; aggr_idx < aggregates.size(); aggr_idx++) {
		auto &aggregate = aggregates[aggr_idx];
		auto input_count = (idx_t)aggregate.child_count;
		// run this aggregate's Update against the addressed states
		RowOperations::UpdateStates(aggregate, addresses, payload, payload_idx, payload.size());
		payload_idx += input_count;
	}
}
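To make the group-location computation in AddChunk concrete, here is a small self-contained illustration (not DuckDB code; the column ranges and names are invented for this example). Each group column is assigned required_bits[i] bits, and (value - column minimum) is packed into that bit range, so every combination of group values maps to a unique slot in a preallocated table and no collision handling is ever needed:

#include <cstdint>
#include <cstdio>

int main() {
	// two group columns: a in [10, 13] needs 2 bits, b in [0, 7] needs 3 bits
	const uint64_t required_bits[] = {2, 3};
	const int64_t group_minima[] = {10, 0};
	const uint64_t total_required_bits = 5; // the table has 2^5 = 32 slots

	const int64_t row[] = {12, 5}; // one input row: a = 12, b = 5
	uint64_t slot = 0;
	uint64_t current_shift = total_required_bits;
	for (int col = 0; col < 2; col++) {
		current_shift -= required_bits[col];
		// pack (value - minimum) into this column's bit range
		slot |= uint64_t(row[col] - group_minima[col]) << current_shift;
	}
	// the real code multiplies the slot by tuple_size and adds the table's base
	// pointer to obtain the address of this group's aggregate states
	std::printf("slot = %llu\n", (unsigned long long)slot); // prints 21: (12-10)<<3 | 5
	return 0;
}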
void PhysicalPerfectHashAggregate::Combine(ExecutionContext &context, GlobalSinkState &gstate_p,
                                           LocalSinkState &lstate_p) const {
	auto &lstate = (PerfectHashAggregateLocalState &)lstate_p;
	auto &gstate = (PerfectHashAggregateGlobalState &)gstate_p;
	lock_guard<mutex> l(gstate.lock);
	gstate.ht->Combine(*lstate.ht);
}
// body of PerfectAggregateHashTable::Combine(PerfectAggregateHashTable &other),
// which merges the local table ("other"/source) into this global table (target)
	// addresses into the source (local state) hash table
	Vector source_addresses(LogicalType::POINTER);
	// addresses into the target (global state) hash table
	Vector target_addresses(LogicalType::POINTER);
	auto source_addresses_ptr = FlatVector::GetData<data_ptr_t>(source_addresses);
	auto target_addresses_ptr = FlatVector::GetData<data_ptr_t>(target_addresses);
	// walk over every slot of both hash tables and merge the groups that are set in both
	data_ptr_t source_ptr = other.data;
	data_ptr_t target_ptr = data;
	idx_t combine_count = 0;
	idx_t reinit_count = 0;
	const auto &reinit_sel = *FlatVector::IncrementalSelectionVector();
	for (idx_t i = 0; i < total_groups; i++) {
		auto has_entry_source = other.group_is_set[i];
		// we only have any work to do if the source has an entry for this group
		if (has_entry_source) {
			auto has_entry_target = group_is_set[i];
			if (has_entry_target) {
				// both source and target have an entry: need to combine
				source_addresses_ptr[combine_count] = source_ptr;
				target_addresses_ptr[combine_count] = target_ptr;
				combine_count++;
				if (combine_count == STANDARD_VECTOR_SIZE) {
					RowOperations::CombineStates(layout, source_addresses, target_addresses, combine_count);
					combine_count = 0;
				}
			} else {
				group_is_set[i] = true;
				// only source has an entry for this group: we can just memcpy it over
				memcpy(target_ptr, source_ptr, tuple_size);
				// we clear this entry in the other HT as we "consume" the entry here
				other.group_is_set[i] = false;
			}
		}
		source_ptr += tuple_size;
		target_ptr += tuple_size;
	}
	// flush the final partial batch of states to be combined
	RowOperations::CombineStates(layout, source_addresses, target_addresses, combine_count);
The Join operator

SinkResultType PhysicalHashJoin::Sink(ExecutionContext &context, GlobalSinkState &gstate_p, LocalSinkState &lstate_p,
                                      DataChunk &input) const {
	auto &gstate = (HashJoinGlobalSinkState &)gstate_p;
	auto &lstate = (HashJoinLocalSinkState &)lstate_p;
	lstate.join_keys.Reset();
	lstate.build_executor.Execute(input, lstate.join_keys);
	// build the HT
	auto &ht = *lstate.hash_table;
	if (!right_projection_map.empty()) {
		// there is a projection map: fill the build chunk with the projected columns
		lstate.build_chunk.Reset();
		lstate.build_chunk.SetCardinality(input);
		for (idx_t i = 0; i < right_projection_map.size(); i++) {
			lstate.build_chunk.data[i].Reference(input.data[right_projection_map[i]]);
		}
	}
	// ... (the no-projection-map and payload handling is elided) ...
	// build this thread's local hash table from the join keys and the build chunk
	ht.Build(lstate.join_keys, lstate.build_chunk);
	return SinkResultType::NEED_MORE_INPUT;
}
void PhysicalHashJoin::Combine(ExecutionContext &context, GlobalSinkState &gstate_p, LocalSinkState &lstate_p) const {
	auto &gstate = (HashJoinGlobalSinkState &)gstate_p;
	auto &lstate = (HashJoinLocalSinkState &)lstate_p;
	if (lstate.hash_table) {
		lock_guard<mutex> local_ht_lock(gstate.lock);
		gstate.local_hash_tables.push_back(move(lstate.hash_table));
	}
}
// Schedule() of the hash-join finalize event: the build side is finalized either single-threaded
// or by splitting the collected blocks across the worker threads
void Schedule() override {
	auto &context = pipeline->GetClientContext();
	vector<unique_ptr<Task>> finalize_tasks;
	auto &ht = *sink.hash_table;
	const auto &block_collection = ht.GetBlockCollection();
	const auto &blocks = block_collection.blocks;
	const auto num_blocks = blocks.size();
	if (block_collection.count < PARALLEL_CONSTRUCT_THRESHOLD && !context.config.verify_parallelism) {
		// Single-threaded finalize
		finalize_tasks.push_back(
		    make_unique<HashJoinFinalizeTask>(shared_from_this(), context, sink, 0, num_blocks, false));
	} else {
		// Parallel finalize
		idx_t num_threads = TaskScheduler::GetScheduler(context).NumberOfThreads();
		auto blocks_per_thread = MaxValue<idx_t>((num_blocks + num_threads - 1) / num_threads, 1);
		idx_t block_idx = 0;
		for (idx_t thread_idx = 0; thread_idx < num_threads; thread_idx++) {
			auto block_idx_start = block_idx;
			auto block_idx_end = MinValue<idx_t>(block_idx_start + blocks_per_thread, num_blocks);
			finalize_tasks.push_back(make_unique<HashJoinFinalizeTask>(shared_from_this(), context, sink,
			                                                           block_idx_start, block_idx_end, true));
			block_idx = block_idx_end;
			if (block_idx == num_blocks) {
				break;
			}
		}
	}
	SetTasks(move(finalize_tasks));
}

template <bool PARALLEL>
static inline void InsertHashesLoop(atomic<data_ptr_t> pointers[], const hash_t indices[], const idx_t count,
                                    const data_ptr_t key_locations[], const idx_t pointer_offset) {
	for (idx_t i = 0; i < count; i++) {
		auto index = indices[i];
		if (PARALLEL) {
			// lock-free insert at the head of the bucket's chain: link the current head
			// behind the new tuple, then CAS the head pointer to the new tuple
			data_ptr_t head;
			do {
				head = pointers[index];
				Store<data_ptr_t>(head, key_locations[i] + pointer_offset);
			} while (!std::atomic_compare_exchange_weak(&pointers[index], &head, key_locations[i]));
		} else {
			// set prev in current key to the value (NOTE: this will be nullptr if there is none)
			Store<data_ptr_t>(pointers[index], key_locations[i] + pointer_offset);
			// set pointer to current tuple
			pointers[index] = key_locations[i];
		}
	}
}
void PhysicalHashJoin::GetData(ExecutionContext &context, DataChunk &chunk, GlobalSourceState &gstate_p,
                               LocalSourceState &lstate_p) const {
	auto &sink = (HashJoinGlobalSinkState &)*sink_state;
	auto &gstate = (HashJoinGlobalSourceState &)gstate_p;
	auto &lstate = (HashJoinLocalSourceState &)lstate_p;
	sink.scanned_data = true;
	if (!sink.external) {
		if (IsRightOuterJoin(join_type)) {
			{
				lock_guard<mutex> guard(gstate.lock);
				// under the lock, claim this thread's slice of the hash table to scan
				lstate.ScanFullOuter(sink, gstate);
			}
			// scan the claimed slice and gather the unmatched build-side rows into the output chunk
			sink.hash_table->GatherFullOuter(chunk, lstate.addresses, lstate.full_outer_found_entries);
		}
		return;
	}
	// ... (the external, spilled-to-disk case is elided) ...
}

void HashJoinLocalSourceState::ScanFullOuter(HashJoinGlobalSinkState &sink, HashJoinGlobalSourceState &gstate) {
	auto &fo_ss = gstate.full_outer_scan;
	idx_t scan_index_before = fo_ss.scan_index;
	full_outer_found_entries = sink.hash_table->ScanFullOuter(fo_ss, addresses);
	idx_t scanned = fo_ss.scan_index - scan_index_before;
	full_outer_in_progress = scanned;
}
Summary

3. The Spill-to-Disk Implementation

Contrary to my expectation, DuckDB does not implement asynchronous I/O, so any execution thread may stall on the operating system's I/O scheduling. I suspect this is largely because DuckDB's positioning makes support for high-concurrency scenarios less of a priority; the team also lists asynchronous I/O as one of their follow-up TODO items.

4. References

The DuckDB source code
Push-Based Execution in DuckDB
