# -*- coding: utf-8 -*-
"""Word-frequency counting in map-reduce style.

Splits the input text into fixed-size chunks, maps each chunk to a
per-chunk Counter (filtering stop words and short words), then reduces
the partial counts into a single total Counter and prints the top 10.
"""
from collections import Counter
from functools import reduce

from cppy.cp_util import *


def process_chunk(chunk):
    """Map step: count the words in one chunk.

    Args:
        chunk: an iterable of word strings.

    Returns:
        Counter mapping word -> number of occurrences within this chunk,
        excluding stop words and words shorter than 3 characters.
    """
    # Build the stop-word set once per chunk; set() gives O(1) membership
    # tests regardless of what container get_stopwords() returns.
    stop_words = set(get_stopwords())
    words = [w for w in chunk if w not in stop_words and len(w) >= 3]
    return Counter(words)


def merge_counts(count1, count2):
    """Reduce step: combine two Counters by summing their counts."""
    return count1 + count2


@timing_decorator
def main():
    """Read the test file, count word frequencies map-reduce style, print top 10."""
    # Read and tokenize the file into a flat list of words.
    content = re_split(read_file(testfilepath))

    # Partition the word list into fixed-size chunks, one per map task.
    chunk_size = 1000  # tune to the input size if needed
    chunks = [content[i:i + chunk_size]
              for i in range(0, len(content), chunk_size)]

    # Map: per-chunk word counts.
    counts_list = list(map(process_chunk, chunks))

    # Reduce: merge all partial counts. The Counter() initializer makes
    # this safe when the file is empty (reduce over an empty sequence
    # with no initializer would raise TypeError).
    total_counts = reduce(merge_counts, counts_list, Counter())

    # Output the n most frequent words.
    print_word_freqs(total_counts.most_common(10))


if __name__ == '__main__':
    main()