Compare commits
78 Commits
| Author | SHA1 | Date |
|---|---|---|
p46318075 | 3d0220d49b | 4 months ago |
Yao | 36afa1d669 | 4 months ago |
Yao | 15736d7393 | 4 months ago |
Yao | f170c936d8 | 4 months ago |
zj3D | ceb9955051 | 4 months ago |
zj3D | 4606a87618 | 4 months ago |
zj3D | 2a27a2c748 | 7 months ago |
zj3D | 26b6f4c88b | 8 months ago |
zj3D | fa3e01dedc | 8 months ago |
zj3D | 850a3eb772 | 8 months ago |
zj3D | 8d5c578da8 | 8 months ago |
zj3D | 88606f2bce | 8 months ago |
zj3D | ebe28f7670 | 8 months ago |
zj3D | c8946209bf | 8 months ago |
zj3D | 2d46194636 | 8 months ago |
zj3D | 5099345721 | 8 months ago |
zj3D | cd8186dd68 | 8 months ago |
zj3D | 50952795a8 | 8 months ago |
zj3D | 2cac3f2788 | 8 months ago |
zj3D | 44b0c00567 | 8 months ago |
zj3D | 83c156a3d5 | 8 months ago |
zj3D | 31a4dfc8e5 | 9 months ago |
zj3D | e27ecadb25 | 9 months ago |
zj3D | 9d74a5c184 | 9 months ago |
zj3D | a3bc46dae3 | 9 months ago |
zj3D | f2ff5c8d4e | 9 months ago |
zj3D | 18f3901592 | 9 months ago |
zj3D | 44c1f9eb1e | 9 months ago |
zj3D | b86f626e94 | 9 months ago |
zj3D | fe94d8ed1b | 9 months ago |
zj3D | b15c7505f6 | 9 months ago |
zj3D | cab45b3281 | 9 months ago |
zj3D | 4aa6f8469d | 9 months ago |
p46318075 | f8f3f10d2e | 9 months ago |
zj3D | 7db531d2fc | 9 months ago |
zj3D | 41a14b6705 | 9 months ago |
zj3D | ac7fb13827 | 9 months ago |
zj3D | 1920e47a1c | 9 months ago |
p46318075 | c5932334fa | 9 months ago |
zj3D | 239c0188d0 | 9 months ago |
zj3D | bfcaab3439 | 9 months ago |
zj3D | f52645e7b2 | 9 months ago |
p46318075 | b6fc9ef4c3 | 9 months ago |
pbr4nzfkh | 28f60e8216 | 9 months ago |
pbr4nzfkh | fdf6166100 | 9 months ago |
pbr4nzfkh | ffdae7d329 | 9 months ago |
pbr4nzfkh | 0b9d4a63d6 | 9 months ago |
pbr4nzfkh | ada14b9a7b | 9 months ago |
pbr4nzfkh | c8cd7bbc0c | 9 months ago |
zj3D | c99a655997 | 9 months ago |
zj3D | 950cb41e08 | 9 months ago |
p46318075 | f131c63ff4 | 9 months ago |
zj3D | 2518a5cd85 | 9 months ago |
pbr4nzfkh | e993c23ed1 | 9 months ago |
pbr4nzfkh | fb95636bb1 | 9 months ago |
pbr4nzfkh | 285b016a30 | 9 months ago |
pbr4nzfkh | 1ebf2a45fe | 9 months ago |
pbr4nzfkh | 740f5aabff | 9 months ago |
pbr4nzfkh | 2288c18e8a | 9 months ago |
pbr4nzfkh | 028c7ddb07 | 9 months ago |
zj3D | 9bf690d62c | 9 months ago |
zj3D | 041fced368 | 9 months ago |
p46318075 | 254c11c3c9 | 9 months ago |
pbr4nzfkh | 29dbff26cc | 9 months ago |
pbr4nzfkh | 4134d794ab | 9 months ago |
pbr4nzfkh | d54c43b459 | 9 months ago |
pbr4nzfkh | 365c8bb76a | 9 months ago |
pbr4nzfkh | f8055c0044 | 9 months ago |
pbr4nzfkh | 726a8795c7 | 9 months ago |
zj3D | b4a280c55c | 9 months ago |
zj3D | bfbc1120ec | 9 months ago |
zj3D | 3c439ef8d7 | 9 months ago |
zj3D | 445088fde8 | 9 months ago |
zj3D | 0e55cabe5c | 9 months ago |
zj3D | 856fdcc1e1 | 9 months ago |
zj3D | 8545ada6c2 | 9 months ago |
p46318075 | a59ae791b3 | 9 months ago |
p46318075 | 905b75036b | 9 months ago |
@ -0,0 +1,4 @@
|
||||
log.txt
|
||||
/test
|
||||
/.venv
|
||||
__pycache__
|
from cppy.cp_util import *

# Word-frequency pipeline written step by step instead of one nested call:
# extract words from the test file, count them, sort, then print.
words = extract_file_words(testfilepath)
frequencies = get_frequencies(words)
sorted_frequencies = sort_dict(frequencies)
print_word_freqs(sorted_frequencies)
from cppy.cp_util import *


#
# Generator-based pipeline
#
def non_stop_words(testfilepath):
    """Yield each word of the file that is not a stop word, one at a time."""
    stopwords = get_stopwords()
    data_str = read_file(testfilepath)
    wordlist = re_split(data_str)
    for word in wordlist:
        if word not in stopwords:
            yield word  # emit one non-stop word


freqs = {}
for word in non_stop_words(testfilepath):
    freqs[word] = freqs.get(word, 0) + 1

data = sort_dict(freqs)
print_word_freqs(data)
Binary file not shown.
# -*- coding: utf-8 -*-
"""Minimal REST service exposing word-frequency operations on 'book' resources.

Routes:
    GET    /books                      -> list all books
    GET    /books/<id>                 -> one book's content
    PUT    /books/<id>                 -> create or update a book
    GET    /books/<id>/word_frequency  -> word frequencies of a book's file
    DELETE /books/<id>                 -> remove a book
"""
from flask import Flask, request, jsonify, abort
from functools import lru_cache
from cppy.cp_util import *

app = Flask(__name__)

# In-memory stand-in for a real database.
books_db = []


# Cached accessor for the "database".
@lru_cache(maxsize=None)
def get_books_db():
    # NOTE: the cache returns the same mutable list object, so in-place
    # mutations are visible to every caller until the cache is cleared.
    return books_db


# Query all resources.
@app.route('/books', methods=['GET'])
def get_books():
    """Return every book."""
    return jsonify(get_books_db())


# Query a single resource.
@app.route('/books/<int:book_id>', methods=['GET'])
def get_book(book_id):
    """Return one book's content, or 404 if it does not exist."""
    book = next((book for book in get_books_db() if book['id'] == book_id), None)
    if book is None:
        abort(404)
    return jsonify(book['content'])


# Create or update a resource.
@app.route('/books/<int:book_id>', methods=['PUT'])
def update_book(book_id):
    """Create the book if missing, otherwise merge the new fields in."""
    global books_db
    book_to_update = request.json
    books_db = get_books_db()

    book = next((book for book in books_db if book['id'] == book_id), None)
    if book is None:
        # Resource does not exist: create it.
        books_db.append(book_to_update)
    else:
        # Resource exists: update it in place.
        book.update(book_to_update)
    # BUG FIX: the original called `cache.delete(get_books_db)`, but
    # `functools.cache` is a decorator and has no `delete`; clear the
    # lru_cache on the cached accessor instead.
    get_books_db.cache_clear()

    return jsonify(books_db), 200


# Operate on a resource.
@app.route('/books/<int:book_id>/word_frequency', methods=['GET'])
def word_frequency(book_id):
    """Compute, print and return the sorted word frequencies of a book's file."""
    book = next((book for book in get_books_db() if book['id'] == book_id), None)
    if book is None:
        abort(404)  # BUG FIX: previously crashed with TypeError on a missing id
    filepath = book['content']
    word_list = extract_file_words(filepath)
    word_freq = sort_dict(get_frequencies(word_list))
    print_word_freqs(word_freq)
    return jsonify(word_freq), 200


@app.route('/books/<int:book_id>', methods=['DELETE'])
def delete_book(book_id):
    """Delete a book; 404 if no book with that id exists."""
    global books_db
    # BUG FIX: the original compared the filtered length against the count of
    # matching ids *after* filtering (always 0), so a missing id was never
    # reported. Compare against the pre-filter length instead.
    before = len(books_db)
    books_db = [book for book in books_db if book['id'] != book_id]

    if len(books_db) == before:
        abort(404)  # book does not exist

    return jsonify({'message': f'book {book_id} deleted'}), 200


if __name__ == '__main__':
    app.run(debug=True)
# -*- coding: utf-8 -*-
import multiprocessing
from collections import Counter
from cppy.cp_util import *


#
# Multiprocessing version: process creation costs far more than the actual
# computation here, so this ends up being the slowest variant.
#
stop_words = get_stopwords()


def process_chunk(chunk):
    # Drop stop words and words shorter than 3 chars, then count the rest.
    kept = [w for w in chunk if w not in stop_words and len(w) >= 3]
    return Counter(kept)


def merge_counts(counts_list):
    """Merge a list of Counter objects into a single total Counter."""
    total = Counter()
    for c in counts_list:
        total += c
    return total


@timing_decorator
def main():
    """Split the file into chunks and count words with a process pool."""
    # Read the file and split it into chunks; each chunk goes to one worker.
    chunks = get_chunks(testfilepath, 1000)

    # Fan the chunks out over one process per CPU.
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    counts_list = pool.map(process_chunk, chunks)
    pool.close()
    pool.join()

    # Combine the per-chunk counts.
    total_counts = merge_counts(counts_list)

    # Show the 10 most frequent words.
    print_word_freqs(total_counts.most_common(10))


if __name__ == '__main__':
    main()
import re
from collections import Counter


def clean_text(text):
    """Strip punctuation and lowercase the text."""
    return re.sub(r'[^\w\s]', '', text).lower()


def count_frequencies(text):
    """Count how often each word occurs in the cleaned text."""
    return Counter(clean_text(text).split())


def interactive_mode():
    """Prompt for a file path and n, then print the n most common words."""
    file_path = input("请输入文件路径 >> ")
    try:
        n = int(input("请输入你想要输出的前n个最常见单词的数量 >> "))
        if n <= 0:
            raise ValueError("数量必须大于0。")
    except ValueError as e:
        print(f"输入错误:{e}")
        return

    try:
        # Read the whole file, count, then report the top n words.
        with open(file_path, 'r', encoding='utf-8') as file:
            text = file.read()

        frequencies = count_frequencies(text)
        for word, freq in frequencies.most_common(n):
            print(f"{word}: {freq}")
    except FileNotFoundError:
        print(f"文件未找到: {file_path}")
    except Exception as e:
        print(f"发生错误: {e}")


def main():
    """Entry point: greet the user and start the interactive session."""
    print("欢迎使用词频统计工具。")
    interactive_mode()


if __name__ == "__main__":
    main()
from flask import Flask, render_template, request, redirect, url_for
from collections import Counter
from cppy.cp_util import *
import os
import tempfile

app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def index():
    """Upload form; on POST, count and show word frequencies of the file."""
    if request.method == 'POST':
        # Get the uploaded file.
        file = request.files['file']

        # BUG FIX: '/temp' does not exist on most systems — save into the real
        # temp directory instead, and keep only the basename so a crafted
        # filename cannot escape it (path traversal).
        filename = os.path.join(tempfile.gettempdir(),
                                os.path.basename(file.filename))
        file.save(filename)
        try:
            # Compute the word frequencies.
            words = extract_file_words(filename)
            word_counts = Counter(words)
        finally:
            # Always delete the temporary file, even if extraction fails.
            os.remove(filename)

        return render_template('result.html', word_counts=word_counts.most_common())

    return render_template('index.html')


if __name__ == '__main__':
    app.run(debug=True)
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Upload Text File</title>
</head>
<body>
    <h1>Upload a Text File to Count Word Frequencies</h1>
    <!-- Posts the chosen file to the index route as multipart form data. -->
    <form action="/" method="post" enctype="multipart/form-data">
        <input type="file" name="file">
        <input type="submit" value="Submit">
    </form>
</body>
</html>
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Word Frequencies</title>
</head>
<body>
    <h1>Top Word Frequencies:</h1>
    <!-- word_counts is a list of (word, count) pairs from Counter.most_common(). -->
    <ul>
        {% for word, count in word_counts %}
        <li>{{ word }}: {{ count }}</li>
        {% endfor %}
    </ul>
    <a href="{{ url_for('index') }}">Back to Upload</a>
</body>
</html>
# Creating objects costs resources; when an equivalent object already exists
# we hand back a reference instead of building a new one.  In design-pattern
# terms this is the Flyweight pattern.
from cppy.cp_util import *


# Flyweight: holds the word-frequency data for one controller type.
class WordFrequencyController():
    def __init__(self, controllertype, filepath):
        """Compute and keep the sorted word frequencies for the given file."""
        word_list = extract_file_words(filepath)
        word_freq = get_frequencies(word_list)
        self.word_freq = sort_dict(word_freq)
        self.number = controllertype

    def print_word_freqs(self):
        """Print this controller's top `self.number` word frequencies."""
        print_word_freqs(self.word_freq, self.number)


# Flyweight factory: hands out shared controller instances keyed by type.
class WordFrequencyControllerFactory():
    def __init__(self):
        self.types = {}

    def get_WordFrequencyController(self, number, testfilepath):
        """Return the controller for `number`, creating it only on first use."""
        if number not in self.types:
            # First request for this type: build and remember a new object.
            self.types[number] = WordFrequencyController(number, testfilepath)
            print('new obj: ', '*' * 30, number)
        else:
            print('ref obj: ', '*' * 30, number)
        return self.types[number]  # reuse the already-existing object


if __name__ == "__main__":
    factory = WordFrequencyControllerFactory()
    for number in [1, 3, 5, 3, 5, 7]:
        WordFrequency = factory.get_WordFrequencyController(number, testfilepath)
        WordFrequency.print_word_freqs()
@ -0,0 +1,9 @@
|
||||
|
||||
注册
|
||||
- 解耦合:通过回调函数,可以将不同部分的代码逻辑分离,降低模块之间的耦合度。
|
||||
- 主动通信:注册回调模式实现了下层模块与上层模块之间的主动通信。当下层模块发生特定事件或满足特定条件时,可以主动调用上层模块注册的回调函数,而不需要上层模块不停地轮询下层模块的状态。
|
||||
|
||||
- 异步处理:回调函数常用于异步操作的响应处理,可以在主线程之外执行耗时操作,提升程序的效率和响应速度。
|
||||
- 简化设计:在某些情况下,使用回调函数可以避免复杂的控制流设计,使代码更加简洁明了。
|
||||
|
||||
- 适应变化:随着项目的发展,需求可能会发生变化。注册回调模式使得在不影响现有代码的基础上,容易添加新功能或修改现有逻辑。
|
from cppy.cp_util import *

# This example has no practical value of its own; it exists to help
# understand the other examples.  The main program only kicks off the first
# step — each function then hands its result to the next via a callback.


def readfile(file_path, func):
    """Read the file, then pass the text on with `frequencies` as next step."""
    data = read_file(file_path)
    func(data, frequencies)


def extractwords(str_data, func):
    """Tokenize the text, then pass the words on with `sort` as next step."""
    func(extract_str_words(str_data), sort)


def frequencies(word_list, func):
    """Count the words, then pass the table on with `printall` as next step."""
    wf = get_frequencies(word_list)
    func(wf, printall)


def sort(wf, func):
    """Sort the table, then pass it on; the chain ends after this step."""
    func(sort_dict(wf), None)


def printall(word_freqs, _):
    """Final step: print the frequencies; the callback slot is unused."""
    print_word_freqs(word_freqs)


if __name__ == "__main__":
    readfile(testfilepath, extractwords)
import requests
from cppy.cp_util import *


def main():
    """Drive the three word-frequency microservices in sequence."""
    # Read the test file's content.
    content = read_file()

    # Tokenize.
    resp = requests.post("http://localhost:7770/tokenize", json={"text": content})
    words = resp.json()["words"]

    # Count word frequencies.
    resp = requests.post("http://localhost:7771/count", json={"words": words})
    word_count = resp.json()["word_count"]

    # Sort and keep the top 10.
    resp = requests.post("http://localhost:7772/sort", json={"word_count": word_count})
    top_10_words = resp.json()["top_10_words"]

    print("Top 10 words:")
    print_word_freqs(top_10_words)


if __name__ == "__main__":
    main()
from fastapi import FastAPI
from collections import Counter
from cppy.cp_util import *
import uvicorn

app = FastAPI()


@app.post("/count")
async def count(words_list: dict):
    """Count word occurrences.  Expects a body like {"words": ["w1", "w2", ...]}."""
    return {"word_count": dict(Counter(words_list["words"]))}


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=7771)
from fastapi import FastAPI
import uvicorn

app = FastAPI()


@app.post("/sort")
async def sort(word_count_dict: dict):
    """Return the 10 highest-count (word, count) pairs, most frequent first."""
    ranked = sorted(word_count_dict["word_count"].items(),
                    key=lambda item: item[1],
                    reverse=True)
    return {"top_10_words": ranked[:10]}


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=7772)
from fastapi import FastAPI
from cppy.cp_util import *
import uvicorn

app = FastAPI()


@app.post("/tokenize")
async def tokenize(text: str):
    """Split the posted text into a list of words."""
    return {"words": extract_str_words(text)}


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=7770)
@ -0,0 +1,5 @@
|
||||
|
||||
|
||||
[Plugins]
|
||||
;; Options: plugins/f1.pyc, plugins/f2.pyc
|
||||
frequencies = plugins/f2.pyc
|
import configparser
import importlib.machinery
import importlib.util
from cppy.cp_util import *


class PluginManager:
    """Loads compiled (.pyc) plugins selected in config.ini."""

    def __init__(self):
        self.plugins = {}

    def load_plugins(self):
        """Read config.ini (next to this file) and load the configured plugin."""
        _dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(_dir)

        config = configparser.ConfigParser()
        config.read("config.ini")

        frequencies_plugin = config.get("Plugins", "frequencies")

        # BUG FIX: loader.load_module() is deprecated (removed in Python
        # 3.12); build the module from a spec and exec it instead.
        loader = importlib.machinery.SourcelessFileLoader('word_freqs',
                                                          frequencies_plugin)
        spec = importlib.util.spec_from_loader(loader.name, loader)
        module = importlib.util.module_from_spec(spec)
        loader.exec_module(module)
        self.plugins['word_freqs'] = module

    def get_plugin(self, name):
        """Return a loaded plugin module by name, or None."""
        return self.plugins.get(name)


# Create the PluginManager instance and load the configured plugins.
plugin_manager = PluginManager()
plugin_manager.load_plugins()

wordlist = extract_file_words(testfilepath)                               # extract the file's words
word_freqs = plugin_manager.get_plugin('word_freqs').top_word(wordlist)   # call the plugin
print_word_freqs(word_freqs)                                              # print the frequencies
import py_compile
import os
import shutil

# Byte-compile the two plugin sources into __pycache__.
py_compile.compile('f1.py')
py_compile.compile('f2.py')

# Source: the __pycache__ directory next to this file.
# Target: the plugins directory one level up.
source_dir = os.path.join(os.path.dirname(__file__), '__pycache__')
target_dir = os.path.join(os.path.dirname(__file__), '..', 'plugins')

# Make sure the target directory exists.
os.makedirs(target_dir, exist_ok=True)

# Move every .pyc file over under a short plugin name.
for filename in os.listdir(source_dir):
    if filename.endswith('.pyc'):
        # The first two characters ('f1' / 'f2') become the plugin name.
        new_filename = filename[:2]
        # Build the full source and target paths.
        source_file = os.path.join(source_dir, filename)
        target_file = os.path.join(target_dir, new_filename + '.pyc')
        # Copy the file, then delete the original.
        shutil.copyfile(source_file, target_file)
        os.remove(source_file)
        # BUG FIX: the message previously printed a literal "(unknown)"
        # instead of the source path placeholder.
        print(f"Copied {source_file} to {target_file} and removed original file.")
@ -1,6 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import operator
|
||||
|
||||
def top25(word_list):
|
||||
def top_word(word_list):
|
||||
word_freqs = {}
|
||||
for w in word_list:
|
||||
if w in word_freqs:
|
# -*- coding: utf-8 -*-
"""Plugin f2: word frequency via collections.Counter."""

import collections


def top_word(word_list):
    """Return the 10 most common words in word_list as (word, count) pairs."""
    return collections.Counter(word_list).most_common(10)
Binary file not shown.
Binary file not shown.
import cppy.cp_util as util


def extract_words(path_to_file: str) -> list:
    """Read the file at path_to_file and return its word list."""
    return util.extract_file_words(path_to_file)


def frequencies(word_list: list) -> dict:
    """Map each word in word_list to its occurrence count."""
    return util.get_frequencies(word_list)


def sort(word_freq: dict) -> list:
    """Return the frequency table ordered by count."""
    return util.sort_dict(word_freq)


if __name__ == "__main__":
    word_freqs = sort(frequencies(extract_words(util.testfilepath)))
    util.print_word_freqs(word_freqs)
@ -0,0 +1,36 @@
|
||||
from cppy.cp_util import *
|
||||
from dataclasses import dataclass
|
||||
from collections import Counter
|
||||
import re
|
||||
|
||||
@dataclass
class WordFrequency:
    """Word-frequency counter for a text, with stop-word filtering."""
    # The raw text to analyse.
    text: str
    # Optional stop-word set; loaded lazily from get_stopwords() when None.
    stop_words: set = None

    def __post_init__(self):
        # Fall back to the shared stop-word list when none was supplied.
        if self.stop_words is None:
            self.stop_words = get_stopwords()

    def tokenize(self):
        """Lowercase, split into words, drop stop words and words of len <= 2."""
        tokens = re.findall(r'\b\w+\b', self.text.lower())
        return [t for t in tokens if t not in self.stop_words and len(t) > 2]

    def get_top_n(self, n=10):
        """Return the n most common remaining words as (word, count) pairs."""
        return Counter(self.tokenize()).most_common(n)
|
||||
|
||||
|
||||
# Usage example
if __name__ == '__main__':
    # Build a WordFrequency instance from the shared test file.
    text = read_file()
    word_freq = WordFrequency(text)

    # Fetch and print the word frequencies.
    top_words = word_freq.get_top_n()
    print_word_freqs(top_words)
|
from cppy.cp_util import *


def extractWords(path_to_file):
    """Extract words from a file; the path must be a non-empty string."""
    assert type(path_to_file) is str, "Must be a string"
    assert path_to_file, "Must be a non-empty string"
    return extract_file_words(path_to_file)


def frequencies(word_list):
    """Count word occurrences; the input must be a non-empty list."""
    assert type(word_list) is list, "Must be a list"
    assert word_list != [], "Must be a non-empty list"
    return get_frequencies(word_list)


def sort(word_freqs):
    """Sort a frequency table; the input must be a non-empty dict."""
    assert type(word_freqs) is dict, "Must be a dictionary"
    assert word_freqs != {}, "Must be a non-empty dictionary"
    return sort_dict(word_freqs)


if __name__ == '__main__':
    try:
        word_freqs = sort(frequencies(extractWords(testfilepath)))
        print_word_freqs(word_freqs)
    except Exception as e:
        print(" Something wrong: {0}".format(e))
@ -0,0 +1,4 @@
|
||||
|
||||
|
||||
## 任务
|
||||
本项目的主要功能任务:做文本文件的分词,过滤常见词,求词频,并排序输出。
|
# The Decorator pattern adds behaviour to objects without modifying the
# original class; for adding features it is more flexible than subclassing.
# Here, customers can add extra condiments to their coffee.
class Beverage:
    """A base drink with a description and a price."""

    def __init__(self, description):
        self.description = description
        self.price = 0.0

    def cost(self):
        return self.price


class CondimentDecorator(Beverage):
    """Wraps a beverage, extending its description and raising its cost."""

    def __init__(self, beverage, description, price_increase):
        self.beverage = beverage
        self.description = f"{beverage.description}, {description}"
        self.price_increase = price_increase

    def cost(self):
        # Delegate to the wrapped beverage, then add this condiment's price.
        return self.beverage.cost() + self.price_increase


# Using the decorator pattern
coffee = Beverage("Espresso")
coffee_with_chocolate = CondimentDecorator(coffee, "Chocolate", 0.50)
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue