From dc6df2471818a03af6acf5eca68f4cd37184b9b2 Mon Sep 17 00:00:00 2001
From: 2509165025 <2509165025@student.edu.cn>
Date: Thu, 23 Apr 2026 15:54:30 +0800
Subject: [PATCH] Complete homework
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 4.23 25/25 .py  | 31 ++++++++++++++++
 4.23 25/25 2.py | 99 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 130 insertions(+)
 create mode 100644 4.23 25/25 .py
 create mode 100644 4.23 25/25 2.py

diff --git a/4.23 25/25 .py b/4.23 25/25 .py
new file mode 100644
index 0000000..c7fffe1
--- /dev/null
+++ b/4.23 25/25 .py
@@ -0,0 +1,31 @@
+import jieba
+
+print("=" * 50)
+print("Complete text preprocessing pipeline")
+print("=" * 50)
+
+docs = [
+    "今天天气真不错!适合出去玩。",
+    "Python是一门很棒的编程语言。",
+    "人工智能和机器学习是未来的发展方向。",
+    "今天在咖啡馆喝了一杯很好喝的拿铁。"
+]
+
+
+stopwords = set(['的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这', '!', '。', ','])
+
+def preprocess_text(text):
+    """Full preprocessing pipeline: tokenize, then filter stopwords and whitespace."""
+    words = jieba.cut(text)
+
+    words = [w for w in words if w not in stopwords and len(w) > 0]  # drop stopwords and empty tokens
+
+    words = [w for w in words if w.strip()]  # drop whitespace-only tokens
+
+    return words
+
+print("Preprocessing results:")
+for i, doc in enumerate(docs):
+    words = preprocess_text(doc)
+    print(f"\nDoc{i+1}: {doc}")
+    print(f"  → {' / '.join(words)}")
\ No newline at end of file
diff --git a/4.23 25/25 2.py b/4.23 25/25 2.py
new file mode 100644
index 0000000..4eb50d6
--- /dev/null
+++ b/4.23 25/25 2.py
@@ -0,0 +1,99 @@
+import jieba
+import math
+
+print("=" * 50)
+print("Hands-on: jieba tokenization + full TF-IDF pipeline")
+print("=" * 50)
+
+def simple_tfidf_tokenized(docs, stopwords=None):
+    """
+    TF-IDF built on top of jieba tokenization.
+    Args:
+        docs: list of raw documents
+        stopwords: set of stopwords
+    Returns:
+        vocab, tfidf_matrix, tokenized
+    """
+    tokenized = []
+    for doc in docs:
+        words = jieba.cut(doc)
+        if stopwords:
+            words = [w for w in words if w not in stopwords and len(w) > 1]
+        else:
+            words = [w for w in words if len(w) > 1]
+        tokenized.append(words)
+
+    vocab_set = set()
+    for doc in tokenized:
+        vocab_set.update(doc)
+    vocab = sorted(vocab_set)  # deterministic word order
+
+    n_docs = len(tokenized)
+    tf_matrix = []
+    df_dict = {word: 0 for word in vocab}
+
+    for doc in tokenized:
+        vec = [0] * len(vocab)
+        for word in doc:
+            if word in vocab:
+                idx = vocab.index(word)  # linear scan; fine for a small vocabulary
+                vec[idx] += 1
+        tf_matrix.append(vec)
+
+    for vec in tf_matrix:
+        for j, count in enumerate(vec):
+            if count > 0:
+                word = vocab[j]
+                df_dict[word] += 1  # document frequency: how many docs contain the word
+
+    idf = []
+    for word in vocab:
+        df = df_dict[word]
+        idf_j = math.log(n_docs / (df + 1)) + 1  # smoothed IDF
+        idf.append(idf_j)
+
+    tfidf = []
+    for vec in tf_matrix:
+        tfidf_vec = [vec[i] * idf[i] for i in range(len(vec))]
+        tfidf.append(tfidf_vec)
+
+    return vocab, tfidf, tokenized
+
+docs = [
+    "Python是一门很棒的编程语言",
+    "人工智能是未来的发展方向",
+    "深度学习是机器学习的一个分支",
+    "Python和Java都是很流行的编程语言"
+]
+
+stopwords = set(["的", "是", "一个", "很", "和", "在", "了"])
+
+vocab, tfidf_matrix, tokenized = simple_tfidf_tokenized(docs, stopwords)
+
+print("Documents:")
+for i, doc in enumerate(docs):
+    print(f"  Doc{i+1}: {doc}")
+print()
+
+print("Tokenization results:")
+for i, words in enumerate(tokenized):
+    print(f"  Doc{i+1}: {' / '.join(words)}")
+print()
+
+print(f"Vocabulary ({len(vocab)} words):")
+print(f"  {vocab}")
+print()
+
+print("TF-IDF matrix:")
+for i, vec in enumerate(tfidf_matrix):
+    nonzero = [(vocab[j], round(vec[j], 4)) for j in range(len(vec)) if vec[j] > 0]
+    print(f"  Doc{i+1}: {nonzero}")
+
+print()
+
+print("Most important word per document (highest TF-IDF):")
+for i, vec in enumerate(tfidf_matrix):
+    max_idx = max(range(len(vec)), key=lambda j: vec[j])
+    max_score = vec[max_idx]
+    if max_score > 0:
+        print(f"  Doc{i+1}: '{vocab[max_idx]}' (TF-IDF={max_score:.4f})")
\ No newline at end of file
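
Note on the IDF smoothing in 4.23 25/25 2.py: idf = log(n_docs / (df + 1)) + 1 never reaches zero, so a word that occurs in every document is down-weighted but not discarded. A minimal arithmetic check, using hypothetical counts rather than any output of the patch:

import math

# Hypothetical counts: a term appearing in 2 of 4 documents.
n_docs, df = 4, 2
print(round(math.log(n_docs / (df + 1)) + 1, 4))  # 1.2877

# Even a term present in all 4 documents keeps a positive weight:
print(round(math.log(4 / (4 + 1)) + 1, 4))        # 0.7769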
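
The inner vocab.index(word) lookup in the patch is linear in vocabulary size, which is fine at this scale but slow for large corpora. A self-contained sketch of the usual fix, a precomputed word-to-index map (the pre-tokenized mini-corpus below is made up for illustration):

# Hypothetical pre-tokenized mini-corpus, for illustration only.
tokenized = [["python", "编程", "语言"], ["python", "机器", "学习"]]

vocab = sorted({w for doc in tokenized for w in doc})
word2idx = {w: j for j, w in enumerate(vocab)}  # built once, O(1) lookups

tf_matrix = []
for doc in tokenized:
    vec = [0] * len(vocab)
    for word in doc:
        vec[word2idx[word]] += 1  # replaces the O(len(vocab)) vocab.index(word)
    tf_matrix.append(vec)

print(vocab)
print(tf_matrix)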
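
For larger projects, scikit-learn's TfidfVectorizer accepts a custom tokenizer such as jieba.lcut. It will not reproduce the patch's numbers exactly: sklearn's smoothed IDF is ln((1 + n) / (1 + df)) + 1, and rows are L2-normalized unless norm=None. A sketch, assuming scikit-learn is installed:

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
    "Python是一门很棒的编程语言",
    "人工智能是未来的发展方向",
    "深度学习是机器学习的一个分支",
    "Python和Java都是很流行的编程语言",
]

# token_pattern=None silences the warning about the unused default pattern;
# norm=None keeps raw tf*idf values; lowercase=False preserves "Python"/"Java".
vec = TfidfVectorizer(tokenizer=jieba.lcut, token_pattern=None,
                      norm=None, lowercase=False)
X = vec.fit_transform(docs)
print(vec.get_feature_names_out())
print(X.toarray().round(4))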