Complete homework 3-2-1

This commit is contained in:
2509165016
2026-04-23 16:00:52 +08:00
parent b936c0ca34
commit 8460c913c8
4 changed files with 486 additions and 0 deletions

2509165016-10.py Normal file (240 lines added)

@@ -0,0 +1,240 @@
# Install jieba through the current interpreter's pip
import subprocess
import sys
subprocess.run([sys.executable, '-m', 'pip', 'install', 'jieba', '-q'], check=True)
print("jieba installed")
import jieba
print("=" * 50)
print("jieba分词演示")
print("=" * 50)
text = "我喜欢深度学习和人工智能"
print(f"原文: {text}")
print()
# 精确模式(默认)
words精确 = list(jieba.cut(text, cut_all=False))
print(f"精确模式: {' / '.join(words精确)}")
# 全模式
words全 = list(jieba.cut(text, cut_all=True))
print(f"全模式: {' / '.join(words全)}")
# 搜索引擎模式
words搜索 = list(jieba.cut_for_search(text))
print(f"搜索模式: {' / '.join(words搜索)}")
# More segmentation examples (jieba is already imported above)
print("=" * 50)
print("More segmentation examples")
print("=" * 50)
examples = [
    "今天天气真不错",
    "人工智能是未来的发展方向",
    "Python是一门非常流行的编程语言",
    "小明毕业于清华大学计算机系",
    "我今天在京东买了一部iPhone手机"
]
for i, text in enumerate(examples):
    words = list(jieba.cut(text))
    print(f"{i+1}. {text}")
    print(f"   {' / '.join(words)}")
    print()
import jieba.posseg as pseg
print("=" * 50)
print("jieba part-of-speech tagging demo")
print("=" * 50)
text = "我喜欢深度学习和人工智能"
print(f"Original: {text}")
print()
words = pseg.cut(text)
print("Segmentation + POS tags:")
for word, flag in words:
    print(f"  {word}: {flag}")
print("=" * 50)
print("Stop-word removal demo")
print("=" * 50)
# A common list of high-frequency Chinese stop words
stopwords = set(['的', '了', '是', '在', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'])
text = "人工智能是未来的发展方向,也是当前科技领域的热门话题"
print(f"原文: {text}")
print()
# 不使用停用词
words_all = list(jieba.cut(text))
print(f"不使用停用词: {' / '.join(words_all)}")
# 使用停用词
words_filtered = [w for w in words_all if w not in stopwords]
print(f"使用停用词: {' / '.join(words_filtered)}")
print()
# More complete stop-word lists can be downloaded; see the loading sketch below
print("Tip: in real projects, stop-word lists are available from:")
print("  - the HIT (Harbin Institute of Technology) stop-word list")
print("  - the Baidu stop-word list")
print("  - the Sichuan University Machine Learning Lab stop-word list")
# Hands-on: a complete text-preprocessing pipeline
print("=" * 50)
print("Complete text-preprocessing pipeline")
print("=" * 50)
# Sample documents
docs = [
    "今天天气真不错!适合出去玩。",
    "Python是一门很棒的编程语言。",
    "人工智能和机器学习是未来的发展方向。",
    "今天在咖啡馆喝了一杯很好喝的拿铁。"
]
# Stop-word list (as above, plus common punctuation)
stopwords = set(['的', '了', '是', '在', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这', '!', '。', ','])
def preprocess_text(text):
    """Complete text-preprocessing pipeline."""
    # 1. Segment
    words = jieba.cut(text)
    # 2. Remove stop words
    words = [w for w in words if w not in stopwords and len(w) > 0]
    # 3. Drop whitespace-only tokens
    words = [w for w in words if w.strip()]
    return words
print("Preprocessing results:")
for i, doc in enumerate(docs):
    words = preprocess_text(doc)
    print(f"\nDoc{i+1}: {doc}")
    print(f"   {' / '.join(words)}")
# Hands-on: jieba segmentation + a full TF-IDF pipeline
import math
print("=" * 50)
print("jieba segmentation + full TF-IDF pipeline")
print("=" * 50)
def simple_tfidf_tokenized(docs, stopwords=None):
    """
    TF-IDF on top of jieba segmentation.
    Args:
        docs: list of raw document strings
        stopwords: optional set of stop words
    Returns:
        vocab, tfidf_matrix, tokenized
    """
    # 1. Segment (keep only tokens longer than one character)
    tokenized = []
    for doc in docs:
        words = jieba.cut(doc)
        if stopwords:
            words = [w for w in words if w not in stopwords and len(w) > 1]
        else:
            words = [w for w in words if len(w) > 1]
        tokenized.append(words)
    # 2. Build the vocabulary
    vocab_set = set()
    for doc in tokenized:
        vocab_set.update(doc)
    vocab = sorted(vocab_set)
    word2idx = {word: i for i, word in enumerate(vocab)}  # O(1) lookups
    # 3. Build the TF (term-count) matrix
    n_docs = len(tokenized)
    tf_matrix = []
    for doc in tokenized:
        vec = [0] * len(vocab)
        for word in doc:
            vec[word2idx[word]] += 1
        tf_matrix.append(vec)
    # 4. Document frequency: in how many documents does each word appear?
    df_dict = {word: 0 for word in vocab}
    for vec in tf_matrix:
        for j, count in enumerate(vec):
            if count > 0:
                df_dict[vocab[j]] += 1
    # 5. Smoothed IDF: log(N / (df + 1)) + 1 (the +1 terms avoid division
    #    by zero and zero weights)
    idf = [math.log(n_docs / (df_dict[word] + 1)) + 1 for word in vocab]
    # 6. TF-IDF = TF * IDF, element-wise
    tfidf = []
    for vec in tf_matrix:
        tfidf.append([vec[i] * idf[i] for i in range(len(vec))])
    return vocab, tfidf, tokenized
# Sample documents
docs = [
    "Python是一门很棒的编程语言",
    "人工智能是未来的发展方向",
    "深度学习是机器学习的一个分支",
    "Python和Java都是很流行的编程语言"
]
# Stop words (the single-character entries are redundant here, since the
# tokenizer above already drops one-character tokens)
stopwords = set(["的", "是", "一个", "和", "都", "很", "了"])
vocab, tfidf_matrix, tokenized = simple_tfidf_tokenized(docs, stopwords)
print("文档集合:")
for i, doc in enumerate(docs):
print(f" Doc{i+1}: {doc}")
print()
print(f"分词结果:")
for i, words in enumerate(tokenized):
print(f" Doc{i+1}: {' / '.join(words)}")
print()
print(f"词表(共{len(vocab)}个词):")
print(f" {vocab}")
print()
print("TF-IDF矩阵")
for i, vec in enumerate(tfidf_matrix):
# 只显示非零值
nonzero = [(vocab[j], round(vec[j], 4)) for j in range(len(vec)) if vec[j] > 0]
print(f" Doc{i+1}: {nonzero}")
print()
# 找每个文档最重要的词
print("每个文档最重要的词TF-IDF值最高")
for i, vec in enumerate(tfidf_matrix):
max_idx = max(range(len(vec)), key=lambda j: vec[j])
max_score = vec[max_idx]
if max_score > 0:
print(f" Doc{i+1}: '{vocab[max_idx]}' (TF-IDF={max_score:.4f})")

2509165016-7.py Normal file (163 lines added)

@@ -0,0 +1,163 @@
# TF-IDF demo (pure-Python implementation)
import math
print("=" * 50)
print("TF-IDF (term frequency - inverse document frequency) demo")
print("=" * 50)
def simple_tfidf(docs):
    """
    A simple TF-IDF implementation.
    Args:
        docs: list of documents, each already a list of tokens
    Returns:
        vocab: the vocabulary
        tfidf_matrix: the TF-IDF matrix
        idf: the IDF value of each word
    """
    # 1. Build the vocabulary
    vocab_set = set()
    for doc in docs:
        vocab_set.update(doc)
    vocab = sorted(vocab_set)
    # 2. Build the bag-of-words (term-count) matrix
    bow = []
    for doc in docs:
        vec = [0] * len(vocab)
        for word in doc:
            vec[vocab.index(word)] += 1
        bow.append(vec)
    n_docs = len(docs)
    # 3. Smoothed IDF: log(N / (df + 1)) + 1
    idf = []
    for j, word in enumerate(vocab):
        df = sum(1 for vec in bow if vec[j] > 0)
        idf.append(math.log(n_docs / (df + 1)) + 1)
    # 4. TF-IDF = TF * IDF
    tfidf = []
    for vec in bow:
        tfidf.append([tf * idf[i] for i, tf in enumerate(vec)])
    return vocab, tfidf, idf
docs = [
    ["Python", "编程", "语言"],
    ["Python", "Python", "Python"],  # "Python" appears 3 times
    ["Java", "编程", "语言"],
]
vocab, tfidf_matrix, idf = simple_tfidf(docs)
print("文档集合:")
for i, doc in enumerate(docs):
print(f" Doc{i+1}: {' '.join(doc)}")
print()
print(f"词表: {vocab}")
print()
print(f"IDF值: {[round(x, 4) for x in idf]}")
print()
print("TF-IDF矩阵")
for i, vec in enumerate(tfidf_matrix):
print(f" Doc{i+1}: {[round(x, 4) for x in vec]}")
print()
print("详细分析:")
for i, doc in enumerate(docs):
print(f"\nDoc{i+1}: {' '.join(doc)}")
for j, score in enumerate(tfidf_matrix[i]):
if score > 0:
print(f" '{vocab[j]}': TF-IDF = {score:.4f}")
# TF-IDF vs BoW comparison (reuses simple_tfidf() defined above)
print("=" * 50)
print("TF-IDF vs BoW comparison")
print("=" * 50)
def simple_bow(docs):
    """Plain bag-of-words: raw term counts per document."""
    vocab_set = set()
    for doc in docs:
        vocab_set.update(doc)
    vocab = sorted(vocab_set)
    bow_matrix = []
    for doc in docs:
        vec = [0] * len(vocab)
        for word in doc:
            vec[vocab.index(word)] += 1
        bow_matrix.append(vec)
    return vocab, bow_matrix
docs = [
    ["Python", "编程"],
    ["Java", "编程"],
    ["Python", "Python", "Python"]  # "Python" appears 3 times
]
vocab_bow, bow_matrix = simple_bow(docs)
vocab_tfidf, tfidf_matrix, idf = simple_tfidf(docs)
print("文档:")
for i, doc in enumerate(docs):
print(f" Doc{i+1}: {' '.join(doc)}")
print()
print("BoW矩阵")
for i, vec in enumerate(bow_matrix):
print(f" Doc{i+1}: {vec}")
print()
print("TF-IDF矩阵")
for i, vec in enumerate(tfidf_matrix):
print(f" Doc{i+1}: {[round(x, 4) for x in vec]}")
print()
# A closer look at Doc3
print("Key analysis:")
py_idx = vocab_tfidf.index('Python')
print("Doc3 'Python Python Python':")
print("  BoW: 'Python' appears 3 times")
print(f"  TF-IDF: 'Python' scores {tfidf_matrix[2][py_idx]:.4f}")
print()
print("Note that each occurrence of 'Python' is weighted less than 'Java':")
print("'Python' also appears in Doc1, so its IDF is diluted, while 'Java'")
print("is unique to Doc2 and gets a higher IDF.")

2509165016-8.py Normal file (40 lines added)

@@ -0,0 +1,40 @@
# Word2Vec / word-embedding concept demo
import numpy as np

def cosine_similarity(a, b):
    """Cosine similarity between two vectors (used in the prints below)."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

print("=" * 50)
print("Word embedding concept demo")
print("=" * 50)
print()
# Pretend these vectors were trained with Word2Vec or similar (simplified
# to 3 dimensions; real vectors are typically 50/100/300-dimensional)
word_vectors = {
    "猫":     np.array([0.9, 0.1, 0.2]),    # high on the animal dimension
    "狗":     np.array([0.8, 0.3, 0.1]),    # high on the animal dimension
    "小猫":   np.array([0.85, 0.2, 0.15]),  # a small animal, close to "猫"
    "苹果":   np.array([0.1, 0.9, 0.2]),    # high on the plant/fruit dimension
    "香蕉":   np.array([0.1, 0.85, 0.1]),   # high on the plant/fruit dimension
    "Python": np.array([0.1, 0.0, 0.9]),    # programming language
    "Java":   np.array([0.1, 0.0, 0.85]),   # programming language
}
print("词向量简化版3维示意")
print("维度含义: [动物性, 植物性, 其他/技术性]")
print()
for word, vec in word_vectors.items():
print(f" {word}: {vec}")
print()
# 计算相似度
print("语义相似度:")
print(f" 猫 vs 狗: {cosine_similarity(word_vectors[''], word_vectors['']):.3f}")
print(f" 猫 vs 小猫: {cosine_similarity(word_vectors[''], word_vectors['小猫']):.3f}")
print(f" 猫 vs 苹果: {cosine_similarity(word_vectors[''], word_vectors['苹果']):.3f}")
print(f" 苹果 vs 香蕉: {cosine_similarity(word_vectors['苹果'], word_vectors['香蕉']):.3f}")
print(f" Python vs Java: {cosine_similarity(word_vectors['Python'], word_vectors['Java']):.3f}")
print()
print("词嵌入的优势:")
print(" - 语义相似的词,向量也相似")
print(" - 可以做类比推理:国王-男人+女人=女王")

2509165016-9.py Normal file (43 lines added)

@@ -0,0 +1,43 @@
import jieba
print("=" * 50)
print("jieba分词演示")
print("=" * 50)
text = "我喜欢深度学习和人工智能"
print(f"原文: {text}")
print()
# 精确模式(默认)
words精确 = list(jieba.cut(text, cut_all=False))
print(f"精确模式: {' / '.join(words精确)}")
# 全模式
words全 = list(jieba.cut(text, cut_all=True))
print(f"全模式: {' / '.join(words全)}")
# 搜索引擎模式
words搜索 = list(jieba.cut_for_search(text))
print(f"搜索模式: {' / '.join(words搜索)}")
# More segmentation examples (jieba is already imported above)
print("=" * 50)
print("More segmentation examples")
print("=" * 50)
examples = [
    "今天天气真不错",
    "人工智能是未来的发展方向",
    "Python是一门非常流行的编程语言",
    "小明毕业于清华大学计算机系",
    "我今天在京东买了一部iPhone手机"
]
for i, text in enumerate(examples):
    words = list(jieba.cut(text))
    print(f"{i+1}. {text}")
    print(f"   {' / '.join(words)}")
    print()