Upload files to /
ChnSentiCorp_htl_all.csv (new file, 7767 lines)
File diff suppressed because one or more lines are too long
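The suppressed diff is the raw corpus itself. Judging from the loader added in dataset.py below (it skips one header line and splits each row on the first comma), the layout is presumably a header followed by label,review rows, with 1 = positive and 0 = negative; the review texts here are placeholders, not real rows:

label,review
1,<positive hotel review text>
0,<negative hotel review text>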
config.py (59 changed lines)
@@ -1,40 +1,19 @@
 # -*- coding: utf-8 -*-
-"""
-Configuration file: all hyperparameters managed in one place.
-
-Design idea:
-Hyperparameters are grouped by category, so students can change one
-category without affecting the others.
-"""
-
-# ==================== Data ====================
-DATA_DIR = 'data/ChnSentiCorp'      # dataset path
-MAX_FEATURES = 3000                 # maximum vocabulary size
-MAX_SEQ_LEN = 100                   # maximum sentence length (in words)
-VECTORIZER_TYPE = 'tfidf'           # 'tfidf' or 'bow' (vectorization method)
-
-# ==================== Model ====================
-MODEL_TYPE = 'mlp'                  # 'mlp' or 'lr' (model type)
-HIDDEN_SIZE = 64                    # MLP hidden-layer size (ignored by LR)
-NUM_CLASSES = 2                     # number of classes (positive/negative binary)
-KEEP_PROB = 1.0                     # dropout keep probability (ignored by LR; just set to 1)
-
-# ==================== Training ====================
-LEARNING_RATE = 0.05                # learning rate
-NUM_EPOCHS = 100                    # number of training epochs
-BATCH_SIZE = 64                     # batch size
-
-# ==================== Class weights (for the class-imbalance problem) ====================
-USE_CLASS_WEIGHT = True             # True = enable class weights, False = disable (for comparison)
-# Weight formula: n_samples / (n_classes * n_class_i)
-# Positive reviews are plentiful, so their weight is small; negative reviews are scarce, so theirs is large
-CLASS_WEIGHT_POS = 0.73             # positive-class weight (computed automatically)
-CLASS_WEIGHT_NEG = 1.58             # negative-class weight (computed automatically)
-
-# ==================== Experiments ====================
-RUN_COMPARISON = False              # True = run the comparison experiments, False = run a single model
-COMPARE_MODELS = ['lr', 'mlp']      # models to compare
-COMPARE_VECTORS = ['bow', 'tfidf']  # vectorization methods to compare
-
-# ==================== Misc ====================
-RANDOM_SEED = 42                    # random seed (for reproducibility)
-VERBOSE = True                      # print detailed logs
+# Model parameters
+MODEL_TYPE = 'mlp'         # 'lr' logistic regression / 'mlp' multilayer perceptron
+VECTORIZER_TYPE = 'tfidf'  # 'bow' / 'tfidf'
+
+# Training parameters
+NUM_EPOCHS = 100
+LEARNING_RATE = 0.05
+BATCH_SIZE = 32
+HIDDEN_SIZE = 64
+
+# Text parameters
+MAX_FEATURES = 3000        # vocabulary size
+MAX_SEQ_LEN = 100          # maximum text length
+
+# Class imbalance
+USE_CLASS_WEIGHT = True    # class weights
+
+# Experiment comparison
+RUN_COMPARISON = True
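The removed config documented the weight formula n_samples / (n_classes * n_class_i). A minimal sketch of that computation; the 5322/2444 split is an assumption (the commonly reported positive/negative counts for ChnSentiCorp_htl_all), under which the formula reproduces the 0.73 / 1.58 values above:

import numpy as np

def class_weights(labels):
    # weight_i = n_samples / (n_classes * n_class_i)
    labels = np.asarray(labels)
    n_samples, classes = len(labels), np.unique(labels)
    return {int(c): n_samples / (len(classes) * np.sum(labels == c))
            for c in classes}

# Assumed counts: ~5322 positive, ~2444 negative reviews.
labels = np.array([1] * 5322 + [0] * 2444)
print(class_weights(labels))  # {0: ~1.589, 1: ~0.730}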
dataset.py (374 changed lines)
@@ -1,286 +1,88 @@
 # -*- coding: utf-8 -*-
-"""
-Data loading and vectorization module.
-
-Two vectorization methods are supported:
-1. BoW (Bag of Words) - word-count vectors
-2. TF-IDF - term frequency / inverse document frequency vectors
-
-Advantages of TF-IDF:
-- lowers the weight of common words (e.g. "的", "是")
-- raises the information content of rare words
-- usually outperforms plain BoW
-"""
-
-import os
-import re
-import csv
-import math
-import jieba
-import numpy as np
-from collections import Counter
-
-try:
-    import urllib.request
-    import ssl
-    DOWNLOAD_AVAILABLE = True
-except ImportError:
-    DOWNLOAD_AVAILABLE = False
-
-DATASET_URL = "https://raw.githubusercontent.com/SophonPlus/ChineseNlpCorpus/master/datasets/ChnSentiCorp_htl_all/ChnSentiCorp_htl_all.csv"
-
-
-def download_dataset(data_dir):
-    """Download the dataset (if it does not already exist)."""
-    csv_path = os.path.join(data_dir, 'ChnSentiCorp_htl_all.csv')
-
-    if os.path.exists(csv_path):
-        print(f"Data already exists: {csv_path}")
-        return True
-
-    if not DOWNLOAD_AVAILABLE:
-        return False
-
-    print("Downloading dataset...")
-    ssl_context = ssl.create_default_context()
-    ssl_context.check_hostname = False
-    ssl_context.verify_mode = ssl.CERT_NONE
-
-    try:
-        request = urllib.request.Request(DATASET_URL, headers={'User-Agent': 'Mozilla/5.0'})
-        response = urllib.request.urlopen(request, timeout=120, context=ssl_context)
-        os.makedirs(data_dir, exist_ok=True)
-        with open(csv_path, 'wb') as f:
-            f.write(response.read())
-        print(f"Download complete: {csv_path}")
-        return True
-    except Exception as e:
-        print(f"Download failed: {e}")
-        return False
-
-
-def load_raw_data(data_dir):
-    """Load the raw data."""
-    csv_path = os.path.join(data_dir, 'ChnSentiCorp_htl_all.csv')
-    texts, labels = [], []
-
-    with open(csv_path, 'r', encoding='utf-8') as f:
-        reader = csv.reader(f)
-        for row in reader:
-            if len(row) < 2:
-                continue
-            try:
-                label = int(row[0])
-                review = row[1].strip()
-                if review:
-                    texts.append(review)
-                    labels.append(label)
-            except (ValueError, IndexError):
-                continue
-
-    return texts, np.array(labels)
-
-
-def tokenize(text):
-    """Chinese word segmentation."""
-    text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z]', ' ', text)
-    words = jieba.lcut(text)
-    return [w for w in words if len(w) > 1]
-
-
-# ==================== Vectorizers ====================
-
-class BaseVectorizer:
-    """Vectorizer base class."""
-    def fit(self, texts): pass
-    def transform(self, texts): pass
-    def fit_transform(self, texts): pass
-
-
-class BoWVectorizer(BaseVectorizer):
-    """
-    Bag-of-Words model.
-
-    Principle: count how many times each word occurs in a text.
-    Vector dimension = vocabulary size
-    Each dimension = the number of times that word occurs in this text
-    """
-
-    def __init__(self, max_features, max_seq_len):
-        self.max_features = max_features
-        self.max_seq_len = max_seq_len
-        self.vocab = {}
-        self.doc_freq = {}  # document frequency
-        self.num_docs = 0
-
-    def fit(self, texts):
-        """Build the vocabulary (by word frequency)."""
-        counter = Counter()
-        doc_counter = Counter()  # number of documents containing each word
-
-        for text in texts:
-            words = tokenize(text)
-            unique_words = set(words)
-            counter.update(words)
-            for w in unique_words:
-                doc_counter[w] += 1
-
-        self.num_docs = len(texts)
-
-        # keep the most frequent words
-        most_common = counter.most_common(self.max_features)
-        self.vocab = {word: idx for idx, (word, _) in enumerate(most_common)}
-
-        # record document frequencies (used by TF-IDF)
-        self.doc_freq = {w: doc_counter[w] for w in self.vocab}
-
-        print(f" BoW vocabulary size: {len(self.vocab)}")
-        return self
-
-    def transform(self, texts):
-        """Convert texts to word-frequency vectors."""
-        vectors = []
-        for text in texts:
-            words = tokenize(text)
-            freq = [0] * self.max_seq_len
-            for i, word in enumerate(words[:self.max_seq_len]):
-                if word in self.vocab:
-                    freq[i] = 1  # binary (present = 1, absent = 0)
-            vectors.append(freq)
-        return np.array(vectors, dtype=np.float32)
-
-    def fit_transform(self, texts):
-        self.fit(texts)
-        return self.transform(texts)
-
-
-class TFIDFVectorizer(BaseVectorizer):
-    """
-    TF-IDF vectorizer.
-
-    Principle:
-    - TF (term frequency) = how often the word occurs in this text
-    - IDF (inverse document frequency) = log(total docs / docs containing the word)
-    - TF-IDF = TF × IDF
-
-    Advantages:
-    - lowers the weight of common, uninformative words (e.g. "的", "是")
-    - boosts rare but informative words
-    """
-
-    def __init__(self, max_features, max_seq_len):
-        self.max_features = max_features
-        self.max_seq_len = max_seq_len
-        self.vocab = {}
-        self.idf = {}  # stores each word's IDF value
-        self.num_docs = 0
-
-    def fit(self, texts):
-        """Build the vocabulary and compute IDF."""
-        counter = Counter()
-        doc_counter = Counter()
-
-        for text in texts:
-            words = tokenize(text)
-            unique_words = set(words)
-            counter.update(words)
-            for w in unique_words:
-                doc_counter[w] += 1
-
-        self.num_docs = len(texts)
-
-        # compute each word's IDF
-        # IDF = log(total docs / docs containing the word)
-        idf_values = {}
-        for word, df in doc_counter.items():
-            idf_values[word] = math.log(self.num_docs / (df + 1)) + 1  # +1 avoids zero
-
-        # keep the words with the highest IDF (the most informative ones)
-        sorted_words = sorted(idf_values.items(), key=lambda x: x[1], reverse=True)
-        self.vocab = {word: idx for idx, (word, _) in enumerate(sorted_words[:self.max_features])}
-
-        # save the IDF values
-        self.idf = {word: idf_values[word] for word in self.vocab}
-
-        print(f" TF-IDF vocabulary size: {len(self.vocab)}")
-        print(f" mean IDF: {np.mean(list(self.idf.values())):.3f}")
-        return self
-
-    def transform(self, texts):
-        """Convert texts to TF-IDF vectors."""
-        vectors = []
-        for text in texts:
-            words = tokenize(text)
-
-            # compute TF
-            tf = Counter(words)
-            tf_sum = len(words) if words else 1
-
-            # build the vector
-            vec = [0.0] * self.max_seq_len
-            for i, word in enumerate(words[:self.max_seq_len]):
-                if word in self.vocab:
-                    # TF × IDF
-                    vec[i] = (tf[word] / tf_sum) * self.idf.get(word, 0)
-            vectors.append(vec)
-
-        return np.array(vectors, dtype=np.float32)
-
-    def fit_transform(self, texts):
-        self.fit(texts)
-        return self.transform(texts)
-
-
-def load_data(data_dir, max_features, max_seq_len, vectorizer_type='tfidf'):
-    """
-    Load and vectorize the data.
-
-    Parameters:
-    - vectorizer_type: 'tfidf' or 'bow'
-    """
-    if not download_dataset(data_dir):
-        raise RuntimeError("Data loading failed; check the network or download the dataset manually")
-
-    print("Loading data...")
-    texts, labels = load_raw_data(data_dir)
-    print(f"Total reviews: {len(texts)}, positive: {sum(labels)}, negative: {len(labels) - sum(labels)}")
-
-    # choose the vectorizer
-    if vectorizer_type == 'tfidf':
-        vectorizer = TFIDFVectorizer(max_features, max_seq_len)
-        vec_name = "TF-IDF"
-    else:
-        vectorizer = BoWVectorizer(max_features, max_seq_len)
-        vec_name = "BoW"
-
-    print(f"Vectorizing with {vec_name}...")
-    X = vectorizer.fit_transform(texts)
-    y = labels
-
-    # shuffle and split
-    np.random.seed(42)
-    indices = np.random.permutation(len(X))
-    X = X[indices]
-    y = y[indices]
-
-    split_idx = int(len(X) * 0.8)
-    X_train, X_test = X[:split_idx], X[split_idx:]
-    y_train, y_test = y[:split_idx], y[split_idx:]
-
-    print(f"Train set: {len(X_train)} samples, test set: {len(X_test)} samples")
-
-    return X_train, y_train, X_test, y_test, vectorizer
-
-
-if __name__ == '__main__':
-    # quick test
-    print("=" * 60)
-    print("Testing TF-IDF vectorization")
-    print("=" * 60)
-    X_train, y_train, X_test, y_test, vec = load_data(
-        'data/ChnSentiCorp', max_features=3000, max_seq_len=100,
-        vectorizer_type='tfidf'
-    )
-    print(f"\nX_train shape: {X_train.shape}")
-    print(f"X_train sample (first 5 features): {X_train[0][:5]}")
+import numpy as np
+import jieba
+from collections import Counter
+import os
+import requests
+
+# Download the dataset
+def download_data():
+    url = "https://github.com/SophonPlus/ChineseNlpCorpus/raw/master/datasets/ChnSentiCorp_htl_all/ChnSentiCorp_htl_all.csv"
+    path = "ChnSentiCorp_htl_all.csv"
+    if not os.path.exists(path):
+        print("Downloading dataset...")
+        r = requests.get(url)
+        with open(path, "wb") as f:
+            f.write(r.content)
+    return path
+
+# Tokenize (drop single-character tokens)
+def tokenize(text):
+    words = jieba.lcut(str(text).strip())
+    return [w for w in words if len(w) > 1]
+
+# Load the data
+def load_data():
+    path = download_data()
+    texts, labels = [], []
+    with open(path, "r", encoding="utf-8") as f:
+        next(f)
+        for line in f:
+            parts = line.strip().split(",", 1)
+            if len(parts) != 2:
+                continue
+            label, text = parts
+            texts.append(text)
+            labels.append(int(label))
+    return texts, labels
+
+# BoW vectorizer
+class BoWVectorizer:
+    def __init__(self, max_features=3000):
+        self.max_features = max_features
+        self.vocab = {}
+
+    def fit(self, texts):
+        counter = Counter()
+        for t in texts:
+            counter.update(tokenize(t))
+        words = [w for w, _ in counter.most_common(self.max_features)]
+        self.vocab = {w: i for i, w in enumerate(words)}
+
+    def transform(self, text):
+        words = tokenize(text)
+        vec = np.zeros(self.max_features)
+        for w in words:
+            if w in self.vocab:
+                vec[self.vocab[w]] += 1
+        return vec
+
+# TF-IDF vectorizer
+class TFIDFVectorizer:
+    def __init__(self, max_features=3000):
+        self.max_features = max_features
+        self.vocab = {}
+        self.idf = {}
+
+    def fit(self, texts):
+        counter = Counter()
+        doc_freq = Counter()
+        for t in texts:
+            ws = set(tokenize(t))
+            counter.update(tokenize(t))
+            for w in ws:
+                doc_freq[w] += 1
+        words = [w for w, _ in counter.most_common(self.max_features)]
+        self.vocab = {w: i for i, w in enumerate(words)}
+        N = len(texts)
+        for w in self.vocab:
+            self.idf[w] = np.log(N / (doc_freq.get(w, 0) + 1))
+
+    def transform(self, text):
+        words = tokenize(text)
+        vec = np.zeros(self.max_features)
+        tf = Counter(words)
+        for w, cnt in tf.items():
+            if w in self.vocab:
+                vec[self.vocab[w]] = cnt * self.idf[w]
+        return vec
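A few details of the new TFIDFVectorizer differ from the removed one: the vocabulary is now picked by raw frequency rather than by highest IDF, vectors are indexed by vocabulary position (count × IDF) rather than by token position, and the IDF smoothing changed from log(N / (df + 1)) + 1 to log(N / (df + 1)). A side effect of the latter, shown on a toy corpus (the three invented documents are illustration only): a word occurring in every document now gets a negative IDF, which the removed version's trailing + 1 avoided.

import numpy as np
from collections import Counter

docs = [["apple", "banana"], ["apple", "rocket"], ["apple", "banana"]]
N = len(docs)
doc_freq = Counter(w for d in docs for w in set(d))

# Same smoothing as the new fit(): idf = log(N / (df + 1))
idf = {w: np.log(N / (doc_freq[w] + 1)) for w in doc_freq}
print(idf["apple"])   # log(3/4) ≈ -0.288: appears in every doc
print(idf["rocket"])  # log(3/2) ≈  0.405: rarer word scores higher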
main.py (94 changed lines)
@@ -1,34 +1,60 @@
 # -*- coding: utf-8 -*-
-"""
-Main program entry point.
-
-Usage:
-
-1. Run a single model (default):
-python main.py
-
-Edit MODEL_TYPE and VECTORIZER_TYPE in config.py to switch configurations.
-
-2. Run the comparison experiments:
-set RUN_COMPARISON = True in config.py
-
-This runs, in order:
-- Experiment 1: BoW vs TF-IDF (LR model fixed)
-- Experiment 2: LR vs MLP (TF-IDF fixed)
-- Experiment 3: comparing different learning rates
-- Experiment 4: comparing different hidden-layer sizes
-
-and prints a summary report at the end.
-"""
-
-from train import main
-
-
-if __name__ == '__main__':
-    print("\n" + "=" * 70)
-    print("Text classification experiments - pure NumPy implementation")
-    print("Dataset: ChnSentiCorp (Chinese hotel reviews)")
-    print("Model: Logistic Regression / MLP")
-    print("Vectorization: BoW / TF-IDF")
-    print("=" * 70 + "\n")
-    main()
+import numpy as np
+from dataset import load_data, BoWVectorizer, TFIDFVectorizer
+from train import train
+import config as cfg
+import pickle
+import time
+
+# Load the data
+texts, labels = load_data()
+labels = np.array(labels)
+
+# Split into train/test sets
+np.random.seed(42)
+indices = np.random.permutation(len(texts))
+split = int(0.8 * len(texts))
+train_idx, test_idx = indices[:split], indices[split:]
+train_texts = [texts[i] for i in train_idx]
+test_texts = [texts[i] for i in test_idx]
+y_train, y_test = labels[train_idx], labels[test_idx]
+
+# Vectorize
+if cfg.VECTORIZER_TYPE == "bow":
+    vec = BoWVectorizer(cfg.MAX_FEATURES)
+else:
+    vec = TFIDFVectorizer(cfg.MAX_FEATURES)
+
+vec.fit(train_texts)
+X_train = np.array([vec.transform(t) for t in train_texts])
+X_test = np.array([vec.transform(t) for t in test_texts])
+
+# Train
+print("=" * 50)
+print(f"Training config:\n model: {cfg.MODEL_TYPE}\n vectorizer: {cfg.VECTORIZER_TYPE}\n learning rate: {cfg.LEARNING_RATE}")
+print("=" * 50)
+
+model, t = train(
+    X_train, y_train, X_test, y_test,
+    model_type=cfg.MODEL_TYPE,
+    lr=cfg.LEARNING_RATE,
+    epochs=cfg.NUM_EPOCHS,
+    use_weight=cfg.USE_CLASS_WEIGHT
+)
+
+# Save
+ts = time.strftime("%m%d_%H%M%S")
+name = f"model_{cfg.MODEL_TYPE}_{cfg.VECTORIZER_TYPE}_{'weighted' if cfg.USE_CLASS_WEIGHT else 'raw'}_{ts}"
+
+if cfg.MODEL_TYPE == "lr":
+    np.save(f"{name}_W.npy", model.W)
+    np.save(f"{name}_b.npy", model.b)
+else:
+    np.save(f"{name}_W1.npy", model.W1)
+    np.save(f"{name}_b1.npy", model.b1)
+    np.save(f"{name}_W2.npy", model.W2)
+    np.save(f"{name}_b2.npy", model.b2)
+
+with open(f"{name}_vec.pkl", "wb") as f:
+    pickle.dump(vec, f)
+
+print(f"\nModel saved: {name}_*.npy/*.pkl")
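The save step above writes bare weight arrays plus the pickled vectorizer. A sketch of loading them back for inference; the prefix matches the binary committed below, but the forward pass is an assumption (train.py and its model classes are not part of this commit, so the ReLU hidden layer here is hypothetical):

import pickle
import numpy as np

prefix = "model_mlp_tfidf_weighted_0430_154611"

W1, b1 = np.load(f"{prefix}_W1.npy"), np.load(f"{prefix}_b1.npy")
W2, b2 = np.load(f"{prefix}_W2.npy"), np.load(f"{prefix}_b2.npy")

with open(f"{prefix}_vec.pkl", "rb") as f:
    vec = pickle.load(f)  # unpickling needs dataset.py on the import path

def predict(text):
    x = vec.transform(text)
    h = np.maximum(0, x @ W1 + b1)       # assumed ReLU hidden layer
    return int(np.argmax(h @ W2 + b2))   # 1 = positive, 0 = negative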
model_mlp_tfidf_weighted_0430_154611_b1.npy (BIN, new file)
Binary file not shown.