Weibo Emotion Classification

Source: https://blog.csdn.net/no1xiaoqianqian/article/details/130593783

Borrowed in a friendly spirit; in truth, largely reproduced from the original.

The required data files are available at: https://download.csdn.net/download/m0_37567738/88340795

```python
# src/models/textRNN.py
import os
import torch
import torch.nn as nn
import numpy as np


class TextRNN(nn.Module):
    def __init__(self, Config):
        super(TextRNN, self).__init__()
        self.hidden_size = 128  # LSTM hidden size
        self.num_layers = 2     # number of stacked LSTM layers
        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.lstm = nn.LSTM(Config.embed_dim, self.hidden_size, self.num_layers,
                            bidirectional=True, batch_first=True, dropout=Config.dropout)
        self.fc = nn.Linear(self.hidden_size * 2, Config.num_classes)

    def forward(self, x):
        out = self.embedding(x)       # [batch_size, seq_len, embed_dim], e.g. [128, 64, 300]
        out, _ = self.lstm(out)
        out = self.fc(out[:, -1, :])  # classify from the hidden state of the last time step
        return out
```
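As a quick sanity check (my addition, not in the original post), the snippet below pushes a random batch through TextRNN in the same session; the DummyConfig values are assumptions that simply mirror the fields the model reads from Config.

```python
import torch

# Hypothetical throwaway config, used only for a shape check.
class DummyConfig:
    n_vocab = 5000
    embed_dim = 300
    dropout = 0.5
    num_classes = 6

model = TextRNN(DummyConfig)
x = torch.randint(0, DummyConfig.n_vocab, (8, 64))  # [batch_size, seq_len] of token ids
print(model(x).shape)  # expected: torch.Size([8, 6]), one logit per emotion class
```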
```python
# src/models/Transformer.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy


class Transformer(nn.Module):
    def __init__(self, Config):
        super(Transformer, self).__init__()
        self.hidden = 1024
        self.last_hidden = 512
        self.num_head = 5
        self.num_encoder = 2
        self.dim_model = 300
        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.postion_embedding = Positional_Encoding(Config.embed_dim, Config.all_seq_len,
                                                     Config.dropout, Config.device)
        self.encoder = Encoder(self.dim_model, self.num_head, self.hidden, Config.dropout)
        self.encoders = nn.ModuleList([
            copy.deepcopy(self.encoder)
            for _ in range(self.num_encoder)])
        self.fc1 = nn.Linear(Config.all_seq_len * self.dim_model, Config.num_classes)

    def forward(self, x):
        out = self.embedding(x)
        out = self.postion_embedding(out)
        for encoder in self.encoders:
            out = encoder(out)
        out = out.view(out.size(0), -1)  # flatten all positions before the classifier
        out = self.fc1(out)
        return out


class Encoder(nn.Module):
    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):
        out = self.attention(x)
        out = self.feed_forward(out)
        return out


class Positional_Encoding(nn.Module):
    def __init__(self, embed, pad_size, dropout, device):
        super(Positional_Encoding, self).__init__()
        self.device = device
        self.pe = torch.tensor(
            [[pos / (10000.0 ** (i // 2 * 2.0 / embed)) for i in range(embed)] for pos in range(pad_size)])
        self.pe[:, 0::2] = np.sin(self.pe[:, 0::2])
        self.pe[:, 1::2] = np.cos(self.pe[:, 1::2])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = x + nn.Parameter(self.pe, requires_grad=False).to(self.device)
        out = self.dropout(out)
        return out


class Scaled_Dot_Product_Attention(nn.Module):
    '''Scaled dot-product attention.'''
    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        '''
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor, 1 / sqrt(dim_K) in the paper
        Returns:
            the context tensor produced by self-attention
        '''
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):
    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5  # scaling factor
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):
    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out
```
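For reference, Positional_Encoding above implements the fixed sinusoidal encoding from "Attention Is All You Need": for position pos and embedding index i,

    PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
    PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))

The resulting table is added to the word embeddings and kept frozen (requires_grad=False), so the model gets position information without extra trainable parameters.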
```python
# src/models/textCNN.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class TextCNN(nn.Module):
    def __init__(self, Config):
        super(TextCNN, self).__init__()
        self.filter_sizes = (2, 3, 4)  # convolution kernel sizes
        self.num_filters = 64          # number of kernels (output channels)
        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, self.num_filters, (k, Config.embed_dim)) for k in self.filter_sizes])
        self.dropout = nn.Dropout(Config.dropout)
        self.fc = nn.Linear(self.num_filters * len(self.filter_sizes), Config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x))
        x = x.squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # max-pool over the whole sequence
        return x

    def forward(self, x):
        out = self.embedding(x)
        out = out.unsqueeze(1)  # add a channel dimension for Conv2d
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
```
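To make the Conv2d-over-text trick concrete, here is the shape flow for a single kernel size k, written as a comment sketch using the batch_size=128, all_seq_len=64 and embed_dim=300 values from the Config class defined below.

```python
# Shape flow through conv_and_pool for one kernel size k (e.g. k = 3),
# assuming batch_size = 128, seq_len = 64, embed_dim = 300:
#   x (token ids)          : [128, 64]
#   embedding              : [128, 64, 300]
#   unsqueeze(1)           : [128, 1, 64, 300]
#   Conv2d(1, 64, (k, 300)): [128, 64, 64 - k + 1, 1]
#   squeeze(3)             : [128, 64, 64 - k + 1]
#   max_pool1d + squeeze(2): [128, 64]
# Concatenating the three kernel sizes (2, 3, 4) gives [128, 192] -> fc -> [128, 6].
```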
```python
# utils/draw_loss_pic.py
import matplotlib.pyplot as plt
import numpy as np


def draw_loss_pic(train_loss, test_loss, y):
    x = np.linspace(0, len(train_loss), len(train_loss))
    plt.plot(x, train_loss, label="train_" + y, linewidth=1.5)
    plt.plot(x, test_loss, label="test_" + y, linewidth=1.5)
    plt.xlabel("epoch")
    plt.ylabel(y)
    plt.legend()
    plt.show()
```

```python
# src/Config.py
import torch


class Config():
    train_data_path = '../data/virus_train.txt'
    test_data_path = '../data/virus_eval_labeled.txt'
    vocab_path = '../data/vocab.pkl'
    split_word_all_path = '../data/split_word_all.txt'
    model_file_name_path = '../data/vec_model.txt'
    id_vec_path = '../data/id_vec.pkl'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    word_level = True             # tokenize at character level
    embedding_pretrained = False  # whether to use pretrained word vectors
    # note: 'neural' is the neutral label as spelled in the dataset
    label_fields = {'neural': 0, 'happy': 1, 'angry': 2, 'sad': 3, 'fear': 4, 'surprise': 5}
    all_seq_len = 64              # fixed sentence length: truncate long posts, pad short ones
    batch_size = 128
    learning_rate = 0.0001
    epoches = 50
    dropout = 0.5
    num_classes = 6
    embed_dim = 300
    n_vocab = 0                   # filled in once the vocabulary has been built
```
```python
# src/get_data.py
import re
import os
import json
# import jieba  # only needed for word-level tokenization (Config.word_level = False)
import pickle as pkl
import numpy as np
import gensim.models.word2vec as w2v
import torch
import torch.utils.data as Data
from src.Config import Config

train_data_path = Config.train_data_path
test_data_path = Config.test_data_path
vocab_path = Config.vocab_path
label_fields = Config.label_fields
all_seq_len = Config.all_seq_len

UNK, PAD = '<UNK>', '<PAD>'  # unknown-word token and padding token


def build_vocab(content_list, tokenizer):
    file_split_word = open(Config.split_word_all_path, 'w', encoding='utf-8')
    vocab_dic = {}
    for content in content_list:
        word_lines = []
        for word in tokenizer(content):
            vocab_dic[word] = vocab_dic.get(word, 0) + 1
            word_lines.append(word)
        line = " ".join(word_lines) + "\n"
        file_split_word.write(line)
    file_split_word.close()
    vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    vocab_dic = {word_count: idx for idx, word_count in enumerate(vocab_dic)}
    return vocab_dic


def build_id_vec(vocab_dic, model):
    model.wv.add_vector(UNK, np.zeros(300))
    model.wv.add_vector(PAD, np.ones(300))
    id2vec = {}
    for word in vocab_dic.keys():
        id = vocab_dic.get(word, vocab_dic.get(UNK))
        vec = model.wv.get_vector(word)
        id2vec.update({id: vec})
    return id2vec


def train_vec():
    model_file_name = Config.model_file_name_path
    sentences = w2v.LineSentence(Config.split_word_all_path)
    model = w2v.Word2Vec(sentences, vector_size=300, window=20, min_count=0)
    model.save(model_file_name)


def load_data(root):
    content_list = []
    content_token_list = []
    label_list = []
    if Config.word_level:
        tokenizer = lambda x: [y for y in x]               # character-level tokenization
    else:
        tokenizer = lambda x: jieba.cut(x, cut_all=False)  # word-level tokenization
    file = open(root, 'r', encoding='utf-8')
    datas = json.load(file)
    # Other cleaning patterns that were tried:
    # r'[^\u4e00-\u9fa5|,|。]'      # seq_len=32  CNN: 67%-68%  RNN: 61%-62%  Transformer: 63%-64%
    # r'[^\u4e00-\u9fa5|,|。|!]'    # CNN: 65%-66%
    pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!|?]')  # keep Chinese characters and basic punctuation
    for data in datas:
        content_after_clean = re.sub(pattern, '', data['content'])
        content_list.append(content_after_clean)
        label_list.append(label_fields[data['label']])
    if os.path.exists(vocab_path):
        vocab = pkl.load(open(vocab_path, 'rb'))
    else:
        vocab = build_vocab(content_list, tokenizer)
        pkl.dump(vocab, open(vocab_path, 'wb'))
        if Config.embedding_pretrained:
            train_vec()
            model = w2v.Word2Vec.load(Config.model_file_name_path)
            id_vec = build_id_vec(vocab, model)
            pkl.dump(id_vec, open(Config.id_vec_path, 'wb'))
    for content in content_list:
        word_line = []
        token = list(tokenizer(content))
        seq_len = len(token)
        if seq_len < all_seq_len:
            token.extend([PAD] * (all_seq_len - seq_len))
        else:
            token = token[:all_seq_len]
        for word in token:
            word_line.append(vocab.get(word, vocab.get(UNK)))
        content_token_list.append(word_line)
    n_vocab = len(vocab)
    return content_token_list, label_list, n_vocab


class WeiBboDataset(Data.Dataset):
    def __init__(self, content_token_list, label_list):
        super(WeiBboDataset, self).__init__()
        self.content_token_list = content_token_list
        self.label_list = label_list

    def __getitem__(self, index):
        label = float(self.label_list[index])
        return torch.tensor(self.content_token_list[index]), torch.tensor(label)

    def __len__(self):
        return len(self.label_list)


def get_data(batch_size):
    train_content_token_list, train_label_list, n_vocab = load_data(train_data_path)
    test_content_token_list, test_label_list, _ = load_data(test_data_path)
    train_dataset = WeiBboDataset(train_content_token_list, train_label_list)
    test_dataset = WeiBboDataset(test_content_token_list, test_label_list)
    train_dataloader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = Data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    return train_dataloader, test_dataloader, n_vocab


if __name__ == '__main__':
    get_data(32)
```
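load_data above expects virus_train.txt / virus_eval_labeled.txt to be JSON arrays of objects with content and label fields, where label must be one of the keys in Config.label_fields. A minimal illustrative file (the two posts below are made up, not from the dataset) would look like this:

```json
[
  {"content": "今天阳光真好,心情舒畅!", "label": "happy"},
  {"content": "排了三个小时的队,太气人了。", "label": "angry"}
]
```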
```python
# src/train.py
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from utils.draw_loss_pic import draw_loss_pic

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate-OpenMP errors when matplotlib and torch coexist


def train(net, loss, optimizer, train_loader, test_loader, epoches, device):
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    for epoch in range(epoches):
        net.train()
        total_loss = 0.0
        correct = 0
        sample_num = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.to(device).long()
            target = target.to(device).long()
            optimizer.zero_grad()
            output = net(data)
            ls = loss(output, target)
            ls.backward()
            optimizer.step()
            total_loss += ls.item()
            sample_num += len(target)
            max_output = output.data.max(1, keepdim=True)[1].view_as(target)
            correct += (max_output == target).sum()
        print('epoch %d, train_loss %f, train_acc: %f' % (
            epoch + 1, total_loss / sample_num, float(correct.data.item()) / sample_num))
        train_loss.append(total_loss / sample_num)
        train_acc.append(float(correct.data.item()) / sample_num)
        test_ls, test_accury = test(net, test_loader, device, loss)
        test_loss.append(test_ls)
        test_acc.append(test_accury)
    draw_loss_pic(train_loss, test_loss, "loss")
    draw_loss_pic(train_acc, test_acc, "acc")


def test(net, test_loader, device, loss):
    net.eval()
    total_loss = 0.0
    correct = 0
    sample_num = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data = data.to(device)
        target = target.to(device).long()
        output = net(data)
        ls = loss(output, target)
        total_loss += ls.item()
        sample_num += len(target)
        max_output = output.data.max(1, keepdim=True)[1].view_as(target)
        correct += (max_output == target).sum()
    print('test_loss %f, test_acc: %f' % (
        total_loss / sample_num, float(correct.data.item()) / sample_num))
    return total_loss / sample_num, float(correct.data.item()) / sample_num
```
```python
# training entry script
import torch
import torch.nn as nn
import torch.optim as optim
import pickle as pkl
from src.models.textCNN import TextCNN
from src.models.textRNN import TextRNN
from src.models.Transformer import Transformer
from src.Config import Config
from src.get_data import get_data
from src.train import train

if __name__ == '__main__':
    config = Config()
    batch_size = config.batch_size
    learning_rate = config.learning_rate
    train_dataloader, test_dataloader, n_vocab = get_data(batch_size)
    config.n_vocab = n_vocab

    # pick one of the three models
    # model = TextCNN(config).to(Config.device)
    model = TextRNN(config).to(Config.device)
    # model = Transformer(config).to(Config.device)

    if config.embedding_pretrained:
        # load the pretrained word vectors produced by word2vec
        id_vec = pkl.load(open(Config.id_vec_path, 'rb'))
        id_vec = torch.tensor(list(id_vec.values())).to(Config.device)
        model.embedding = nn.Embedding.from_pretrained(id_vec)

    loss = nn.CrossEntropyLoss().to(Config.device)
    optimizer = optim.Adam(params=model.parameters(), lr=learning_rate)
    train(model, loss, optimizer, train_dataloader, test_dataloader, Config.epoches, Config.device)
```
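The original post stops at training. Below is a minimal inference sketch (my addition, not from the source) showing how a trained model plus the saved vocab.pkl could classify a single post; predict_sentence is a hypothetical helper whose preprocessing mirrors load_data above.

```python
import re
import torch
import pickle as pkl

def predict_sentence(model, sentence, vocab, config):
    """Classify one Weibo post; preprocessing mirrors load_data (character-level, cleaned, padded)."""
    pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!|?]')
    tokens = list(re.sub(pattern, '', sentence))              # character-level tokens
    tokens = tokens[:config.all_seq_len]
    tokens += ['<PAD>'] * (config.all_seq_len - len(tokens))  # pad to the fixed length
    ids = [vocab.get(tok, vocab.get('<UNK>')) for tok in tokens]
    x = torch.tensor([ids], dtype=torch.long).to(config.device)
    model.eval()
    with torch.no_grad():
        logits = model(x)
    id2label = {v: k for k, v in config.label_fields.items()}
    return id2label[int(logits.argmax(dim=1).item())]

# Usage (assuming `model` has been trained as above):
# vocab = pkl.load(open(Config.vocab_path, 'rb'))
# print(predict_sentence(model, '今天心情特别好!', vocab, Config))
```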

Results (accuracy and error rate):

The accuracy reaches 85%.
[Figure: training vs. test loss curves]

[Figure: training vs. test accuracy curves]
