Common tensor operations for writing PyTorch models

Expanding a tensor along a dimension by repeating it: expand

Fixing the tensor's data type so the model does not error out during training: .type()

Adding a size-1 dimension: .unsqueeze(0); removing a size-1 dimension: .squeeze(0)

Concatenating tensors: cat (all the other dimensions must match)

Swapping two dimensions: .transpose(0, 1)

Changing the shape: reshape

The snippet below walks through these operations; the complete development template follows afterwards.

import torch
from torch import nn

# expand: repeat a tensor along a dimension; only size-1 dimensions can be expanded
x = torch.randn(1, 2, 64)
print(x.shape)  # torch.Size([1, 2, 64])
y = x.expand(50, 2, 64)  # the size-1 first dimension is repeated 50 times
print(y.shape)  # torch.Size([50, 2, 64])

# fix the dtype so the inputs match the model's float32 weights;
# inside a model's forward() the cast (plus the device move) looks like:
#     def forward(self, x, batch_size):
#         x = x.type(torch.FloatTensor)
#         x = x.to(device)
x = x.type(torch.FloatTensor)

# cat: concatenate along dim=2; the remaining dimensions must match
x_input = torch.randn(50, 2, 64)
temp_aspect = torch.randn(50, 2, 64)
print("137", x_input.shape, temp_aspect.shape)  # torch.Size([50, 2, 64]) torch.Size([50, 2, 64])
x_input = torch.cat((x_input, temp_aspect), dim=2)  # torch.Size([50, 2, 128])

# transpose: swap two dimensions
x_input = x_input.transpose(0, 1)  # torch.Size([2, 50, 128])

# reshape: change the shape while keeping the element count
batch_size = 2
lstm_out = x_input.reshape(batch_size, -1)  # torch.Size([2, 6400])
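The list above also mentions unsqueeze/squeeze, which the snippet does not exercise; a minimal sketch:

a = torch.randn(2, 64)
a = a.unsqueeze(0)  # add a size-1 dimension: torch.Size([1, 2, 64])
a = a.squeeze(0)    # remove it again: torch.Size([2, 64])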
A complete PyTorch development template:

# -*- coding: utf-8 -*-
import pandas as pd
import gensim
import jieba
import re
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import torch
from torch import nn
import torch.utils.data as data
import torch.nn.functional as F
from torch import tensor
from sklearn.metrics import f1_score
from datetime import datetime
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import random_split
def data_process():
    data = pd.read_excel("pre_process_level_2_table(1).xlsx")
    data_neirong = list(data['内容'].values)
    data_1_aspect = list(data['1_aspect'].values)
    data_label = list(data['label'].values)

    # load the pre-computed aspect vectors; each line is "word_||_v1 v2 ... vn "
    aspect_vec_dict = {}
    with open("ceshi_1_aspect_vec.txt", "r") as f:
        for line in f.readlines():
            temp_word = line.split("_||_")[0]
            temp_vec = line.split("_||_")[1].split(" ")[:-1]
            temp_vec = [float(i) for i in temp_vec]  # convert to a numeric list
            aspect_vec_dict[temp_word] = temp_vec
    # print(aspect_vec_dict)

    data_neirong_word_list = []
    text_len = []
    for line in data_neirong:
        line = line.strip().split(" ")
        line = [w for w in line if w != '']  # drop empty tokens
        data_neirong_word_list.append(line)
        text_len.append(len(line))
    # print(max(text_len), np.mean(text_len))  # 393 14.989528010696924

    # truncate, or pad by repetition, so every sentence has length 50
    data_x = []
    temp_data_y = []
    for idx, line in tqdm(enumerate(data_neirong_word_list)):
        # print("54", idx, len(line), line)
        temp_line = line.copy()
        # some rows contain only spaces; temp_idx is a watchdog counter so the
        # repetition loop below cannot spin forever on such an empty line
        temp_idx = 0
        if len(line) < 60:
            while 1:
                line = line + temp_line
                temp_idx += 1
                if len(line) >= 50:
                    break
                if temp_idx == 50:
                    break
        if temp_idx != 50:
            line = line[:50]
            data_x.append(line + [data_1_aspect[idx]])  # 50 content words + the aspect word
            temp_data_y.append(data_label[idx])
    print("number of samples:", len(data_x))

    # build the embedding matrices
    wd2 = gensim.models.Word2Vec.load("wd2.bin")  # e.g. print(wd2.wv['hotel'])
    data_x_vec = []
    data_y = []
    for idx, line in tqdm(enumerate(data_x)):
        try:
            temp_vec = []
            line_neirong = line[:-1]
            line_1_aspect = line[-1]
            for word in line_neirong:
                temp_vec.append(wd2.wv[word])
            temp_vec.append(np.array(aspect_vec_dict[line_1_aspect]))
            data_x_vec.append(temp_vec)
            data_y.append(temp_data_y[idx])
        except KeyError:
            # skip samples containing words missing from the Word2Vec vocabulary
            pass
    return np.array(data_y), np.array(data_x_vec)
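For reference (my reading of the code, not stated explicitly in the post): each sample in data_x_vec holds 51 vectors of length 64 (the 50 word vectors plus one aspect vector), so data_x_vec has shape [N, 51, 64] and data_y has shape [N].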

class mydataset(Dataset):
    def __init__(self):  # read and load the data
        data_y, data_x = data_process()
        self._x = torch.tensor(np.array(data_x).astype(float))
        self._y = torch.tensor(np.array(data_y).astype(float))
        print(len(data_x), data_y.shape, data_y)
        # self._aspect = torch.tensor(np.array(data_x_aspect).astype(float))
        self._len = len(data_y)

    def __getitem__(self, item):
        return self._x[item], self._y[item]  # , self._aspect[item]

    def __len__(self):  # return the length of the whole dataset
        return self._len

mydata = mydataset()
# split into training and test sets; random_split shuffles before splitting,
# and round() rounds the two sizes to whole numbers
train_data, test_data = random_split(mydata, [round(0.8 * mydata._len), round(0.2 * mydata._len)])
# note: some versions do not accept generator=torch.Generator().manual_seed(0)
train_loader = DataLoader(train_data, batch_size=2, shuffle=True, num_workers=0, drop_last=False)

# for step, (train_x, train_y) in enumerate(train_loader):
#     print(step, ':', (train_x.shape, train_y.shape), (train_x, train_y))
#     break

# test loader
test_loader = DataLoader(test_data, batch_size=2, shuffle=True, num_workers=0, drop_last=False)
# drop_last: whether the last group of samples is kept or thrown away when it
# does not fill a whole batch; num_workers: how many worker processes load the data

# quick check of the test loader:
# for step, (test_x, test_y) in enumerate(test_loader):
#     print(step, ':', (test_x.shape, test_y.shape), (test_x, test_y))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class LSTM_attention(nn.Module):  # note: "Module" must be capitalized
    def __init__(self):
        super().__init__()
        input_size = 64
        hidden_size = 64
        output_size = 64
        # input_size: length of each vector fed into the LSTM cell;
        # hidden_size: length of each LSTM cell's output, i.e. the size
        # of the input/output hidden-state vectors
        self.lstm = nn.LSTM(input_size, output_size, num_layers=1)  # ,batch_first=True
        self.ReLU = nn.ReLU()
        self.attention = nn.Linear(6400, 64)
        self.liner = nn.Linear(128, 5)
    def forward(self, x, batch_size):
        x = x.type(torch.FloatTensor)
        x = x.to(device)

        x_input = x[:, :50]        # the 50 content-word vectors
        x_input = x_input.transpose(0, 1)

        temp_aspect = x[:, -1]     # one aspect vector per sample
        temp_aspect = temp_aspect.unsqueeze(0)
        temp_aspect = temp_aspect.expand(50, batch_size, 64)

        # print("137", x_input.shape, temp_aspect.shape)  # torch.Size([50, 2, 64]) torch.Size([50, 2, 64])
        x_input = torch.cat((x_input, temp_aspect), dim=2)
        # print("137", x_input.shape, temp_aspect.shape)  # torch.Size([50, 2, 128]) torch.Size([50, 2, 64])
        # an LSTM without batch_first expects input shaped
        # [seq_len, batch_size, feature_dim], i.e. [seq_len, batch, 64]
        lstm_out, (h_n, c_n) = self.lstm(x, None)  # note: x itself, not x_input, is fed to the LSTM
        lstm_out = self.ReLU(lstm_out)
        last_lstm = lstm_out[:, -1]   # take the last step's output
        lstm_out = lstm_out[:, :-1]
        lstm_out = lstm_out.transpose(0, 1)
        # print("154", lstm_out.shape, temp_aspect.shape)
        lstm_out = torch.cat((lstm_out, temp_aspect), dim=2)
        lstm_out = lstm_out.transpose(0, 1)
        lstm_out = lstm_out.reshape(batch_size, -1)

        lstm_out = self.ReLU(lstm_out)
        lstm_out = self.attention(lstm_out)
        lstm_out = self.ReLU(lstm_out)

        # print("157", lstm_out.shape, last_lstm.shape)
        out_sum = torch.cat((lstm_out, last_lstm), dim=1)
        # print(out_sum.shape)
        prediction = self.liner(out_sum)
        return prediction
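A quick shape sanity check (my sketch, not part of the original post; it assumes the 50-word + 1-aspect layout produced by data_process and a batch of 2):

model_check = LSTM_attention().to(device)
dummy = torch.randn(2, 51, 64)  # [batch, 50 content words + 1 aspect word, 64]
out = model_check(dummy, batch_size=2)
print(out.shape)  # torch.Size([2, 5]) -- five class scores per sample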

This function evaluates the model on the test data:

def eval_test(model):  # returns the mean loss over the test batches
    test_epoch_loss = []
    with torch.no_grad():
        optimizer.zero_grad()
        for step, (test_x, test_y) in enumerate(test_loader):
            y_pre = model(test_x, batch_size)
            test_y = test_y.to(device)
            test_loss = loss_function(y_pre, test_y.long())
            test_epoch_loss.append(test_loss.item())
    return np.mean(test_epoch_loss)

epochs = 50
batch_size = 128
# when testing the model these two were: batch_size = 19 (fixed), epochs = set as you like
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)

Create the LSTM_attention() object and define the loss function and the optimizer:

model = LSTM_attention().to(device)
loss_function = torch.nn.CrossEntropyLoss().to(device)  # cross-entropy loss
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # create the optimizer instance
print(model)
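A side note (mine, not from the original post): CrossEntropyLoss expects raw, unnormalized logits of shape [batch, num_classes] and integer class labels, which is why the training loop below casts the targets with train_y.long():

logits = torch.randn(2, 5)    # 2 samples, 5 classes, raw scores
targets = torch.tensor([1, 4])  # integer class indices
print(nn.CrossEntropyLoss()(logits, targets))  # scalar loss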

sum_train_epoch_loss = []  # stores the training loss of each epoch
sum_test_epoch_loss = []   # stores the test loss of each epoch
best_test_loss = 10000
for epoch in tqdm(range(epochs)):
    epoch_loss = []
    for step, (train_x, train_y) in enumerate(train_loader):
        y_pred = model(train_x, batch_size)
        # forward pass: generate the network output, then compute the loss
        # between the output and the ground-truth labels
        # print(y_pred, train_y)
        single_loss = loss_function(y_pred.cpu(), train_y.long())
        # print("single_loss", single_loss)
        optimizer.zero_grad()  # clear old gradients; without this they accumulate across steps
        single_loss.backward()  # backward() computes the gradients
        optimizer.step()  # optimizer.step() applies the gradient update to the network
        epoch_loss.append(single_loss.item())
    train_epoch_loss = np.mean(epoch_loss)
    test_epoch_loss = eval_test(model)  # mean loss on the test data

    if test_epoch_loss < best_test_loss:
        best_test_loss = test_epoch_loss
        print("best_test_loss", best_test_loss)
        best_model = model
    sum_train_epoch_loss.append(train_epoch_loss)
    sum_test_epoch_loss.append(test_epoch_loss)
    print("epoch:" + str(epoch) + "  train_epoch_loss&#xFF1A; " + str(train_epoch_loss) + "  test_epoch_loss: " + str(
        test_epoch_loss))

torch.save(best_model, 'best_model.pth')
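# Note (my addition): torch.save(best_model, ...) pickles the entire module object.
# The more portable convention is to save only the weights:
#     torch.save(best_model.state_dict(), 'best_model.pth')
# and load them with model.load_state_dict(torch.load('best_model.pth')).
# The loading code further below works because the full module is saved here.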

Plotting:

fig = plt.figure(facecolor='white', figsize=(10, 7))
plt.xlabel('epoch')
plt.ylabel('loss')
plt.xlim(xmax=len(sum_train_epoch_loss), xmin=0)
plt.ylim(ymax=max(sum_train_epoch_loss), ymin=0)
# draw the two loss curves and set the x and y axis labels

x1 = [i for i in range(0, len(sum_train_epoch_loss), 1)]  # x: the epoch indices of the training curve
y1 = sum_train_epoch_loss  # y: the training loss of each epoch

x2 = [i for i in range(0, len(sum_test_epoch_loss), 1)]
y2 = sum_test_epoch_loss

colors1 = '#00CED4'  # point color
colors2 = '#DC143C'
area = np.pi * 4 ** 1  # point area
# scatter the two loss curves
plt.scatter(x1, y1, s=area, c=colors1, alpha=0.4, label='train_loss')
plt.scatter(x2, y2, s=area, c=colors2, alpha=0.4, label='val_loss')
plt.plot([0, 9.5], [9.5, 0], linewidth=0.5, color='#000000')  # thin black reference line
plt.legend()
plt.savefig(r'C:\Users\jichao\Desktop\大论文\12345svm.png', dpi=300)
plt.show()

import sklearn
from sklearn.metrics import accuracy_score
Load the model:
model.load_state_dict(torch.load('best_model.pth').cpu().state_dict())
model.eval()
test_pred = []
test_true = []

with torch.no_grad():
    optimizer.zero_grad()
    for step, (test_x, test_y) in enumerate(test_loader):
        y_pre = model(test_x, batch_size).cpu()
        y_pre = torch.argmax(y_pre, dim=1)
        for i in y_pre:
            test_pred.append(i)
        for i in test_y:
            test_true.append(i)

Acc = accuracy_score(test_true, test_pred)  # accuracy_score(y_true, y_pred)
print(Acc)
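The imports at the top also pull in f1_score, which the post never uses; a macro-averaged F1 over the same predictions would be (my addition):

F1 = f1_score(test_true, test_pred, average='macro')
print(F1)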


Original: https://blog.csdn.net/qq_38735017/article/details/126469631
Author: 甜辣uu
Title: pytorch 写模型 tensor 常用的操作
