1. Introduction
- This is a runnable Transformer code block, debugged only against a set of randomly generated data; for real use you need to wrap your own data.
- Masking (an upper-triangular matrix) is applied in the Encoder/Decoder, which suits streaming mode (e.g. speech recognition).
- The mask does not use chunk mode, so the model cannot peek at any later input; recognition performance is therefore probably not great (you would need to tune a chunk mask yourself; a sketch is included after get_attn_subsequent_mask below).
- The loss function is KL divergence (nn.KLDivLoss) with label smoothing.
2. Network structure
- Transformer
  - Encoder
    - MultiHeadAttention
    - position-embedding
    - mask-matrix
  - Decoder
    - MultiHeadAttention (masked)
    - position-embedding
    - mask-matrix
  - KL_LOSS
3. Source code
The complete source is shown below.
"""
By fangfuping
2022/7/30
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import math
import logging
## get_attn_subsequent_mask: subsequent-position (upper-triangular) mask for the decoder
def get_attn_subsequent_mask(seq):
"""
seq: [batch_size, tgt_len]
"""
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
# attn_shape: [batch_size, tgt_len, tgt_len]
    subsequence_mask = np.triu(np.ones(attn_shape), k=1) # upper-triangular matrix: ones above the main diagonal
    subsequence_mask = torch.from_numpy(subsequence_mask).bool() # bool mask, as required by masked_fill_
    return subsequence_mask # [batch_size, tgt_len, tgt_len], True = position to mask
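## Hypothetical addition (not in the original post): a chunk-style mask for the streaming
## use case mentioned in the introduction. Each position may attend up to the end of its
## own chunk, allowing limited look-ahead instead of strict left-to-right masking.
def get_chunk_mask(seq_len, chunk_size):
    idx = torch.arange(seq_len)
    chunk_end = (idx // chunk_size + 1) * chunk_size # first invisible position for each query
    mask = idx.unsqueeze(0) >= chunk_end.unsqueeze(1) # [seq_len, seq_len], True = masked
    return mask # expand to [batch_size, seq_len, seq_len] before passing to the attention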
## 7. ScaledDotProductAttention
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, Q, K, V):
        ## Input shapes: Q: [batch_size x n_heads x len_q x d_k], K: [batch_size x n_heads x len_k x d_k], V: [batch_size x n_heads x len_k x d_v]
        ## The matmul below yields scores = Q·K^T / sqrt(d_k) with shape [batch_size x n_heads x len_q x len_k]
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        ## No attn_mask in this variant; see ScaledDotProductAttentionMask (12.) for the masked version used in the decoder
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn, V)
return context, attn
## 6. MultiHeadAttention
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, d_k, d_v, n_heads):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.n_heads = n_heads
        ## For self-attention the incoming Q, K, V are the same tensor; the Linear layers below provide the projection matrices W_Q, W_K, W_V
self.W_Q = nn.Linear(d_model, d_k * n_heads)
self.W_K = nn.Linear(d_model, d_k * n_heads)
self.W_V = nn.Linear(d_model, d_v * n_heads)
self.linear = nn.Linear(n_heads * d_v, d_model)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, Q, K, V):
        ## Multi-head attention in steps: project and split into heads, compute the attention scores, then the attention values.
        ## Input shapes: Q: [batch_size x len_q x d_model], K: [batch_size x len_k x d_model], V: [batch_size x len_k x d_model]
        residual, batch_size = Q, Q.size(0)
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        ## Project first, then split into heads; note that q and k must end up with the same per-head dimension, hence d_k for both.
        q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2) # q_s: [batch_size x n_heads x len_q x d_k]
        k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2) # k_s: [batch_size x n_heads x len_k x d_k]
        v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1,2) # v_s: [batch_size x n_heads x len_k x d_v]
        ## This variant takes no attn_mask (no pad mask is needed here); see MultiHeadAttentionMask (11.) for the masked one.
        ## ScaledDotProductAttention (see 7.) returns two results:
        ## context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q x len_k]
context, attn = ScaledDotProductAttention(self.d_k)(q_s, k_s, v_s)
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v) # context: [batch_size x len_q x n_heads * d_v]
output = self.linear(context)
return self.layer_norm(output + residual), attn # output: [batch_size x len_q x d_model]
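## Illustrative shape check for MultiHeadAttention (an addition, not from the original post):
def _demo_multi_head_attention():
    mha = MultiHeadAttention(d_model=512, d_k=64, d_v=64, n_heads=8)
    x = torch.randn(2, 10, 512) # [batch, seq_len, d_model]
    out, attn = mha(x, x, x)
    assert out.shape == (2, 10, 512) # [batch, len_q, d_model]
    assert attn.shape == (2, 8, 10, 10) # [batch, n_heads, len_q, len_k]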
## 8. PoswiseFeedForwardNet
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, d_model, d_ff):
super(PoswiseFeedForwardNet, self).__init__()
self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inputs):
residual = inputs # inputs : [batch_size, len_q, d_model]
output = nn.ReLU()(self.conv1(inputs.transpose(1, 2)))
output = self.conv2(output).transpose(1, 2)
return self.layer_norm(output + residual)
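## Note: Conv1d with kernel_size=1 over [batch, d_model, len] is exactly a position-wise
## Linear over [batch, len, d_model]; an equivalent formulation (identical behavior, up to
## parameter layout) would be:
##   self.ffn = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model))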
## 4. get_attn_pad_mask
## The [len_input x len_input] mask expresses, for each token, its interaction with every token including itself
## Unused here, because the toy inputs contain no zero padding
def get_attn_pad_mask(seq_q, seq_k):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
# eq(zero) is PAD token
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # batch_size x 1 x len_k, one is masking
return pad_attn_mask.expand(batch_size, len_q, len_k) # batch_size x len_q x len_k
## 3. PositionalEncoding implementation
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
        ## The positional encoding just transcribes the fixed sinusoidal formula (spelled out after this class)
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term) ## pe[:, 0::2] slices every second column starting from 0, i.e. the even dimensions
        pe[:, 1::2] = torch.cos(position * div_term) ## pe[:, 1::2] slices every second column starting from 1, i.e. the odd dimensions
        ## After the two lines above, pe has shape [max_len, d_model]
        ## After the line below, pe has shape [max_len, 1, d_model]
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe) ## a buffer is saved/moved with the model but never updated by the optimizer
def forward(self, x):
"""
x: [seq_len, batch_size, d_model]
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
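## For reference, the sinusoidal formula implemented above ("Attention Is All You Need"):
##   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
##   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
## div_term computes 10000^(-2i/d_model) in log space for numerical stability.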
## 5. EncoderLayer: two parts, multi-head self-attention and the position-wise feed-forward network
class EncoderLayer(nn.Module):
def __init__(self,d_model,d_k,d_v,n_heads,d_ff):
super(EncoderLayer, self).__init__()
self.enc_self_attn = MultiHeadAttention(d_model,d_k,d_v,n_heads)
self.pos_ffn = PoswiseFeedForwardNet(d_model,d_ff)
def forward(self, enc_inputs):
        ## Self-attention sub-layer: enc_inputs has shape [batch_size x seq_len_q x d_model]; the initial Q, K, V all equal this input (see MultiHeadAttention, 6.)
enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs) # enc_inputs to same Q,K,V
enc_outputs = self.pos_ffn(enc_outputs) # enc_outputs: [batch_size x len_q x d_model]
return enc_outputs, attn
## 2. Encoder: three parts: the input projection/embedding, positional encoding, and the stacked attention + feed-forward layers
class Encoder(nn.Module):
def __init__(self, src_vocab_size,d_model,n_layers,d_k,d_v,n_heads,d_ff):
super(Encoder, self).__init__()
        #self.src_emb = nn.Embedding(src_vocab_size, 80) ## would define a lookup matrix of size src_vocab_size x d_model; unused, because the encoder input here is continuous features
        self.linear = nn.Linear(src_vocab_size,d_model) ## src_vocab_size doubles as the input feature dimension (e.g. 40-dim fbank)
        self.pos_emb = PositionalEncoding(d_model) ## fixed sinusoidal positional encoding; a learnable nn.Embedding-style positional encoding would also work
        self.layers = nn.ModuleList([EncoderLayer(d_model,d_k,d_v,n_heads,d_ff) for _ in range(n_layers)]) ## stack n_layers encoder layers; the stacked layers need no embedding or positional encoding, so those are kept outside
def forward(self, enc_inputs):
enc_outputs = self.linear(enc_inputs)
        ## Positional encoding: the function adds the position signal to the projected input (see 3. for the implementation)
        enc_outputs = self.pos_emb(enc_outputs.transpose(0, 1)).transpose(0, 1)
        ## get_attn_pad_mask would mark pad positions so self-attention and cross-attention can ignore them (see 4.); unused, since nothing is padded here
        #enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs)
enc_self_attns = []
for layer in self.layers:
            ## see EncoderLayer, 5.
enc_outputs, enc_self_attn = layer(enc_outputs)
enc_self_attns.append(enc_self_attn)
return enc_outputs, enc_self_attns
## 12. ScaledDotProductAttentionMask
class ScaledDotProductAttentionMask(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttentionMask, self).__init__()
self.d_k = d_k
def forward(self, Q, K, V, attn_mask):
        ## Input shapes: Q: [batch_size x n_heads x len_q x d_k], K: [batch_size x n_heads x len_k x d_k], V: [batch_size x n_heads x len_k x d_v]
        ## The matmul below yields scores = Q·K^T / sqrt(d_k) with shape [batch_size x n_heads x len_q x len_k]
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        ## Key step: masked positions are filled with a very large negative value, so after the
        ## softmax they get (almost) zero weight and have no effect on the query token
        scores.masked_fill_(attn_mask, -1e9) # fills elements of scores where the mask is True
attn = nn.Softmax(dim=-1)(scores)
context = torch.matmul(attn, V)
return context, attn
## 11. MultiHeadAttentionMask
class MultiHeadAttentionMask(nn.Module):
def __init__(self, d_model, d_k, d_v, n_heads):
super(MultiHeadAttentionMask, self).__init__()
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.n_heads = n_heads
        ## For self-attention the incoming Q, K, V are the same tensor; the Linear layers below provide the projection matrices W_Q, W_K, W_V
self.W_Q = nn.Linear(d_model, d_k * n_heads)
self.W_K = nn.Linear(d_model, d_k * n_heads)
self.W_V = nn.Linear(d_model, d_v * n_heads)
self.linear = nn.Linear(n_heads * d_v, d_model)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, Q, K, V, attn_mask):
        ## Same steps as MultiHeadAttention: project and split into heads, compute the scores, then the values.
        ## Input shapes: Q: [batch_size x len_q x d_model], K: [batch_size x len_k x d_model], V: [batch_size x len_k x d_model]
        residual, batch_size = Q, Q.size(0)
        ## Project first, then split into heads; q and k share the per-head dimension d_k
q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2) # q_s: [batch_size x n_heads x len_q x d_k]
k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2) # k_s: [batch_size x n_heads x len_k x d_k]
v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1,2) # v_s: [batch_size x n_heads x len_k x d_v]
        ## attn_mask arrives as [batch_size x len_q x len_k]; the line below repeats it per head, giving [batch_size x n_heads x len_q x len_k]
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
## context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q x len_k]
context, attn = ScaledDotProductAttentionMask(self.d_k)(q_s, k_s, v_s, attn_mask)
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v) # context: [batch_size x len_q x n_heads * d_v]
output = self.linear(context)
return self.layer_norm(output + residual), attn # output: [batch_size x len_q x d_model]
## 10. DecoderLayer
class DecoderLayer(nn.Module):
def __init__(self,d_model,d_k,d_v,n_heads,d_ff):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttentionMask(d_model,d_k,d_v,n_heads)
self.dec_enc_attn = MultiHeadAttention(d_model,d_k,d_v,n_heads)
self.pos_ffn = PoswiseFeedForwardNet(d_model,d_ff)
def forward(self, dec_inputs, enc_outputs, mask):
dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, mask)
dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs)
dec_outputs = self.pos_ffn(dec_outputs)
return dec_outputs, dec_self_attn, dec_enc_attn
## 9. Decoder
class Decoder(nn.Module):
def __init__(self, tgt_vocab_size, d_model, n_layers,d_k,d_v,n_heads,d_ff):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_vocab_size, d_model)
self.pos_emb = PositionalEncoding(d_model,0.1,5000)
self.layers = nn.ModuleList([DecoderLayer(d_model,d_k,d_v,n_heads,d_ff) for _ in range(n_layers)])
def forward(self, dec_inputs, enc_inputs, enc_outputs): # dec_inputs : [batch_size x target_len]
dec_outputs = self.tgt_emb(dec_inputs) # [batch_size, tgt_len, d_model]
dec_outputs = self.pos_emb(dec_outputs.transpose(0, 1)).transpose(0, 1) # [batch_size, tgt_len, d_model]
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs)
dec_self_attns, dec_enc_attns = [], []
for layer in self.layers:
dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs,dec_self_attn_subsequent_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_outputs, dec_self_attns, dec_enc_attns
## 1. The overall network has three parts: the encoder, the decoder, and the output projection
class Transformer(nn.Module):
def __init__(self, src_vocab_size,tgt_vocab_size, d_model, n_layers,d_k,d_v,n_heads,d_ff):
super(Transformer, self).__init__()
self.d_model = d_model
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
        self.encoder = Encoder(src_vocab_size,d_model,n_layers,d_k,d_v,n_heads,d_ff) ## encoder
        self.decoder = Decoder(tgt_vocab_size, d_model, n_layers,d_k,d_v,n_heads,d_ff) ## decoder
        self.criterion = LOSS(tgt_vocab_size)
        self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False) ## output layer: d_model is the per-token output size of the decoder, projected to tgt_vocab_size for the softmax
def forward(self, enc_inputs, dec_inputs):
        ## Two inputs: enc_inputs, shape [batch_size, src_len, feat_dim] here (continuous features), for the encoder side;
        ## and dec_inputs, shape [batch_size, tgt_len], for the decoder side
        ## The encoder decides internally what it returns: all token outputs, particular layers' outputs, or intermediate values
        ## enc_outputs is the main output; enc_self_attns are the softmax(Q·K^T) matrices, i.e. each token's relevance to every other token
        enc_outputs, enc_self_attns = self.encoder(enc_inputs)
        ## dec_outputs is the decoder's main output, used for the linear projection below; dec_self_attns is the decoder-side
        ## analogue of enc_self_attns; dec_enc_attns relates each decoder token to every encoder token
        dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs)
        ## project dec_outputs to vocabulary size
        dec_logits = self.projection(dec_outputs) # dec_logits: [batch_size, tgt_len, tgt_vocab_size]
        dec_logits = dec_logits.view(-1,dec_logits.size(-1))
        ## NOTE: for a real task the target should be dec_inputs shifted by one position; this toy demo reuses dec_inputs directly
        loss1 = self.criterion(dec_logits,dec_inputs)
        return loss1
## 13. KLDivLoss with label smoothing
class LOSS(nn.Module):
def __init__(self, tgt_vocab_size):
super(LOSS, self).__init__()
        self.criterion = nn.KLDivLoss(reduction='batchmean') # 'batchmean' matches the mathematical definition of KL divergence
        self.tgt_vocab_size = tgt_vocab_size
    def forward(self, outputs, target):
        outputs = outputs.view(-1,self.tgt_vocab_size) # [batch_size * tgt_len, tgt_vocab_size] logits
        target = target.view(-1) # [batch_size * tgt_len]
        ## label-smoothed target distribution: 1 - 0.01 on the gold token, 0.01 spread over the remaining classes
        true_dist = torch.zeros_like(outputs)
        true_dist.fill_(0.01 / (self.tgt_vocab_size - 1))
        true_dist.scatter_(1, target.unsqueeze(1), 1 - 0.01)
        ## KLDivLoss expects log-probabilities: log_softmax over the vocabulary dimension is the
        ## numerically stable replacement for the original softmax(dim=0) followed by log()
        log_probs = torch.log_softmax(outputs, dim=-1)
        return self.criterion(log_probs, true_dist)
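## For reference (assumption: PyTorch >= 1.10), a comparable and simpler alternative is the
## built-in label smoothing of cross-entropy, applied directly to the raw logits:
##   self.criterion = nn.CrossEntropyLoss(label_smoothing=0.01)
##   return self.criterion(outputs, target) # outputs: [N, tgt_vocab_size] logits, target: [N]
## (The smoothing mass is distributed slightly differently, but the effect is similar.)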
if __name__ == '__main__':
#src_vocab_size = 80 # fbank
#tgt_vocab_size = 5000 # bpe
src_vocab_size=40
tgt_vocab_size=1000
d_model=512
n_layers=6
d_k=64
d_v=64
n_heads=8
d_ff=2048
model = Transformer(src_vocab_size,tgt_vocab_size, d_model, n_layers,d_k,d_v,n_heads,d_ff)
optimizer = optim.Adam(model.parameters(), lr=0.001)
    enc_inputs = torch.ones((1,3,40),dtype=torch.float32) # dummy acoustic features: [batch, frames, feat_dim]
    dec_inputs = torch.ones((1,50),dtype=torch.int64) # dummy token ids: [batch, tgt_len]
for epoch in range(20):
optimizer.zero_grad()
loss = model(enc_inputs, dec_inputs)
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss.item()))
loss.backward()
optimizer.step()
    logging.info('final loss: %.6f', loss.item()) # needs logging.basicConfig(level=logging.INFO) to actually print
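## Hypothetical inference sketch (not in the original post): greedy decoding with the modules
## above. Assumes token id 0 acts as the start symbol; adapt bos_id and end-of-sequence
## handling to your own vocabulary.
def greedy_decode(model, enc_inputs, max_len=50, bos_id=0):
    model.eval()
    with torch.no_grad():
        enc_outputs, _ = model.encoder(enc_inputs)
        dec_inputs = torch.full((enc_inputs.size(0), 1), bos_id, dtype=torch.int64)
        for _ in range(max_len):
            dec_outputs, _, _ = model.decoder(dec_inputs, enc_inputs, enc_outputs)
            logits = model.projection(dec_outputs) # [batch, cur_len, tgt_vocab_size]
            next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
            dec_inputs = torch.cat([dec_inputs, next_token], dim=1)
    return dec_inputs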
Original: https://blog.csdn.net/qq_37258753/article/details/126329654
Author: 方付平
Title: Transformer code: suitable for speech recognition (streaming mode)