Python Notes


Type Conversion

  1. Casting a NumPy array with .astype()
x_train = x_train[:50000].astype(float)
  2. Casting a tensor
tensor = torch.randn(2, 2)
float_tensor = tensor.float()
  3. tensor → array
x_train = tensor1.detach().numpy()
x_train = np.array(tensor1)
  4. array → tensor
tensor1 = torch.from_numpy(x_train)
tensor1 = torch.FloatTensor(x_train)

NumPy Section

  1. np.arange() returns an evenly spaced sequence
x1 = np.arange(10)
x2 = np.arange(5,10)
x3 = np.arange(5,10,2)

x1 = [0 1 2 3 4 5 6 7 8 9]
x2 = [5 6 7 8 9]
x3 = [5 7 9]
  2. np.vstack() stacks arrays vertically (row by row) into a new array
x = np.array([[1,2,3]])
y = np.array([[4,5,6]])
print(np.shape(x),np.shape(y))
z = np.vstack((x,y))
print(np.shape(z))
print(z)

(1, 3) (1, 3)
(2, 3)
[[1 2 3]
 [4 5 6]]
  3. np.concatenate() joins arrays along an existing axis
import numpy as np

x = np.random.normal(1,1,(3,4))
y = np.random.normal(1,1,(5,4))

print(x)
print(x.shape)
print(y)
print(y.shape)

con = np.concatenate([x,y],axis=0)
print(con)
print(con.shape)

[[ 2.27797734  1.93682907  1.71275883  1.4273014 ]
 [ 0.43591098  1.18648342 -0.86484436  1.09550423]
 [ 0.20651792  0.72164363  3.17146439  2.09230831]]
(3, 4)
[[ 1.11623897 -0.30188719  2.72808548  1.41957035]
 [-0.74015543  0.63804769  0.60298067  0.2233076 ]
 [ 1.38327269  0.9440501   1.74518678  1.97365391]
 [ 1.36026607  1.08024697  0.74210636  1.39705105]
 [ 2.5518915   0.28423631 -1.15274472  0.96521751]]
(5, 4)
[[ 2.27797734  1.93682907  1.71275883  1.4273014 ]
 [ 0.43591098  1.18648342 -0.86484436  1.09550423]
 [ 0.20651792  0.72164363  3.17146439  2.09230831]
 [ 1.11623897 -0.30188719  2.72808548  1.41957035]
 [-0.74015543  0.63804769  0.60298067  0.2233076 ]
 [ 1.38327269  0.9440501   1.74518678  1.97365391]
 [ 1.36026607  1.08024697  0.74210636  1.39705105]
 [ 2.5518915   0.28423631 -1.15274472  0.96521751]]
(8, 4)
  4. np.where() conditional selection
    Form: np.where(condition, x, y)
    Behavior: where the condition holds, the result takes the element from x; otherwise it takes the element from y
    Form: np.where(condition)
    Behavior: returns the coordinates of every element that satisfies the condition, packed into a tuple
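A minimal illustration of both forms (the array here is made up for the example):

import numpy as np

a = np.array([1, 5, 3, 8])
print(np.where(a > 3, a, 0))
print(np.where(a > 3))

[0 5 0 8]
(array([1, 3]),)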
  5. What X[:,0] and X[0,:] mean
import numpy as np
x=np.arange(15).reshape(3,5)
print(x)
print(x[0,:])
print(x[:,0])

[[ 0  1  2  3  4]
 [ 5  6  7  8  9]
 [10 11 12 13 14]]
[0 1 2 3 4]
[ 0  5 10]

PyTorch Section

  1. torch.zeros_like() and torch.zeros()
    Form: torch.zeros_like(tensor)
    Behavior: returns an all-zero tensor with the same shape as the given tensor
clipped_grads = {name: torch.zeros_like(param) for name, param in self.model.named_parameters()}

    Form: torch.zeros(size)
    Behavior: returns a tensor filled with the scalar value 0, with the shape given by size
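For example:

torch.zeros(2, 3)

tensor([[0., 0., 0.],
        [0., 0., 0.]])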
  2. torch.normal()
    Form: torch.normal(mean, std, *, generator=None, out=None)
    Behavior: returns a tensor of random numbers drawn from separate normal distributions with the given means and standard deviations.
    Note: these are not standard normal distributions but the independent normal distributions specified by the arguments.

torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))

Adding and removing tensor dimensions

tensor.squeeze(): with no argument, squeeze() removes every dimension of size 1; only size-1 dimensions are removed. squeeze(dim) removes just the given dimension, and only if it has size 1.

x = torch.arange(1, 19).reshape(1, 2, 1, 9)
print(x.size())
x = x.squeeze(0)
print(x.size())

torch.Size([1, 2, 1, 9])
torch.Size([2, 1, 9])
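For adding a dimension, the counterpart is tensor.unsqueeze(dim), which inserts a new dimension of size 1 at the given position:

x = torch.arange(1, 19).reshape(2, 9)
print(x.size())
x = x.unsqueeze(0)
print(x.size())

torch.Size([2, 9])
torch.Size([1, 2, 9])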

Tensor concatenation and splitting

Concatenation: cat, stack
Splitting: split, chunk

| Function | Purpose | Difference |
| --- | --- | --- |
| cat | concatenate | the number of dimensions stays the same |
| stack | concatenate | the number of dimensions increases by one |
| split | split | split into specified sizes |
| chunk | split | split into equal parts |
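A shape-only illustration of the four functions (the tensors are made up for this example):

import torch

a = torch.randn(2, 3)
b = torch.randn(2, 3)
print(torch.cat((a, b), dim=0).shape)    # torch.Size([4, 3]) -- number of dimensions unchanged
print(torch.stack((a, b), dim=0).shape)  # torch.Size([2, 2, 3]) -- one new dimension

c = torch.randn(6, 3)
print([t.shape for t in torch.split(c, [2, 4], dim=0)])  # sizes 2 and 4 along dim 0, as specified
print([t.shape for t in torch.chunk(c, 3, dim=0)])       # three equal chunks of size 2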

Gaussian Noise

Sensitivity: taking a norm

torch.norm()

torch.norm(grad, p=2)
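To get one number covering all parameters, a common pattern (a sketch, assuming model is an nn.Module whose gradients are already populated) is a global L2 norm over the per-tensor norms:

global_norm = torch.norm(
    torch.stack([torch.norm(param.grad, p=2)
                 for param in model.parameters() if param.grad is not None]),
    p=2,
)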

Generating Gaussian noise
$\sigma = \sqrt{2\log\frac{1.25}{\delta}}\,/\,\varepsilon$ (the implementation below additionally scales by the sensitivity s)

def gaussian_noise(grad, s, epsilon, delta):
    """
    Generate Gaussian noise and perturb the gradient tensor.
    s is the sensitivity; (epsilon, delta) are the privacy parameters.
    """
    c = np.sqrt(2 * np.log(1.25 / delta))
    sigma = c * s / epsilon
    noise = torch.normal(0, sigma, grad.shape)
    return grad + noise
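A minimal usage sketch (the sensitivity and privacy parameters are illustrative values, not from the original notes):

noisy = gaussian_noise(torch.ones(3, 3), s=1.0, epsilon=1.0, delta=1e-5)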

FedAVG – add Gaussian noise to model parameters

self.clients[idx].update()

# sensitivity of the local update after E local epochs (the author's bound)
sensitivity = 2 * self.lr * self.clip / self.data_size + (self.E - 1) * 2 * self.lr * self.clip
new_param = copy.deepcopy(self.model.state_dict())
for name in new_param:
    # start from zeros, copy in the current parameters, then add calibrated noise
    new_param[name] = torch.zeros(new_param[name].shape).to(self.device)
    new_param[name] += 1.0 * self.model.state_dict()[name]
    new_param[name] += gaussian_noise_ls(self.model.state_dict()[name].shape, sensitivity,
                                         self.sigma, device=self.device)
self.model.load_state_dict(copy.deepcopy(new_param))
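gaussian_noise_ls is not defined in these notes; judging from the call above, it samples noise of a given shape using a pre-computed noise multiplier. A plausible sketch (an assumption, not the author's code):

def gaussian_noise_ls(shape, sensitivity, sigma, device="cpu"):
    # noise scale = sigma * sensitivity; shape matches the parameter tensor being perturbed
    return torch.normal(0, sigma * sensitivity, shape).to(device)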

Retrieving model information

Viewing model parameters

supervise_model = self.model.state_dict()

Viewing model parameters and gradients; creating an all-zero variable matching the parameter shapes

grad = {}
params = dict(self.model.named_parameters())
for name in params:
    grad[name] = torch.zeros(params[name].shape).to(self.device)

Modifying model gradients

for name, param in self.model.named_parameters():
    # the gaussian_noise helper defined above takes no device argument, so it is dropped here
    param.grad = gaussian_noise(param.grad, 2 * self.lr * self.clip, self.eps, self.delta)

Modifying model parameters (method 1)

params = dict(self.model.named_parameters())
for name, param in self.model.named_parameters():
    # the right-hand side is left blank in the original notes; a plain gradient step is one plausible completion
    params[name] = param - self.lr * param.grad

Modifying model parameters (method 2)

for name in self.global_model.state_dict():
    # in-place subtraction works because state_dict() returns tensors that share storage with the model
    self.global_model.state_dict()[name] -= agg_grad[name]

Create all-zero variables matching the model parameters, accumulate the per-parameter gradients, average them, and assign them back to the model

clipped_grads = {name: torch.zeros_like(param) for name, param in self.model.named_parameters()}
for name, param in self.model.named_parameters():
    clipped_grads[name] += param.grad   # accumulate
for name, param in self.model.named_parameters():
    clipped_grads[name] /= len(idx)     # average over the selected clients
for name, param in self.model.named_parameters():
    param.grad = clipped_grads[name]    # write the averaged gradient back

Viewing per-layer parameter information

fcnet.state_dict()
temp = copy.deepcopy(fcnet.state_dict())  # note the (): deepcopy the dict, not the bound method

Viewing gradient information

fcnet.layer1.weight.grad.detach().numpy()

Computing gradients manually

# model.parameters here is the OrderedDict set in "Assigning model parameters" below, not the usual nn.Module method
grad = torch.autograd.grad(loss, model.parameters.values(), retain_graph=True, create_graph=True, only_inputs=True)

Assigning model parameters


self.model.parameters = OrderedDict(self.model.named_parameters())
self.model.parameters = OrderedDict((name, param - self.lr * grad_part)
                                        for ((name, param), grad_part)
                                        in zip(self.model.parameters.items(), grad))
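Note that this assignment shadows the usual nn.Module.parameters() method with a plain OrderedDict. That is what lets the manual-gradient snippet above call model.parameters.values(), but calling model.parameters() afterwards raises a TypeError because the attribute is no longer a method.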

Gradient clipping

grads = dict(fcnet.named_parameters())
for name in grads:
    grads[name].grad = clip_grad(grads[name].grad, Clip)
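clip_grad and Clip are not defined in these notes; a plausible per-tensor L2 clipping helper (an assumption, not the author's code) is:

def clip_grad(grad, clip):
    # scale the gradient down so that its L2 norm is at most clip
    norm = torch.norm(grad, p=2)
    factor = torch.clamp(clip / (norm + 1e-12), max=1.0)
    return grad * factor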

Batch training process

Data is fed in batches; the gradient is computed once per batch and the model is updated immediately afterwards.

for i, (X, y) in enumerate(train_loader):
    output = fcnet(X)
    loss = loss_func(output, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Thought *

Smooth sensitivity

Thought *

Gradient accumulation to increase the effective batch size

for i, (X, y) in enumerate(train_loader):
    output = fcnet(X)
    loss = loss_func(output, y)

    loss = loss / cumulative_n          # scale so the accumulated gradient matches one large batch

    loss.backward()

    if (i + 1) % cumulative_n == 0:     # update only every cumulative_n mini-batches
        optimizer.step()
        optimizer.zero_grad()

Note that when using this technique, the learning rate should also be scaled up appropriately.

Thought *

Using the exponential mechanism for FedAVG client selection

Thought *

Dual cloud + secret sharing (SS)


Random seeds

Set the NumPy random seed: np.random.seed(0)

np.random.rand()
np.random.choice(x, 3, replace=False) # pick 3 values from x at random each time, without repetition
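A quick check that seeding makes the results reproducible (the same value appears on every run):

np.random.seed(0)
print(np.random.rand())

0.5488135039273248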

Set the PyTorch random seed: torch.manual_seed(0)

torch.rand()

The original post shows the output of torch.rand() after torch.manual_seed(1000) as a screenshot.

Initializing identical model weights and biases across training runs
class FCNet(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(FCNet, self).__init__()
        self.layer1 = nn.Linear(in_dim, n_hidden_1)
        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, out_dim)
        '''The same model weights and biases are initialized in each test of federated learning training'''
        np.random.seed(0)
        weights_scale = 1e-3
        # note: layer1 draws from randn (Gaussian) while layer2/layer3 draw from rand (uniform), as in the original
        self.layer1.weight = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.randn(256, 28 * 28)))
        self.layer2.weight = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.rand(256, 256)))
        self.layer3.weight = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.rand(10, 256)))
        self.layer1.bias = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.randn(256)))
        self.layer2.bias = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.rand(256)))
        self.layer3.bias = torch.nn.Parameter(torch.Tensor(weights_scale * np.random.rand(10)))

    def forward(self, x):
        x = self.layer1(x)
        x = F.relu(x)
        x = self.layer2(x)
        x = F.relu(x)
        x = self.layer3(x)
        return x

Changing the default conda environment


Original: https://blog.csdn.net/xdroc/article/details/124503879
Author: xdroc
Title: Python笔记
