PyTorch Frozen Training (Part 2)

2024-11-15 09:17:46
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

np.random.seed(0)
torch.manual_seed(0)
x = torch.randn((3, 8))
label = torch.randint(0, 5, [3]).long()
epoch_N = 10


# Define a simple network.
class MyNet(nn.Module):
    def __init__(self, num_class=5):
        super(MyNet, self).__init__()
        # bias=False here; with bias=True the biases would be randomly initialized,
        # which would make the final fc weights differ between demo_01 and demo_02.
        self.fc1 = nn.Linear(8, 4, bias=False)
        self.fc1.weight = nn.Parameter(torch.ones((4, 8), dtype=torch.float32))
        self.fc2 = nn.Linear(4, num_class, bias=False)
        self.fc2.weight = nn.Parameter(torch.ones((num_class, 4), dtype=torch.float32))

    def forward(self, x):
        return self.fc2(self.fc1(x))


def demo_01():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=1e-2)  # pass in all parameters

    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Model parameters after training
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)


def demo_02():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    # Define two optimizers, each optimizing a different layer's parameters.
    optimizer1 = optim.SGD(model.fc1.parameters(), lr=1e-2)
    optimizer2 = optim.SGD(model.fc2.parameters(), lr=1e-2)
    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer1.zero_grad()
        optimizer2.zero_grad()
        loss.backward()
        optimizer2.step()
        optimizer1.step()
    # Model parameters after training
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)


if __name__ == "__main__":
    demo_01()
    demo_02()

'''
model.fc1.weight Parameter containing:
tensor([[1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997]],
       requires_grad=True)
model.fc2.weight Parameter containing:
tensor([[0.9986, 0.9986, 0.9986, 0.9986],
        [1.0317, 1.0317, 1.0317, 1.0317],
        [0.9986, 0.9986, 0.9986, 0.9986],
        [0.9726, 0.9726, 0.9726, 0.9726],
        [0.9986, 0.9986, 0.9986, 0.9986]], requires_grad=True)
model.fc1.weight Parameter containing:
tensor([[1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997]],
       requires_grad=True)
model.fc2.weight Parameter containing:
tensor([[0.9986, 0.9986, 0.9986, 0.9986],
        [1.0317, 1.0317, 1.0317, 1.0317],
        [0.9986, 0.9986, 0.9986, 0.9986],
        [0.9726, 0.9726, 0.9726, 0.9726],
        [0.9986, 0.9986, 0.9986, 0.9986]], requires_grad=True)

Conclusion: one optimizer over all parameters and two optimizers over different layers produce exactly the same weight updates. This is expected: SGD updates each parameter independently using only that parameter's gradient, so how the parameters are grouped across optimizers does not change the result.
'''
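The two-optimizer setup in demo_02 is what makes freezing convenient: to freeze fc1, simply stop calling optimizer1.step(). An equivalent and more common approach is to disable gradients for the frozen layer and hand the optimizer only the trainable parameters. Below is a minimal sketch of that approach (demo_03 is a hypothetical name, not from the original article; it reuses MyNet, x, label, and epoch_N defined above):

def demo_03():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    # Freeze fc1: no gradient is computed or stored for this parameter.
    model.fc1.weight.requires_grad_(False)
    # Hand the optimizer only the parameters that still require gradients.
    optimizer = optim.SGD(
        (p for p in model.parameters() if p.requires_grad), lr=1e-2
    )
    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # fc1.weight stays all ones; only fc2.weight changes.
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)

Filtering on p.requires_grad when building the optimizer is not strictly required here, but it keeps frozen parameters out of the optimizer state, which also avoids unintended updates from optimizers with per-parameter state such as momentum or weight decay.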