2023.08.29 全結合型NNのサンプル(関数版)【torch】
2023.6.18 全結合型NNのサンプル【torch】を関数化したもの。
code:20230829NN_FC.py
import torch
import torch.nn as nn
import torch.nn.functional as f
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
import torch.optim
import matplotlib.pyplot as plt
class Net(nn.Module):
    """Two-layer fully connected classifier: Linear -> ReLU -> Linear -> Softmax.

    Softmax(dim=1) normalizes each row of the output into class probabilities.
    """

    def __init__(self, n_input, n_hidden, n_output):
        super().__init__()
        # Kept as named attributes so state_dict keys stay stable (fc1/fc2).
        self.fc1 = nn.Linear(n_input, n_hidden)
        self.fc2 = nn.Linear(n_hidden, n_output)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Same pipeline as before, written as two composed expressions.
        hidden = self.relu(self.fc1(x))
        return self.softmax(self.fc2(hidden))
def get_data(test_size, batch_size, random_state):
    """Load the Iris dataset and split it into a training DataLoader and a test set.

    Parameters:
        test_size: fraction of samples held out for testing (train_test_split).
        batch_size: mini-batch size of the training DataLoader.
        random_state: seed for the split, for reproducibility.

    Returns:
        (train_loader, X_test, y_test): the loader yields (features, one-hot
        label) batches; y_test keeps integer class labels for accuracy checks.
    """
    print('Dataset: Iris')
    print('test_size', test_size, 'batch size =', batch_size, 'random_state', random_state)
    dataset = load_iris()  # Bunch (dict-like) with 'data' and 'target'
    # BUG FIX: the Bunch must be indexed with brackets; `dataset'data'` was a
    # syntax error (brackets lost in transcription).
    X = torch.tensor(dataset['data']).float()
    y = torch.tensor(dataset['target'])
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state
    )
    # Training labels are one-hot encoded because the model trains with MSELoss.
    y_train_onehot = f.one_hot(y_train, num_classes=3).float()
    # y_test stays as class indices; test() compares argmax predictions to it.
    train_dataset = torch.utils.data.TensorDataset(X_train, y_train_onehot)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=0,
    )
    return train_loader, X_test, y_test
def create_model_opt_loss(n_input, n_hidden, n_output, lr):
    """Build the network together with its SGD optimizer and MSE loss function.

    Returns the triple (model, optimizer, loss_func).
    """
    print('input =', n_input, ', hidden =', n_hidden, ', output =', n_output)
    net = Net(n_input=n_input, n_hidden=n_hidden, n_output=n_output)
    sgd = torch.optim.SGD(net.parameters(), lr=lr)
    mse = nn.MSELoss()
    return net, sgd, mse
def train(model, train_loader, optimizer, epochs, loss_func=None):
    """Run mini-batch training and return the per-batch loss history.

    Parameters:
        model: network to train (parameters are updated in place).
        train_loader: DataLoader yielding (inputs, one-hot targets) batches.
        optimizer: optimizer bound to model.parameters().
        epochs: number of passes over train_loader.
        loss_func: loss callable; defaults to nn.MSELoss(). (BUG FIX: the
            original silently read a module-level global `loss_func`, which
            broke when train() was used on its own; the default preserves the
            original script's behavior.)

    Returns:
        list of float loss values, one entry per processed batch.
        The trained parameters live in `model`; nothing else needs returning.
    """
    if loss_func is None:
        loss_func = nn.MSELoss()
    model.train()  # training mode: build the autograd graph
    loss_history = []
    # Print ~10 progress lines; guard against epochs < 10 (ZeroDivisionError).
    report_every = max(1, epochs // 10)
    for epoch in range(1, epochs + 1):
        for X, y in train_loader:
            optimizer.zero_grad()
            y_pred = model(X)
            # Conventional (prediction, target) order; MSE is symmetric, so the
            # numeric value matches the original (target, prediction) call.
            loss = loss_func(y_pred, y)
            loss_history.append(loss.item())
            loss.backward()
            optimizer.step()
        if epoch % report_every == 0:
            print('progress =', int(epoch / epochs * 100), '%, EPOCH =', epoch,
                  ', loss ={:.3e}'.format(loss.item()))
    return loss_history
# 結果として得られたパラメータは関数外のmodelオブジェクトに保持されているので返す必要なし
def test(model, X, y):
model.eval() # 検証モードに設定
# 検証モードで推論
y_predict = model.forward(X)
# 推論結果の最大値のインデックス取得
y_predict_max_index = y_predict.max(dim=1)1
# 正解であればTrueなbool型tensor
y_predict_compare = y_predict_max_index == y
# Trueの数をカウントして全要素数で割る
y_predict_accuracy = y_predict_compare.sum() / len(y_predict_compare)
# 結果はここで画面表示、必要であればreturnするべし
print('accuracy: ', y_predict_accuracy.item())
def plot(loss):
    """Draw the loss history with a grid and legend, then show the figure."""
    fig, ax = plt.subplots()
    ax.plot(loss, label='loss function')
    ax.grid()
    ax.legend()
    plt.show()
############################################
if __name__ == '__main__':
    # Data: a DataLoader of training batches plus a held-out test split.
    train_loader, X_test, y_test = get_data(test_size=0.25, batch_size=10, random_state=42)

    # Model, optimizer and loss function.
    # NOTE: `loss_func` must stay at module scope — train() reads it as a global.
    model, optimizer, loss_func = create_model_opt_loss(n_input=4, n_hidden=10, n_output=3, lr=0.01)

    # Train and collect the per-batch loss history.
    LOSS = train(model=model, train_loader=train_loader, optimizer=optimizer, epochs=10000)

    # Evaluate on the held-out split, then plot the loss curve.
    test(model, X_test, y_test)
    plot(LOSS)
    # torch.save(model, 'result.pth')