We sample a sequence of points on the function to serve as data.
For example, the following list of points:
| x | y |
|---|---|
| -2.5 | 5.312000023 |
| -2.236842105 | 4.589010204 |
| ... | ... |
| 2.5 | 7.187999977 |
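For reference, the x column matches a uniform grid of 20 points on [-2.5, 2.5]. The underlying function behind the y values is not stated here, so `f` in this minimal sketch is only a hypothetical placeholder for whatever function the table was sampled from:

```python
import numpy as np

def make_data(f, n: int = 20) -> tuple[list, list]:
    # numpy.linspace reproduces the x column of the table above;
    # f stands in for the (unstated) function that produced the y values.
    xs = np.linspace(-2.5, 2.5, n)
    return xs.tolist(), [float(f(x)) for x in xs]

# Example usage with an arbitrary stand-in function:
# X, Y = make_data(lambda x: x ** 2)
```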
Then we build a classic MLP (multilayer perceptron) with the PyTorch library under Python 3.10.0:
import torch
import matplotlib.pyplot as plt

# 20 evenly spaced sample points on [-2.5, 2.5] and their function values.
X = [-2.5, -2.236842105, -1.973684211, -1.710526316, -1.447368421, -1.184210526, -0.921052632, -0.657894737, -0.394736842, -0.131578947, 0.131578947, 0.394736842, 0.657894737, 0.921052632, 1.184210526, 1.447368421, 1.710526316, 1.973684211, 2.236842105, 2.5]
Y = [5.312000023, 4.589010204, 4.249698758, 3.839319217, 3.027100058, 1.801916503, 0.478871519, -0.487107075, -0.770384531, -0.367252262, 0.401878301, 1.08201888, 1.352758045, 1.217804382, 1.002792638, 1.162650635, 2.012481337, 3.541159968, 5.417915003, 7.187999977]
class MLP(torch.nn.Module):
    """A fully connected network: one input layer, three hidden layers, one output layer."""

    def __init__(
        self, *args,
        input_layer: int, hidden_layers: tuple[int, int, int], output_layer: int,
        **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        self.INPUT = torch.nn.Linear(input_layer, hidden_layers[0], bias=True)
        self.FC1 = torch.nn.Linear(hidden_layers[0], hidden_layers[1], bias=True)
        self.FC2 = torch.nn.Linear(hidden_layers[1], hidden_layers[2], bias=True)
        self.OUTPUT = torch.nn.Linear(hidden_layers[2], output_layer, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        y = torch.nn.functional.relu(self.INPUT(x))
        y = torch.nn.functional.relu(self.FC1(y))
        y = torch.sigmoid(self.FC2(y))  # torch.nn.functional.sigmoid is deprecated
        return self.OUTPUT(y)
class Fit:
    def __init__(
        self, X: list, Y: list,
        epoch: int, model: torch.nn.Module,
        optimizer: torch.optim.Optimizer, loss: torch.nn.Module,
        device: torch.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    ) -> None:
        # Shape the data as (N, 1) column tensors and keep them on the same
        # device as the model; otherwise a CUDA run would raise a device mismatch.
        self.tensorX = torch.Tensor(X).unsqueeze(dim=1).to(device)
        self.tensorY = torch.Tensor(Y).unsqueeze(dim=1).to(device)
        self.epoch = epoch
        self.optimizer = optimizer
        self.loss = loss
        self.model = model.to(device)
        self.__Train()

    def __Train(self) -> None:
        plt.ion()  # interactive mode: redraw the figure while training runs
        plt.show()
        for idx in range(1, self.epoch + 1):
            prediction = self.model(self.tensorX)
            MSE = self.loss(prediction, self.tensorY)
            self.optimizer.zero_grad()  # clear stale gradients
            MSE.backward()              # backpropagate the loss
            self.optimizer.step()       # update the weights
            if idx % 5 == 0:
                self.__Show(idx, MSE.item(), prediction)
        plt.ioff()
        plt.show()
    def __Show(self, idx: int, mse: float, prediction: torch.Tensor) -> None:
        plt.cla()
        plt.title(
            label='Progress: %.2f%% -- Loss: %.9f' % (100 * idx / self.epoch, mse),
            fontdict={'family': 'Times New Roman', 'size': 15}
        )
        plt.xticks(fontproperties='Times New Roman', size=12)
        plt.yticks(fontproperties='Times New Roman', size=12)
        plt.scatter(
            self.tensorX.cpu().numpy(),
            self.tensorY.cpu().numpy(),
            marker='*'
        )
        # Plot the current fit; detach() drops the autograd history
        # before the conversion to a NumPy array.
        plt.plot(
            self.tensorX.cpu().numpy().flatten(),
            prediction.detach().cpu().numpy().flatten(),
            'r-.', lw=1.5
        )
        plt.pause(0.025)
if __name__ == '__main__':
    net = MLP(input_layer=1, hidden_layers=(10, 7, 5), output_layer=1)
    # Training starts inside Fit.__init__.
    Fit(
        X=X, Y=Y,
        epoch=2400,
        model=net,
        optimizer=torch.optim.SGD(
            params=net.parameters(),
            lr=1e-2,
            momentum=0.75
        ),
        loss=torch.nn.MSELoss()
    )
Finally, the live animation shows the network fitting the curve epoch by epoch; the MLP tracks the sampled points closely, which demonstrates how capable even a small neural network is at this kind of fitting task.
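To inspect the final fit outside the live animation, the trained `net` can also be evaluated on a denser grid than the training points. A minimal sketch, reusing `X`, `Y`, and `net` from the script above (the `net.cpu()` call only matters when training ran on a GPU):

```python
net = net.cpu()        # bring the weights back to the CPU for plotting
with torch.no_grad():  # inference only: no gradient bookkeeping needed
    grid = torch.linspace(-2.5, 2.5, steps=200).unsqueeze(dim=1)
    curve = net(grid)

plt.scatter(X, Y, marker='*')
plt.plot(grid.numpy().flatten(), curve.numpy().flatten(), 'r-.', lw=1.5)
plt.show()
```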