Feedforward Neural Network (FNN)
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
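# Note (added, not in the original script): training XOR from a random
# initialization can occasionally stall in a poor local minimum; fixing a
# seed, e.g. torch.manual_seed(0), makes runs reproducible.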
# 1. Define the feedforward neural network
class FeedforwardNN(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(FeedforwardNN, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),   # input layer -> hidden layer
            nn.ReLU(),                          # activation
            nn.Linear(hidden_dim, output_dim),  # hidden layer -> output layer
            nn.Sigmoid()                        # output activation (suited to binary classification)
        )

    def forward(self, x):
        return self.fc(x)
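# Illustrative sanity check (not part of the original script; the name `demo`
# is hypothetical): a freshly initialized model maps a (batch, 2) input to a
# (batch, 1) output in (0, 1) because of the final Sigmoid, e.g.:
#
#   demo = FeedforwardNN(input_dim=2, hidden_dim=10, output_dim=1)
#   print(demo(torch.zeros(4, 2)).shape)  # torch.Size([4, 1])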
# 2. Create the XOR dataset
def create_xor_data():
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
    y = np.array([[0], [1], [1], [0]], dtype=np.float32)
    return X, y
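# XOR is the classic example of a problem that is not linearly separable:
# no single line in the plane separates {(0,0), (1,1)} from {(0,1), (1,0)},
# which is why the hidden layer and ReLU nonlinearity above are needed.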
# 3. Train the feedforward neural network
def train_fnn():
    # Prepare the data
    X, y = create_xor_data()
    X = torch.tensor(X, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)

    # Initialize the network, loss function, and optimizer
    input_dim = X.shape[1]
    hidden_dim = 10
    output_dim = 1
    model = FeedforwardNN(input_dim, hidden_dim, output_dim)
    criterion = nn.BCELoss()  # binary cross-entropy loss
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    # Train the network
    epochs = 1000
    loss_history = []
    for epoch in range(epochs):
        # Forward pass
        outputs = model(X)
        loss = criterion(outputs, y)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Record the loss
        loss_history.append(loss.item())
        if (epoch + 1) % 100 == 0:
            print(f"Epoch [{epoch + 1}/{epochs}], Loss: {loss.item():.4f}")

    # Plot the loss curve
    plt.plot(loss_history)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training Loss Curve')
    plt.show()

    # Print the training results
    with torch.no_grad():
        predictions = model(X).round()
        print("Predictions:", predictions.numpy())
        print("Ground Truth:", y.numpy())
# Run the training
if __name__ == "__main__":
    train_fnn()
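As a variant, the final Sigmoid can be dropped from the model and the loss swapped for nn.BCEWithLogitsLoss, which fuses the sigmoid and the binary cross-entropy in a numerically more stable way. A minimal sketch, assuming the same training loop as above (the class name FeedforwardNNLogits is hypothetical):

class FeedforwardNNLogits(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim)  # raw logits, no Sigmoid here
        )

    def forward(self, x):
        return self.fc(x)

# criterion = nn.BCEWithLogitsLoss() replaces nn.BCELoss(); at evaluation
# time, apply torch.sigmoid(model(X)).round() to recover 0/1 predictions.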