import scipy.io
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the hyperspectral data cube and its ground-truth label map.
data = scipy.io.loadmat("D:/Indian_pines_corrected.mat")['indian_pines_corrected']
# Flatten the 2-D label map so every pixel carries one integer class label.
gt = scipy.io.loadmat("D:/Indian_pines_gt.mat")['indian_pines_gt'].ravel()

# Reshape the (H, W, bands) cube into a (pixels, bands) feature matrix.
num_bands = data.shape[2]
X = data.reshape(-1, num_bands)

# Standardize each spectral band to zero mean / unit variance.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hold out 20% of the pixels for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, gt, test_size=0.2, random_state=42)

# Convert to PyTorch tensors; the unsqueeze adds the channel axis that
# Conv1d expects, giving inputs of shape (N, 1, num_bands).
X_train_tensor = torch.tensor(X_train, dtype=torch.float32).unsqueeze(1)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
X_test_tensor = torch.tensor(X_test, dtype=torch.float32).unsqueeze(1)
y_test_tensor = torch.tensor(y_test, dtype=torch.long)
# Network definition
class SpectralCNN(nn.Module):
    """1D CNN that classifies a pixel from its spectral signature.

    Input: float tensor of shape (batch, 1, num_bands).
    Output: raw class logits of shape (batch, num_classes).
    """

    def __init__(self, num_bands, num_classes):
        super(SpectralCNN, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        # With kernel_size=3 and padding=1 both convolutions preserve the width.
        # MaxPool1d(kernel_size=2) with the default ceil_mode=False FLOORS the
        # width when halving. The original code used np.ceil here, which
        # overestimates the flattened size for odd widths and makes fc1 crash
        # at runtime with a shape mismatch; floor division matches the pooling.
        pool_output_size = num_bands // 2      # width after the first pooling
        pool_output_size = pool_output_size // 2  # width after the second pooling
        # Fully connected classifier over the flattened feature maps.
        self.fc1 = nn.Linear(128 * pool_output_size, num_classes)

    def forward(self, x):
        """Run the forward pass; x is (batch, 1, num_bands), returns logits."""
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 128 * pooled_width)
        x = self.fc1(x)
        return x
# Model setup: one output logit per distinct label value in the ground truth
# (NOTE(review): this counts the background label 0 as a class — confirm intended).
num_classes = len(np.unique(gt))
model = SpectralCNN(num_bands=X_train.shape[1], num_classes=num_classes)

# Cross-entropy loss on raw logits, optimized with Adam.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

num_epochs = 10000  # number of full-batch training passes

# Full-batch gradient descent: each epoch forwards the entire training set once.
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    logits = model(X_train_tensor)
    train_loss = criterion(logits, y_train_tensor)
    train_loss.backward()
    optimizer.step()
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {train_loss.item()}')
# Model evaluation
def evaluate_model(model, X_test_tensor, y_test_tensor):
    """Measure classification accuracy of `model` on the held-out set.

    Args:
        model: network mapping (N, 1, bands) float inputs to (N, C) logits.
        X_test_tensor: float test inputs.
        y_test_tensor: long ground-truth labels, shape (N,).

    Returns:
        float: fraction of correctly classified samples in [0, 1].
        (The original returned None; returning the accuracy is
        backward-compatible and lets callers use the value.)
    """
    model.eval()  # switch to evaluation mode (disables dropout/BN updates)
    with torch.no_grad():  # no gradients needed for evaluation
        outputs = model(X_test_tensor)
        # Predicted class = index of the max logit along the class dimension.
        _, predicted = torch.max(outputs, 1)
        total = y_test_tensor.size(0)
        correct = (predicted == y_test_tensor).sum().item()
    accuracy = correct / total
    print('Accuracy of the network on the test images: %d %%' % (100 * accuracy))
    return accuracy
# Evaluate the trained model on the held-out test split after training ends.
evaluate_model(model, X_test_tensor, y_test_tensor)