import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from date_process import data_read_csv
# Load the pre-processed training data (data_read_csv is a project-local helper;
# it is assumed to return array-like features and binary labels)
train_x, train_y = data_read_csv()
# Convert to tensors
X = torch.tensor(train_x, dtype=torch.float32)
y = torch.tensor(train_y, dtype=torch.float32)
# Data loader
dataset = TensorDataset(X, y)
dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
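# Optional sanity check (sketch): inspect one batch's shapes so the layer sizes
# defined below can be verified against the actual feature count.
sample_x, sample_y = next(iter(dataloader))
print(f"Batch shapes: x={tuple(sample_x.shape)}, y={tuple(sample_y.shape)}")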
# Generator
class Generator(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, output_dim),
            nn.Tanh()  # outputs in [-1, 1]; assumes features are scaled to that range
        )

    def forward(self, z):
        return self.model(z)
# Discriminator
class Discriminator(nn.Module):
    def __init__(self, input_dim):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 128),
            nn.LeakyReLU(0.2),
            nn.Linear(128, 1),
            nn.Sigmoid()  # probability that the input is real
        )

    def forward(self, x):
        return self.model(x)
# Hyperparameters
latent_dim = 100
input_dim = train_x.shape[1]  # number of features per sample
lr = 0.0002
b1 = 0.5
b2 = 0.999
num_epochs = 100
# Initialize models
generator = Generator(latent_dim, input_dim)
discriminator = Discriminator(input_dim)
# Optimizers
optimizer_G = optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))
# Loss function
adversarial_loss = nn.BCELoss()
# Train the GAN
g_losses = []
d_losses = []
for epoch in range(num_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        # Ground-truth targets: 1 for real samples, 0 for generated ones
        valid = torch.ones(imgs.size(0), 1)
        fake = torch.zeros(imgs.size(0), 1)
        # Train the generator
        optimizer_G.zero_grad()
        z = torch.randn(imgs.size(0), latent_dim)
        gen_imgs = generator(z)
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        g_loss.backward()
        optimizer_G.step()
        # Train the discriminator
        optimizer_D.zero_grad()
        real_loss = adversarial_loss(discriminator(imgs), valid)
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()
    # Record one loss value per epoch to match the "Epochs" axis of the plot below
    g_losses.append(g_loss.item())
    d_losses.append(d_loss.item())
    print(f"[Epoch {epoch}/{num_epochs}] [D loss: {d_loss.item():.4f}] [G loss: {g_loss.item():.4f}]")
# Plot the losses
plt.figure(figsize=(10, 5))
plt.plot(g_losses, label="Generator Loss")
plt.plot(d_losses, label="Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# Generate new data
gen_data_size = 1000
z = torch.randn(gen_data_size, latent_dim)
gen_data = generator(z).detach().numpy()
gen_labels = np.ones((gen_data_size, 1))  # generated samples are labelled as the positive class
# Combine real and generated data
X_combined = np.vstack((X.numpy(), gen_data))
y_combined = np.vstack((y.numpy().reshape(-1, 1), gen_labels))
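# Note (assumption): because the generator ends in Tanh, gen_data lies in [-1, 1];
# mixing it with the real features only makes sense if data_read_csv already scales
# them to the same range, e.g. with sklearn's MinMaxScaler(feature_range=(-1, 1)).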
# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X_combined, y_combined, test_size=0.2, random_state=42)
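# If the combined label mix is imbalanced, passing stratify=y_combined to
# train_test_split keeps the class ratio the same in both splits.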
# Build the classifier
class Classifier(nn.Module):
    def __init__(self, input_dim):
        super(Classifier, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
            nn.Sigmoid()  # probability of the positive class
        )

    def forward(self, x):
        return self.model(x)

classifier = Classifier(input_dim)
optimizer_C = optim.Adam(classifier.parameters(), lr=0.001)
criterion = nn.BCELoss()
# Train the classifier
num_epochs_classifier = 100
for epoch in range(num_epochs_classifier):
    classifier.train()
    optimizer_C.zero_grad()
    outputs = classifier(torch.tensor(X_train, dtype=torch.float32))
    loss = criterion(outputs, torch.tensor(y_train, dtype=torch.float32))
    loss.backward()
    optimizer_C.step()
    print(f"Epoch [{epoch+1}/{num_epochs_classifier}], Loss: {loss.item():.4f}")
# Evaluate the classifier
classifier.eval()
with torch.no_grad():
    y_pred = classifier(torch.tensor(X_test, dtype=torch.float32)).numpy()
y_pred = np.round(y_pred)  # threshold the sigmoid outputs at 0.5
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.4f}")
# Output the per-class accuracy report (acc_metra is a project-local helper)
from metra import acc_metra
acc_metra(y_test, y_pred, label=['0', '1'])