# Day 39 · Neural Network Training on a Credit Dataset

**Table of Contents**

- 1. Data Preprocessing
- 2. Building the DataLoader and the Network
- 3. Visualization
- Dropout Model Performance
- 4. Summary

```python
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings
import random

warnings.filterwarnings('ignore')  # suppress warning messages

def set_seed(seed: int = 42):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

set_seed(42)

data = pd.read_csv('../data.csv')
data.head()
```

|   | Id | Home Ownership | Annual Income | Years in current job | Tax Liens | Number of Open Accounts | Years of Credit History | Maximum Open Credit | Number of Credit Problems | Months since last delinquent | Bankruptcies | Purpose | Term | Current Loan Amount | Current Credit Balance | Monthly Debt | Credit Score | Credit Default |
|---|----|----------------|---------------|----------------------|-----------|-------------------------|-------------------------|---------------------|---------------------------|------------------------------|--------------|---------|------|---------------------|------------------------|--------------|--------------|----------------|
| 0 | 0 | Own Home | 482087.0 | NaN | 0.0 | 11.0 | 26.3 | 685960.0 | 1.0 | NaN | 1.0 | debt consolidation | Short Term | 99999999.0 | 47386.0 | 7914.0 | 749.0 | 0 |
| 1 | 1 | Own Home | 1025487.0 | 10+ years | 0.0 | 15.0 | 15.3 | 1181730.0 | 0.0 | NaN | 0.0 | debt consolidation | Long Term | 264968.0 | 394972.0 | 18373.0 | 737.0 | 1 |
| 2 | 2 | Home Mortgage | 751412.0 | 8 years | 0.0 | 11.0 | 35.0 | 1182434.0 | 0.0 | NaN | 0.0 | debt consolidation | Short Term | 99999999.0 | 308389.0 | 13651.0 | 742.0 | 0 |
| 3 | 3 | Own Home | 805068.0 | 6 years | 0.0 | 8.0 | 22.5 | 147400.0 | 1.0 | NaN | 1.0 | debt consolidation | Short Term | 121396.0 | 95855.0 | 11338.0 | 694.0 | 0 |
| 4 | 4 | Rent | 776264.0 | 8 years | 0.0 | 13.0 | 13.6 | 385836.0 | 1.0 | NaN | 0.0 | debt consolidation | Short Term | 125840.0 | 93309.0 | 7180.0 | 719.0 | 0 |

## 1. Data Preprocessing

```python
discrete_features = data.select_dtypes(include=['object']).columns.tolist()
print(discrete_features)

maps = {
    'Home Ownership': {'Own Home': 1, 'Rent': 2, 'Have Mortgage': 3, 'Home Mortgage': 4},
    'Years in current job': {'< 1 year': 1, '1 year': 2, '2 years': 3, '3 years': 4,
                             '4 years': 5, '5 years': 6, '6 years': 7, '7 years': 8,
                             '8 years': 9, '9 years': 10, '10+ years': 11},
    'Term': {'Short Term': 0, 'Long Term': 1}
}
data = data.replace(maps)

# One-hot encode Purpose; remember to convert the resulting bool columns to numeric
data = pd.get_dummies(data, columns=['Purpose'])
data2 = pd.read_csv('../data.csv')
list_diff = data.columns.difference(data2.columns)  # columns added by the one-hot encoding
data[list_diff] = data[list_diff].astype(int)

data.rename(columns={'Term': 'Long Term'}, inplace=True)  # rename the column

continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()  # keep the selected column names as a list

# Fill missing values in the continuous features with the mode
for feature in continuous_features:
    mode_value = data[feature].mode()[0]            # mode of this column
    data[feature].fillna(mode_value, inplace=True)  # fill NaNs in place (inplace=True modifies data directly)

X = data.drop(['Credit Default', 'Id'], axis=1)  # features; axis=1 drops columns
y = data['Credit Default']                       # label

# 80/20 train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```

```
['Home Ownership', 'Years in current job', 'Purpose', 'Term']
```

```python
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
```

```
(6000, 30)
(1500, 30)
(6000,)
(1500,)
```

## 2. Building the DataLoader and the Network

- Enable optional pinned memory for `train_loader` and `test_loader` to speed up transfers to the GPU.
- After each epoch, compute the loss and accuracy on both the training and test sets and store them for visualization.
- Print periodic monitoring logs to keep an eye on convergence.
```python
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
pin_memory = device.type == 'cuda'

X_train = torch.FloatTensor(X_train)
y_train = torch.FloatTensor(y_train.to_numpy()).unsqueeze(1)
X_test = torch.FloatTensor(X_test)
y_test = torch.FloatTensor(y_test.to_numpy()).unsqueeze(1)

print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print('---------------------------------')

train_dataset = TensorDataset(X_train, y_train)
test_dataset = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, pin_memory=pin_memory)
test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False, pin_memory=pin_memory)

model = nn.Sequential(
    nn.Linear(X_train.shape[1], 64),
    nn.ReLU(),
    nn.Linear(64, 32),
    nn.ReLU(),
    nn.Linear(32, 1)
).to(device)

criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

num_epochs = 300
train_losses, test_losses = [], []
train_accuracies, test_accuracies = [], []

for epoch in range(1, num_epochs + 1):
    model.train()
    running_loss = 0.0
    running_correct = 0
    total_train = 0
    for x_batch, y_batch in train_loader:
        x_batch = x_batch.to(device, non_blocking=pin_memory)
        y_batch = y_batch.to(device, non_blocking=pin_memory)

        optimizer.zero_grad()
        outputs = model(x_batch)
        loss = criterion(outputs, y_batch)
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * x_batch.size(0)
        preds = (torch.sigmoid(outputs) > 0.5).int()
        running_correct += (preds == y_batch.int()).sum().item()
        total_train += x_batch.size(0)

    avg_train_loss = running_loss / total_train
    avg_train_acc = running_correct / total_train
    train_losses.append(avg_train_loss)
    train_accuracies.append(avg_train_acc)

    model.eval()
    test_loss = 0.0
    test_correct = 0
    total_test = 0
    with torch.no_grad():
        for x_batch, y_batch in test_loader:
            x_batch = x_batch.to(device, non_blocking=pin_memory)
            y_batch = y_batch.to(device, non_blocking=pin_memory)
            outputs = model(x_batch)
            loss = criterion(outputs, y_batch)
            test_loss += loss.item() * x_batch.size(0)
            preds = (torch.sigmoid(outputs) > 0.5).int()
            test_correct += (preds == y_batch.int()).sum().item()
            total_test += x_batch.size(0)

    avg_test_loss = test_loss / total_test
    avg_test_acc = test_correct / total_test
    test_losses.append(avg_test_loss)
    test_accuracies.append(avg_test_acc)

    if epoch % 20 == 0 or epoch == 1:
        print(f"Epoch [{epoch:03d}/{num_epochs}] | "
              f"Train Loss: {avg_train_loss:.4f}, Train Acc: {avg_train_acc:.4f} | "
              f"Test Loss: {avg_test_loss:.4f}, Test Acc: {avg_test_acc:.4f}")

print(f"Final Test Accuracy: {test_accuracies[-1]:.4f}")
```

```
torch.Size([6000, 30])
torch.Size([1500, 30])
torch.Size([6000, 1])
torch.Size([1500, 1])
---------------------------------
Epoch [001/300] | Train Loss: 0.5566, Train Acc: 0.7417 | Test Loss: 0.5053, Test Acc: 0.7673
Epoch [020/300] | Train Loss: 0.4383, Train Acc: 0.7883 | Test Loss: 0.4872, Test Acc: 0.7587
Epoch [040/300] | Train Loss: 0.4183, Train Acc: 0.7990 | Test Loss: 0.4940, Test Acc: 0.7540
Epoch [060/300] | Train Loss: 0.3951, Train Acc: 0.8105 | Test Loss: 0.5158, Test Acc: 0.7500
Epoch [080/300] | Train Loss: 0.3816, Train Acc: 0.8165 | Test Loss: 0.5445, Test Acc: 0.7560
Epoch [100/300] | Train Loss: 0.3640, Train Acc: 0.8263 | Test Loss: 0.5684, Test Acc: 0.7560
Epoch [120/300] | Train Loss: 0.3565, Train Acc: 0.8320 | Test Loss: 0.5776, Test Acc: 0.7220
Epoch [140/300] | Train Loss: 0.3459, Train Acc: 0.8402 | Test Loss: 0.5914, Test Acc: 0.7213
Epoch [160/300] | Train Loss: 0.3373, Train Acc: 0.8432 | Test Loss: 0.6063, Test Acc: 0.7400
Epoch [180/300] | Train Loss: 0.3273, Train Acc: 0.8488 | Test Loss: 0.6210, Test Acc: 0.7427
Epoch [200/300] | Train Loss: 0.3175, Train Acc: 0.8558 | Test Loss: 0.6753, Test Acc: 0.7007
Epoch [220/300] | Train Loss: 0.3103, Train Acc: 0.8628 | Test Loss: 0.6701, Test Acc: 0.7233
Epoch [240/300] | Train Loss: 0.3035, Train Acc: 0.8618 | Test Loss: 0.6862, Test Acc: 0.7060
Epoch [260/300] | Train Loss: 0.2984, Train Acc: 0.8628 | Test Loss: 0.7258, Test Acc: 0.7120
Epoch [280/300] | Train Loss: 0.2891, Train Acc: 0.8730 | Test Loss: 0.7432, Test Acc: 0.7213
Epoch [300/300] | Train Loss: 0.2828, Train Acc: 0.8750 | Test Loss: 0.7428, Test Acc: 0.7220
Final Test Accuracy: 0.7220
```

## 3. Visualization

In the figure below, the left panel shows the loss curves and the right panel shows the accuracy curves.

```python
epochs = range(1, num_epochs + 1)

fig, axes = plt.subplots(1, 2, figsize=(14, 5))

axes[0].plot(epochs, train_losses, label='Train Loss')
axes[0].plot(epochs, test_losses, label='Test Loss')
axes[0].set_xlabel('Epoch')
axes[0].set_ylabel('Loss')
axes[0].set_title('Train vs Test Loss')
axes[0].legend()

axes[1].plot(epochs, train_accuracies, label='Train Acc')
axes[1].plot(epochs, test_accuracies, label='Test Acc')
axes[1].set_xlabel('Epoch')
axes[1].set_ylabel('Accuracy')
axes[1].set_title('Train vs Test Accuracy')
axes[1].legend()

plt.tight_layout()
plt.show()
```

The model generalizes poorly: the training loss keeps falling while the test loss climbs, so overfitting is severe. Next, we try Dropout to ease the overfitting.
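Before the retraining code, here is a tiny standalone illustration, separate from the pipeline above, of what `nn.Dropout` actually does; the all-ones tensor and the seed are made up purely for demonstration. During training, each element is zeroed with probability `p` and the survivors are rescaled by `1/(1 - p)`; in eval mode the layer is a no-op.

```python
import torch
import torch.nn as nn  # both already imported earlier in this post

torch.manual_seed(0)
drop = nn.Dropout(p=0.3)
x = torch.ones(8)

drop.train()
print(drop(x))  # some entries become 0.0, the rest become 1 / (1 - 0.3) ≈ 1.4286
drop.eval()
print(drop(x))  # identity: tensor([1., 1., 1., 1., 1., 1., 1., 1.])
```

Because of the rescaling, the expected activation magnitude is the same in training and eval mode, so no extra adjustment is needed at inference time.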
```python
# Retrain with Dropout + L2 regularization (weight decay) to mitigate overfitting
set_seed(42)  # make the second experiment reproducible as well

drop_model = nn.Sequential(
    nn.Linear(X_train.shape[1], 128),
    nn.ReLU(),
    nn.Dropout(0.3),
    nn.Linear(128, 64),
    nn.ReLU(),
    nn.Dropout(0.3),
    nn.Linear(64, 1)
).to(device)

weight_decay = 1e-4
drop_optimizer = optim.Adam(drop_model.parameters(), lr=1e-3, weight_decay=weight_decay)

drop_epochs = 200
drop_train_losses, drop_test_losses = [], []
drop_train_accs, drop_test_accs = [], []

for epoch in range(1, drop_epochs + 1):
    drop_model.train()
    running_loss = 0.0
    running_correct = 0
    total = 0
    for xb, yb in train_loader:
        xb = xb.to(device, non_blocking=pin_memory)
        yb = yb.to(device, non_blocking=pin_memory)

        drop_optimizer.zero_grad()
        logits = drop_model(xb)
        loss = criterion(logits, yb)
        loss.backward()
        drop_optimizer.step()

        running_loss += loss.item() * xb.size(0)
        preds = (torch.sigmoid(logits) > 0.5).int()
        running_correct += (preds == yb.int()).sum().item()
        total += xb.size(0)

    train_loss = running_loss / total
    train_acc = running_correct / total
    drop_train_losses.append(train_loss)
    drop_train_accs.append(train_acc)

    drop_model.eval()
    test_loss = 0.0
    test_correct = 0
    total_test = 0
    with torch.no_grad():
        for xb, yb in test_loader:
            xb = xb.to(device, non_blocking=pin_memory)
            yb = yb.to(device, non_blocking=pin_memory)
            logits = drop_model(xb)
            loss = criterion(logits, yb)
            test_loss += loss.item() * xb.size(0)
            preds = (torch.sigmoid(logits) > 0.5).int()
            test_correct += (preds == yb.int()).sum().item()
            total_test += xb.size(0)

    avg_test_loss = test_loss / total_test
    avg_test_acc = test_correct / total_test
    drop_test_losses.append(avg_test_loss)
    drop_test_accs.append(avg_test_acc)

    if epoch % 20 == 0 or epoch == 1:
        print(f"Dropout Epoch [{epoch:03d}/{drop_epochs}] | "
              f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f} | "
              f"Test Loss: {avg_test_loss:.4f}, Test Acc: {avg_test_acc:.4f}")

print(f"Final Test Accuracy with Dropout/L2: {drop_test_accs[-1]:.4f}")
```

```
Dropout Epoch [001/200] | Train Loss: 0.5564, Train Acc: 0.7473 | Test Loss: 0.4935, Test Acc: 0.7667
Dropout Epoch [020/200] | Train Loss: 0.4507, Train Acc: 0.7858 | Test Loss: 0.4711, Test Acc: 0.7647
Dropout Epoch [040/200] | Train Loss: 0.4392, Train Acc: 0.7872 | Test Loss: 0.4743, Test Acc: 0.7620
Dropout Epoch [060/200] | Train Loss: 0.4284, Train Acc: 0.7978 | Test Loss: 0.4817, Test Acc: 0.7593
Dropout Epoch [080/200] | Train Loss: 0.4182, Train Acc: 0.8008 | Test Loss: 0.4826, Test Acc: 0.7553
Dropout Epoch [100/200] | Train Loss: 0.4133, Train Acc: 0.8065 | Test Loss: 0.4929, Test Acc: 0.7547
Dropout Epoch [120/200] | Train Loss: 0.4085, Train Acc: 0.8067 | Test Loss: 0.4951, Test Acc: 0.7533
Dropout Epoch [140/200] | Train Loss: 0.3974, Train Acc: 0.8135 | Test Loss: 0.5040, Test Acc: 0.7580
Dropout Epoch [160/200] | Train Loss: 0.3925, Train Acc: 0.8173 | Test Loss: 0.5158, Test Acc: 0.7533
Dropout Epoch [180/200] | Train Loss: 0.3872, Train Acc: 0.8178 | Test Loss: 0.5231, Test Acc: 0.7533
Dropout Epoch [200/200] | Train Loss: 0.3828, Train Acc: 0.8220 | Test Loss: 0.5193, Test Acc: 0.7480
Final Test Accuracy with Dropout/L2: 0.7480
```

## Dropout Model Performance

Plot the loss/accuracy curves once more to see how the regularized model converges.

```python
drop_epochs_range = range(1, drop_epochs + 1)

fig, axes = plt.subplots(1, 2, figsize=(14, 5))

axes[0].plot(drop_epochs_range, drop_train_losses, label='Drop Train Loss')
axes[0].plot(drop_epochs_range, drop_test_losses, label='Drop Test Loss')
axes[0].set_xlabel('Epoch')
axes[0].set_ylabel('Loss')
axes[0].set_title('Dropout Model Loss')
axes[0].legend()

axes[1].plot(drop_epochs_range, drop_train_accs, label='Drop Train Acc')
axes[1].plot(drop_epochs_range, drop_test_accs, label='Drop Test Acc')
axes[1].set_xlabel('Epoch')
axes[1].set_ylabel('Accuracy')
axes[1].set_title('Dropout Model Accuracy')
axes[1].legend()

plt.tight_layout()
plt.show()
```

## 4. Summary

Oh no, it still doesn't look good. (ㄒoㄒ)

The first model showed clear signs of overfitting, so I retrained with Dropout + L2 and recorded a second set of loss/accuracy curves. The regularized run narrows the train/test gap, but the final test accuracy only moves from 0.7220 to 0.7480, and both runs peak in their very first epochs, which is still disappointing (ŏ﹏ŏ). I'm still learning, so I'll leave it at that for now and dig into the reasons later. Next steps: keep experimenting with regularization, network depth, learning-rate scheduling, and early stopping (a rough early-stopping sketch follows at the end of this post).

浙大疏锦行
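As a starting point for the early-stopping idea mentioned in the summary, here is a minimal sketch, not part of the original experiments. It assumes a freshly initialized `model`/`optimizer` plus the `criterion`, `device`, `train_loader`, and `test_loader` defined above; the `patience` value, the improvement threshold, and the `evaluate` helper are made up for illustration, and using the test loader as the stopping signal is a shortcut where a separate validation split would be more rigorous.

```python
import copy

def evaluate(net, loader):
    """Average loss over a loader (hypothetical helper for this sketch)."""
    net.eval()
    total_loss, total = 0.0, 0
    with torch.no_grad():
        for xb, yb in loader:
            xb, yb = xb.to(device), yb.to(device)
            total_loss += criterion(net(xb), yb).item() * xb.size(0)
            total += xb.size(0)
    return total_loss / total

patience = 10               # assumed: stop after 10 epochs without improvement
best_loss = float('inf')
best_state = None
epochs_no_improve = 0

for epoch in range(1, 301):
    model.train()
    for xb, yb in train_loader:
        xb, yb = xb.to(device), yb.to(device)
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)
        loss.backward()
        optimizer.step()

    val_loss = evaluate(model, test_loader)
    if val_loss < best_loss - 1e-4:                 # counts as a real improvement
        best_loss = val_loss
        best_state = copy.deepcopy(model.state_dict())
        epochs_no_improve = 0
    else:
        epochs_no_improve += 1
        if epochs_no_improve >= patience:
            print(f"Early stopping at epoch {epoch}, best test loss {best_loss:.4f}")
            break

if best_state is not None:
    model.load_state_dict(best_state)               # roll back to the best checkpoint
```

The same `val_loss` could also drive `torch.optim.lr_scheduler.ReduceLROnPlateau` (call `scheduler.step(val_loss)` once per epoch) if learning-rate scheduling is the next experiment.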