news 2026/2/28 11:07:04

DAY34 GPU训练及类的call方法

作者头像

张小明

前端开发工程师

1.2k 24
文章封面图
DAY34 GPU训练及类的call方法

@浙大疏锦行

# Load the Iris dataset and hold out 20% of the rows for evaluation.
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np

iris = load_iris()
X = iris.data          # 150 x 4 feature matrix
y = iris.target        # 3-class integer labels

# Fixed random_state keeps the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
# Sanity-check the split sizes (train features, test features, train labels, test labels).
for split in (X_train, X_test, y_train, y_test):
    print(split.shape)
# Move the numpy splits into torch tensors: float32 features,
# int64 labels (CrossEntropyLoss expects integer class indices).
X_train = torch.FloatTensor(X_train)
y_train = torch.LongTensor(y_train)  # these are integers
X_test = torch.FloatTensor(X_test)
y_test = torch.LongTensor(y_test)


class MLP(nn.Module):
    """A 4-10-3 fully connected network for Iris classification.

    Architecture: Linear(4, 10) -> ReLU -> Linear(10, 3).
    The raw (unnormalized) class scores are returned; softmax is
    applied implicitly by CrossEntropyLoss during training.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fc1 = nn.Linear(4, 10)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)


model = MLP()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000
losses = []  # per-epoch training loss, collected for plotting later
import time

start_time = time.time()
for epoch in range(num_epochs):
    # Bug fix: call model(X_train), not model.forward(X_train).
    # nn.Module.__call__ runs registered forward/backward hooks before
    # dispatching to forward(); calling forward() directly silently
    # skips them. (Full-batch forward pass on all training rows.)
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Standard step: clear stale gradients, backprop, update weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    losses.append(loss.item())
    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}],Loss:{loss.item():.4f}')

time_all = time.time() - start_time
print(f'Training time:{time_all:.2f}seconds')

import matplotlib.pyplot as plt

# Plot the recorded loss curve over all epochs.
plt.plot(range(num_epochs), losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.show()


# Print basic CPU information via Windows WMI (Windows-only; requires
# the third-party `wmi` package). Used here to show what hardware the
# CPU-bound training run above executed on.
import wmi

wmi_conn = wmi.WMI()
for cpu in wmi_conn.Win32_Processor():
    print(f"CPU型号:{cpu.Name}")
    print(f"核心数:{cpu.NumberOfCores}")
    print(f"线程数:{cpu.NumberOfLogicalProcessors}")


CUDA不可用

版权声明: 本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若内容造成侵权/违法违规/事实不符,请联系邮箱:809451989@qq.com进行投诉反馈,一经查实,立即删除!