An example of extracting model feature vectors with PyTorch and saving them to a CSV file
Extracting model feature vectors with PyTorch

The script below builds a feature extractor from a pretrained resnet18 (via the pretrainedmodels package) by dropping its pooling and classifier layers, then runs every image in a folder through the encoder and appends each resulting feature vector as one row of a CSV file.
# -*- coding: utf-8 -*-
"""
dj
"""
import os

import numpy as np
import pandas as pd
import pretrainedmodels
import torch
import torch.nn as nn
from PIL import Image
from torch.autograd import Variable
from torchvision import models, transforms


class FCViewer(nn.Module):
    """Flattens the (N, C, 1, 1) encoder output into (N, C) feature vectors."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained=None)
        # Drop the backbone's own pooling and classifier layers, then add global
        # average pooling so the encoder outputs one feature vector per image.
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=True)

features_dir = '/home/cc/Desktop/features'

transform1 = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic)
    img1 = transform1(img)
    # Variable is deprecated in recent PyTorch; a plain tensor works the same way.
    x = Variable(torch.unsqueeze(img1, dim=0).float(), requires_grad=False)
    y = model1(x)
    y = y.data.numpy()
    y = y.tolist()
    # print(y)
    test = pd.DataFrame(data=y)
    # print(test)
    # Append one row of features per image to the CSV file.
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=None, header=None)
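Because to_csv is called with mode='a+' and no header or index, each image appends one row of features to 3.csv. If the features are needed later, a minimal sketch for reading them back into a NumPy array (the CSV path is the one used above; reading everything as float32 is an assumption, since no header is written):

# A small sketch (not part of the original script) that reads the appended
# feature rows back into a (num_images, feature_dim) NumPy array.
import numpy as np
import pandas as pd

features = pd.read_csv("/home/cc/Desktop/features/3.csv", header=None)
feature_matrix = features.to_numpy(dtype=np.float32)
print(feature_matrix.shape)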
Loading a trained model

The second script does the same feature extraction, but with a locally trained ResNet18 (CIFAR-style, 32x32 input) whose weights are loaded from disk instead of an ImageNet-pretrained backbone.
import argparse
import os

import numpy as np
import pandas as pd
import pretrainedmodels
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable


class ResidualBlock(nn.Module):
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out = self.left(x)
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, ResidualBlock, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)  # e.g. strides = [1, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


def ResNet18():
    return ResNet(ResidualBlock)


class FCViewer(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            img_model = ResNet18()
            we = '/home/cc/Desktop/dj/model1/incption--7'
            # Model definition - ResNet
            # net = ResNet18().to(device)
            img_model.load_state_dict(torch.load(we))  # restore the trained weights
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=False)  # False so the locally trained ResNet18 is used

features_dir = '/home/cc/Desktop/features'

transform1 = transforms.Compose([
    transforms.Resize(56),
    transforms.CenterCrop(32),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic)
    img1 = transform1(img)
    x = Variable(torch.unsqueeze(img1, dim=0).float(), requires_grad=False)
    y = model1(x)
    y = y.data.numpy()
    y = y.tolist()
    # print(y)
    test = pd.DataFrame(data=y)
    # print(test)
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=None, header=None)
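The pretrained=False branch expects a state_dict that was saved earlier at /home/cc/Desktop/dj/model1/incption--7. For completeness, a minimal sketch of the matching save step, assuming a ResNet18 instance that has already been trained (the training loop itself is omitted):

# Assumed counterpart of the load above: save the trained weights as a state_dict
# so that img_model.load_state_dict(torch.load(we)) can restore them later.
net = ResNet18()
# ... training loop omitted ...
torch.save(net.state_dict(), '/home/cc/Desktop/dj/model1/incption--7')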
That is everything in this example of extracting model feature vectors with PyTorch and saving them to CSV. I hope it serves as a useful reference, and I hope you will continue to support 毛票票.