1,226 changes: 616 additions & 610 deletions CASME2_3.py
100644 → 100755

Large diffs are not rendered by default.

1,256 changes: 636 additions & 620 deletions CASME2_5.py
100644 → 100755

Large diffs are not rendered by default.

619 changes: 619 additions & 0 deletions CASME3_7.py

Large diffs are not rendered by default.

92 changes: 44 additions & 48 deletions CA_block.py
100644 → 100755
@@ -29,9 +29,11 @@ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):

def conv1x1(in_planes, out_planes, stride=1, groups=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False,groups=groups)
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False, groups=groups)

# CA BLOCK


##CA BLOCK
class CABlock(nn.Module):
expansion = 1

@@ -43,24 +45,26 @@ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
# if groups != 1 or base_width != 64:
# raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride,groups=groups)
self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv1x1(planes, planes,groups=groups)
self.conv2 = conv1x1(planes, planes, groups=groups)
self.bn2 = norm_layer(planes)
self.attn = nn.Sequential(
nn.Conv2d(2, 1, kernel_size=1, stride=1,bias=False), # 32*33*33
nn.Conv2d(2, 1, kernel_size=1, stride=1, bias=False), # 32*33*33
nn.BatchNorm2d(1),
nn.Sigmoid(),
)
self.downsample = downsample
self.stride = stride
self.planes=planes
self.planes = planes

def forward(self, x):
x, attn_last,if_attn =x##attn_last: downsampled attention maps from last layer as a prior knowledge
# attn_last: downsampled attention maps from last layer as a prior knowledge
x, attn_last, if_attn = x
identity = x

out = self.conv1(x)
@@ -83,13 +87,9 @@ def forward(self, x):

attn = attn.repeat(1, self.planes, 1, 1)
if if_attn:
out = out *attn


return out,attn[:, 0, :, :].unsqueeze(1),True


out = out * attn

return out, attn[:, 0, :, :].unsqueeze(1), True


class ResNet(nn.Module):
@@ -113,35 +113,32 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(90*2, self.inplanes, kernel_size=3, stride=1,padding=1,
bias=False,groups=1)
self.conv1 = nn.Conv2d(90*2, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False, groups=1)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2,padding=1)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer1 = self._make_layer(block, 128, layers[0],groups=1)
self.layer1 = self._make_layer(block, 128, layers[0], groups=1)
self.inplanes = int(self.inplanes*1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0],groups=1)
dilate=replace_stride_with_dilation[0], groups=1)
self.inplanes = int(self.inplanes * 1)

self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1],groups=1)
dilate=replace_stride_with_dilation[1], groups=1)
self.inplanes = int(self.inplanes * 1)

self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],groups=1)
dilate=replace_stride_with_dilation[2], groups=1)
self.inplanes = int(self.inplanes * 1)





self.fc = nn.Linear(512* block.expansion*196, 5)
self.fc = nn.Linear(512 * block.expansion*196, 7)
self.drop = nn.Dropout(p=0.1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@@ -151,12 +148,12 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
if isinstance(m, block.Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
elif isinstance(m, block.BasicBlock):
nn.init.constant_(m.bn2.weight, 0)

def _make_layer(self, block, planes, blocks, stride=1, dilate=False,groups=1):
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, groups=1):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
@@ -180,47 +177,49 @@ def _make_layer(self, block, planes, blocks, stride=1, dilate=False,groups=1):

return nn.Sequential(*layers)

def _forward_impl(self, x,POS):##x->input of main branch; POS->position embeddings generated by sub branch
# x->input of main branch; POS->position embeddings generated by sub branch
def _forward_impl(self, x, POS):

x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
##main branch
x,attn1,_ = self.layer1((x,None,True))
# main branch
x, attn1, _ = self.layer1((x, None, True))
temp = attn1
attn1 = self.maxpool(attn1)

x ,attn2,_= self.layer2((x,attn1,True))
x, attn2, _ = self.layer2((x, attn1, True))

attn2 = self.maxpool(attn2)

attn2=self.maxpool(attn2)

x ,attn3,_= self.layer3((x,attn2,True))
x, attn3, _ = self.layer3((x, attn2, True))
#
attn3 = self.maxpool(attn3)
x,attn4,_ = self.layer4((x,attn3,True))
x, attn4, _ = self.layer4((x, attn3, True))

x=x+POS#fusion of motion pattern feature and position embeddings
x = x+POS # fusion of motion pattern feature and position embeddings

x = torch.flatten(x, 1)

x = self.fc(x)

return x,temp.view(x.size(0),-1)
return x, temp.view(x.size(0), -1)

def forward(self, x,POS):
return self._forward_impl(x,POS)
def forward(self, x, POS):
return self._forward_impl(x, POS)


def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
state_dict = torch.hub.load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model

##main branch consisting of CA blocks
# main branch consisting of CA blocks


def resnet18_pos_attention(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
@@ -231,6 +230,3 @@ def resnet18_pos_attention(pretrained=False, progress=True, **kwargs):
"""
return _resnet('resnet18', CABlock, [1, 1, 1, 1], pretrained, progress,
**kwargs)



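For reference, a minimal usage sketch of the two-branch API that CA_block.py exposes after this change. The shapes below are assumptions inferred from the diff, not confirmed by it: conv1 expects 90*2 = 180 input channels, fc expects 512*196 features (a 14x14 map after three stride-2 stages, i.e. a 112x112 input), and POS must match the layer4 output so the fusion x = x + POS adds element-wise.

# Hedged sketch, not part of the diff; all shapes are inferred assumptions.
import torch
from CA_block import resnet18_pos_attention

model = resnet18_pos_attention()      # CABlock stacked as [1, 1, 1, 1]
x = torch.randn(2, 180, 112, 112)     # main branch: 90*2 motion channels
pos = torch.randn(2, 512, 14, 14)     # sub branch: position embeddings
logits, attn = model(x, pos)          # forward(x, POS)
print(logits.shape)                   # torch.Size([2, 7]) after the 5 -> 7 class change
print(attn.shape)                     # layer1 attention map, flattened per sample

Note that each CA block passes a tuple (features, previous attention map, apply flag) through nn.Sequential, so the attention prior from one stage is downsampled and fed to the next.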
22 changes: 22 additions & 0 deletions Concat.py
@@ -0,0 +1,22 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import sys
path = "temp.csv"
path2 = "SMIC_all_cropped\concat.csv"
df = pd.read_csv(path)
df2 = pd.read_csv(path2)
# change the column names of df
df.columns = ['Subject', 'File_name',
'Apex_predicted', 'ons', 'offs', 'Emotion']
# append new column TYPE with value casme to df
df['TYPE'] = 'casme'
# Let TYPE become column 0
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
df_new = pd.concat([df, df2], axis=0)
print(df_new)
df_new.to_csv("temp2.csv", index=False)