# TIN: cuda_shift fails to compile

The error message is:

shift_cuda.cpp:18:26: error: ‘THCState_getCurrentStream’ was not declared in this scope

This happens because the legacy THC API (which provided THCState_getCurrentStream) has been deprecated and removed in recent PyTorch versions. The fix is to replace:

ShiftDataCudaForward(THCState_getCurrentStream(state),
data.data<float>(),
shift.data<int>(),
batch_size,
channels,
tsize,
hwsize,
groupsize,
out.data<float>());

with:

ShiftDataCudaForward(at::cuda::getCurrentCUDAStream(),
data.data<float>(),
shift.data<int>(),
batch_size,
channels,
tsize,
hwsize,
groupsize,
out.data<float>());
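
If the compiler then complains that at::cuda::getCurrentCUDAStream is not declared either, the header that provides it needs to be included near the top of shift_cuda.cpp. A minimal addition, assuming a reasonably recent PyTorch where the ATen CUDA API is available:

// Declares at::cuda::getCurrentCUDAStream()
#include <ATen/cuda/CUDAContext.h>

The returned CUDAStream object converts implicitly to cudaStream_t, so it can be passed directly where THCState_getCurrentStream(state) used to go.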


# autograd error

The error message is:

RuntimeError: Legacy autograd function with non-static forward method is deprecated. Please use new-style autograd function with static forward method. (Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)

Following the official documentation, add the @staticmethod decorator before both the forward and backward methods in rtc_wrap.py:

# Code for "Temporal Interlacing Network"
# Hao Shao, Shengju Qian, Yu Liu
# shaoh19@mails.tsinghua.edu.cn, sjqian@cse.cuhk.edu.hk, yuliu@ee.cuhk.edu.hk

import torch
from torch.autograd import Function

import cudashift as backend

class ShiftFeatureFunc(Function):
    def __init__(self):
        super(ShiftFeatureFunc, self).__init__()

    @staticmethod
    def forward(self, data, shift):
        if not data.is_cuda or not shift.is_cuda:
            raise NotImplementedError

        if data.requires_grad:
            self.save_for_backward(shift)

        out = torch.zeros_like(data)
        backend.shift_featuremap_cuda_forward(data, shift, out)
        return out

    @staticmethod
    def backward(self, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        shift = self.saved_tensors[0]
        data_grad_input = grad_output.new(*grad_output.size()).zero_()
        shift_grad_input = shift.new(*shift.size()).zero_()
        backend.shift_featuremap_cuda_backward(grad_output, shift, data_grad_input)
        return data_grad_input, shift_grad_input
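
With a new-style Function, the first argument of the static forward and backward is the autograd context (conventionally named ctx; the code above keeps the name self, which behaves the same), and the call site must change from instantiating the Function to calling its static apply method. A minimal sketch of the call-site difference (usage illustration only):

# Legacy style (now deprecated): instantiate the Function, then call it
#   out = ShiftFeatureFunc()(data, shift)
# New style: call the static apply method directly
#   out = ShiftFeatureFunc.apply(data, shift)

This is exactly the change made to linear_sampler below.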

Then, in temporal_interlace.py, replace the linear_sampler function with the following:

def linear_sampler(data, bias):
    '''
    data: N * T * C * H * W
    bias: N * T * Groups
    weight: N * T
    '''
    N, T, C, H, W = data.shape
    bias_0 = torch.floor(bias).int()
    bias_1 = bias_0 + 1

    # N * T * C * H * W
    sf1 = ShiftFeatureFunc.apply
    sf2 = ShiftFeatureFunc.apply

    data = data.view(N, T, C, H*W).contiguous()
    data_0 = sf1(data, bias_0)
    data_1 = sf2(data, bias_1)

    w_0 = 1 - (bias - bias_0.float())
    w_1 = 1 - w_0

    groupsize = bias.shape[1]
    w_0 = w_0[:, :, None].repeat(1, 1, C // groupsize)
    w_0 = w_0.view(w_0.size(0), -1)
    w_1 = w_1[:, :, None].repeat(1, 1, C // groupsize)
    w_1 = w_1.view(w_1.size(0), -1)

    w_0 = w_0[:, None, :, None]
    w_1 = w_1[:, None, :, None]

    out = w_0 * data_0 + w_1 * data_1
    out = out.view(N, T, C, H, W)

    return out
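
A forward-only smoke test is a quick way to confirm the rewrite runs without the deprecation error. The import path and tensor shapes below are illustrative assumptions (not taken from the TIN config), and the test assumes the cudashift extension has been built and a CUDA device is available:

import torch
from temporal_interlace import linear_sampler  # assumed module path

data = torch.randn(2, 8, 64, 14, 14, device='cuda')  # N, T, C, H, W
bias = torch.rand(2, 4, device='cuda')  # N x groups; the weight reshaping above expects a 2-D bias
out = linear_sampler(data, bias)
print(out.shape)  # expected: torch.Size([2, 8, 64, 14, 14])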


With these changes the errors are resolved; the actual effect still needs to be tested.