The following 26 code examples, extracted from open source Python projects, illustrate how to use torch._thnn().
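All of the examples below ultimately go through torch._thnn.type2backend, a lookup table mapping a tensor type to the generated THNN backend that holds the corresponding C kernels. torch._thnn is a private API from the legacy 0.x THNN era and has since been removed, so the minimal sketch below assumes such an old build; the backend method named in the trailing comment is only an illustration of what these backends expose, not something used on this page.

import torch
import torch._thnn   # private, legacy API; removed in later PyTorch releases

t = torch.FloatTensor(4, 5).zero_()
# Map the tensor type to its THNN backend (the examples below use both
# class keys, type(tensor), and string keys such as 'torch.FloatTensor').
backend = torch._thnn.type2backend[type(t)]
# Backend functions are thin THNN bindings and take backend.library_state
# as their first argument, e.g.:
#   backend.Threshold_updateOutput(backend.library_state, input, output, ...)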
def _update_output(self, input, weight, bias):
    self.use_cudnn = cudnn.is_acceptable(input)
    if self.use_cudnn and cudnn.version() < 6000:
        # dilated convolutions are only supported by cuDNN >= 6
        self.use_cudnn = not self.is_dilated()
    if self.use_cudnn:
        output = input.new(*self._output_size(input, weight))
        if self.transposed:
            self._cudnn_info = (
                torch._C._cudnn_convolution_transpose_full_forward(
                    input, weight, bias, output, self.padding, self.stride,
                    self.dilation, self.groups, cudnn.benchmark))
        else:
            self._cudnn_info = torch._C._cudnn_convolution_full_forward(
                input, weight, bias, output, self.padding, self.stride,
                self.dilation, self.groups, cudnn.benchmark)
        if not self.requires_grad:
            del self._cudnn_info
        return output

    # THNN fallback: one scratch-buffer list per group
    self._bufs = [[] for g in range(self.groups)]
    output = self._thnn('update_output', input, weight, bias)
    if not self.requires_grad:
        del self._bufs
    return output
def _grad_input(self, input, weight, grad_output):
    if self.use_cudnn:
        grad_input = input.new().resize_as_(input)
        if self.transposed:
            # ConvTranspose uses the same kernels as regular convolution
            # but swaps forward and backward calls
            torch._C._cudnn_convolution_forward(
                grad_output, weight, grad_input, self._cudnn_info,
                cudnn.benchmark)
        else:
            torch._C._cudnn_convolution_backward_data(
                grad_output, grad_input, weight, self._cudnn_info,
                cudnn.benchmark)
        return grad_input

    return self._thnn('grad_input', input, weight, grad_output)
def _grad_params(self, input, weight, bias, grad_output):
    if self.use_cudnn:
        grad_weight = grad_bias = None
        if self.needs_input_grad[1]:
            grad_weight = weight.new().resize_as_(weight)
            torch._C._cudnn_convolution_backward_filter(
                grad_output, input, grad_weight, self._cudnn_info,
                cudnn.benchmark)

        if bias is not None and self.needs_input_grad[2]:
            grad_bias = bias.new().resize_as_(bias)
            torch._C._cudnn_convolution_backward_bias(
                grad_output, grad_bias, self._cudnn_info)

        return grad_weight, grad_bias

    return self._thnn('grad_params', input, weight, bias, grad_output)
def _thnn(self, fn_name, input, weight, *args):
    impl = _thnn_convs[self.thnn_class_name(input)]
    if self.groups == 1:
        return impl[fn_name](self, self._bufs[0], input, weight, *args)
    else:
        res = []
        for g in range(self.groups):
            def group(tensor, dim=None):
                if tensor is None:
                    return None
                if dim is None:
                    dim = 0 if tensor.dim() == 1 else 1
                # narrow out this group's contiguous slice along `dim`
                n = tensor.size(dim) // self.groups
                return tensor.narrow(dim, n * g, n).contiguous()

            # split input along channels (dim 1) and weight along output
            # filters (dim 0), then run the THNN kernel once per group
            grouped_args = [group(input, 1), group(weight, 0)]
            grouped_args += [group(t) for t in args]
            res.append(impl[fn_name](self, self._bufs[g], *grouped_args))

        if fn_name == 'grad_params':
            return [torch.cat(t, 0) if t[0] is not None else None
                    for t in zip(*res)]
        else:
            return torch.cat(res, 1)
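The per-group slicing done by the inner group() helper above is ordinary tensor narrowing. The standalone sketch below (variable names and sizes are made up for illustration) shows the same splitting outside the class: the input is narrowed along its channel dimension and the weight along its output-filter dimension, one contiguous slice per group.

import torch

groups = 2
input = torch.randn(8, 6, 5, 5)    # (N, C_in, H, W)
weight = torch.randn(4, 3, 3, 3)   # (C_out, C_in / groups, kH, kW)

def group_slice(tensor, dim, g):
    # take the g-th of `groups` equal slices along `dim`
    n = tensor.size(dim) // groups
    return tensor.narrow(dim, n * g, n).contiguous()

for g in range(groups):
    input_g = group_slice(input, 1, g)    # (8, 3, 5, 5)
    weight_g = group_slice(weight, 0, g)  # (2, 3, 3, 3)
    print(g, tuple(input_g.size()), tuple(weight_g.size()))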
def __init__(self):
    self.gradInput = torch.Tensor()
    self.output = 0
    self._backend = torch._thnn.type2backend[type(self.gradInput)]
def type(self, type, tensorCache=None):
    # find all tensors and convert them
    for key, param in self.__dict__.items():
        setattr(self, key, recursiveType(param, type, tensorCache or {}))

    self._backend = torch._thnn.type2backend[type]
    return self
def __init__(self):
    self.gradInput = torch.Tensor()
    self.output = torch.Tensor()
    self._type = self.output.type()
    self._backend = torch._thnn.type2backend[type(self.output)]
def type(self, type=None, tensorCache=None):
    if not type:
        return self._type

    tensorCache = tensorCache or {}

    # find all tensors and convert them
    for key, param in self.__dict__.items():
        setattr(self, key, recursiveType(param, type, tensorCache))

    self._backend = torch._thnn.type2backend[type]
    self._type = type
    return self
def type(self, type=None, tensorCache=None):
    if type is None:
        return self._type

    tensorCache = tensorCache or {}

    # find all tensors and convert them
    for key, param in self.__dict__.items():
        setattr(self, key, recursiveType(param, type, tensorCache))

    self._backend = torch._thnn.type2backend[type]
    self._type = type
    return self
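For context on how the self._backend cached by these constructors and type() methods is then consumed: legacy torch.legacy.nn modules call the generated bindings on it directly, passing library_state first. The sketch below mirrors the old Threshold module; the exact method name and argument order are recalled from that legacy code and should be treated as an assumption, not as something shown on this page.

def updateOutput(self, input):
    # assumed legacy THNN binding: threshold / val / inplace follow the old
    # torch.legacy.nn.Threshold argument order
    self._backend.Threshold_updateOutput(
        self._backend.library_state,
        input,
        self.output,
        self.threshold,
        self.val,
        self.inplace
    )
    return self.output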