Python torch.backends.cudnn module: is_acceptable() example source code

We extracted the following 29 code examples from open-source Python projects to illustrate how to use torch.backends.cudnn.is_acceptable().
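All of the examples share one pattern: probe a tensor with cudnn.is_acceptable() and branch between a cuDNN fast path and a generic fallback. Here is a minimal self-contained sketch of that pattern (the describe_dispatch helper is ours for illustration, not from any project below):

import torch
import torch.backends.cudnn as cudnn

def describe_dispatch(t):
    # is_acceptable(t) is True only when cuDNN is enabled, t is a CUDA
    # tensor, and t's dtype is one cuDNN supports.
    return "cudnn" if cudnn.is_acceptable(t) else "fallback"

print(describe_dispatch(torch.randn(1, 3, 8, 8)))              # "fallback": CPU tensor
if torch.cuda.is_available():
    print(describe_dispatch(torch.randn(1, 3, 8, 8).cuda()))   # typically "cudnn"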

Project: pytorch-dist | Author: apaszke
def forward(self, input, weight, bias=None):
        output = input.new(*self._output_size(input, weight))
        if bias is not None:
            self.save_for_backward(input, weight, bias)
        else:
            self.save_for_backward(input, weight)

        if cudnn.is_acceptable(input):
            self._cudnn_info = torch._C._cudnn_convolution_forward(
                input, weight, bias, output, self.pad[0], self.pad[1],
                self.stride[0], self.stride[1], self.groups, cudnn.benchmark)
        else:
            # TODO: implement groups for THNN
            if self.groups != 1:
                raise ValueError('THNN does not support groups')
            backend = type2backend[type(input)]
            self._finput = input.new()
            self._fgrad_input = input.new()
            backend.SpatialConvolutionMM_updateOutput(
                backend.library_state, input, output, weight, bias,
                self._finput, self._fgrad_input, weight.size(3), weight.size(2),
                self.stride[1], self.stride[0], self.pad[1], self.pad[0])

        return output
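The Function above dispatches by hand between the private cuDNN binding and the THNN SpatialConvolutionMM backend. In current PyTorch releases this decision happens inside the C++ dispatcher; as a rough modern equivalent (a sketch, not a drop-in replacement for the legacy Function):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 16, 16)
w = torch.randn(8, 3, 3, 3)
b = torch.randn(8)
# F.conv2d selects cuDNN internally when the inputs are acceptable CUDA tensors.
y = F.conv2d(x, w, b, stride=1, padding=1)
print(y.shape)  # torch.Size([2, 8, 16, 16])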
Project: pytorch | Author: tylergenter
def _update_output(self, input, weight, bias):
        self.use_cudnn = cudnn.is_acceptable(input)
        if self.use_cudnn and cudnn.version() < 6000:
            self.use_cudnn = not self.is_dilated()
        if self.use_cudnn:
            output = input.new(*self._output_size(input, weight))
            if self.transposed:
                self._cudnn_info = (
                    torch._C._cudnn_convolution_transpose_full_forward(
                        input, weight, bias, output, self.padding, self.stride, self.dilation,
                        self.groups, cudnn.benchmark))
            else:
                self._cudnn_info = torch._C._cudnn_convolution_full_forward(
                    input, weight, bias, output, self.padding, self.stride, self.dilation,
                    self.groups, cudnn.benchmark)
            if not self.requires_grad:
                del self._cudnn_info
            return output

        self._bufs = [[] for _ in range(self.groups)]
        output = self._thnn('update_output', input, weight, bias)
        if not self.requires_grad:
            del self._bufs
        return output
Project: pytorch-coriander | Author: hughperkins
def _update_output(self, input, weight, bias):
        self.use_cudnn = cudnn.is_acceptable(input)
        if self.use_cudnn and cudnn.version() < 6000:
            self.use_cudnn = not self.is_dilated()
        if self.use_cudnn:
            output = input.new(*self._output_size(input, weight))
            if self.transposed:
                self._cudnn_info = (
                    torch._C._cudnn_convolution_transpose_full_forward(
                        input, weight, bias, output, self.padding, self.stride, self.dilation,
                        self.groups, cudnn.benchmark))
            else:
                self._cudnn_info = torch._C._cudnn_convolution_full_forward(
                    input, weight, bias, output, self.padding, self.stride, self.dilation,
                    self.groups, cudnn.benchmark)
            if not self.requires_grad:
                del self._cudnn_info
            return output

        self._bufs = [[] for _ in range(self.groups)]
        output = self._thnn('update_output', input, weight, bias)
        if not self.requires_grad:
            del self._bufs
        return output
Project: pytorch | Author: ezyang
def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        if cudnn.is_acceptable(input):
            grad_input = input.new(input.size())
            grad_grid = grid.new(grid.size())
            grid = grid.contiguous()
            if 0 in input.stride():
                input = input.contiguous()
            torch._C._cudnn_grid_sampler_backward(input, grad_input,
                                                  grid, grad_grid,
                                                  grad_output)
        else:
            backend = type2backend[type(input)]
            grad_input = input.new(input.size())
            grad_grid = grid.new(grid.size())
            backend.SpatialGridSamplerBilinear_updateGradInput(
                backend.library_state, input, grad_input,
                grid, grad_grid, grad_output)
        return grad_input, grad_grid
Project: temperature_scaling | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use _EfficientBatchNorm')

        res = input.new(*self._output_size(input, weight))
        self._cudnn_info = torch._C._cudnn_convolution_full_forward(
            input, weight, bias, res,
            (self.padding, self.padding),
            (self.stride, self.stride),
            (self.dilation, self.dilation),
            self.groups, cudnn.benchmark
        )

        return res
Project: efficient_densenet_pytorch | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use _EfficientBatchNorm')

        res = input.new(*self._output_size(input, weight))
        self._cudnn_info = torch._C._cudnn_convolution_full_forward(
            input, weight, bias, res,
            (self.padding, self.padding),
            (self.stride, self.stride),
            (self.dilation, self.dilation),
            self.groups, cudnn.benchmark
        )

        return res
Project: efficient_densenet_pytorch | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use _EfficientBatchNorm')

        # Create save variables
        self.save_mean = self.running_mean.new()
        self.save_mean.resize_as_(self.running_mean)
        self.save_var = self.running_var.new()
        self.save_var.resize_as_(self.running_var)

        # Do forward pass - store in input variable
        res = type(input)(self.storage)
        res.resize_as_(input)
        torch._C._cudnn_batch_norm_forward(
            input, res, weight, bias, self.running_mean, self.running_var,
            self.save_mean, self.save_var, self.training, self.momentum, self.eps
        )

        return res
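The private torch._C._cudnn_batch_norm_forward binding used above has a public counterpart in torch.nn.functional.batch_norm, which handles the cuDNN dispatch internally. A hedged modern sketch (not the project's code):

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 16, 16)
weight, bias = torch.ones(8), torch.zeros(8)
running_mean, running_var = torch.zeros(8), torch.ones(8)
# Routes to cuDNN automatically when x, weight, and bias qualify.
y = F.batch_norm(x, running_mean, running_var, weight, bias,
                 training=True, momentum=0.1, eps=1e-5)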
Project: efficient_densenet_pytorch | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use _EfficientBatchNorm')

        res = input.new(*self._output_size(input, weight))
        self._cudnn_info = torch._C._cudnn_convolution_full_forward(
            input, weight, bias, res,
            (self.padding, self.padding),
            (self.stride, self.stride),
            (self.dilation, self.dilation),
            self.groups, cudnn.benchmark
        )

        return res
Project: pytorch-dist | Author: apaszke
def forward_extended(self, input, weight, hx):
        assert cudnn.is_acceptable(input)

        output = input.new()

        if torch.is_tensor(hx):
            hy = hx.new()
        else:
            hy = tuple(h.new() for h in hx)

        cudnn.rnn.forward(self, input, hx, weight, output, hy)

        self.save_for_backward(input, hx, weight, output)
        return output, hy
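In modern PyTorch the same cuDNN RNN kernels sit behind torch.nn.LSTM/GRU/RNN, which fall back to a pure-autograd implementation on their own, so no assert is needed at that level. A small sketch:

import torch
import torch.nn as nn

rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2)
x = torch.randn(5, 3, 10)      # (seq_len, batch, input_size)
out, (h_n, c_n) = rnn(x)       # uses cuDNN when x and the weights are acceptable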
Project: pytorch-dist | Author: apaszke
def backward_extended(self, grad_output, grad_hy):
        input, hx, weight, output = self.saved_tensors

        grad_input, grad_weight, grad_hx = None, None, None

        assert cudnn.is_acceptable(input)

        grad_input = input.new()
        grad_weight = input.new()
        if torch.is_tensor(hx):
            grad_hx = input.new()
        else:
            grad_hx = tuple(h.new() for h in hx)

        cudnn.rnn.backward_grad(
            self,
            input,
            hx,
            weight,
            output,
            grad_output,
            grad_hy,
            grad_input,
            grad_hx)

        if self.needs_input_grad[1]:
            grad_weight = [tuple(w.new().resize_as_(w).zero_() for w in layer_weight) for layer_weight in weight]
            cudnn.rnn.backward_weight(
                self,
                input,
                hx,
                output,
                weight,
                grad_weight)

        return grad_input, grad_weight, grad_hx
Project: pytorch-dist | Author: apaszke
def RNN(*args, **kwargs):
    def forward(input, *fargs, **fkwargs):
        if cudnn.is_acceptable(input.data):
            func = CudnnRNN(*args, **kwargs)
        else:
            func = AutogradRNN(*args, **kwargs)
        return func(input, *fargs, **fkwargs)

    return forward
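The factory returns a closure that re-checks is_acceptable() on every call, so one forward serves both CPU and acceptable CUDA inputs. A self-contained sketch of the same dispatch-on-call idea (fast_impl and slow_impl are hypothetical stand-ins for CudnnRNN and AutogradRNN):

import torch
import torch.backends.cudnn as cudnn

def make_forward(fast_impl, slow_impl):
    def forward(input, *args, **kwargs):
        # Decide per call, since the same closure may see CPU and CUDA tensors.
        impl = fast_impl if cudnn.is_acceptable(input) else slow_impl
        return impl(input, *args, **kwargs)
    return forward

fwd = make_forward(lambda t: ("cudnn", t), lambda t: ("autograd", t))
print(fwd(torch.randn(3))[0])  # "autograd" for a CPU tensor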
Project: pytorch | Author: tylergenter
def forward_extended(self, input, weight, hx):
        assert cudnn.is_acceptable(input)

        output = input.new()

        if torch.is_tensor(hx):
            hy = hx.new()
        else:
            hy = tuple(h.new() for h in hx)

        cudnn.rnn.forward(self, input, hx, weight, output, hy)

        self.save_for_backward(input, hx, weight, output)
        return output, hy
Project: pytorch | Author: tylergenter
def RNN(*args, **kwargs):
    def forward(input, *fargs, **fkwargs):
        if cudnn.is_acceptable(input.data):
            func = CudnnRNN(*args, **kwargs)
        else:
            func = AutogradRNN(*args, **kwargs)
        return func(input, *fargs, **fkwargs)

    return forward
Project: pytorch-coriander | Author: hughperkins
def forward_extended(self, input, weight, hx):
        assert cudnn.is_acceptable(input)

        output = input.new()

        if torch.is_tensor(hx):
            hy = hx.new()
        else:
            hy = tuple(h.new() for h in hx)

        cudnn.rnn.forward(self, input, hx, weight, output, hy)

        self.save_for_backward(input, hx, weight, output)
        return output, hy
Project: pytorch-coriander | Author: hughperkins
def RNN(*args, **kwargs):
    def forward(input, *fargs, **fkwargs):
        if cudnn.is_acceptable(input.data):
            func = CudnnRNN(*args, **kwargs)
        else:
            func = AutogradRNN(*args, **kwargs)
        return func(input, *fargs, **fkwargs)

    return forward
Project: pytorch | Author: ezyang
def forward(ctx, input, grid):
        ctx.save_for_backward(input, grid)
        grid_sz = grid.size()
        if cudnn.is_acceptable(input):
            output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2])
            grid = grid.contiguous()
            if 0 in input.stride():
                input = input.contiguous()
            torch._C._cudnn_grid_sampler_forward(input, grid, output)
        else:
            backend = type2backend[type(input)]
            output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2])
            backend.SpatialGridSamplerBilinear_updateOutput(backend.library_state, input, grid, output)
        return output
Project: pytorch | Author: ezyang
def _enforce_cudnn(input):
        if not cudnn.enabled:
            raise RuntimeError("AffineGridGenerator needs CuDNN for "
                               "processing CUDA inputs, but CuDNN is not enabled")
        assert cudnn.is_acceptable(input)
Project: pytorch | Author: ezyang
def forward_extended(self, input, weight, hx):
        assert cudnn.is_acceptable(input)
        # TODO: raise a warning if weight_data_ptr is None

        output = input.new()

        if torch.is_tensor(hx):
            hy = hx.new()
        else:
            hy = tuple(h.new() for h in hx)

        cudnn.rnn.forward(self, input, hx, weight, output, hy)

        self.save_for_backward(input, hx, weight, output)
        return output, hy
Project: temperature_scaling | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use EfficientBatchNorm')

        # Create save variables
        self.save_mean = self.running_mean.new()
        self.save_mean.resize_as_(self.running_mean)
        self.save_var = self.running_var.new()
        self.save_var.resize_as_(self.running_var)

        # Do forward pass - store in input variable
        cur_device_id = weight.get_device()
        res = type(input)(self.storage.change_device(cur_device_id)).resize_as_(input)
        assert weight.get_device() == res.get_device(), \
            "input and output should be on the same chip!"

        torch._C._cudnn_batch_norm_forward(input, res,
                weight, bias,
                self.running_mean, self.running_var,
                self.save_mean, self.save_var,
                self.training,
                self.momentum,
                self.eps)
        return res
Project: efficient_densenet_pytorch | Author: gpleiss
def forward(self, weight, bias, input):
        # Assert we're using cudnn
        for tensor in (weight, bias, input):
            if tensor is not None and not cudnn.is_acceptable(tensor):
                raise Exception('You must be using CUDNN to use EfficientBatchNorm')

        # Create save variables
        self.save_mean = self.running_mean.new()
        self.save_mean.resize_as_(self.running_mean)
        self.save_var = self.running_var.new()
        self.save_var.resize_as_(self.running_var)

        # Do forward pass - store in input variable
        cur_device_id = weight.get_device()
        res = type(input)(self.storage.change_device(cur_device_id)).resize_as_(input)
        assert weight.get_device() == res.get_device(), \
            "input and output should be on the same chip!"

        torch._C._cudnn_batch_norm_forward(input, res,
                weight, bias,
                self.running_mean, self.running_var,
                self.save_mean, self.save_var,
                self.training,
                self.momentum,
                self.eps)
        return res
Project: pytorch | Author: pytorch
def grid_sampler(input, grid, padding_mode):
    if cudnn.is_acceptable(input.data) and padding_mode == 'zeros':
        return torch._C._VariableBase.cudnn_grid_sampler(input, grid)
    else:
        return GridSampler.apply(input, grid, padding_mode)
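The public entry point for this pair of code paths is torch.nn.functional.grid_sample. A hedged usage sketch:

import torch
import torch.nn.functional as F

input = torch.randn(1, 3, 8, 8)
grid = torch.rand(1, 4, 4, 2) * 2 - 1   # normalized (x, y) sample points in [-1, 1]
out = F.grid_sample(input, grid, mode='bilinear',
                    padding_mode='zeros', align_corners=False)
print(out.shape)                        # torch.Size([1, 3, 4, 4])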
Project: pytorch | Author: pytorch
def affine_grid_generator(theta, size):
    if theta.data.is_cuda:
        if not cudnn.enabled:
            raise RuntimeError("AffineGridGenerator needs CuDNN for "
                               "processing CUDA inputs, but CuDNN is not enabled")
        if not cudnn.is_acceptable(theta.data):
            raise RuntimeError("AffineGridGenerator generator theta not acceptable for CuDNN")
        N, C, H, W = size
        return torch._C._VariableBase.cudnn_affine_grid_generator(theta, N, C, H, W)
    else:
        return AffineGridGenerator.apply(theta, size)


# TODO: Port these completely into C++
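affine_grid_generator likewise has a public entry point, torch.nn.functional.affine_grid, usually paired with grid_sample. A sketch with an identity transform:

import torch
import torch.nn.functional as F

theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])          # identity affine matrix, shape (N, 2, 3)
grid = F.affine_grid(theta, size=(1, 3, 8, 8), align_corners=False)
x = torch.randn(1, 3, 8, 8)
y = F.grid_sample(x, grid, align_corners=False)    # y approximates x for the identity grid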
Project: pytorch | Author: pytorch
def _enforce_cudnn(input):
        if not cudnn.enabled:
            raise RuntimeError("AffineGridGenerator needs CuDNN for "
                               "processing CUDA inputs, but CuDNN is not enabled")
        assert cudnn.is_acceptable(input)
Project: pytorch | Author: pytorch
def forward_extended(self, input, weight, hx):
        assert cudnn.is_acceptable(input)
        # TODO: raise a warning if weight_data_ptr is None

        output = input.new()

        if torch.is_tensor(hx):
            hy = hx.new()
        else:
            hy = tuple(h.new() for h in hx)

        cudnn.rnn.forward(self, input, hx, weight, output, hy)

        self.save_for_backward(input, hx, weight, output)
        return output, hy
Project: pytorch-dist | Author: apaszke
def backward(self, grad_output):
        tensors = self.saved_tensors
        if len(tensors) == 2:
            input, weight = tensors
            bias = None
        else:
            input, weight, bias = tensors

        grad_input, grad_weight, grad_bias = None, None, None

        if cudnn.is_acceptable(input):
            if self.needs_input_grad[0]:
                grad_input = input.new().resize_as_(input)
                torch._C._cudnn_convolution_backward_data(
                    grad_output, grad_input, weight, self._cudnn_info,
                    cudnn.benchmark)

            if self.needs_input_grad[1]:
                grad_weight = weight.new().resize_as_(weight)
                torch._C._cudnn_convolution_backward_filter(
                    grad_output, input, grad_weight, self._cudnn_info,
                    cudnn.benchmark)

            if bias is not None and self.needs_input_grad[2]:
                grad_bias = bias.new().resize_as_(bias)
                torch._C._cudnn_convolution_backward_bias(
                    grad_output, grad_bias, self._cudnn_info)
        else:
            backend = type2backend[type(input)]
            if self.needs_input_grad[0]:
                grad_input = input.new().resize_as_(input).zero_()
                backend.SpatialConvolutionMM_updateGradInput(
                    backend.library_state, input, grad_output, grad_input,
                    weight, self._finput, self._fgrad_input, weight.size(3),
                    weight.size(2), self.stride[1], self.stride[0], self.pad[1],
                    self.pad[0])

            if any(self.needs_input_grad[1:]):
                grad_weight = weight.new().resize_as_(weight).zero_()
                if bias is not None and self.needs_input_grad[2]:
                    grad_bias = bias.new().resize_as_(bias).zero_()
                else:
                    grad_bias = None
                backend.SpatialConvolutionMM_accGradParameters(
                    backend.library_state, input, grad_output, grad_weight,
                    grad_bias, self._finput, self._fgrad_input, weight.size(3),
                    weight.size(2), self.stride[1], self.stride[0], self.pad[1],
                    self.pad[0], 1)

        if bias is not None:
            return grad_input, grad_weight, grad_bias
        else:
            return grad_input, grad_weight
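Each cuDNN backward binding above (backward_data / backward_filter / backward_bias) corresponds to one entry of needs_input_grad. With the modern autograd API the whole branch collapses into a single call; a sketch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8, requires_grad=True)
w = torch.randn(4, 3, 3, 3, requires_grad=True)
b = torch.randn(4, requires_grad=True)
y = F.conv2d(x, w, b, stride=1, padding=1)
# One call produces all three gradients; cuDNN kernels are used when applicable.
grad_x, grad_w, grad_b = torch.autograd.grad(y.sum(), (x, w, b))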
Project: pytorch | Author: tylergenter
def backward_extended(self, grad_output, grad_hy):
        input, hx, weight, output = self.saved_tensors
        input = input.contiguous()

        grad_input, grad_weight, grad_hx = None, None, None

        assert cudnn.is_acceptable(input)

        grad_input = input.new()
        if torch.is_tensor(hx):
            grad_hx = input.new()
        else:
            grad_hx = tuple(h.new() for h in hx)

        if self.retain_variables:
            self._reserve_clone = self.reserve.clone()

        cudnn.rnn.backward_grad(
            self,
            input,
            hx,
            weight,
            output,
            grad_output,
            grad_hy,
            grad_input,
            grad_hx)

        if any(self.needs_input_grad[1:]):
            grad_weight = [tuple(w.new().resize_as_(w) for w in layer_weight) for layer_weight in weight]
            cudnn.rnn.backward_weight(
                self,
                input,
                hx,
                output,
                weight,
                grad_weight)
        else:
            grad_weight = [(None,) * len(layer_weight) for layer_weight in weight]

        if self.retain_variables:
            self.reserve = self._reserve_clone
            del self._reserve_clone

        return grad_input, grad_weight, grad_hx
Project: pytorch-coriander | Author: hughperkins
def backward_extended(self, grad_output, grad_hy):
        input, hx, weight, output = self.saved_tensors
        input = input.contiguous()

        grad_input, grad_weight, grad_hx = None, None, None

        assert cudnn.is_acceptable(input)

        grad_input = input.new()
        if torch.is_tensor(hx):
            grad_hx = input.new()
        else:
            grad_hx = tuple(h.new() for h in hx)

        if self.retain_variables:
            self._reserve_clone = self.reserve.clone()

        cudnn.rnn.backward_grad(
            self,
            input,
            hx,
            weight,
            output,
            grad_output,
            grad_hy,
            grad_input,
            grad_hx)

        if any(self.needs_input_grad[1:]):
            grad_weight = [tuple(w.new().resize_as_(w) for w in layer_weight) for layer_weight in weight]
            cudnn.rnn.backward_weight(
                self,
                input,
                hx,
                output,
                weight,
                grad_weight)
        else:
            grad_weight = [(None,) * len(layer_weight) for layer_weight in weight]

        if self.retain_variables:
            self.reserve = self._reserve_clone
            del self._reserve_clone

        return grad_input, grad_weight, grad_hx
Project: pytorch | Author: ezyang
def backward_extended(self, grad_output, grad_hy):
        input, hx, weight, output = self.saved_tensors
        input = input.contiguous()

        grad_input, grad_weight, grad_hx = None, None, None

        assert cudnn.is_acceptable(input)

        grad_input = input.new()
        if torch.is_tensor(hx):
            grad_hx = input.new()
        else:
            grad_hx = tuple(h.new() for h in hx)

        if self.retain_variables:
            self._reserve_clone = self.reserve.clone()

        cudnn.rnn.backward_grad(
            self,
            input,
            hx,
            weight,
            output,
            grad_output,
            grad_hy,
            grad_input,
            grad_hx)

        if any(self.needs_input_grad[1:]):
            grad_weight = [tuple(w.new().resize_as_(w) for w in layer_weight) for layer_weight in weight]
            cudnn.rnn.backward_weight(
                self,
                input,
                hx,
                output,
                weight,
                grad_weight)
        else:
            grad_weight = [(None,) * len(layer_weight) for layer_weight in weight]

        if self.retain_variables:
            self.reserve = self._reserve_clone
            del self._reserve_clone

        return grad_input, grad_weight, grad_hx
Project: pytorch | Author: pytorch
def backward_extended(self, grad_output, grad_hy):
        input, hx, weight, output = self.saved_tensors
        input = input.contiguous()

        grad_input, grad_weight, grad_hx = None, None, None

        assert cudnn.is_acceptable(input)

        grad_input = input.new()
        if torch.is_tensor(hx):
            grad_hx = input.new()
        else:
            grad_hx = tuple(h.new() for h in hx)

        if self.retain_variables:
            self._reserve_clone = self.reserve.clone()

        cudnn.rnn.backward_grad(
            self,
            input,
            hx,
            weight,
            output,
            grad_output,
            grad_hy,
            grad_input,
            grad_hx)

        if any(self.needs_input_grad[1:]):
            grad_weight = [tuple(w.new().resize_as_(w) for w in layer_weight) for layer_weight in weight]
            cudnn.rnn.backward_weight(
                self,
                input,
                hx,
                output,
                weight,
                grad_weight)
        else:
            grad_weight = [(None,) * len(layer_weight) for layer_weight in weight]

        if self.retain_variables:
            self.reserve = self._reserve_clone
            del self._reserve_clone

        return grad_input, grad_weight, grad_hx