tensor

package
v0.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 13, 2026 License: MIT Imports: 5 Imported by: 0

Documentation

Overview

Package tensor provides a safe, idiomatic Go tensor type built on libtorch.

Tensors are immutable values — every operation returns a new tensor. Memory is managed via GC finalizers (safety net) and optional Scopes (deterministic bulk cleanup for training loops).

Operations are chainable methods that carry errors:

result := x.Matmul(w).Add(b).ReLU()
if err := result.Err(); err != nil {
    log.Fatal(err)
}
Example (Activations)
package main

import (
	"fmt"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	// A small 1D tensor spanning negative and positive values.
	input, _ := tensor.FromFloat32([]float32{-2, -1, 0, 1, 2}, []int64{5})
	defer input.Release()

	// ReLU zeroes everything below zero: max(0, x).
	relu := input.ReLU()
	defer relu.Release()
	reluVals, _ := relu.Float32Data()
	fmt.Println("relu:", reluVals)

	// Sigmoid squashes into (0, 1): 1 / (1 + exp(-x)).
	sig := input.Sigmoid()
	defer sig.Release()
	sigVals, _ := sig.Float32Data()
	fmt.Printf("sigmoid(0): %.1f\n", sigVals[2])

	// Tanh squashes into (-1, 1).
	th := input.Tanh()
	defer th.Release()
	thVals, _ := th.Float32Data()
	fmt.Printf("tanh(0): %.1f\n", thVals[2])

}
Output:

relu: [0 0 0 1 2]
sigmoid(0): 0.5
tanh(0): 0.0
Example (Chaining)
package main

import (
	"fmt"
	"log"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	// Operations chain naturally. Errors propagate — check once at the end.
	input, _ := tensor.FromFloat32([]float32{1, 2, 3}, []int64{1, 3})
	defer input.Release()
	weight, _ := tensor.FromFloat32([]float32{1, 0, 0, 1, 0, 0}, []int64{3, 2})
	defer weight.Release()
	bias, _ := tensor.FromFloat32([]float32{-5, 10}, []int64{1, 2})
	defer bias.Release()

	// Linear layer: y = ReLU(x @ W + b)
	out := input.Matmul(weight).Add(bias).ReLU()
	if err := out.Err(); err != nil {
		log.Fatal(err)
	}
	defer out.Release()

	vals, _ := out.Float32Data()
	fmt.Println(vals)

}
Output:

[0 12]
Example (Creation)
package main

import (
	"fmt"
	"log"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	// Default options produce float32 tensors on the CPU.
	zeros, err := tensor.Zeros([]int64{2, 3})
	if err != nil {
		log.Fatal(err)
	}
	defer zeros.Release()
	fmt.Println(zeros)

	// Options override dtype (and device) at creation time.
	ones, err := tensor.Ones([]int64{4}, tensor.WithDType(tensor.Float64))
	if err != nil {
		log.Fatal(err)
	}
	defer ones.Release()
	fmt.Println(ones)

	// Go slices are copied into a freshly created tensor.
	fromSlice, err := tensor.FromFloat32([]float32{1, 2, 3, 4, 5, 6}, []int64{2, 3})
	if err != nil {
		log.Fatal(err)
	}
	defer fromSlice.Release()
	fmt.Println(fromSlice)

}
Output:

Tensor(shape=[2 3], dtype=float32, device=cpu)
Tensor(shape=[4], dtype=float64, device=cpu)
Tensor(shape=[2 3], dtype=float32, device=cpu)
Example (DataAccess)
package main

import (
	"fmt"
	"log"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	src, _ := tensor.FromFloat32([]float32{1.5, 2.5, 3.5}, []int64{3})
	defer src.Release()

	// Copy the elements back into a Go slice.
	vals, err := src.Float32Data()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vals)

	// Inspect tensor metadata.
	fmt.Println("shape:", src.Shape())
	fmt.Println("ndim:", src.Ndim())
	fmt.Println("numel:", src.Numel())
	fmt.Println("dtype:", src.DType())
	fmt.Println("device:", src.Device())

}
Output:

[1.5 2.5 3.5]
shape: [3]
ndim: 1
numel: 3
dtype: float32
device: cpu
Example (ErrorPropagation)
package main

import (
	"fmt"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	left, _ := tensor.FromFloat32([]float32{1, 2, 3}, []int64{3})
	defer left.Release()

	// Releasing right immediately puts it in an error state.
	right, _ := tensor.FromFloat32([]float32{4, 5, 6}, []int64{3})
	right.Release()

	// Each subsequent operation carries the error forward to the end.
	chained := left.Add(right).ReLU().Sigmoid()
	fmt.Println(chained.Err())

}
Output:

tensor: use after release
Example (Scope)
package main

import (
	"fmt"
	"log"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	// A scope frees every tracked tensor when it closes,
	// giving deterministic cleanup for groups of tensors.
	sum, err := tensor.WithScope(func(sc *tensor.Scope) *tensor.Tensor {
		zeros, _ := tensor.Zeros([]int64{3})
		sc.Track(zeros)
		ones, _ := tensor.Ones([]int64{3})
		sc.Track(ones)

		// The returned tensor survives; zeros and ones are freed.
		return zeros.Add(ones)
	})
	if err != nil {
		log.Fatal(err)
	}
	defer sum.Release()

	vals, _ := sum.Float32Data()
	fmt.Println(vals)

}
Output:

[1 1 1]
Example (ScopeManual)
package main

import (
	"fmt"

	"github.com/fab2s/goDl/tensor"
)

func main() {
	// Manual scope management gives finer control over cleanup timing.
	sc := tensor.NewScope()

	lhs, _ := tensor.Zeros([]int64{100, 100})
	sc.Track(lhs)
	rhs, _ := tensor.Ones([]int64{100, 100})
	sc.Track(rhs)
	sum := sc.Track(lhs.Add(rhs))

	fmt.Println(sum)

	// Close frees every tracked tensor in one shot.
	sc.Close()
	fmt.Println(sum.Err())

}
Output:

Tensor(shape=[100 100], dtype=float32, device=cpu)
tensor: use after release

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

func ActiveTensors

func ActiveTensors() int64

ActiveTensors returns the number of tensors that haven't been released. Useful for detecting leaks in tests.

func CUDAAvailable

func CUDAAvailable() bool

CUDAAvailable returns true if CUDA is available on this system.

func CUDADeviceCount

func CUDADeviceCount() int

CUDADeviceCount returns the number of available CUDA devices.

Types

type DType

type DType int

DType represents a tensor's element type.

const (
	Float16  DType = DType(libtorch.Float16)
	BFloat16 DType = DType(libtorch.BFloat16)
	Float32  DType = DType(libtorch.Float32)
	Float64  DType = DType(libtorch.Float64)
	Int32    DType = DType(libtorch.Int32)
	Int64    DType = DType(libtorch.Int64)
)

func (DType) ElementSize

func (d DType) ElementSize() int64

ElementSize returns the size in bytes of one element of this dtype.

func (DType) String

func (d DType) String() string

type Device

type Device int

Device represents where a tensor's data lives.

const (
	CPU  Device = Device(libtorch.CPU)
	CUDA Device = Device(libtorch.CUDA)
)

func DevicePtr added in v0.2.0

func DevicePtr(d Device) *Device

DevicePtr returns a pointer to the given Device value. Convenience for configuration structs:

cfg := data.LoaderConfig{Device: tensor.DevicePtr(tensor.CUDA)}

func (Device) String

func (d Device) String() string

type Option

type Option func(*options)

Option configures tensor creation. Use WithDType and WithDevice.

func WithDType

func WithDType(dtype DType) Option

WithDType sets the element type for tensor creation.

func WithDevice

func WithDevice(device Device) Option

WithDevice sets the device for tensor creation.

type Scope

type Scope struct {
	// contains filtered or unexported fields
}

Scope tracks tensors for bulk cleanup. All tensors tracked by a Scope are released when the Scope closes, except those explicitly untracked (for example, the tensor returned from WithScope, which is automatically untracked).

Scopes are useful in training loops where each iteration creates many intermediate tensors that should all be freed together:

for batch := range dataloader {
    scope := tensor.NewScope()
    x := scope.Track(loadBatch(batch))
    out := scope.Track(x.Matmul(w).Add(b).ReLU())
    // ... use out ...
    scope.Close() // frees x, out, and all intermediates
}

Or with the functional style:

result, err := tensor.WithScope(func(s *tensor.Scope) *tensor.Tensor {
    x := s.Track(loadBatch(batch))
    return x.Matmul(w).Add(b).ReLU()
})
// result is kept alive; everything else is freed

func NewScope

func NewScope() *Scope

NewScope creates a new scope for tracking tensors.

func (*Scope) Close

func (s *Scope) Close()

Close releases all tracked tensors. Safe to call multiple times.

func (*Scope) Track

func (s *Scope) Track(t *Tensor) *Tensor

Track registers a tensor with this scope. The tensor will be released when the scope closes. Returns the same tensor for chaining:

out := scope.Track(x.Matmul(w))

type Tensor

type Tensor struct {
	// contains filtered or unexported fields
}

Tensor is an n-dimensional array of numbers, backed by libtorch.

Tensors carry an error state for chainable operations. If any operation in a chain fails, subsequent operations become no-ops and the error propagates to the end:

result := x.Matmul(w).Add(b).ReLU()
if err := result.Err(); err != nil { ... }

Tensors are reference-counted. The autograd engine calls Retain/Release to manage saved-for-backward tensors deterministically, freeing C++ memory (including VRAM) as soon as backward finishes with each tensor — without waiting for Go's garbage collector. A GC finalizer remains as a safety net for any tensor not explicitly released.

func AdaptiveAvgPool2dBackward

func AdaptiveAvgPool2dBackward(gradOutput, input *Tensor) *Tensor

AdaptiveAvgPool2dBackward computes the gradient for adaptive avg pool.

func Arange

func Arange(start, end, step float64, opts ...Option) (*Tensor, error)

Arange creates a 1D tensor with values start, start+step, start+2*step, ..., up to but not including end.

func ArangeEnd

func ArangeEnd(end float64, opts ...Option) (*Tensor, error)

ArangeEnd creates a 1D tensor with values [0, 1, 2, ..., end-1]. Shorthand for Arange(0, end, 1).

func CatAll

func CatAll(tensors []*Tensor, dim int) *Tensor

CatAll concatenates multiple tensors along a dimension.

func Conv2dBackward

func Conv2dBackward(gradOutput, input, weight *Tensor, stride, padding, dilation []int64, groups int64, computeBias bool) (gradInput, gradWeight, gradBias *Tensor)

Conv2dBackward computes gradients for a 2D convolution. Returns (gradInput, gradWeight, gradBias). gradBias is nil if computeBias is false.

func ConvTranspose2dBackward

func ConvTranspose2dBackward(gradOutput, input, weight *Tensor, stride, padding, outputPadding, dilation []int64, groups int64, computeBias bool) (gradInput, gradWeight, gradBias *Tensor)

ConvTranspose2dBackward computes gradients for a 2D transposed convolution.

func Eye

func Eye(n int64, opts ...Option) (*Tensor, error)

Eye creates an n×n identity matrix (float32).

func FromFloat32

func FromFloat32(data []float32, shape []int64, opts ...Option) (*Tensor, error)

FromFloat32 creates a tensor from a Go slice. Data is copied.

func FromFloat64

func FromFloat64(data []float64, shape []int64, opts ...Option) (*Tensor, error)

FromFloat64 creates a tensor from a Go slice. Data is copied.

func FromInt64

func FromInt64(data []int64, shape []int64, opts ...Option) (*Tensor, error)

FromInt64 creates an Int64 tensor from a Go slice. Data is copied. Useful for index tensors (e.g., Embedding lookups).

func Full

func Full(shape []int64, value float64, opts ...Option) (*Tensor, error)

Full creates a tensor filled with a single value.

func GridSampleBackward

func GridSampleBackward(gradOutput, input, grid *Tensor, mode, paddingMode int, alignCorners bool) (gradInput, gradGrid *Tensor)

GridSampleBackward computes gradients for grid sampling. Returns (gradInput, gradGrid).

func Linspace

func Linspace(start, end float64, steps int64, opts ...Option) (*Tensor, error)

Linspace creates a 1D tensor with evenly spaced values from start to end (inclusive).

func OneHot

func OneHot(indices *Tensor, nClasses, batchSize int64) *Tensor

OneHot converts a 1D int64 index tensor [B] to a float32 one-hot tensor [B, C]. Each row has a 1.0 at the index position and 0.0 elsewhere. C is the number of classes, B is the batch size (inferred from indices if 0).

func Ones

func Ones(shape []int64, opts ...Option) (*Tensor, error)

Ones creates a tensor filled with ones.

func Rand

func Rand(shape []int64, opts ...Option) (*Tensor, error)

Rand creates a tensor with uniform random values in [0, 1).

func RandN

func RandN(shape []int64, opts ...Option) (*Tensor, error)

RandN creates a tensor with values from a standard normal distribution.

func Stack

func Stack(tensors []*Tensor, dim int) *Tensor

Stack concatenates tensors along a new dimension. All tensors must have the same shape.

func WithScope

func WithScope(fn func(s *Scope) *Tensor) (*Tensor, error)

WithScope runs fn with a new scope and returns the result. All tensors tracked by the scope are released after fn returns, EXCEPT the returned tensor (which is automatically untracked).

result, err := tensor.WithScope(func(s *tensor.Scope) *tensor.Tensor {
    a := s.Track(someOp())
    b := s.Track(someOtherOp(a))
    return b  // b survives, a is freed
})

func WrapRaw

func WrapRaw(raw *libtorch.Tensor) *Tensor

WrapRaw creates a managed Tensor from a raw libtorch tensor. Exported for use by sibling packages (autograd).

func Zeros

func Zeros(shape []int64, opts ...Option) (*Tensor, error)

Zeros creates a tensor filled with zeros.

func (*Tensor) Abs

func (t *Tensor) Abs() *Tensor

Abs returns element-wise absolute value.

func (*Tensor) AdaptiveAvgPool2d

func (t *Tensor) AdaptiveAvgPool2d(outputSize []int64) *Tensor

AdaptiveAvgPool2d performs 2D adaptive average pooling. Input shape: [N, C, H, W]. Pools to outputSize [H_out, W_out].

func (*Tensor) Add

func (t *Tensor) Add(other *Tensor) *Tensor

Add returns the element-wise sum of t and other.

func (*Tensor) AddScalar

func (t *Tensor) AddScalar(scalar float64) *Tensor

AddScalar adds a scalar to every element.

func (*Tensor) AllFinite

func (t *Tensor) AllFinite() bool

AllFinite returns true if all elements are finite (no inf, no nan).

func (*Tensor) ArgMax

func (t *Tensor) ArgMax(dim int, keepdim bool) *Tensor

ArgMax returns indices of maximum values along a dimension (Int64 tensor).

func (*Tensor) Cat

func (t *Tensor) Cat(other *Tensor, dim int) *Tensor

Cat concatenates two tensors along dim.

func (*Tensor) Clamp

func (t *Tensor) Clamp(minVal, maxVal float64) *Tensor

Clamp clamps every element to [minVal, maxVal].

func (*Tensor) Conv2d

func (t *Tensor) Conv2d(weight, bias *Tensor, stride, padding, dilation []int64, groups int64) *Tensor

Conv2d performs a 2D convolution. bias may be nil. Input shape: [N, C_in, H, W]. Weight shape: [C_out, C_in/groups, kH, kW]. Bias shape: [C_out] or nil.

func (*Tensor) ConvTranspose2d

func (t *Tensor) ConvTranspose2d(weight, bias *Tensor, stride, padding, outputPadding, dilation []int64, groups int64) *Tensor

ConvTranspose2d performs a 2D transposed convolution (deconvolution). bias may be nil. Input shape: [N, C_in, H, W]. Weight shape: [C_in, C_out/groups, kH, kW].

func (*Tensor) DType

func (t *Tensor) DType() DType

DType returns the element type.

func (*Tensor) Device

func (t *Tensor) Device() Device

Device returns where the tensor lives (CPU or CUDA).

func (*Tensor) Div

func (t *Tensor) Div(other *Tensor) *Tensor

Div returns element-wise division t / other.

func (*Tensor) DivScalar

func (t *Tensor) DivScalar(scalar float64) *Tensor

DivScalar divides every element by a scalar value.

func (*Tensor) Double added in v0.2.0

func (t *Tensor) Double() *Tensor

Double casts the tensor to float64. Shorthand for ToDType(Float64).

func (*Tensor) Err

func (t *Tensor) Err() error

Err returns the error carried by this tensor, or nil if the tensor is valid. Check this after a chain of operations:

result := a.Add(b).Matmul(c)
if err := result.Err(); err != nil { ... }

func (*Tensor) Exp

func (t *Tensor) Exp() *Tensor

Exp returns element-wise exponential.

func (*Tensor) Expand

func (t *Tensor) Expand(shape []int64) *Tensor

Expand broadcasts the tensor to a larger shape. -1 keeps the existing size. The result shares data where possible (like a view). No gradient support needed — expand is a tensor creation/shaping op used to build sampling grids.

func (*Tensor) Flatten

func (t *Tensor) Flatten(startDim int) *Tensor

Flatten collapses dimensions from startDim to the end into a single dimension. Flatten(0) produces a 1D tensor; Flatten(1) keeps the batch dimension.

func (*Tensor) Float

func (t *Tensor) Float() *Tensor

Float casts the tensor to float32. Shorthand for ToDType(Float32).

func (*Tensor) Float32Data

func (t *Tensor) Float32Data() ([]float32, error)

Float32Data copies the tensor data into a Go float32 slice. The tensor is moved to CPU if necessary (without modifying the original).

func (*Tensor) Float64Data

func (t *Tensor) Float64Data() ([]float64, error)

Float64Data copies the tensor data into a Go float64 slice.

func (*Tensor) GEScalar

func (t *Tensor) GEScalar(scalar float64) *Tensor

GEScalar returns a float mask: 1.0 where element >= scalar, else 0.0.

func (*Tensor) GTScalar

func (t *Tensor) GTScalar(scalar float64) *Tensor

GTScalar returns a float mask: 1.0 where element > scalar, else 0.0.

func (*Tensor) GridSample

func (t *Tensor) GridSample(grid *Tensor, mode, paddingMode int, alignCorners bool) *Tensor

GridSample performs 2D grid sampling with bilinear interpolation. Input shape: [N, C, H, W]. Grid shape: [N, H_out, W_out, 2]. Grid coordinates are in [-1, 1] when alignCorners is true. mode: 0=bilinear, 1=nearest, 2=bicubic. paddingMode: 0=zeros, 1=border, 2=reflection.

func (*Tensor) Half

func (t *Tensor) Half() *Tensor

Half casts the tensor to float16. Shorthand for ToDType(Float16).

func (*Tensor) IndexAdd

func (t *Tensor) IndexAdd(dim int, index *Tensor, src *Tensor) *Tensor

IndexAdd returns t with src added at positions given by index along dim.

func (*Tensor) IndexSelect

func (t *Tensor) IndexSelect(dim int, index *Tensor) *Tensor

IndexSelect gathers slices along dim at the given indices (Int64 tensor).

func (*Tensor) Int64Data

func (t *Tensor) Int64Data() ([]int64, error)

Int64Data copies the tensor data into a Go int64 slice. If the tensor is not int64, it is cast first.

func (*Tensor) LEScalar

func (t *Tensor) LEScalar(scalar float64) *Tensor

LEScalar returns a float mask: 1.0 where element <= scalar, else 0.0.

func (*Tensor) LTScalar

func (t *Tensor) LTScalar(scalar float64) *Tensor

LTScalar returns a float mask: 1.0 where element < scalar, else 0.0.

func (*Tensor) Log

func (t *Tensor) Log() *Tensor

Log returns element-wise natural logarithm.

func (*Tensor) Matmul

func (t *Tensor) Matmul(other *Tensor) *Tensor

Matmul returns the matrix product of t and other.

func (*Tensor) MaxDim

func (t *Tensor) MaxDim(dim int, keepdim bool) *Tensor

MaxDim returns max values along a dimension.

func (*Tensor) Mean

func (t *Tensor) Mean() *Tensor

Mean reduces all elements to a scalar mean.

func (*Tensor) MeanDim

func (t *Tensor) MeanDim(dim int, keepdim bool) *Tensor

MeanDim computes the mean along a single dimension.

func (*Tensor) Min

func (t *Tensor) Min() *Tensor

Min reduces all elements to a scalar minimum.

func (*Tensor) MinDim

func (t *Tensor) MinDim(dim int, keepdim bool) *Tensor

MinDim returns min values along a dimension.

func (*Tensor) Mul

func (t *Tensor) Mul(other *Tensor) *Tensor

Mul returns the element-wise product of t and other.

func (*Tensor) MulScalar

func (t *Tensor) MulScalar(scalar float64) *Tensor

MulScalar multiplies every element by a scalar value.

func (*Tensor) Narrow

func (t *Tensor) Narrow(dim int, start, length int64) *Tensor

Narrow extracts a slice along dim: t[dim, start:start+length].

func (*Tensor) NarrowScatter

func (t *Tensor) NarrowScatter(src *Tensor, dim int, start int64) *Tensor

NarrowScatter returns t with the narrow slice at (dim, start) replaced by src.

func (*Tensor) Ndim

func (t *Tensor) Ndim() int

Ndim returns the number of dimensions.

func (*Tensor) Neg

func (t *Tensor) Neg() *Tensor

Neg returns element-wise negation.

func (*Tensor) Numel

func (t *Tensor) Numel() int64

Numel returns the total number of elements.

func (*Tensor) OnesLike

func (t *Tensor) OnesLike() *Tensor

OnesLike creates a tensor of ones with the same shape, dtype, and device.

func (*Tensor) Permute

func (t *Tensor) Permute(dims ...int) *Tensor

Permute reorders dimensions. For example, [B,C,H,W].Permute(0,2,3,1) → [B,H,W,C].

func (*Tensor) Pow

func (t *Tensor) Pow(exponent float64) *Tensor

Pow raises every element to a scalar exponent.

func (*Tensor) Raw

func (t *Tensor) Raw() *libtorch.Tensor

Raw returns the underlying libtorch tensor handle. This is exported for use by sibling packages (autograd) and should not be used by end users.

func (*Tensor) ReLU

func (t *Tensor) ReLU() *Tensor

ReLU applies the rectified linear unit activation: max(0, x).

func (*Tensor) Release

func (t *Tensor) Release()

Release decrements the reference count and frees the underlying C++ memory when no references remain. After the last Release, the tensor is in an error state and operations on it will return an error.

For tensors not managed by autograd, this behaves identically to the previous immediate-free semantics (refcount goes from 1 to 0). The GC finalizer remains as a safety net for unreleased tensors.

func (*Tensor) Reshape

func (t *Tensor) Reshape(shape []int64) *Tensor

Reshape returns a tensor with the given shape.

func (*Tensor) Retain added in v0.2.0

func (t *Tensor) Retain()

Retain increments the reference count, keeping the underlying C++ tensor alive until a matching Release is called. Used by autograd to save tensors for backward without depending on GC timing.

func (*Tensor) Select

func (t *Tensor) Select(dim int, index int64) *Tensor

Select picks a single index along a dimension, removing that dimension.

func (*Tensor) SelectScatter

func (t *Tensor) SelectScatter(src *Tensor, dim int, index int64) *Tensor

SelectScatter returns a copy of t with the slice at (dim, index) replaced by src.

func (*Tensor) Shape

func (t *Tensor) Shape() []int64

Shape returns the full shape as a slice.

func (*Tensor) Sigmoid

func (t *Tensor) Sigmoid() *Tensor

Sigmoid applies the sigmoid activation function: 1 / (1 + exp(-x)).

func (*Tensor) Softmax

func (t *Tensor) Softmax(dim int) *Tensor

Softmax applies softmax along a dimension.

func (*Tensor) Sqrt

func (t *Tensor) Sqrt() *Tensor

Sqrt returns element-wise square root.

func (*Tensor) Squeeze

func (t *Tensor) Squeeze(dim int) *Tensor

Squeeze removes a dimension of size 1. If the dimension is not size 1, returns the tensor unchanged.

func (*Tensor) String

func (t *Tensor) String() string

String returns a human-readable summary of the tensor.

func (*Tensor) Sub

func (t *Tensor) Sub(other *Tensor) *Tensor

Sub returns the element-wise difference t - other.

func (*Tensor) Sum

func (t *Tensor) Sum() *Tensor

Sum reduces all elements to a scalar tensor.

func (*Tensor) SumDim

func (t *Tensor) SumDim(dim int, keepdim bool) *Tensor

SumDim reduces along a single dimension.

func (*Tensor) Tanh

func (t *Tensor) Tanh() *Tensor

Tanh applies the hyperbolic tangent activation function.

func (*Tensor) ToBFloat16

func (t *Tensor) ToBFloat16() *Tensor

ToBFloat16 casts the tensor to bfloat16. Shorthand for ToDType(BFloat16).

func (*Tensor) ToCPU

func (t *Tensor) ToCPU() *Tensor

ToCPU is shorthand for ToDevice(CPU).

func (*Tensor) ToCUDA

func (t *Tensor) ToCUDA() *Tensor

ToCUDA is shorthand for ToDevice(CUDA).

func (*Tensor) ToDType

func (t *Tensor) ToDType(dtype DType) *Tensor

ToDType casts the tensor to a different element type. Returns a new tensor. No-op if the tensor is already the target dtype.

func (*Tensor) ToDevice

func (t *Tensor) ToDevice(device Device) *Tensor

ToDevice moves the tensor to the specified device. Returns a new tensor.

func (*Tensor) ToInt64 added in v0.2.0

func (t *Tensor) ToInt64() *Tensor

ToInt64 casts the tensor to int64. Shorthand for ToDType(Int64). Useful after comparison ops: caseLabel.GTScalar(0.5).ToInt64()

func (*Tensor) Transpose

func (t *Tensor) Transpose(dim0, dim1 int) *Tensor

Transpose swaps two dimensions.

func (*Tensor) Unsqueeze

func (t *Tensor) Unsqueeze(dim int) *Tensor

Unsqueeze inserts a new dimension of size 1 at the given position.

func (*Tensor) Where

func (t *Tensor) Where(x, y *Tensor) *Tensor

Where selects elements from x where t > 0, from y otherwise. t must be a float mask (0.0 or 1.0), as returned by GTScalar, GEScalar, etc.

func (*Tensor) ZerosLike

func (t *Tensor) ZerosLike() *Tensor

ZerosLike creates a tensor of zeros with the same shape, dtype, and device.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL