protobuf

package
v0.1.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 8, 2022 License: MIT Imports: 6 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
// OptimizerOptions_Level_name maps OptimizerOptions_Level enum numbers
// to their string names.
var OptimizerOptions_Level_name = map[int32]string{
	-1: "L0",
	0:  "L1",
}

// OptimizerOptions_Level_value maps OptimizerOptions_Level string names
// back to their enum numbers.
var OptimizerOptions_Level_value = map[string]int32{
	"L0": -1,
	"L1": 0,
}

Enum value maps for OptimizerOptions_Level.

View Source
// OptimizerOptions_GlobalJitLevel_name maps OptimizerOptions_GlobalJitLevel
// enum numbers to their string names.
var OptimizerOptions_GlobalJitLevel_name = map[int32]string{
	-1: "OFF",
	0:  "DEFAULT",
	1:  "ON_1",
	2:  "ON_2",
}

// OptimizerOptions_GlobalJitLevel_value maps the string names back to
// their enum numbers.
var OptimizerOptions_GlobalJitLevel_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     -1,
	"ON_1":    1,
	"ON_2":    2,
}

Enum value maps for OptimizerOptions_GlobalJitLevel.

View Source
// ConfigProto_Experimental_MlirBridgeRollout_name maps MlirBridgeRollout
// enum numbers to their string names.
var ConfigProto_Experimental_MlirBridgeRollout_name = map[int32]string{
	0: "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED",
	1: "MLIR_BRIDGE_ROLLOUT_ENABLED",
	2: "MLIR_BRIDGE_ROLLOUT_DISABLED",
	3: "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED",
	4: "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED",
}

// ConfigProto_Experimental_MlirBridgeRollout_value maps the string names
// back to their enum numbers (entries listed alphabetically).
var ConfigProto_Experimental_MlirBridgeRollout_value = map[string]int32{
	"MLIR_BRIDGE_ROLLOUT_DISABLED":                   2,
	"MLIR_BRIDGE_ROLLOUT_ENABLED":                    1,
	"MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED":          3,
	"MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED": 4,
	"MLIR_BRIDGE_ROLLOUT_UNSPECIFIED":                0,
}

Enum value maps for ConfigProto_Experimental_MlirBridgeRollout.

View Source
// RunOptions_TraceLevel_name maps RunOptions_TraceLevel enum numbers to
// their string names.
var RunOptions_TraceLevel_name = map[int32]string{
	0: "NO_TRACE",
	1: "SOFTWARE_TRACE",
	2: "HARDWARE_TRACE",
	3: "FULL_TRACE",
}

// RunOptions_TraceLevel_value maps the string names back to their enum
// numbers (entries listed alphabetically).
var RunOptions_TraceLevel_value = map[string]int32{
	"FULL_TRACE":     3,
	"HARDWARE_TRACE": 2,
	"NO_TRACE":       0,
	"SOFTWARE_TRACE": 1,
}

Enum value maps for RunOptions_TraceLevel.

View Source
// Code_name maps Code enum numbers to their string names, in ascending
// numeric order (16 sits between 15 and 20; 17-19 are unassigned).
var Code_name = map[int32]string{
	0:  "OK",
	1:  "CANCELLED",
	2:  "UNKNOWN",
	3:  "INVALID_ARGUMENT",
	4:  "DEADLINE_EXCEEDED",
	5:  "NOT_FOUND",
	6:  "ALREADY_EXISTS",
	7:  "PERMISSION_DENIED",
	8:  "RESOURCE_EXHAUSTED",
	9:  "FAILED_PRECONDITION",
	10: "ABORTED",
	11: "OUT_OF_RANGE",
	12: "UNIMPLEMENTED",
	13: "INTERNAL",
	14: "UNAVAILABLE",
	15: "DATA_LOSS",
	16: "UNAUTHENTICATED",
	20: "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_",
}

// Code_value maps the string names back to their enum numbers.
var Code_value = map[string]int32{
	"OK":                  0,
	"CANCELLED":           1,
	"UNKNOWN":             2,
	"INVALID_ARGUMENT":    3,
	"DEADLINE_EXCEEDED":   4,
	"NOT_FOUND":           5,
	"ALREADY_EXISTS":      6,
	"PERMISSION_DENIED":   7,
	"RESOURCE_EXHAUSTED":  8,
	"FAILED_PRECONDITION": 9,
	"ABORTED":             10,
	"OUT_OF_RANGE":        11,
	"UNIMPLEMENTED":       12,
	"INTERNAL":            13,
	"UNAVAILABLE":         14,
	"DATA_LOSS":           15,
	"UNAUTHENTICATED":     16,
	"DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_": 20,
}

Enum value maps for Code.

View Source
// RewriterConfig_Toggle_name maps RewriterConfig_Toggle enum numbers to
// their string names.
var RewriterConfig_Toggle_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
	3: "AGGRESSIVE",
}

// RewriterConfig_Toggle_value maps the string names back to their enum
// numbers (entries listed alphabetically).
var RewriterConfig_Toggle_value = map[string]int32{
	"AGGRESSIVE": 3,
	"DEFAULT":    0,
	"OFF":        2,
	"ON":         1,
}

Enum value maps for RewriterConfig_Toggle.

View Source
// RewriterConfig_CpuLayout_name maps RewriterConfig_CpuLayout enum
// numbers to their string names.
var RewriterConfig_CpuLayout_name = map[int32]string{
	0: "NO_CONVERSION_ON_CPU",
	1: "NCHW_TO_NHWC",
	2: "NHWC_TO_NCHW",
}

// RewriterConfig_CpuLayout_value maps the string names back to their
// enum numbers (entries listed alphabetically).
var RewriterConfig_CpuLayout_value = map[string]int32{
	"NCHW_TO_NHWC":         1,
	"NHWC_TO_NCHW":         2,
	"NO_CONVERSION_ON_CPU": 0,
}

Enum value maps for RewriterConfig_CpuLayout.

View Source
// RewriterConfig_NumIterationsType_name maps RewriterConfig_NumIterationsType
// enum numbers to their string names.
var RewriterConfig_NumIterationsType_name = map[int32]string{
	0: "DEFAULT_NUM_ITERS",
	1: "ONE",
	2: "TWO",
}

// RewriterConfig_NumIterationsType_value maps the string names back to
// their enum numbers.
var RewriterConfig_NumIterationsType_value = map[string]int32{
	"DEFAULT_NUM_ITERS": 0,
	"ONE":               1,
	"TWO":               2,
}

Enum value maps for RewriterConfig_NumIterationsType.

View Source
// RewriterConfig_MemOptType_name maps RewriterConfig_MemOptType enum
// numbers to their string names, in ascending numeric order.
var RewriterConfig_MemOptType_name = map[int32]string{
	0: "DEFAULT_MEM_OPT",
	1: "NO_MEM_OPT",
	2: "MANUAL",
	3: "HEURISTICS",
	4: "SWAPPING_HEURISTICS",
	5: "RECOMPUTATION_HEURISTICS",
	6: "SCHEDULING_HEURISTICS",
}

// RewriterConfig_MemOptType_value maps the string names back to their
// enum numbers (entries listed alphabetically).
var RewriterConfig_MemOptType_value = map[string]int32{
	"DEFAULT_MEM_OPT":          0,
	"HEURISTICS":               3,
	"MANUAL":                   2,
	"NO_MEM_OPT":               1,
	"RECOMPUTATION_HEURISTICS": 5,
	"SCHEDULING_HEURISTICS":    6,
	"SWAPPING_HEURISTICS":      4,
}

Enum value maps for RewriterConfig_MemOptType.

View Source
// FunctionSpec_JitCompile_name maps FunctionSpec_JitCompile enum numbers
// to their string names.
var FunctionSpec_JitCompile_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
}

// FunctionSpec_JitCompile_value maps the string names back to their enum
// numbers (entries listed alphabetically).
var FunctionSpec_JitCompile_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     2,
	"ON":      1,
}

Enum value maps for FunctionSpec_JitCompile.

View Source
// SaverDef_CheckpointFormatVersion_name maps SaverDef_CheckpointFormatVersion
// enum numbers to their string names.
var SaverDef_CheckpointFormatVersion_name = map[int32]string{
	0: "LEGACY",
	1: "V1",
	2: "V2",
}

// SaverDef_CheckpointFormatVersion_value maps the string names back to
// their enum numbers.
var SaverDef_CheckpointFormatVersion_value = map[string]int32{
	"LEGACY": 0,
	"V1":     1,
	"V2":     2,
}

Enum value maps for SaverDef_CheckpointFormatVersion.

View Source
// TypeSpecProto_TypeSpecClass_name maps TypeSpecProto_TypeSpecClass enum
// numbers to their string names (note: 11 is unassigned).
var TypeSpecProto_TypeSpecClass_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "SPARSE_TENSOR_SPEC",
	2:  "INDEXED_SLICES_SPEC",
	3:  "RAGGED_TENSOR_SPEC",
	4:  "TENSOR_ARRAY_SPEC",
	5:  "DATA_DATASET_SPEC",
	6:  "DATA_ITERATOR_SPEC",
	7:  "OPTIONAL_SPEC",
	8:  "PER_REPLICA_SPEC",
	9:  "VARIABLE_SPEC",
	10: "ROW_PARTITION_SPEC",
	12: "REGISTERED_TYPE_SPEC",
	13: "EXTENSION_TYPE_SPEC",
}

// TypeSpecProto_TypeSpecClass_value maps the string names back to their
// enum numbers (entries listed alphabetically).
var TypeSpecProto_TypeSpecClass_value = map[string]int32{
	"DATA_DATASET_SPEC":    5,
	"DATA_ITERATOR_SPEC":   6,
	"EXTENSION_TYPE_SPEC":  13,
	"INDEXED_SLICES_SPEC":  2,
	"OPTIONAL_SPEC":        7,
	"PER_REPLICA_SPEC":     8,
	"RAGGED_TENSOR_SPEC":   3,
	"REGISTERED_TYPE_SPEC": 12,
	"ROW_PARTITION_SPEC":   10,
	"SPARSE_TENSOR_SPEC":   1,
	"TENSOR_ARRAY_SPEC":    4,
	"UNKNOWN":              0,
	"VARIABLE_SPEC":        9,
}

Enum value maps for TypeSpecProto_TypeSpecClass.

View Source
// VerifierConfig_Toggle_name maps VerifierConfig_Toggle enum numbers to
// their string names.
var VerifierConfig_Toggle_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
}

// VerifierConfig_Toggle_value maps the string names back to their enum
// numbers (entries listed alphabetically).
var VerifierConfig_Toggle_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     2,
	"ON":      1,
}

Enum value maps for VerifierConfig_Toggle.

View Source
// File descriptors for the tensorflow/core/protobuf/*.proto schema files
// whose bindings appear in this package. (Presumably populated by
// generated registration code at init — TODO confirm against the source.)
var File_tensorflow_core_protobuf_cluster_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_config_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_debug_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_error_codes_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_meta_graph_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_named_tensor_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_rewriter_config_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_saved_object_graph_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_saver_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_struct_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_trackable_object_graph_proto protoreflect.FileDescriptor
View Source
var File_tensorflow_core_protobuf_verifier_config_proto protoreflect.FileDescriptor

Functions

This section is empty.

Types

type AssetFileDef

type AssetFileDef struct {

	// The tensor to bind the asset filename to.
	TensorInfo *TensorInfo `protobuf:"bytes,1,opt,name=tensor_info,json=tensorInfo,proto3" json:"tensor_info,omitempty"`
	// The filename within an assets directory. Note: does not include the path
	// prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename
	// would be "vocab.txt".
	Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"`
	// contains filtered or unexported fields
}

An asset file def for a single file or a set of sharded files with the same name.

func (*AssetFileDef) Descriptor deprecated

func (*AssetFileDef) Descriptor() ([]byte, []int)

Deprecated: Use AssetFileDef.ProtoReflect.Descriptor instead.

func (*AssetFileDef) GetFilename

func (x *AssetFileDef) GetFilename() string

func (*AssetFileDef) GetTensorInfo

func (x *AssetFileDef) GetTensorInfo() *TensorInfo

func (*AssetFileDef) ProtoMessage

func (*AssetFileDef) ProtoMessage()

func (*AssetFileDef) ProtoReflect

func (x *AssetFileDef) ProtoReflect() protoreflect.Message

func (*AssetFileDef) Reset

func (x *AssetFileDef) Reset()

func (*AssetFileDef) String

func (x *AssetFileDef) String() string

type AutoParallelOptions

type AutoParallelOptions struct {
	Enable      bool  `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"`
	NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas,proto3" json:"num_replicas,omitempty"`
	// contains filtered or unexported fields
}

func (*AutoParallelOptions) Descriptor deprecated

func (*AutoParallelOptions) Descriptor() ([]byte, []int)

Deprecated: Use AutoParallelOptions.ProtoReflect.Descriptor instead.

func (*AutoParallelOptions) GetEnable

func (x *AutoParallelOptions) GetEnable() bool

func (*AutoParallelOptions) GetNumReplicas

func (x *AutoParallelOptions) GetNumReplicas() int32

func (*AutoParallelOptions) ProtoMessage

func (*AutoParallelOptions) ProtoMessage()

func (*AutoParallelOptions) ProtoReflect

func (x *AutoParallelOptions) ProtoReflect() protoreflect.Message

func (*AutoParallelOptions) Reset

func (x *AutoParallelOptions) Reset()

func (*AutoParallelOptions) String

func (x *AutoParallelOptions) String() string

type BoundedTensorSpecProto

type BoundedTensorSpecProto struct {
	Name    string                      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape   *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype   framework.DataType          `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Minimum *framework.TensorProto      `protobuf:"bytes,4,opt,name=minimum,proto3" json:"minimum,omitempty"`
	Maximum *framework.TensorProto      `protobuf:"bytes,5,opt,name=maximum,proto3" json:"maximum,omitempty"`
	// contains filtered or unexported fields
}

A protobuf to represent tf.BoundedTensorSpec.

func (*BoundedTensorSpecProto) Descriptor deprecated

func (*BoundedTensorSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use BoundedTensorSpecProto.ProtoReflect.Descriptor instead.

func (*BoundedTensorSpecProto) GetDtype

func (*BoundedTensorSpecProto) GetMaximum

func (x *BoundedTensorSpecProto) GetMaximum() *framework.TensorProto

func (*BoundedTensorSpecProto) GetMinimum

func (x *BoundedTensorSpecProto) GetMinimum() *framework.TensorProto

func (*BoundedTensorSpecProto) GetName

func (x *BoundedTensorSpecProto) GetName() string

func (*BoundedTensorSpecProto) GetShape

func (*BoundedTensorSpecProto) ProtoMessage

func (*BoundedTensorSpecProto) ProtoMessage()

func (*BoundedTensorSpecProto) ProtoReflect

func (x *BoundedTensorSpecProto) ProtoReflect() protoreflect.Message

func (*BoundedTensorSpecProto) Reset

func (x *BoundedTensorSpecProto) Reset()

func (*BoundedTensorSpecProto) String

func (x *BoundedTensorSpecProto) String() string

type CallableOptions

type CallableOptions struct {

	// Tensors to be fed in the callable. Each feed is the name of a tensor.
	Feed []string `protobuf:"bytes,1,rep,name=feed,proto3" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller of the callable expects a
	// tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
	// order of specified fetches does not change the execution order.
	Fetch []string `protobuf:"bytes,2,rep,name=fetch,proto3" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run by the
	// callable but their outputs will not be returned.
	Target []string `protobuf:"bytes,3,rep,name=target,proto3" json:"target,omitempty"`
	// Options that will be applied to each run.
	RunOptions *RunOptions `protobuf:"bytes,4,opt,name=run_options,json=runOptions,proto3" json:"run_options,omitempty"`
	// Tensors to be connected in the callable. Each TensorConnection denotes
	// a pair of tensors in the graph, between which an edge will be created
	// in the callable.
	TensorConnection []*TensorConnection `protobuf:"bytes,5,rep,name=tensor_connection,json=tensorConnection,proto3" json:"tensor_connection,omitempty"`
	// The Tensor objects fed in the callable and fetched from the callable
	// are expected to be backed by host (CPU) memory by default.
	//
	// The options below allow changing that - feeding tensors backed by
	// device memory, or returning tensors that are backed by device memory.
	//
	// The maps below map the name of a feed/fetch tensor (which appears in
	// 'feed' or 'fetch' fields above), to the fully qualified name of the device
	// owning the memory backing the contents of the tensor.
	//
	// For example, creating a callable with the following options:
	//
	// CallableOptions {
	//   feed: "a:0"
	//   feed: "b:0"
	//
	//   fetch: "x:0"
	//   fetch: "y:0"
	//
	//   feed_devices: {
	//     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//   }
	//
	//   fetch_devices: {
	//     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//  }
	// }
	//
	// means that the Callable expects:
	// - The first argument ("a:0") is a Tensor backed by GPU memory.
	// - The second argument ("b:0") is a Tensor backed by host memory.
	// and of its return values:
	// - The first output ("x:0") will be backed by host memory.
	// - The second output ("y:0") will be backed by GPU memory.
	//
	// FEEDS:
	// It is the responsibility of the caller to ensure that the memory of the fed
	// tensors will be correctly initialized and synchronized before it is
	// accessed by operations executed during the call to Session::RunCallable().
	//
	// This is typically ensured by using the TensorFlow memory allocators
	// (Device::GetAllocator()) to create the Tensor to be fed.
	//
	// Alternatively, for CUDA-enabled GPU devices, this typically means that the
	// operation that produced the contents of the tensor has completed, i.e., the
	// CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
	// cuStreamSynchronize()).
	FeedDevices  map[string]string `` /* 182-byte string literal not displayed */
	FetchDevices map[string]string `` /* 185-byte string literal not displayed */
	// By default, RunCallable() will synchronize the GPU stream before returning
	// fetched tensors on a GPU device, to ensure that the values in those tensors
	// have been produced. This simplifies interacting with the tensors, but
	// potentially incurs a performance hit.
	//
	// If this options is set to true, the caller is responsible for ensuring
	// that the values in the fetched tensors have been produced before they are
	// used. The caller can do this by invoking `Device::Sync()` on the underlying
	// device(s), or by feeding the tensors back to the same Session using
	// `feed_devices` with the same corresponding device name.
	FetchSkipSync bool `protobuf:"varint,8,opt,name=fetch_skip_sync,json=fetchSkipSync,proto3" json:"fetch_skip_sync,omitempty"`
	// contains filtered or unexported fields
}

Defines a subgraph in another `GraphDef` as a set of feed points and nodes to be fetched or executed.

Compare with the arguments to `Session::Run()`.

func (*CallableOptions) Descriptor deprecated

func (*CallableOptions) Descriptor() ([]byte, []int)

Deprecated: Use CallableOptions.ProtoReflect.Descriptor instead.

func (*CallableOptions) GetFeed

func (x *CallableOptions) GetFeed() []string

func (*CallableOptions) GetFeedDevices

func (x *CallableOptions) GetFeedDevices() map[string]string

func (*CallableOptions) GetFetch

func (x *CallableOptions) GetFetch() []string

func (*CallableOptions) GetFetchDevices

func (x *CallableOptions) GetFetchDevices() map[string]string

func (*CallableOptions) GetFetchSkipSync

func (x *CallableOptions) GetFetchSkipSync() bool

func (*CallableOptions) GetRunOptions

func (x *CallableOptions) GetRunOptions() *RunOptions

func (*CallableOptions) GetTarget

func (x *CallableOptions) GetTarget() []string

func (*CallableOptions) GetTensorConnection

func (x *CallableOptions) GetTensorConnection() []*TensorConnection

func (*CallableOptions) ProtoMessage

func (*CallableOptions) ProtoMessage()

func (*CallableOptions) ProtoReflect

func (x *CallableOptions) ProtoReflect() protoreflect.Message

func (*CallableOptions) Reset

func (x *CallableOptions) Reset()

func (*CallableOptions) String

func (x *CallableOptions) String() string

type CapturedTensor

type CapturedTensor struct {

	// Name of captured tensor
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Name of concrete function which contains the computed graph tensor.
	ConcreteFunction string `protobuf:"bytes,2,opt,name=concrete_function,json=concreteFunction,proto3" json:"concrete_function,omitempty"`
	// contains filtered or unexported fields
}

func (*CapturedTensor) Descriptor deprecated

func (*CapturedTensor) Descriptor() ([]byte, []int)

Deprecated: Use CapturedTensor.ProtoReflect.Descriptor instead.

func (*CapturedTensor) GetConcreteFunction

func (x *CapturedTensor) GetConcreteFunction() string

func (*CapturedTensor) GetName

func (x *CapturedTensor) GetName() string

func (*CapturedTensor) ProtoMessage

func (*CapturedTensor) ProtoMessage()

func (*CapturedTensor) ProtoReflect

func (x *CapturedTensor) ProtoReflect() protoreflect.Message

func (*CapturedTensor) Reset

func (x *CapturedTensor) Reset()

func (*CapturedTensor) String

func (x *CapturedTensor) String() string

type ClusterDef

type ClusterDef struct {

	// The jobs that comprise the cluster.
	Job []*JobDef `protobuf:"bytes,1,rep,name=job,proto3" json:"job,omitempty"`
	// contains filtered or unexported fields
}

Defines a TensorFlow cluster as a set of jobs.

func (*ClusterDef) Descriptor deprecated

func (*ClusterDef) Descriptor() ([]byte, []int)

Deprecated: Use ClusterDef.ProtoReflect.Descriptor instead.

func (*ClusterDef) GetJob

func (x *ClusterDef) GetJob() []*JobDef

func (*ClusterDef) ProtoMessage

func (*ClusterDef) ProtoMessage()

func (*ClusterDef) ProtoReflect

func (x *ClusterDef) ProtoReflect() protoreflect.Message

func (*ClusterDef) Reset

func (x *ClusterDef) Reset()

func (*ClusterDef) String

func (x *ClusterDef) String() string

type Code

// Code is the canonical error-code enum (an int32); the defined values
// are listed in the const block below.
type Code int32

The canonical error codes for TensorFlow APIs.

Warnings:

  • Do not change any numeric assignments.
  • Changes to this list should only be made if there is a compelling need that can't be satisfied in another way. Such changes must be approved by at least two OWNERS.
  • These error codes must match gRPC and protobuf error codes (except for DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_).

Sometimes multiple error codes may apply. Services should return the most specific error code that applies. For example, prefer OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply. Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION.

// NOTE(review): these numeric assignments are part of the API contract —
// do not renumber. Except for the reserved DO_NOT_USE_... sentinel, they
// match the gRPC/protobuf canonical error codes (see the warnings above).
const (
	// Not an error; returned on success
	Code_OK Code = 0
	// The operation was cancelled (typically by the caller).
	Code_CANCELLED Code = 1
	// Unknown error.  An example of where this error may be returned is
	// if a Status value received from another address space belongs to
	// an error-space that is not known in this address space.  Also
	// errors raised by APIs that do not return enough error information
	// may be converted to this error.
	Code_UNKNOWN Code = 2
	// Client specified an invalid argument.  Note that this differs
	// from FAILED_PRECONDITION.  INVALID_ARGUMENT indicates arguments
	// that are problematic regardless of the state of the system
	// (e.g., a malformed file name).
	Code_INVALID_ARGUMENT Code = 3
	// Deadline expired before operation could complete.  For operations
	// that change the state of the system, this error may be returned
	// even if the operation has completed successfully.  For example, a
	// successful response from a server could have been delayed long
	// enough for the deadline to expire.
	Code_DEADLINE_EXCEEDED Code = 4
	// Some requested entity (e.g., file or directory) was not found.
	// For privacy reasons, this code *may* be returned when the client
	// does not have the access right to the entity.
	Code_NOT_FOUND Code = 5
	// Some entity that we attempted to create (e.g., file or directory)
	// already exists.
	Code_ALREADY_EXISTS Code = 6
	// The caller does not have permission to execute the specified
	// operation.  PERMISSION_DENIED must not be used for rejections
	// caused by exhausting some resource (use RESOURCE_EXHAUSTED
	// instead for those errors).  PERMISSION_DENIED must not be
	// used if the caller can not be identified (use UNAUTHENTICATED
	// instead for those errors).
	Code_PERMISSION_DENIED Code = 7
	// The request does not have valid authentication credentials for the
	// operation.
	Code_UNAUTHENTICATED Code = 16
	// Some resource has been exhausted, perhaps a per-user quota, or
	// perhaps the entire file system is out of space.
	Code_RESOURCE_EXHAUSTED Code = 8
	// Operation was rejected because the system is not in a state
	// required for the operation's execution.  For example, directory
	// to be deleted may be non-empty, an rmdir operation is applied to
	// a non-directory, etc.
	//
	// A litmus test that may help a service implementor in deciding
	// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
	//  (a) Use UNAVAILABLE if the client can retry just the failing call.
	//  (b) Use ABORTED if the client should retry at a higher-level
	//      (e.g., restarting a read-modify-write sequence).
	//  (c) Use FAILED_PRECONDITION if the client should not retry until
	//      the system state has been explicitly fixed.  E.g., if an "rmdir"
	//      fails because the directory is non-empty, FAILED_PRECONDITION
	//      should be returned since the client should not retry unless
	//      they have first fixed up the directory by deleting files from it.
	//  (d) Use FAILED_PRECONDITION if the client performs conditional
	//      REST Get/Update/Delete on a resource and the resource on the
	//      server does not match the condition. E.g., conflicting
	//      read-modify-write on the same resource.
	Code_FAILED_PRECONDITION Code = 9
	// The operation was aborted, typically due to a concurrency issue
	// like sequencer check failures, transaction aborts, etc.
	//
	// See litmus test above for deciding between FAILED_PRECONDITION,
	// ABORTED, and UNAVAILABLE.
	Code_ABORTED Code = 10
	// Operation tried to iterate past the valid input range.  E.g., seeking or
	// reading past end of file.
	//
	// Unlike INVALID_ARGUMENT, this error indicates a problem that may
	// be fixed if the system state changes. For example, a 32-bit file
	// system will generate INVALID_ARGUMENT if asked to read at an
	// offset that is not in the range [0,2^32-1], but it will generate
	// OUT_OF_RANGE if asked to read from an offset past the current
	// file size.
	//
	// There is a fair bit of overlap between FAILED_PRECONDITION and
	// OUT_OF_RANGE.  We recommend using OUT_OF_RANGE (the more specific
	// error) when it applies so that callers who are iterating through
	// a space can easily look for an OUT_OF_RANGE error to detect when
	// they are done.
	Code_OUT_OF_RANGE Code = 11
	// Operation is not implemented or not supported/enabled in this service.
	Code_UNIMPLEMENTED Code = 12
	// Internal errors.  Means some invariant expected by the underlying
	// system has been broken.  If you see one of these errors,
	// something is very broken.
	Code_INTERNAL Code = 13
	// The service is currently unavailable.  This is a most likely a
	// transient condition and may be corrected by retrying with
	// a backoff.
	//
	// See litmus test above for deciding between FAILED_PRECONDITION,
	// ABORTED, and UNAVAILABLE.
	Code_UNAVAILABLE Code = 14
	// Unrecoverable data loss or corruption.
	Code_DATA_LOSS Code = 15
	// An extra enum entry to prevent people from writing code that
	// fails to compile when a new code is added.
	//
	// Nobody should ever reference this enumeration entry. In particular,
	// if you write C++ code that switches on this enumeration, add a default:
	// case instead of a case that mentions this enumeration entry.
	//
	// Nobody should rely on the value (currently 20) listed here.  It
	// may change in the future.
	Code_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ Code = 20
)

func (Code) Descriptor

func (Code) Descriptor() protoreflect.EnumDescriptor

func (Code) Enum

func (x Code) Enum() *Code

func (Code) EnumDescriptor deprecated

func (Code) EnumDescriptor() ([]byte, []int)

Deprecated: Use Code.Descriptor instead.

func (Code) Number

func (x Code) Number() protoreflect.EnumNumber

func (Code) String

func (x Code) String() string

func (Code) Type

func (Code) Type() protoreflect.EnumType

type CollectionDef

type CollectionDef struct {

	// Types that are assignable to Kind:
	//	*CollectionDef_NodeList_
	//	*CollectionDef_BytesList_
	//	*CollectionDef_Int64List_
	//	*CollectionDef_FloatList_
	//	*CollectionDef_AnyList_
	Kind isCollectionDef_Kind `protobuf_oneof:"kind"`
	// contains filtered or unexported fields
}

CollectionDef should cover most collections. To add a user-defined collection, do one of the following:

  1. For simple data types, such as string, int, float: tf.add_to_collection("your_collection_name", your_simple_value) strings will be stored as bytes_list.

2. For Protobuf types, there are three ways to add them:

  1. tf.add_to_collection("your_collection_name", your_proto.SerializeToString())

    collection_def { key: "user_defined_bytes_collection" value { bytes_list { value: "queue_name: \"test_queue\"\n" } } }

    or

  2. tf.add_to_collection("your_collection_name", str(your_proto))

    collection_def { key: "user_defined_string_collection" value { bytes_list { value: "\n\ntest_queue" } } }

    or

  3. any_buf = any_pb2.Any() tf.add_to_collection("your_collection_name", any_buf.Pack(your_proto))

    collection_def { key: "user_defined_any_collection" value { any_list { value { type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" value: "\n\ntest_queue" } } } }

  3. For Python objects, implement to_proto() and from_proto(), and register them in the following manner: ops.register_proto_function("your_collection_name", proto_type, to_proto=YourPythonObject.to_proto, from_proto=YourPythonObject.from_proto) These functions will be invoked to serialize and de-serialize the collection. For example, ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=Variable.to_proto, from_proto=Variable.from_proto)

func (*CollectionDef) Descriptor deprecated

func (*CollectionDef) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef.ProtoReflect.Descriptor instead.

func (*CollectionDef) GetAnyList

func (x *CollectionDef) GetAnyList() *CollectionDef_AnyList

func (*CollectionDef) GetBytesList

func (x *CollectionDef) GetBytesList() *CollectionDef_BytesList

func (*CollectionDef) GetFloatList

func (x *CollectionDef) GetFloatList() *CollectionDef_FloatList

func (*CollectionDef) GetInt64List

func (x *CollectionDef) GetInt64List() *CollectionDef_Int64List

func (*CollectionDef) GetKind

func (m *CollectionDef) GetKind() isCollectionDef_Kind

func (*CollectionDef) GetNodeList

func (x *CollectionDef) GetNodeList() *CollectionDef_NodeList

func (*CollectionDef) ProtoMessage

func (*CollectionDef) ProtoMessage()

func (*CollectionDef) ProtoReflect

func (x *CollectionDef) ProtoReflect() protoreflect.Message

func (*CollectionDef) Reset

func (x *CollectionDef) Reset()

func (*CollectionDef) String

func (x *CollectionDef) String() string

type CollectionDef_AnyList

type CollectionDef_AnyList struct {
	Value []*anypb.Any `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

AnyList is used for collecting Any protos.

func (*CollectionDef_AnyList) Descriptor deprecated

func (*CollectionDef_AnyList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_AnyList.ProtoReflect.Descriptor instead.

func (*CollectionDef_AnyList) GetValue

func (x *CollectionDef_AnyList) GetValue() []*anypb.Any

func (*CollectionDef_AnyList) ProtoMessage

func (*CollectionDef_AnyList) ProtoMessage()

func (*CollectionDef_AnyList) ProtoReflect

func (x *CollectionDef_AnyList) ProtoReflect() protoreflect.Message

func (*CollectionDef_AnyList) Reset

func (x *CollectionDef_AnyList) Reset()

func (*CollectionDef_AnyList) String

func (x *CollectionDef_AnyList) String() string

type CollectionDef_AnyList_

// CollectionDef_AnyList_ is the wrapper type that carries the any_list
// variant of CollectionDef's Kind oneof (protobuf field 5).
type CollectionDef_AnyList_ struct {
	// AnyList holds the AnyList payload for this oneof variant.
	AnyList *CollectionDef_AnyList `protobuf:"bytes,5,opt,name=any_list,json=anyList,proto3,oneof"`
}

type CollectionDef_BytesList

type CollectionDef_BytesList struct {
	Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

BytesList is used for collecting strings and serialized protobufs. For example:

collection_def {
  key: "trainable_variables"
  value {
    bytes_list {
      value: "\n\017conv1/weights:0\022\024conv1/weights/Assign
             \032\024conv1/weights/read:0"
      value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032
             \023conv1/biases/read:0"
    }
  }
}

func (*CollectionDef_BytesList) Descriptor deprecated

func (*CollectionDef_BytesList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_BytesList.ProtoReflect.Descriptor instead.

func (*CollectionDef_BytesList) GetValue

func (x *CollectionDef_BytesList) GetValue() [][]byte

func (*CollectionDef_BytesList) ProtoMessage

func (*CollectionDef_BytesList) ProtoMessage()

func (*CollectionDef_BytesList) ProtoReflect

func (x *CollectionDef_BytesList) ProtoReflect() protoreflect.Message

func (*CollectionDef_BytesList) Reset

func (x *CollectionDef_BytesList) Reset()

func (*CollectionDef_BytesList) String

func (x *CollectionDef_BytesList) String() string

type CollectionDef_BytesList_

// CollectionDef_BytesList_ is the wrapper type that carries the bytes_list
// variant of CollectionDef's Kind oneof (protobuf field 2).
type CollectionDef_BytesList_ struct {
	// BytesList holds the BytesList payload for this oneof variant.
	BytesList *CollectionDef_BytesList `protobuf:"bytes,2,opt,name=bytes_list,json=bytesList,proto3,oneof"`
}

type CollectionDef_FloatList

type CollectionDef_FloatList struct {
	Value []float32 `protobuf:"fixed32,1,rep,packed,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

FloatList is used for collecting float values.

func (*CollectionDef_FloatList) Descriptor deprecated

func (*CollectionDef_FloatList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_FloatList.ProtoReflect.Descriptor instead.

func (*CollectionDef_FloatList) GetValue

func (x *CollectionDef_FloatList) GetValue() []float32

func (*CollectionDef_FloatList) ProtoMessage

func (*CollectionDef_FloatList) ProtoMessage()

func (*CollectionDef_FloatList) ProtoReflect

func (x *CollectionDef_FloatList) ProtoReflect() protoreflect.Message

func (*CollectionDef_FloatList) Reset

func (x *CollectionDef_FloatList) Reset()

func (*CollectionDef_FloatList) String

func (x *CollectionDef_FloatList) String() string

type CollectionDef_FloatList_

// CollectionDef_FloatList_ is the wrapper type that carries the float_list
// variant of CollectionDef's Kind oneof (protobuf field 4).
type CollectionDef_FloatList_ struct {
	// FloatList holds the FloatList payload for this oneof variant.
	FloatList *CollectionDef_FloatList `protobuf:"bytes,4,opt,name=float_list,json=floatList,proto3,oneof"`
}

type CollectionDef_Int64List

type CollectionDef_Int64List struct {
	Value []int64 `protobuf:"varint,1,rep,packed,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

Int64List is used for collecting int, int64 and long values.

func (*CollectionDef_Int64List) Descriptor deprecated

func (*CollectionDef_Int64List) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_Int64List.ProtoReflect.Descriptor instead.

func (*CollectionDef_Int64List) GetValue

func (x *CollectionDef_Int64List) GetValue() []int64

func (*CollectionDef_Int64List) ProtoMessage

func (*CollectionDef_Int64List) ProtoMessage()

func (*CollectionDef_Int64List) ProtoReflect

func (x *CollectionDef_Int64List) ProtoReflect() protoreflect.Message

func (*CollectionDef_Int64List) Reset

func (x *CollectionDef_Int64List) Reset()

func (*CollectionDef_Int64List) String

func (x *CollectionDef_Int64List) String() string

type CollectionDef_Int64List_

type CollectionDef_Int64List_ struct {
	Int64List *CollectionDef_Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,proto3,oneof"`
}

type CollectionDef_NodeList

type CollectionDef_NodeList struct {
	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

NodeList is used for collecting nodes in graph. For example

collection_def {
  key: "summaries"
  value {
    node_list {
      value: "input_producer/ScalarSummary:0"
      value: "shuffle_batch/ScalarSummary:0"
      value: "ImageSummary:0"
    }
  }
}

func (*CollectionDef_NodeList) Descriptor deprecated

func (*CollectionDef_NodeList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_NodeList.ProtoReflect.Descriptor instead.

func (*CollectionDef_NodeList) GetValue

func (x *CollectionDef_NodeList) GetValue() []string

func (*CollectionDef_NodeList) ProtoMessage

func (*CollectionDef_NodeList) ProtoMessage()

func (*CollectionDef_NodeList) ProtoReflect

func (x *CollectionDef_NodeList) ProtoReflect() protoreflect.Message

func (*CollectionDef_NodeList) Reset

func (x *CollectionDef_NodeList) Reset()

func (*CollectionDef_NodeList) String

func (x *CollectionDef_NodeList) String() string

type CollectionDef_NodeList_

type CollectionDef_NodeList_ struct {
	NodeList *CollectionDef_NodeList `protobuf:"bytes,1,opt,name=node_list,json=nodeList,proto3,oneof"`
}

type ConfigProto

type ConfigProto struct {

	// Map from device type name (e.g., "CPU" or "GPU") to maximum
	// number of devices of that type to use.  If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 183-byte string literal not displayed */
	// The execution of an individual op (for some op types) can be
	// parallelized on a pool of intra_op_parallelism_threads.
	// 0 means the system picks an appropriate number.
	//
	// If you create an ordinary session, e.g., from Python or C++,
	// then there is exactly one intra op thread pool per process.
	// The first session created determines the number of threads in this pool.
	// All subsequent sessions reuse/share this one global pool.
	//
	// There are notable exceptions to the default behavior described above:
	// 1. There is an environment variable for overriding this thread pool,
	//    named TF_OVERRIDE_GLOBAL_THREADPOOL.
	// 2. When connecting to a server, such as a remote `tf.train.Server`
	//    instance, then this option will be ignored altogether.
	IntraOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// Nodes that perform blocking operations are enqueued on a pool of
	// inter_op_parallelism_threads available in each process.
	//
	// 0 means the system picks an appropriate number.
	// Negative means all operations are performed in caller's thread.
	//
	// Note that the first Session created in the process sets the
	// number of threads for all future sessions unless use_per_session_threads is
	// true or session_inter_op_thread_pool is configured.
	InterOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// If true, use a new set of threads for this session rather than the global
	// pool of threads. Only supported by direct sessions.
	//
	// If false, use the global threads created by the first session, or the
	// per-session thread pools configured by session_inter_op_thread_pool.
	//
	// This option is deprecated. The same effect can be achieved by setting
	// session_inter_op_thread_pool to have one element, whose num_threads equals
	// inter_op_parallelism_threads.
	UsePerSessionThreads bool `` /* 126-byte string literal not displayed */
	// This option is experimental - it may be replaced with a different mechanism
	// in the future.
	//
	// Configures session thread pools. If this is configured, then RunOptions for
	// a Run call can select the thread pool to use.
	//
	// The intended use is for when some session invocations need to run in a
	// background pool limited to a small number of threads:
	// - For example, a session may be configured to have one large pool (for
	// regular compute) and one small pool (for periodic, low priority work);
	// using the small pool is currently the mechanism for limiting the inter-op
	// parallelism of the low priority work.  Note that it does not limit the
	// parallelism of work spawned by a single op kernel implementation.
	// - Using this setting is normally not needed in training, but may help some
	// serving use cases.
	// - It is also generally recommended to set the global_name field of this
	// proto, to avoid creating multiple large pools. It is typically better to
	// run the non-low-priority work, even across sessions, in a single large
	// pool.
	SessionInterOpThreadPool []*ThreadPoolOptionProto `` /* 140-byte string literal not displayed */
	// Assignment of Nodes to Devices is recomputed every placement_period
	// steps until the system warms up (at which point the recomputation
	// typically slows down automatically).
	PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"`
	// When any filters are present sessions will ignore all devices which do not
	// match the filters. Each filter can be partially specified, e.g. "/job:ps"
	// "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"`
	// Options that apply to all GPUs.
	GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"`
	// Whether soft placement is allowed. If allow_soft_placement is true,
	// an op will be placed on CPU if
	//   1. there's no GPU implementation for the OP
	// or
	//   2. no GPU devices are known or registered
	// or
	//   3. need to co-locate with reftype input(s) which are from CPU.
	AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"`
	// Whether device placements should be logged.
	LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"`
	// Options that apply to all graphs.
	GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"`
	// Global timeout for all blocking operations in this session.  If non-zero,
	// and not overridden on a per-operation basis, this value will be used as the
	// deadline for all blocking operations.
	OperationTimeoutInMs int64 `` /* 127-byte string literal not displayed */
	// Options that apply when this session uses the distributed runtime.
	RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions,proto3" json:"rpc_options,omitempty"`
	// Optional list of all workers to use in this session.
	ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef,proto3" json:"cluster_def,omitempty"`
	// If true, any resources such as Variables used in the session will not be
	// shared with other sessions. However, when clusterspec propagation is
	// enabled, this field is ignored and sessions are always isolated.
	IsolateSessionState bool `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"`
	// When true, WorkerSessions are created with device attributes from the
	// full cluster.
	// This is helpful when a worker wants to partition a graph
	// (for example during a PartitionedCallOp).
	ShareClusterDevicesInSession bool                      `` /* 153-byte string literal not displayed */
	Experimental                 *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}

Session configuration parameters. The system picks appropriate values for fields that are not set.

func (*ConfigProto) Descriptor deprecated

func (*ConfigProto) Descriptor() ([]byte, []int)

Deprecated: Use ConfigProto.ProtoReflect.Descriptor instead.

func (*ConfigProto) GetAllowSoftPlacement

func (x *ConfigProto) GetAllowSoftPlacement() bool

func (*ConfigProto) GetClusterDef

func (x *ConfigProto) GetClusterDef() *ClusterDef

func (*ConfigProto) GetDeviceCount

func (x *ConfigProto) GetDeviceCount() map[string]int32

func (*ConfigProto) GetDeviceFilters

func (x *ConfigProto) GetDeviceFilters() []string

func (*ConfigProto) GetExperimental

func (x *ConfigProto) GetExperimental() *ConfigProto_Experimental

func (*ConfigProto) GetGpuOptions

func (x *ConfigProto) GetGpuOptions() *GPUOptions

func (*ConfigProto) GetGraphOptions

func (x *ConfigProto) GetGraphOptions() *GraphOptions

func (*ConfigProto) GetInterOpParallelismThreads

func (x *ConfigProto) GetInterOpParallelismThreads() int32

func (*ConfigProto) GetIntraOpParallelismThreads

func (x *ConfigProto) GetIntraOpParallelismThreads() int32

func (*ConfigProto) GetIsolateSessionState

func (x *ConfigProto) GetIsolateSessionState() bool

func (*ConfigProto) GetLogDevicePlacement

func (x *ConfigProto) GetLogDevicePlacement() bool

func (*ConfigProto) GetOperationTimeoutInMs

func (x *ConfigProto) GetOperationTimeoutInMs() int64

func (*ConfigProto) GetPlacementPeriod

func (x *ConfigProto) GetPlacementPeriod() int32

func (*ConfigProto) GetRpcOptions

func (x *ConfigProto) GetRpcOptions() *RPCOptions

func (*ConfigProto) GetSessionInterOpThreadPool

func (x *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto

func (*ConfigProto) GetShareClusterDevicesInSession

func (x *ConfigProto) GetShareClusterDevicesInSession() bool

func (*ConfigProto) GetUsePerSessionThreads

func (x *ConfigProto) GetUsePerSessionThreads() bool

func (*ConfigProto) ProtoMessage

func (*ConfigProto) ProtoMessage()

func (*ConfigProto) ProtoReflect

func (x *ConfigProto) ProtoReflect() protoreflect.Message

func (*ConfigProto) Reset

func (x *ConfigProto) Reset()

func (*ConfigProto) String

func (x *ConfigProto) String() string

type ConfigProto_Experimental

type ConfigProto_Experimental struct {

	// Task name for group resolution.
	CollectiveGroupLeader string `` /* 126-byte string literal not displayed */
	// Which executor to use, the default executor will be used
	// if it is an empty string or "DEFAULT"
	ExecutorType string `protobuf:"bytes,3,opt,name=executor_type,json=executorType,proto3" json:"executor_type,omitempty"`
	// Guidance to formatting of large RecvBuf fields for transfer.
	// Any positive value sets the max chunk size.  0 defaults to 4096.
	// Any negative value indicates no max, i.e. one chunk only.
	RecvBufMaxChunk int32 `protobuf:"varint,4,opt,name=recv_buf_max_chunk,json=recvBufMaxChunk,proto3" json:"recv_buf_max_chunk,omitempty"`
	// If true, and supported by the platform, the runtime will attempt to
	// use NUMA affinity where applicable.  One consequence will be the
	// existence of as many CPU devices as there are available NUMA nodes.
	UseNumaAffinity bool `protobuf:"varint,5,opt,name=use_numa_affinity,json=useNumaAffinity,proto3" json:"use_numa_affinity,omitempty"`
	// If true, make collective op execution order sequential and deterministic
	// for potentially concurrent collective instances.
	CollectiveDeterministicSequentialExecution bool `` /* 192-byte string literal not displayed */
	// If true, use NCCL for CollectiveOps.  This feature is highly
	// experimental.
	CollectiveNccl bool `protobuf:"varint,7,opt,name=collective_nccl,json=collectiveNccl,proto3" json:"collective_nccl,omitempty"`
	// In the following, session state means the value of a variable, elements
	// in a hash table, or any other resource, accessible by worker sessions
	// held by a TF server.
	//
	// When ClusterSpec propagation is enabled, the value of
	// isolate_session_state is ignored when deciding whether to share session
	// states in a TF server (for backwards compatibility reasons).
	// - If share_session_state_in_clusterspec_propagation is true, the session
	// states are shared.
	// - If share_session_state_in_clusterspec_propagation is false, session
	// states are isolated.
	//
	// When clusterspec propagation is not used, the value of
	// share_session_state_in_clusterspec_propagation is ignored when deciding
	// whether to share session states in a TF server.
	// - If isolate_session_state is true, session states are isolated.
	// - If isolate_session_state is false, session states are shared.
	//
	// TODO(b/129330037): Add a single API that consistently treats
	// isolate_session_state and ClusterSpec propagation.
	ShareSessionStateInClusterspecPropagation bool `` /* 193-byte string literal not displayed */
	// If using a direct session, disable spinning while waiting for work in
	// the thread pool. This may result in higher latency for completing ops,
	// but in the case where there is a lot of spinning may result in lower
	// CPU usage.
	DisableThreadSpinning bool `` /* 127-byte string literal not displayed */
	// This was promoted to a non-experimental API. Please use
	// ConfigProto.share_cluster_devices_in_session instead.
	ShareClusterDevicesInSession bool `` /* 153-byte string literal not displayed */
	// Metadata about the session.
	//
	// If set, this can be used by the runtime and the Ops for debugging,
	// monitoring, etc.
	//
	// NOTE: This is currently used and propagated only by the direct session.
	SessionMetadata *SessionMetadata `protobuf:"bytes,11,opt,name=session_metadata,json=sessionMetadata,proto3" json:"session_metadata,omitempty"`
	// If true, the session may treat the graph as being static for optimization
	// purposes.
	//
	// If this option is set to true when a session is created, the full
	// GraphDef must be passed in a single call to Session::Create(), and
	// Session::Extend() may not be supported.
	OptimizeForStaticGraph bool `` /* 133-byte string literal not displayed */
	// This field will eventually be deprecated and replaced by
	// mlir_bridge_rollout (b/166038521).
	//
	// Whether to enable the MLIR-based TF->XLA bridge.
	//
	// This is a replacement to the existing bridge, and not ready for
	// production usage yet.
	// If this option is set to true when a session is created, MLIR is used to
	// perform the set of graph transformations to put the graph in a form that
	// can be executed with delegation of some computations to an accelerator.
	// This builds on the model of XLA where a subset of the graph is
	// encapsulated and attached to a "compile" operation, whose result is fed
	// to an "execute" operation. The kernel for these operations is responsible
	// to lower the encapsulated graph to a particular device.
	EnableMlirBridge bool `protobuf:"varint,13,opt,name=enable_mlir_bridge,json=enableMlirBridge,proto3" json:"enable_mlir_bridge,omitempty"`
	// This field is under development; for now, use enable_mlir_bridge
	// (b/166038521).
	//
	// Whether to enable the MLIR-based TF->XLA bridge.
	MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout `` /* 175-byte string literal not displayed */
	// Whether to enable the MLIR-based Graph optimizations.
	//
	// This will become a part of standard Tensorflow graph optimization
	// pipeline, currently this is only used for gradual migration and testing
	// new passes that are replacing existing optimizations in Grappler.
	EnableMlirGraphOptimization bool `` /* 148-byte string literal not displayed */
	// If true, the session will not store an additional copy of the graph for
	// each subgraph.
	//
	// If this option is set to true when a session is created, the
	// `RunOptions.output_partition_graphs` options must not be set.
	DisableOutputPartitionGraphs bool `` /* 151-byte string literal not displayed */
	// Minimum number of batches run through the XLA graph before XLA fusion
	// autotuner is enabled. Default value of zero disables the autotuner.
	//
	// The XLA fusion autotuner can improve performance by executing a heuristic
	// search on the compiler parameters.
	XlaFusionAutotunerThresh int64 `` /* 139-byte string literal not displayed */
	// Whether runtime execution uses TFRT.
	UseTfrt bool `protobuf:"varint,18,opt,name=use_tfrt,json=useTfrt,proto3" json:"use_tfrt,omitempty"`
	// Distributed coordination service to be enabled if set.
	// Currently only effective in multi-client setup.
	CoordinationService string `protobuf:"bytes,19,opt,name=coordination_service,json=coordinationService,proto3" json:"coordination_service,omitempty"`
	// Whether the remote devices in the cluster should be fetched during setup
	// of multi-client cluster. If enabled, the workers will run an extra device
	// information exchange step during startup and the workers' EagerContexts
	// will become aware of remote devices in the cluster as well.
	FetchRemoteDevicesInMultiClient bool `` /* 164-byte string literal not displayed */
	// contains filtered or unexported fields
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*ConfigProto_Experimental) Descriptor deprecated

func (*ConfigProto_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use ConfigProto_Experimental.ProtoReflect.Descriptor instead.

func (*ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution

func (x *ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution() bool

func (*ConfigProto_Experimental) GetCollectiveGroupLeader

func (x *ConfigProto_Experimental) GetCollectiveGroupLeader() string

func (*ConfigProto_Experimental) GetCollectiveNccl

func (x *ConfigProto_Experimental) GetCollectiveNccl() bool

func (*ConfigProto_Experimental) GetCoordinationService

func (x *ConfigProto_Experimental) GetCoordinationService() string

func (*ConfigProto_Experimental) GetDisableOutputPartitionGraphs

func (x *ConfigProto_Experimental) GetDisableOutputPartitionGraphs() bool

func (*ConfigProto_Experimental) GetDisableThreadSpinning

func (x *ConfigProto_Experimental) GetDisableThreadSpinning() bool

func (*ConfigProto_Experimental) GetEnableMlirBridge

func (x *ConfigProto_Experimental) GetEnableMlirBridge() bool

func (*ConfigProto_Experimental) GetEnableMlirGraphOptimization

func (x *ConfigProto_Experimental) GetEnableMlirGraphOptimization() bool

func (*ConfigProto_Experimental) GetExecutorType

func (x *ConfigProto_Experimental) GetExecutorType() string

func (*ConfigProto_Experimental) GetFetchRemoteDevicesInMultiClient

func (x *ConfigProto_Experimental) GetFetchRemoteDevicesInMultiClient() bool

func (*ConfigProto_Experimental) GetMlirBridgeRollout

func (*ConfigProto_Experimental) GetOptimizeForStaticGraph

func (x *ConfigProto_Experimental) GetOptimizeForStaticGraph() bool

func (*ConfigProto_Experimental) GetRecvBufMaxChunk

func (x *ConfigProto_Experimental) GetRecvBufMaxChunk() int32

func (*ConfigProto_Experimental) GetSessionMetadata

func (x *ConfigProto_Experimental) GetSessionMetadata() *SessionMetadata

func (*ConfigProto_Experimental) GetShareClusterDevicesInSession

func (x *ConfigProto_Experimental) GetShareClusterDevicesInSession() bool

func (*ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation

func (x *ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation() bool

func (*ConfigProto_Experimental) GetUseNumaAffinity

func (x *ConfigProto_Experimental) GetUseNumaAffinity() bool

func (*ConfigProto_Experimental) GetUseTfrt

func (x *ConfigProto_Experimental) GetUseTfrt() bool

func (*ConfigProto_Experimental) GetXlaFusionAutotunerThresh

func (x *ConfigProto_Experimental) GetXlaFusionAutotunerThresh() int64

func (*ConfigProto_Experimental) ProtoMessage

func (*ConfigProto_Experimental) ProtoMessage()

func (*ConfigProto_Experimental) ProtoReflect

func (x *ConfigProto_Experimental) ProtoReflect() protoreflect.Message

func (*ConfigProto_Experimental) Reset

func (x *ConfigProto_Experimental) Reset()

func (*ConfigProto_Experimental) String

func (x *ConfigProto_Experimental) String() string

type ConfigProto_Experimental_MlirBridgeRollout

type ConfigProto_Experimental_MlirBridgeRollout int32

An enum that describes the state of the MLIR bridge rollout.

const (
	// If this field is left unspecified, the MLIR bridge may be selectively
	// enabled on a per graph basis.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED ConfigProto_Experimental_MlirBridgeRollout = 0
	// Enabling the MLIR bridge enables it for all graphs in this session.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_ENABLED ConfigProto_Experimental_MlirBridgeRollout = 1
	// Disabling the MLIR bridge disables it for all graphs in this session.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_DISABLED ConfigProto_Experimental_MlirBridgeRollout = 2
	// Enable the MLIR bridge on a per graph basis based on an analysis of
	// the features used in the graph. If the features used by the graph are
	// supported by the MLIR bridge, the MLIR bridge will be used to run the
	// graph.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED ConfigProto_Experimental_MlirBridgeRollout = 3
	// Enable the MLIR bridge in a fallback mode on a per graph basis based
	// on an analysis of the features used in the graph.
	// Running the MLIR bridge in fallback mode means that it is executed
	// and, on success, commits all of its changes to the TF graph. On
	// failure, it commits nothing and lets the old bridge process the TF
	// graph instead.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED ConfigProto_Experimental_MlirBridgeRollout = 4
)

func (ConfigProto_Experimental_MlirBridgeRollout) Descriptor

func (ConfigProto_Experimental_MlirBridgeRollout) Enum

func (ConfigProto_Experimental_MlirBridgeRollout) EnumDescriptor deprecated

func (ConfigProto_Experimental_MlirBridgeRollout) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConfigProto_Experimental_MlirBridgeRollout.Descriptor instead.

func (ConfigProto_Experimental_MlirBridgeRollout) Number

func (ConfigProto_Experimental_MlirBridgeRollout) String

func (ConfigProto_Experimental_MlirBridgeRollout) Type

type DebugOptions

type DebugOptions struct {

	// Debugging options
	DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts,proto3" json:"debug_tensor_watch_opts,omitempty"`
	// Caller-specified global step count.
	// Note that this is distinct from the session run count and the executor
	// step count.
	GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep,proto3" json:"global_step,omitempty"`
	// Whether the total disk usage of tfdbg is to be reset to zero
	// in this Session.run call. This is used by wrappers and hooks
	// such as the local CLI ones to indicate that the dumped tensors
	// are cleaned up from the disk after each Session.run.
	ResetDiskByteUsage bool `protobuf:"varint,11,opt,name=reset_disk_byte_usage,json=resetDiskByteUsage,proto3" json:"reset_disk_byte_usage,omitempty"`
	// contains filtered or unexported fields
}

Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).

func (*DebugOptions) Descriptor deprecated

func (*DebugOptions) Descriptor() ([]byte, []int)

Deprecated: Use DebugOptions.ProtoReflect.Descriptor instead.

func (*DebugOptions) GetDebugTensorWatchOpts

func (x *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch

func (*DebugOptions) GetGlobalStep

func (x *DebugOptions) GetGlobalStep() int64

func (*DebugOptions) GetResetDiskByteUsage

func (x *DebugOptions) GetResetDiskByteUsage() bool

func (*DebugOptions) ProtoMessage

func (*DebugOptions) ProtoMessage()

func (*DebugOptions) ProtoReflect

func (x *DebugOptions) ProtoReflect() protoreflect.Message

func (*DebugOptions) Reset

func (x *DebugOptions) Reset()

func (*DebugOptions) String

func (x *DebugOptions) String() string

type DebugTensorWatch

type DebugTensorWatch struct {

	// Name of the node to watch.
	// Use "*" for wildcard. But note: currently, regex is not supported in
	// general.
	NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Output slot to watch.
	// The semantics of output_slot == -1 is that all outputs of the node
	// will be watched (i.e., a wildcard).
	// Other negative values of output_slot are invalid and will lead to
	// errors currently.
	OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"`
	// Name(s) of the debugging op(s).
	// One or more than one probes on a tensor.
	// e.g., {"DebugIdentity", "DebugNanCount"}
	DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps,proto3" json:"debug_ops,omitempty"`
	// URL(s) for debug target(s).
	//
	// Supported URL formats are:
	//   - file:///foo/tfdbg_dump: Writes out Event content to file
	//     /foo/tfdbg_dump.  Assumes all directories can be created if they don't
	//     already exist.
	//   - grpc://localhost:11011: Sends an RPC request to an EventListener
	//     service running at localhost:11011 with the event.
	//   - memcbk:///event_key: Routes tensors to clients using the
	//     callback registered with the DebugCallbackRegistry for event_key.
	//
	// Each debug op listed in debug_ops will publish its output tensor (debug
	// signal) to all URLs in debug_urls.
	//
	// N.B. Session::Run() supports concurrent invocations of the same inputs
	// (feed keys), outputs and target nodes. If such concurrent invocations
	// are to be debugged, the callers of Session::Run() must use distinct
	// debug_urls to make sure that the streamed or dumped events do not overlap
	// among the invocations.
	// TODO(cais): More visible documentation of this in g3docs.
	DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls,proto3" json:"debug_urls,omitempty"`
	// Do not error out if debug op creation fails (e.g., due to dtype
	// incompatibility). Instead, just log the failure.
	TolerateDebugOpCreationFailures bool `` /* 161-byte string literal not displayed */
	// contains filtered or unexported fields
}

Option for watching a node in TensorFlow Debugger (tfdbg).

func (*DebugTensorWatch) Descriptor deprecated

func (*DebugTensorWatch) Descriptor() ([]byte, []int)

Deprecated: Use DebugTensorWatch.ProtoReflect.Descriptor instead.

func (*DebugTensorWatch) GetDebugOps

func (x *DebugTensorWatch) GetDebugOps() []string

func (*DebugTensorWatch) GetDebugUrls

func (x *DebugTensorWatch) GetDebugUrls() []string

func (*DebugTensorWatch) GetNodeName

func (x *DebugTensorWatch) GetNodeName() string

func (*DebugTensorWatch) GetOutputSlot

func (x *DebugTensorWatch) GetOutputSlot() int32

func (*DebugTensorWatch) GetTolerateDebugOpCreationFailures

func (x *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool

func (*DebugTensorWatch) ProtoMessage

func (*DebugTensorWatch) ProtoMessage()

func (*DebugTensorWatch) ProtoReflect

func (x *DebugTensorWatch) ProtoReflect() protoreflect.Message

func (*DebugTensorWatch) Reset

func (x *DebugTensorWatch) Reset()

func (*DebugTensorWatch) String

func (x *DebugTensorWatch) String() string

type DebuggedSourceFile

type DebuggedSourceFile struct {

	// The host name on which a source code file is located.
	Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
	// Path to the source code file.
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	// The timestamp at which the source code file is last modified.
	LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
	// Byte size of the file.
	Bytes int64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"`
	// Line-by-line content of the source code file.
	Lines []string `protobuf:"bytes,5,rep,name=lines,proto3" json:"lines,omitempty"`
	// contains filtered or unexported fields
}

func (*DebuggedSourceFile) Descriptor deprecated

func (*DebuggedSourceFile) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedSourceFile.ProtoReflect.Descriptor instead.

func (*DebuggedSourceFile) GetBytes

func (x *DebuggedSourceFile) GetBytes() int64

func (*DebuggedSourceFile) GetFilePath

func (x *DebuggedSourceFile) GetFilePath() string

func (*DebuggedSourceFile) GetHost

func (x *DebuggedSourceFile) GetHost() string

func (*DebuggedSourceFile) GetLastModified

func (x *DebuggedSourceFile) GetLastModified() int64

func (*DebuggedSourceFile) GetLines

func (x *DebuggedSourceFile) GetLines() []string

func (*DebuggedSourceFile) ProtoMessage

func (*DebuggedSourceFile) ProtoMessage()

func (*DebuggedSourceFile) ProtoReflect

func (x *DebuggedSourceFile) ProtoReflect() protoreflect.Message

func (*DebuggedSourceFile) Reset

func (x *DebuggedSourceFile) Reset()

func (*DebuggedSourceFile) String

func (x *DebuggedSourceFile) String() string

type DebuggedSourceFiles

type DebuggedSourceFiles struct {

	// A collection of source code files.
	SourceFiles []*DebuggedSourceFile `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles,proto3" json:"source_files,omitempty"`
	// contains filtered or unexported fields
}

func (*DebuggedSourceFiles) Descriptor deprecated

func (*DebuggedSourceFiles) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedSourceFiles.ProtoReflect.Descriptor instead.

func (*DebuggedSourceFiles) GetSourceFiles

func (x *DebuggedSourceFiles) GetSourceFiles() []*DebuggedSourceFile

func (*DebuggedSourceFiles) ProtoMessage

func (*DebuggedSourceFiles) ProtoMessage()

func (*DebuggedSourceFiles) ProtoReflect

func (x *DebuggedSourceFiles) ProtoReflect() protoreflect.Message

func (*DebuggedSourceFiles) Reset

func (x *DebuggedSourceFiles) Reset()

func (*DebuggedSourceFiles) String

func (x *DebuggedSourceFiles) String() string

type DictValue

type DictValue struct {
	Fields map[string]*StructuredValue `` /* 153-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents a Python dict keyed by `str`. The comment on Unicode from Value.string_value applies analogously.

func (*DictValue) Descriptor deprecated

func (*DictValue) Descriptor() ([]byte, []int)

Deprecated: Use DictValue.ProtoReflect.Descriptor instead.

func (*DictValue) GetFields

func (x *DictValue) GetFields() map[string]*StructuredValue

func (*DictValue) ProtoMessage

func (*DictValue) ProtoMessage()

func (*DictValue) ProtoReflect

func (x *DictValue) ProtoReflect() protoreflect.Message

func (*DictValue) Reset

func (x *DictValue) Reset()

func (*DictValue) String

func (x *DictValue) String() string

type FunctionSpec

type FunctionSpec struct {

	// Full arg spec from inspect.getfullargspec().
	Fullargspec *StructuredValue `protobuf:"bytes,1,opt,name=fullargspec,proto3" json:"fullargspec,omitempty"`
	// Whether this represents a class method.
	IsMethod bool `protobuf:"varint,2,opt,name=is_method,json=isMethod,proto3" json:"is_method,omitempty"`
	// The input signature, if specified.
	InputSignature *StructuredValue        `protobuf:"bytes,5,opt,name=input_signature,json=inputSignature,proto3" json:"input_signature,omitempty"`
	JitCompile     FunctionSpec_JitCompile `` /* 132-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents `FunctionSpec` used in `Function`. This represents a function that has been wrapped as a TensorFlow `Function`.

func (*FunctionSpec) Descriptor deprecated

func (*FunctionSpec) Descriptor() ([]byte, []int)

Deprecated: Use FunctionSpec.ProtoReflect.Descriptor instead.

func (*FunctionSpec) GetFullargspec

func (x *FunctionSpec) GetFullargspec() *StructuredValue

func (*FunctionSpec) GetInputSignature

func (x *FunctionSpec) GetInputSignature() *StructuredValue

func (*FunctionSpec) GetIsMethod

func (x *FunctionSpec) GetIsMethod() bool

func (*FunctionSpec) GetJitCompile

func (x *FunctionSpec) GetJitCompile() FunctionSpec_JitCompile

func (*FunctionSpec) ProtoMessage

func (*FunctionSpec) ProtoMessage()

func (*FunctionSpec) ProtoReflect

func (x *FunctionSpec) ProtoReflect() protoreflect.Message

func (*FunctionSpec) Reset

func (x *FunctionSpec) Reset()

func (*FunctionSpec) String

func (x *FunctionSpec) String() string

type FunctionSpec_JitCompile

type FunctionSpec_JitCompile int32

Whether the function should be compiled by XLA.

The public interface to `tf.function` uses an optional boolean to represent three distinct states for this field. Unfortunately, proto3 removes the ability to explicitly check for the presence or absence of a field, so we instead map to an enum.

See `tf.function` for details.

const (
	FunctionSpec_DEFAULT FunctionSpec_JitCompile = 0
	FunctionSpec_ON      FunctionSpec_JitCompile = 1
	FunctionSpec_OFF     FunctionSpec_JitCompile = 2
)

func (FunctionSpec_JitCompile) Descriptor

func (FunctionSpec_JitCompile) Enum

func (FunctionSpec_JitCompile) EnumDescriptor deprecated

func (FunctionSpec_JitCompile) EnumDescriptor() ([]byte, []int)

Deprecated: Use FunctionSpec_JitCompile.Descriptor instead.

func (FunctionSpec_JitCompile) Number

func (FunctionSpec_JitCompile) String

func (x FunctionSpec_JitCompile) String() string

func (FunctionSpec_JitCompile) Type

type GPUOptions

type GPUOptions struct {

	// Fraction of the available GPU memory to allocate for each process.
	// 1 means to allocate all of the GPU memory, 0.5 means the process
	// allocates up to ~50% of the available GPU memory.
	//
	// GPU memory is pre-allocated unless the allow_growth option is enabled.
	//
	// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
	// the amount of memory available on the GPU device by using host memory as a
	// swap space. Accessing memory not available on the device will be
	// significantly slower as that would require memory transfer between the host
	// and the device. Options to reduce the memory requirement should be
	// considered before enabling this option as this may come with a negative
	// performance impact. Oversubscription using the unified memory requires
	// Pascal class or newer GPUs and it is currently only supported on the Linux
	// operating system. See
	// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
	// for the detailed requirements.
	PerProcessGpuMemoryFraction float64 `` /* 150-byte string literal not displayed */
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	// "": The empty string (default) uses a system-chosen default
	//     which may change over time.
	//
	// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//        version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with gpu driver code.  If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `` /* 127-byte string literal not displayed */
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices.  For example, if TensorFlow
	// can see 8 GPU devices in the process, and one wanted to map
	// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
	// then one would specify this field as "5,3".  This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE:
	// 1. The GPU driver provides the process with the visible GPUs
	//    in an order which is not guaranteed to have any correlation to
	//    the *physical* GPU id in the machine.  This field is used for
	//    remapping "visible" to "virtual", which means this operates only
	//    after the process starts.  Users are required to use vendor
	//    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	//    physical to visible device mapping prior to invoking TensorFlow.
	// 2. In the code, the ids in this list are also called "platform GPU id"s,
	//    and the 'virtual' ids of GPU devices (i.e. the ids in the device
	//    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
	//    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
	//    for more information.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
	// In the event polling loop sleep this many microseconds between
	// PollEvents calls, when the queue is not empty.  If value is not
	// set or set to 0, gets set to a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 135-byte string literal not displayed */
	// This field is deprecated and ignored.
	PollingInactiveDelayMsecs int32 `` /* 141-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with Cuda
	// pinned memory. Normally, TensorFlow will infer which tensors should be
	// allocated as the pinned memory. But in cases where the inference is
	// incomplete, this option can significantly speed up the cross-device memory
	// copy performance as long as it fits the memory.
	// Note that this option is not something that should be
	// enabled by default for unknown or very large models, since all Cuda pinned
	// memory is unpageable, having too much pinned memory might negatively impact
	// the overall host system performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
	// Everything inside experimental is subject to change and is not subject
	// to API stability guarantees in
	// https://www.tensorflow.org/guide/version_compat.
	Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}

func (*GPUOptions) Descriptor deprecated

func (*GPUOptions) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions.ProtoReflect.Descriptor instead.

func (*GPUOptions) GetAllocatorType

func (x *GPUOptions) GetAllocatorType() string

func (*GPUOptions) GetAllowGrowth

func (x *GPUOptions) GetAllowGrowth() bool

func (*GPUOptions) GetDeferredDeletionBytes

func (x *GPUOptions) GetDeferredDeletionBytes() int64

func (*GPUOptions) GetExperimental

func (x *GPUOptions) GetExperimental() *GPUOptions_Experimental

func (*GPUOptions) GetForceGpuCompatible

func (x *GPUOptions) GetForceGpuCompatible() bool

func (*GPUOptions) GetPerProcessGpuMemoryFraction

func (x *GPUOptions) GetPerProcessGpuMemoryFraction() float64

func (*GPUOptions) GetPollingActiveDelayUsecs

func (x *GPUOptions) GetPollingActiveDelayUsecs() int32

func (*GPUOptions) GetPollingInactiveDelayMsecs

func (x *GPUOptions) GetPollingInactiveDelayMsecs() int32

func (*GPUOptions) GetVisibleDeviceList

func (x *GPUOptions) GetVisibleDeviceList() string

func (*GPUOptions) ProtoMessage

func (*GPUOptions) ProtoMessage()

func (*GPUOptions) ProtoReflect

func (x *GPUOptions) ProtoReflect() protoreflect.Message

func (*GPUOptions) Reset

func (x *GPUOptions) Reset()

func (*GPUOptions) String

func (x *GPUOptions) String() string

type GPUOptions_Experimental

type GPUOptions_Experimental struct {

	// The multi virtual device settings. If empty (not set), it will create
	// a single virtual device on each visible GPU, according to the settings
	// in "visible_device_list" above. Otherwise, the number of elements in the
	// list must be the same as the number of visible GPUs (after
	// "visible_device_list" filtering if it is set), and the string represented
	// device names (e.g. /device:GPU:<id>) will refer to the virtual
	// devices and have the <id> field assigned sequentially starting from 0,
	// according to the order they appear in this list and the "memory_limit"
	// list inside each element. For example,
	//   visible_device_list = "1,0"
	//   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
	//   virtual_devices {}
	// will create three virtual devices as:
	//   /device:GPU:0 -> visible GPU 1 with 1GB memory
	//   /device:GPU:1 -> visible GPU 1 with 2GB memory
	//   /device:GPU:2 -> visible GPU 0 with all available memory
	//
	// NOTE:
	// 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
	//    at the same time.
	// 2. Currently this setting is per-process, not per-session. Using
	//    different settings in different sessions within same process will
	//    result in undefined behavior.
	VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices,proto3" json:"virtual_devices,omitempty"`
	// If true, uses CUDA unified memory for memory allocations. If
	// per_process_gpu_memory_fraction option is greater than 1.0, then unified
	// memory is used regardless of the value for this field. See comments for
	// per_process_gpu_memory_fraction field for more details and requirements
	// of the unified memory. This option is useful to oversubscribe memory if
	// multiple processes are sharing a single GPU while individually using less
	// than 1.0 per process memory fraction.
	UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"`
	// If > 1, the number of device-to-device copy streams to create
	// for each GPUDevice.  Default value is 0, which is automatically
	// converted to 1.
	NumDevToDevCopyStreams int32 `` /* 136-byte string literal not displayed */
	// If non-empty, defines a good GPU ring order on a single worker based on
	// device interconnect.  This assumes that all workers have the same GPU
	// topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
	// This ring order is used by the RingReducer implementation of
	// CollectiveReduce, and serves as an override to automatic ring order
	// generation in OrderTaskDeviceMap() during CollectiveParam resolution.
	CollectiveRingOrder string `protobuf:"bytes,4,opt,name=collective_ring_order,json=collectiveRingOrder,proto3" json:"collective_ring_order,omitempty"`
	// If true then extra work is done by GPUDevice and GPUBFCAllocator to
	// keep track of when GPU memory is freed and when kernels actually
	// complete so that we can know when a nominally free memory chunk
	// is really not subject to pending use.
	TimestampedAllocator bool `protobuf:"varint,5,opt,name=timestamped_allocator,json=timestampedAllocator,proto3" json:"timestamped_allocator,omitempty"`
	// Parameters for GPUKernelTracker.  By default no kernel tracking is done.
	// Note that timestamped_allocator is only effective if some tracking is
	// specified.
	//
	// If kernel_tracker_max_interval = n > 0, then a tracking event
	// is inserted after every n kernels without an event.
	KernelTrackerMaxInterval int32 `` /* 138-byte string literal not displayed */
	// If kernel_tracker_max_bytes = n > 0, then a tracking event is
	// inserted after every series of kernels allocating a sum of
	// memory >= n.  If one kernel allocates b * n bytes, then one
	// event will be inserted after it, but it will count as b against
	// the pending limit.
	KernelTrackerMaxBytes int32 `` /* 129-byte string literal not displayed */
	// If kernel_tracker_max_pending > 0 then no more than this many
	// tracking events can be outstanding at a time.  An attempt to
	// launch an additional kernel will stall until an event
	// completes.
	KernelTrackerMaxPending int32 `` /* 135-byte string literal not displayed */
	// BFC Allocator can return an allocated chunk of memory up to 2x the
	// requested size. For virtual devices with tight memory constraints, and
	// proportionately large allocation requests, this can lead to a significant
	// reduction in available memory. The threshold below controls when a chunk
	// should be split if the chunk size exceeds requested memory size. It is
	// expressed as a fraction of total available memory for the tf device. For
	// example setting it to 0.05 would imply a chunk needs to be split if its
	// size exceeds the requested memory by 5% of the total virtual device/gpu
	// memory size.
	InternalFragmentationFraction float64 `` /* 153-byte string literal not displayed */
	// When true, use CUDA cudaMallocAsync API instead of TF gpu allocator.
	UseCudaMallocAsync bool `protobuf:"varint,11,opt,name=use_cuda_malloc_async,json=useCudaMallocAsync,proto3" json:"use_cuda_malloc_async,omitempty"`
	// contains filtered or unexported fields
}

func (*GPUOptions_Experimental) Descriptor deprecated

func (*GPUOptions_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions_Experimental.ProtoReflect.Descriptor instead.

func (*GPUOptions_Experimental) GetCollectiveRingOrder

func (x *GPUOptions_Experimental) GetCollectiveRingOrder() string

func (*GPUOptions_Experimental) GetInternalFragmentationFraction

func (x *GPUOptions_Experimental) GetInternalFragmentationFraction() float64

func (*GPUOptions_Experimental) GetKernelTrackerMaxBytes

func (x *GPUOptions_Experimental) GetKernelTrackerMaxBytes() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxInterval

func (x *GPUOptions_Experimental) GetKernelTrackerMaxInterval() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxPending

func (x *GPUOptions_Experimental) GetKernelTrackerMaxPending() int32

func (*GPUOptions_Experimental) GetNumDevToDevCopyStreams

func (x *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32

func (*GPUOptions_Experimental) GetTimestampedAllocator

func (x *GPUOptions_Experimental) GetTimestampedAllocator() bool

func (*GPUOptions_Experimental) GetUseCudaMallocAsync

func (x *GPUOptions_Experimental) GetUseCudaMallocAsync() bool

func (*GPUOptions_Experimental) GetUseUnifiedMemory

func (x *GPUOptions_Experimental) GetUseUnifiedMemory() bool

func (*GPUOptions_Experimental) GetVirtualDevices

func (*GPUOptions_Experimental) ProtoMessage

func (*GPUOptions_Experimental) ProtoMessage()

func (*GPUOptions_Experimental) ProtoReflect

func (x *GPUOptions_Experimental) ProtoReflect() protoreflect.Message

func (*GPUOptions_Experimental) Reset

func (x *GPUOptions_Experimental) Reset()

func (*GPUOptions_Experimental) String

func (x *GPUOptions_Experimental) String() string

type GPUOptions_Experimental_VirtualDevices

type GPUOptions_Experimental_VirtualDevices struct {

	// Per "virtual" device memory limit, in MB. The number of elements in
	// the list is the number of virtual devices to create on the
	// corresponding visible GPU (see "virtual_devices" below).
	// If empty, it will create a single virtual device taking all available
	// memory from the device.
	//
	// For the concept of "visible" and "virtual" GPU, see the comments for
	// "visible_device_list" above for more information.
	MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"`
	// Priority values to use with the virtual devices. Use the cuda function
	// cudaDeviceGetStreamPriorityRange to query for valid range of values for
	// priority.
	//
	// On a P4000 GPU with cuda 10.1, the priority range reported was 0 for
	// least priority and -1 for greatest priority.
	//
	// If this field is not specified, then the virtual devices will be
	// created with the default. If this field has values set, then the size
	// of this must match with the above memory_limit_mb.
	Priority []int32 `protobuf:"varint,2,rep,packed,name=priority,proto3" json:"priority,omitempty"`
	// contains filtered or unexported fields
}

Configuration for breaking down a visible GPU into multiple "virtual" devices.

func (*GPUOptions_Experimental_VirtualDevices) Descriptor deprecated

func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions_Experimental_VirtualDevices.ProtoReflect.Descriptor instead.

func (*GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb

func (x *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32

func (*GPUOptions_Experimental_VirtualDevices) GetPriority

func (x *GPUOptions_Experimental_VirtualDevices) GetPriority() []int32

func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage

func (*GPUOptions_Experimental_VirtualDevices) ProtoReflect

func (*GPUOptions_Experimental_VirtualDevices) Reset

func (*GPUOptions_Experimental_VirtualDevices) String

type GraphOptions

type GraphOptions struct {

	// If true, use control flow to schedule the activation of Recv nodes.
	// (Currently ignored.)
	EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"`
	// Options controlling how graph is optimized.
	OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions,proto3" json:"optimizer_options,omitempty"`
	// The number of steps to run before returning a cost model detailing
	// the memory usage and performance of each node of the graph. 0 means
	// no cost model.
	BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"`
	// The number of steps to skip before collecting statistics for the
	// cost model.
	BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"`
	// Annotate each Node with Op output shape data, to the extent it can
	// be statically inferred.
	InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"`
	// Only place the subgraphs that are run, rather than the entire graph.
	//
	// This is useful for interactive graph building, where one might
	// produce graphs that cannot be placed during the debugging
	// process.  In particular, it allows the client to continue work in
	// a session after adding a node to a graph whose placement
	// constraints are unsatisfiable.
	PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"`
	// If true, transfer float values between processes as bfloat16.
	EnableBfloat16Sendrecv bool `` /* 130-byte string literal not displayed */
	// If > 0, record a timeline every this many steps.
	// EXPERIMENTAL: This currently has no effect in MasterSession.
	TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"`
	// Options that control the type and amount of graph rewriting.
	// Not currently configurable via the public Python API (i.e. there is no API
	// stability guarantee if you import RewriterConfig explicitly).
	RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions,proto3" json:"rewrite_options,omitempty"`
	// contains filtered or unexported fields
}

func (*GraphOptions) Descriptor deprecated

func (*GraphOptions) Descriptor() ([]byte, []int)

Deprecated: Use GraphOptions.ProtoReflect.Descriptor instead.

func (*GraphOptions) GetBuildCostModel

func (x *GraphOptions) GetBuildCostModel() int64

func (*GraphOptions) GetBuildCostModelAfter

func (x *GraphOptions) GetBuildCostModelAfter() int64

func (*GraphOptions) GetEnableBfloat16Sendrecv

func (x *GraphOptions) GetEnableBfloat16Sendrecv() bool

func (*GraphOptions) GetEnableRecvScheduling

func (x *GraphOptions) GetEnableRecvScheduling() bool

func (*GraphOptions) GetInferShapes

func (x *GraphOptions) GetInferShapes() bool

func (*GraphOptions) GetOptimizerOptions

func (x *GraphOptions) GetOptimizerOptions() *OptimizerOptions

func (*GraphOptions) GetPlacePrunedGraph

func (x *GraphOptions) GetPlacePrunedGraph() bool

func (*GraphOptions) GetRewriteOptions

func (x *GraphOptions) GetRewriteOptions() *RewriterConfig

func (*GraphOptions) GetTimelineStep

func (x *GraphOptions) GetTimelineStep() int32

func (*GraphOptions) ProtoMessage

func (*GraphOptions) ProtoMessage()

func (*GraphOptions) ProtoReflect

func (x *GraphOptions) ProtoReflect() protoreflect.Message

func (*GraphOptions) Reset

func (x *GraphOptions) Reset()

func (*GraphOptions) String

func (x *GraphOptions) String() string

type JobDef

type JobDef struct {

	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Mapping from task ID to "hostname:port" string.
	//
	// If the `name` field contains "worker", and the `tasks` map contains a
	// mapping from 7 to "example.org:2222", then the device prefix
	// "/job:worker/task:7" will be assigned to "example.org:2222".
	Tasks map[int32]string `` /* 152-byte string literal not displayed */
	// contains filtered or unexported fields
}

Defines a single job in a TensorFlow cluster.

func (*JobDef) Descriptor deprecated

func (*JobDef) Descriptor() ([]byte, []int)

Deprecated: Use JobDef.ProtoReflect.Descriptor instead.

func (*JobDef) GetName

func (x *JobDef) GetName() string

func (*JobDef) GetTasks

func (x *JobDef) GetTasks() map[int32]string

func (*JobDef) ProtoMessage

func (*JobDef) ProtoMessage()

func (*JobDef) ProtoReflect

func (x *JobDef) ProtoReflect() protoreflect.Message

func (*JobDef) Reset

func (x *JobDef) Reset()

func (*JobDef) String

func (x *JobDef) String() string

type ListValue

type ListValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents a Python list.

func (*ListValue) Descriptor deprecated

func (*ListValue) Descriptor() ([]byte, []int)

Deprecated: Use ListValue.ProtoReflect.Descriptor instead.

func (*ListValue) GetValues

func (x *ListValue) GetValues() []*StructuredValue

func (*ListValue) ProtoMessage

func (*ListValue) ProtoMessage()

func (*ListValue) ProtoReflect

func (x *ListValue) ProtoReflect() protoreflect.Message

func (*ListValue) Reset

func (x *ListValue) Reset()

func (*ListValue) String

func (x *ListValue) String() string

type MetaGraphDef

type MetaGraphDef struct {
	MetaInfoDef *MetaGraphDef_MetaInfoDef `protobuf:"bytes,1,opt,name=meta_info_def,json=metaInfoDef,proto3" json:"meta_info_def,omitempty"`
	// GraphDef.
	GraphDef *framework.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"`
	// SaverDef.
	SaverDef *SaverDef `protobuf:"bytes,3,opt,name=saver_def,json=saverDef,proto3" json:"saver_def,omitempty"`
	// collection_def: Map from collection name to collections.
	// See CollectionDef section for details.
	CollectionDef map[string]*CollectionDef `` /* 188-byte string literal not displayed */
	// signature_def: Map from user supplied key for a signature to a single
	// SignatureDef.
	SignatureDef map[string]*SignatureDef `` /* 185-byte string literal not displayed */
	// Asset file def to be used with the defined graph.
	AssetFileDef []*AssetFileDef `protobuf:"bytes,6,rep,name=asset_file_def,json=assetFileDef,proto3" json:"asset_file_def,omitempty"`
	// Extra information about the structure of functions and stateful objects.
	ObjectGraphDef *SavedObjectGraph `protobuf:"bytes,7,opt,name=object_graph_def,json=objectGraphDef,proto3" json:"object_graph_def,omitempty"`
	// contains filtered or unexported fields
}

NOTE: This protocol buffer is evolving, and will go through revisions in the coming months.

Protocol buffer containing the following which are necessary to restart training or run inference. It can be used to serialize/de-serialize memory objects necessary for running computation in a graph when crossing the process boundary. It can be used for long term storage of graphs, cross-language execution of graphs, etc.

MetaInfoDef
GraphDef
SaverDef
CollectionDef
TensorInfo
SignatureDef

func (*MetaGraphDef) Descriptor deprecated

func (*MetaGraphDef) Descriptor() ([]byte, []int)

Deprecated: Use MetaGraphDef.ProtoReflect.Descriptor instead.

func (*MetaGraphDef) GetAssetFileDef

func (x *MetaGraphDef) GetAssetFileDef() []*AssetFileDef

func (*MetaGraphDef) GetCollectionDef

func (x *MetaGraphDef) GetCollectionDef() map[string]*CollectionDef

func (*MetaGraphDef) GetGraphDef

func (x *MetaGraphDef) GetGraphDef() *framework.GraphDef

func (*MetaGraphDef) GetMetaInfoDef

func (x *MetaGraphDef) GetMetaInfoDef() *MetaGraphDef_MetaInfoDef

func (*MetaGraphDef) GetObjectGraphDef

func (x *MetaGraphDef) GetObjectGraphDef() *SavedObjectGraph

func (*MetaGraphDef) GetSaverDef

func (x *MetaGraphDef) GetSaverDef() *SaverDef

func (*MetaGraphDef) GetSignatureDef

func (x *MetaGraphDef) GetSignatureDef() map[string]*SignatureDef

func (*MetaGraphDef) ProtoMessage

func (*MetaGraphDef) ProtoMessage()

func (*MetaGraphDef) ProtoReflect

func (x *MetaGraphDef) ProtoReflect() protoreflect.Message

func (*MetaGraphDef) Reset

func (x *MetaGraphDef) Reset()

func (*MetaGraphDef) String

func (x *MetaGraphDef) String() string

type MetaGraphDef_MetaInfoDef

type MetaGraphDef_MetaInfoDef struct {

	// User specified Version string. Can be the name of the model and revision,
	// steps this model has been trained to, etc.
	MetaGraphVersion string `protobuf:"bytes,1,opt,name=meta_graph_version,json=metaGraphVersion,proto3" json:"meta_graph_version,omitempty"`
	// A copy of the OpDefs used by the producer of this graph_def.
	// Descriptions and Ops not used in graph_def are stripped out.
	StrippedOpList *framework.OpList `protobuf:"bytes,2,opt,name=stripped_op_list,json=strippedOpList,proto3" json:"stripped_op_list,omitempty"`
	// A serialized protobuf. Can be the time this meta graph is created, or
	// modified, or name of the model.
	AnyInfo *anypb.Any `protobuf:"bytes,3,opt,name=any_info,json=anyInfo,proto3" json:"any_info,omitempty"`
	// User supplied tag(s) on the meta_graph and included graph_def.
	//
	// MetaGraphDefs should be tagged with their capabilities or use-cases.
	// Examples: "train", "serve", "gpu", "tpu", etc.
	// These tags enable loaders to access the MetaGraph(s) appropriate for a
	// specific use-case or runtime environment.
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The __version__ string of the tensorflow build used to write this graph.
	// This will be populated by the framework, which will overwrite any user
	// supplied value.
	TensorflowVersion string `protobuf:"bytes,5,opt,name=tensorflow_version,json=tensorflowVersion,proto3" json:"tensorflow_version,omitempty"`
	// The __git_version__ string of the tensorflow build used to write this
	// graph. This will be populated by the framework, which will overwrite any
	// user supplied value.
	TensorflowGitVersion string `protobuf:"bytes,6,opt,name=tensorflow_git_version,json=tensorflowGitVersion,proto3" json:"tensorflow_git_version,omitempty"`
	// A flag to denote whether default-valued attrs have been stripped from
	// the nodes in this graph_def.
	StrippedDefaultAttrs bool `protobuf:"varint,7,opt,name=stripped_default_attrs,json=strippedDefaultAttrs,proto3" json:"stripped_default_attrs,omitempty"`
	// FunctionDef name to aliases mapping.
	FunctionAliases map[string]string `` /* 194-byte string literal not displayed */
	// contains filtered or unexported fields
}

Meta information regarding the graph to be exported. To be used by users of this protocol buffer to encode information regarding their meta graph.

func (*MetaGraphDef_MetaInfoDef) Descriptor deprecated

func (*MetaGraphDef_MetaInfoDef) Descriptor() ([]byte, []int)

Deprecated: Use MetaGraphDef_MetaInfoDef.ProtoReflect.Descriptor instead.

func (*MetaGraphDef_MetaInfoDef) GetAnyInfo

func (x *MetaGraphDef_MetaInfoDef) GetAnyInfo() *anypb.Any

func (*MetaGraphDef_MetaInfoDef) GetFunctionAliases

func (x *MetaGraphDef_MetaInfoDef) GetFunctionAliases() map[string]string

func (*MetaGraphDef_MetaInfoDef) GetMetaGraphVersion

func (x *MetaGraphDef_MetaInfoDef) GetMetaGraphVersion() string

func (*MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs

func (x *MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs() bool

func (*MetaGraphDef_MetaInfoDef) GetStrippedOpList

func (x *MetaGraphDef_MetaInfoDef) GetStrippedOpList() *framework.OpList

func (*MetaGraphDef_MetaInfoDef) GetTags

func (x *MetaGraphDef_MetaInfoDef) GetTags() []string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion

func (x *MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion() string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowVersion

func (x *MetaGraphDef_MetaInfoDef) GetTensorflowVersion() string

func (*MetaGraphDef_MetaInfoDef) ProtoMessage

func (*MetaGraphDef_MetaInfoDef) ProtoMessage()

func (*MetaGraphDef_MetaInfoDef) ProtoReflect

func (x *MetaGraphDef_MetaInfoDef) ProtoReflect() protoreflect.Message

func (*MetaGraphDef_MetaInfoDef) Reset

func (x *MetaGraphDef_MetaInfoDef) Reset()

func (*MetaGraphDef_MetaInfoDef) String

func (x *MetaGraphDef_MetaInfoDef) String() string

type NamedTensorProto

type NamedTensorProto struct {

	// Name of the tensor.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The client can populate a TensorProto using a `tensorflow::Tensor`, or
	// directly using the protobuf field accessors.
	//
	// The client specifies whether the returned tensor values should be
	// filled tensor fields (float_val, int_val, etc.) or encoded in a
	// compact form in tensor.tensor_content.
	Tensor *framework.TensorProto `protobuf:"bytes,2,opt,name=tensor,proto3" json:"tensor,omitempty"`
	// contains filtered or unexported fields
}

A pair of tensor name and tensor values.

func (*NamedTensorProto) Descriptor deprecated

func (*NamedTensorProto) Descriptor() ([]byte, []int)

Deprecated: Use NamedTensorProto.ProtoReflect.Descriptor instead.

func (*NamedTensorProto) GetName

func (x *NamedTensorProto) GetName() string

func (*NamedTensorProto) GetTensor

func (x *NamedTensorProto) GetTensor() *framework.TensorProto

func (*NamedTensorProto) ProtoMessage

func (*NamedTensorProto) ProtoMessage()

func (*NamedTensorProto) ProtoReflect

func (x *NamedTensorProto) ProtoReflect() protoreflect.Message

func (*NamedTensorProto) Reset

func (x *NamedTensorProto) Reset()

func (*NamedTensorProto) String

func (x *NamedTensorProto) String() string

type NamedTupleValue

type NamedTupleValue struct {
	Name   string       `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Values []*PairValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents Python's namedtuple.

func (*NamedTupleValue) Descriptor deprecated

func (*NamedTupleValue) Descriptor() ([]byte, []int)

Deprecated: Use NamedTupleValue.ProtoReflect.Descriptor instead.

func (*NamedTupleValue) GetName

func (x *NamedTupleValue) GetName() string

func (*NamedTupleValue) GetValues

func (x *NamedTupleValue) GetValues() []*PairValue

func (*NamedTupleValue) ProtoMessage

func (*NamedTupleValue) ProtoMessage()

func (*NamedTupleValue) ProtoReflect

func (x *NamedTupleValue) ProtoReflect() protoreflect.Message

func (*NamedTupleValue) Reset

func (x *NamedTupleValue) Reset()

func (*NamedTupleValue) String

func (x *NamedTupleValue) String() string

type NoneValue

type NoneValue struct {
	// contains filtered or unexported fields
}

Represents None.

func (*NoneValue) Descriptor deprecated

func (*NoneValue) Descriptor() ([]byte, []int)

Deprecated: Use NoneValue.ProtoReflect.Descriptor instead.

func (*NoneValue) ProtoMessage

func (*NoneValue) ProtoMessage()

func (*NoneValue) ProtoReflect

func (x *NoneValue) ProtoReflect() protoreflect.Message

func (*NoneValue) Reset

func (x *NoneValue) Reset()

func (*NoneValue) String

func (x *NoneValue) String() string

type OptimizerOptions

type OptimizerOptions struct {

	// If true, optimize the graph using common subexpression elimination.
	// Note: the optimization Level L1 will override this setting to true. So in
	// order to disable common subexpression elimination the opt_level has to be
	// set to L0.
	DoCommonSubexpressionElimination bool `` /* 162-byte string literal not displayed */
	// If true, perform constant folding optimization on the graph.
	// Note: the optimization Level L1 will override this setting to true. So in
	// order to disable constant folding the opt_level has to be set to L0.
	DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"`
	// Constant folding optimization replaces tensors whose values can be
	// predetermined, with constant nodes. To avoid inserting too large constants,
	// the size of each constant created can be limited. If this value is zero, a
	// default limit of 10 MiB will be applied. If constant folding optimization
	// is disabled, this value is ignored.
	MaxFoldedConstantInBytes int64 `` /* 140-byte string literal not displayed */
	// If true, perform function inlining on the graph.
	DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"`
	// Overall optimization level. The actual optimizations applied will be the
	// logical OR of the flags that this level implies and any flags already set.
	OptLevel       OptimizerOptions_Level          `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
	GlobalJitLevel OptimizerOptions_GlobalJitLevel `` /* 154-byte string literal not displayed */
	// contains filtered or unexported fields
}

Options passed to the graph optimizer

func (*OptimizerOptions) Descriptor deprecated

func (*OptimizerOptions) Descriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions.ProtoReflect.Descriptor instead.

func (*OptimizerOptions) GetDoCommonSubexpressionElimination

func (x *OptimizerOptions) GetDoCommonSubexpressionElimination() bool

func (*OptimizerOptions) GetDoConstantFolding

func (x *OptimizerOptions) GetDoConstantFolding() bool

func (*OptimizerOptions) GetDoFunctionInlining

func (x *OptimizerOptions) GetDoFunctionInlining() bool

func (*OptimizerOptions) GetGlobalJitLevel

func (x *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel

func (*OptimizerOptions) GetMaxFoldedConstantInBytes

func (x *OptimizerOptions) GetMaxFoldedConstantInBytes() int64

func (*OptimizerOptions) GetOptLevel

func (x *OptimizerOptions) GetOptLevel() OptimizerOptions_Level

func (*OptimizerOptions) ProtoMessage

func (*OptimizerOptions) ProtoMessage()

func (*OptimizerOptions) ProtoReflect

func (x *OptimizerOptions) ProtoReflect() protoreflect.Message

func (*OptimizerOptions) Reset

func (x *OptimizerOptions) Reset()

func (*OptimizerOptions) String

func (x *OptimizerOptions) String() string

type OptimizerOptions_GlobalJitLevel

type OptimizerOptions_GlobalJitLevel int32

Control the use of the compiler/jit. Experimental.

const (
	OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0 // Default setting ("off" now, but later expected to be "on")
	OptimizerOptions_OFF     OptimizerOptions_GlobalJitLevel = -1
	// The following settings turn on compilation, with higher values being
	// more aggressive.  Higher values may reduce opportunities for parallelism
	// and may use more memory.  (At present, there is no distinction, but this
	// is expected to change.)
	OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
	OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)

func (OptimizerOptions_GlobalJitLevel) Descriptor

func (OptimizerOptions_GlobalJitLevel) Enum

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor deprecated

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions_GlobalJitLevel.Descriptor instead.

func (OptimizerOptions_GlobalJitLevel) Number

func (OptimizerOptions_GlobalJitLevel) String

func (OptimizerOptions_GlobalJitLevel) Type

type OptimizerOptions_Level

type OptimizerOptions_Level int32

Optimization level

const (
	// L1 is the default level.
	// Optimization performed at L1 :
	// 1. Common subexpression elimination
	// 2. Constant folding
	OptimizerOptions_L1 OptimizerOptions_Level = 0
	// No optimizations
	OptimizerOptions_L0 OptimizerOptions_Level = -1
)

func (OptimizerOptions_Level) Descriptor

func (OptimizerOptions_Level) Enum

func (OptimizerOptions_Level) EnumDescriptor deprecated

func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions_Level.Descriptor instead.

func (OptimizerOptions_Level) Number

func (OptimizerOptions_Level) String

func (x OptimizerOptions_Level) String() string

func (OptimizerOptions_Level) Type

type PairValue

type PairValue struct {
	Key   string           `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value *StructuredValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

Represents a (key, value) pair.

func (*PairValue) Descriptor deprecated

func (*PairValue) Descriptor() ([]byte, []int)

Deprecated: Use PairValue.ProtoReflect.Descriptor instead.

func (*PairValue) GetKey

func (x *PairValue) GetKey() string

func (*PairValue) GetValue

func (x *PairValue) GetValue() *StructuredValue

func (*PairValue) ProtoMessage

func (*PairValue) ProtoMessage()

func (*PairValue) ProtoReflect

func (x *PairValue) ProtoReflect() protoreflect.Message

func (*PairValue) Reset

func (x *PairValue) Reset()

func (*PairValue) String

func (x *PairValue) String() string

type RPCOptions

type RPCOptions struct {

	// If true, always use RPC to contact the session target.
	//
	// If false (the default option), TensorFlow may use an optimized
	// transport for client-master communication that avoids the RPC
	// stack. This option is primarily used for testing the RPC stack.
	UseRpcForInprocessMaster bool `` /* 140-byte string literal not displayed */
	// The compression algorithm to be used. One of "deflate", "gzip".
	CompressionAlgorithm string `protobuf:"bytes,2,opt,name=compression_algorithm,json=compressionAlgorithm,proto3" json:"compression_algorithm,omitempty"`
	// If compression_algorithm is set, the compression level to be used.
	// From 0 (no compression), up to 3.
	CompressionLevel int32 `protobuf:"varint,3,opt,name=compression_level,json=compressionLevel,proto3" json:"compression_level,omitempty"`
	// Setting cache_rpc_response to true will enable sender side caching of
	// response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
	// requests. This is only necessary when the network fabric is experiencing a
	// significant error rate.  Without it we'll fail a step on a network error,
	// while with it we'll be able to complete long steps (like complex
	// initializations) in the face of some network errors during RecvTensor.
	CacheRpcResponse bool `protobuf:"varint,4,opt,name=cache_rpc_response,json=cacheRpcResponse,proto3" json:"cache_rpc_response,omitempty"`
	// Disables TCP connection sharing when opening a new RPC channel.
	DisableSessionConnectionSharing bool `` /* 159-byte string literal not displayed */
	// Setting num_channels_per_target > 0 allows uses of multiple channels to
	// communicate to the same target. This can be used to improve the aggregate
	// throughput on high speed links (e.g. 100G) where a single connection is not
	// sufficient to maximize link utilization. Note that a single RPC only goes
	// on a single channel, this only helps in situations where there are multiple
	// transfers to the same target overlapping in time.
	NumChannelsPerTarget int32 `` /* 126-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*RPCOptions) Descriptor deprecated

func (*RPCOptions) Descriptor() ([]byte, []int)

Deprecated: Use RPCOptions.ProtoReflect.Descriptor instead.

func (*RPCOptions) GetCacheRpcResponse

func (x *RPCOptions) GetCacheRpcResponse() bool

func (*RPCOptions) GetCompressionAlgorithm

func (x *RPCOptions) GetCompressionAlgorithm() string

func (*RPCOptions) GetCompressionLevel

func (x *RPCOptions) GetCompressionLevel() int32

func (*RPCOptions) GetDisableSessionConnectionSharing

func (x *RPCOptions) GetDisableSessionConnectionSharing() bool

func (*RPCOptions) GetNumChannelsPerTarget

func (x *RPCOptions) GetNumChannelsPerTarget() int32

func (*RPCOptions) GetUseRpcForInprocessMaster

func (x *RPCOptions) GetUseRpcForInprocessMaster() bool

func (*RPCOptions) ProtoMessage

func (*RPCOptions) ProtoMessage()

func (*RPCOptions) ProtoReflect

func (x *RPCOptions) ProtoReflect() protoreflect.Message

func (*RPCOptions) Reset

func (x *RPCOptions) Reset()

func (*RPCOptions) String

func (x *RPCOptions) String() string

type RewriterConfig

type RewriterConfig struct {

	// CPU layout conversion settings between NHWC and NCHW.
	CpuLayoutConversion RewriterConfig_CpuLayout `` /* 163-byte string literal not displayed */
	// Optimize tensor layouts (default is ON)
	// e.g. This will try to use NCHW layout on GPU which is faster.
	LayoutOptimizer RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Fold constants (default is ON)
	// Statically infer the value of tensors when possible, and materialize the
	// result using constants.
	ConstantFolding RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Shape optimizations (default is ON)
	// Simplify computations made on shapes.
	ShapeOptimization RewriterConfig_Toggle `` /* 152-byte string literal not displayed */
	// Remapping (default is ON)
	// Remap subgraphs onto more efficient implementations.
	Remapping RewriterConfig_Toggle `protobuf:"varint,14,opt,name=remapping,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"remapping,omitempty"`
	// Common subgraph elimination (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	CommonSubgraphElimination RewriterConfig_Toggle `` /* 178-byte string literal not displayed */
	// Arithmetic optimizations (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	ArithmeticOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Control dependency optimizations (default is ON).
	// Remove redundant control dependencies, which may enable other optimization.
	DependencyOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Loop optimizations (default is ON).
	LoopOptimization RewriterConfig_Toggle `` /* 148-byte string literal not displayed */
	// Function optimizations (default is ON).
	FunctionOptimization RewriterConfig_Toggle `` /* 161-byte string literal not displayed */
	// Strips debug-related nodes from the graph (off by default).
	DebugStripper RewriterConfig_Toggle `` /* 140-byte string literal not displayed */
	// If true, don't remove unnecessary ops from the graph
	DisableModelPruning bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning,proto3" json:"disable_model_pruning,omitempty"`
	// Try to allocate some independent Op outputs contiguously in order to
	// merge or eliminate downstream Ops (off by default).
	ScopedAllocatorOptimization RewriterConfig_Toggle `` /* 184-byte string literal not displayed */
	// Force small ops onto the CPU (default is OFF).
	PinToHostOptimization RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Enable the swap of kernel implementations based on the device placement
	// (default is ON).
	ImplementationSelector RewriterConfig_Toggle `` /* 167-byte string literal not displayed */
	// Optimize data types for CUDA (default is OFF).
	// This will try to use float16 on GPU which is faster.
	// Note that this can change the numerical stability of the graph and may
	// require the use of loss scaling to maintain model convergence.
	AutoMixedPrecision RewriterConfig_Toggle `` /* 157-byte string literal not displayed */
	// Optimize data types for MKL (default is OFF).
	// This will try to use bfloat16 on CPUs, which is faster.
	// Note that this can change the numerical stability of the graph.
	AutoMixedPrecisionMkl RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Disable the entire meta optimizer (off by default).
	DisableMetaOptimizer bool `protobuf:"varint,19,opt,name=disable_meta_optimizer,json=disableMetaOptimizer,proto3" json:"disable_meta_optimizer,omitempty"`
	// Optimizers registered by plugin (default is ON)
	UsePluginOptimizers RewriterConfig_Toggle `` /* 160-byte string literal not displayed */
	// Controls how many times we run the optimizers in meta optimizer (default
	// is once).
	MetaOptimizerIterations RewriterConfig_NumIterationsType `` /* 183-byte string literal not displayed */
	// The minimum number of nodes in a graph to run the optimizer. For smaller graphs,
	// optimization is skipped.
	// 0 means the system picks an appropriate number.
	// < 0 means do not skip optimization.
	MinGraphNodes int32 `protobuf:"varint,17,opt,name=min_graph_nodes,json=minGraphNodes,proto3" json:"min_graph_nodes,omitempty"`
	// Disable optimizations that assume compressed tensors. Note that this flag
	// is experimental and may be removed in the future.
	ExperimentalDisableCompressedTensorOptimization bool `` /* 210-byte string literal not displayed */
	// Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
	// QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
	// have to extract quantization configs (e.g. min/max range, number of bits,
	// and per-channel) from the quantization emulation ops. Note that this flag
	// is experimental and may be removed in the future. See b/174138564 for more
	// details.
	ExperimentalDisableFoldingQuantizationEmulation bool `` /* 210-byte string literal not displayed */
	// Configures memory optimization passes through the meta-optimizer. Has no
	// effect on manually requested memory optimization passes in the optimizers
	// field.
	MemoryOptimization RewriterConfig_MemOptType `` /* 158-byte string literal not displayed */
	// A node name scope for node names which are valid outputs of recomputations.
	// Inputs to nodes that match this scope may be recomputed (subject either to
	// manual annotation of those input nodes or to manual annotation and
	// heuristics depending on memory_optimization), but the nodes themselves will
	// not be recomputed. This matches any sub-scopes as well, meaning the scope
	// can appear not just as a top-level scope. For example, if the value is
	// "gradients/", the default, it will match node name "gradients/foo",
	// "foo/gradients/bar", but not "foo_gradients/"
	MemoryOptimizerTargetNodeNameScope string `` /* 171-byte string literal not displayed */
	// Maximum number of milliseconds to spend optimizing a single graph before
	// timing out. If less than or equal to 0 (default value) the optimizer will
	// never time out.
	MetaOptimizerTimeoutMs int64 `` /* 133-byte string literal not displayed */
	// Configures AutoParallel optimization passes either through the
	// meta-optimizer or when manually specified through the optimizers field.
	AutoParallel *AutoParallelOptions `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel,proto3" json:"auto_parallel,omitempty"`
	// If true, any optimization pass failing will cause the MetaOptimizer to
	// stop with an error. By default - or when set to false, failing passes are
	// skipped silently.
	FailOnOptimizerErrors bool                    `` /* 130-byte string literal not displayed */
	ScopedAllocatorOpts   *ScopedAllocatorOptions `protobuf:"bytes,16,opt,name=scoped_allocator_opts,json=scopedAllocatorOpts,proto3" json:"scoped_allocator_opts,omitempty"`
	// If non-empty, will use this as an alternative way to specify a list of
	// optimizations to turn on and the order of the optimizations (replacing the
	// meta-optimizer).
	//
	// Of the RewriterConfig options, only the AutoParallel configuration options
	// (the auto_parallel field) apply to manually requested optimization passes
	// ("autoparallel"). Memory optimization passes ("memory") invoked here are
	// not configurable (in contrast to memory optimization passes through the
	// meta-optimizer) and act only on manual op annotations.
	//
	// Custom optimizers (see custom_optimizers) that are not part of this
	// schedule will be run after - in the order that they were specified.
	Optimizers []string `protobuf:"bytes,100,rep,name=optimizers,proto3" json:"optimizers,omitempty"`
	// list of CustomGraphOptimizers to apply.
	CustomOptimizers []*RewriterConfig_CustomGraphOptimizer `protobuf:"bytes,200,rep,name=custom_optimizers,json=customOptimizers,proto3" json:"custom_optimizers,omitempty"`
	// VerifierConfig specifying the verifiers to be run after every optimizer.
	InterOptimizerVerifierConfig *VerifierConfig `` /* 151-byte string literal not displayed */
	// VerifierConfig specifying the verifiers to be run at the end, after all
	// optimizers have run.
	PostOptimizationVerifierConfig *VerifierConfig `` /* 157-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*RewriterConfig) Descriptor deprecated

func (*RewriterConfig) Descriptor() ([]byte, []int)

Deprecated: Use RewriterConfig.ProtoReflect.Descriptor instead.

func (*RewriterConfig) GetArithmeticOptimization

func (x *RewriterConfig) GetArithmeticOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecision

func (x *RewriterConfig) GetAutoMixedPrecision() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecisionMkl

func (x *RewriterConfig) GetAutoMixedPrecisionMkl() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoParallel

func (x *RewriterConfig) GetAutoParallel() *AutoParallelOptions

func (*RewriterConfig) GetCommonSubgraphElimination

func (x *RewriterConfig) GetCommonSubgraphElimination() RewriterConfig_Toggle

func (*RewriterConfig) GetConstantFolding

func (x *RewriterConfig) GetConstantFolding() RewriterConfig_Toggle

func (*RewriterConfig) GetCpuLayoutConversion

func (x *RewriterConfig) GetCpuLayoutConversion() RewriterConfig_CpuLayout

func (*RewriterConfig) GetCustomOptimizers

func (x *RewriterConfig) GetCustomOptimizers() []*RewriterConfig_CustomGraphOptimizer

func (*RewriterConfig) GetDebugStripper

func (x *RewriterConfig) GetDebugStripper() RewriterConfig_Toggle

func (*RewriterConfig) GetDependencyOptimization

func (x *RewriterConfig) GetDependencyOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetDisableMetaOptimizer

func (x *RewriterConfig) GetDisableMetaOptimizer() bool

func (*RewriterConfig) GetDisableModelPruning

func (x *RewriterConfig) GetDisableModelPruning() bool

func (*RewriterConfig) GetExperimentalDisableCompressedTensorOptimization

func (x *RewriterConfig) GetExperimentalDisableCompressedTensorOptimization() bool

func (*RewriterConfig) GetExperimentalDisableFoldingQuantizationEmulation

func (x *RewriterConfig) GetExperimentalDisableFoldingQuantizationEmulation() bool

func (*RewriterConfig) GetFailOnOptimizerErrors

func (x *RewriterConfig) GetFailOnOptimizerErrors() bool

func (*RewriterConfig) GetFunctionOptimization

func (x *RewriterConfig) GetFunctionOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetImplementationSelector

func (x *RewriterConfig) GetImplementationSelector() RewriterConfig_Toggle

func (*RewriterConfig) GetInterOptimizerVerifierConfig

func (x *RewriterConfig) GetInterOptimizerVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetLayoutOptimizer

func (x *RewriterConfig) GetLayoutOptimizer() RewriterConfig_Toggle

func (*RewriterConfig) GetLoopOptimization

func (x *RewriterConfig) GetLoopOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetMemoryOptimization

func (x *RewriterConfig) GetMemoryOptimization() RewriterConfig_MemOptType

func (*RewriterConfig) GetMemoryOptimizerTargetNodeNameScope

func (x *RewriterConfig) GetMemoryOptimizerTargetNodeNameScope() string

func (*RewriterConfig) GetMetaOptimizerIterations

func (x *RewriterConfig) GetMetaOptimizerIterations() RewriterConfig_NumIterationsType

func (*RewriterConfig) GetMetaOptimizerTimeoutMs

func (x *RewriterConfig) GetMetaOptimizerTimeoutMs() int64

func (*RewriterConfig) GetMinGraphNodes

func (x *RewriterConfig) GetMinGraphNodes() int32

func (*RewriterConfig) GetOptimizers

func (x *RewriterConfig) GetOptimizers() []string

func (*RewriterConfig) GetPinToHostOptimization

func (x *RewriterConfig) GetPinToHostOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetPostOptimizationVerifierConfig

func (x *RewriterConfig) GetPostOptimizationVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetRemapping

func (x *RewriterConfig) GetRemapping() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOptimization

func (x *RewriterConfig) GetScopedAllocatorOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOpts

func (x *RewriterConfig) GetScopedAllocatorOpts() *ScopedAllocatorOptions

func (*RewriterConfig) GetShapeOptimization

func (x *RewriterConfig) GetShapeOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetUsePluginOptimizers

func (x *RewriterConfig) GetUsePluginOptimizers() RewriterConfig_Toggle

func (*RewriterConfig) ProtoMessage

func (*RewriterConfig) ProtoMessage()

func (*RewriterConfig) ProtoReflect

func (x *RewriterConfig) ProtoReflect() protoreflect.Message

func (*RewriterConfig) Reset

func (x *RewriterConfig) Reset()

func (*RewriterConfig) String

func (x *RewriterConfig) String() string

type RewriterConfig_CpuLayout

type RewriterConfig_CpuLayout int32

Enum for layout conversion between NCHW and NHWC on CPU. Default is OFF.

const (
	RewriterConfig_NO_CONVERSION_ON_CPU RewriterConfig_CpuLayout = 0
	RewriterConfig_NCHW_TO_NHWC         RewriterConfig_CpuLayout = 1
	RewriterConfig_NHWC_TO_NCHW         RewriterConfig_CpuLayout = 2
)

func (RewriterConfig_CpuLayout) Descriptor

func (RewriterConfig_CpuLayout) Enum

func (RewriterConfig_CpuLayout) EnumDescriptor deprecated

func (RewriterConfig_CpuLayout) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_CpuLayout.Descriptor instead.

func (RewriterConfig_CpuLayout) Number

func (RewriterConfig_CpuLayout) String

func (x RewriterConfig_CpuLayout) String() string

func (RewriterConfig_CpuLayout) Type

type RewriterConfig_CustomGraphOptimizer

type RewriterConfig_CustomGraphOptimizer struct {
	Name         string                          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	ParameterMap map[string]*framework.AttrValue `` /* 185-byte string literal not displayed */
	// contains filtered or unexported fields
}

Message to describe custom graph optimizer and its parameters

func (*RewriterConfig_CustomGraphOptimizer) Descriptor deprecated

func (*RewriterConfig_CustomGraphOptimizer) Descriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_CustomGraphOptimizer.ProtoReflect.Descriptor instead.

func (*RewriterConfig_CustomGraphOptimizer) GetName

func (*RewriterConfig_CustomGraphOptimizer) GetParameterMap

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage()

func (*RewriterConfig_CustomGraphOptimizer) ProtoReflect

func (*RewriterConfig_CustomGraphOptimizer) Reset

func (*RewriterConfig_CustomGraphOptimizer) String

type RewriterConfig_MemOptType

type RewriterConfig_MemOptType int32
const (
	// The default setting (SCHEDULING and SWAPPING HEURISTICS only)
	RewriterConfig_DEFAULT_MEM_OPT RewriterConfig_MemOptType = 0
	// Disabled in the meta-optimizer.
	RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 1
	// Driven by manual op-level annotations.
	RewriterConfig_MANUAL RewriterConfig_MemOptType = 2
	// Swapping heuristic will move a tensor from the GPU to the CPU and move
	// it back when needed to reduce peak memory usage.
	RewriterConfig_SWAPPING_HEURISTICS RewriterConfig_MemOptType = 4
	// Recomputation heuristics will recompute ops (such as Relu activation)
	// during backprop instead of storing them, reducing peak memory usage.
	RewriterConfig_RECOMPUTATION_HEURISTICS RewriterConfig_MemOptType = 5
	// Scheduling will split big ops such as AddN and try to enforce a schedule
	// of the new computations that decreases peak memory usage.
	RewriterConfig_SCHEDULING_HEURISTICS RewriterConfig_MemOptType = 6
	// Use any combination of swapping and recomputation heuristics.
	RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 3
)

func (RewriterConfig_MemOptType) Descriptor

func (RewriterConfig_MemOptType) Enum

func (RewriterConfig_MemOptType) EnumDescriptor deprecated

func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_MemOptType.Descriptor instead.

func (RewriterConfig_MemOptType) Number

func (RewriterConfig_MemOptType) String

func (x RewriterConfig_MemOptType) String() string

func (RewriterConfig_MemOptType) Type

type RewriterConfig_NumIterationsType

type RewriterConfig_NumIterationsType int32

Enum controlling the number of times to run optimizers. The default is to run them twice.

const (
	RewriterConfig_DEFAULT_NUM_ITERS RewriterConfig_NumIterationsType = 0
	RewriterConfig_ONE               RewriterConfig_NumIterationsType = 1
	RewriterConfig_TWO               RewriterConfig_NumIterationsType = 2
)

func (RewriterConfig_NumIterationsType) Descriptor

func (RewriterConfig_NumIterationsType) Enum

func (RewriterConfig_NumIterationsType) EnumDescriptor deprecated

func (RewriterConfig_NumIterationsType) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_NumIterationsType.Descriptor instead.

func (RewriterConfig_NumIterationsType) Number

func (RewriterConfig_NumIterationsType) String

func (RewriterConfig_NumIterationsType) Type

type RewriterConfig_Toggle

type RewriterConfig_Toggle int32
const (
	RewriterConfig_DEFAULT RewriterConfig_Toggle = 0
	RewriterConfig_ON      RewriterConfig_Toggle = 1
	RewriterConfig_OFF     RewriterConfig_Toggle = 2
	// Enable some aggressive optimizations that use assumptions that TF graphs
	// may break. For example, assume the shape of a placeholder matches its
	// actual feed.
	RewriterConfig_AGGRESSIVE RewriterConfig_Toggle = 3
)

func (RewriterConfig_Toggle) Descriptor

func (RewriterConfig_Toggle) Enum

func (RewriterConfig_Toggle) EnumDescriptor deprecated

func (RewriterConfig_Toggle) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_Toggle.Descriptor instead.

func (RewriterConfig_Toggle) Number

func (RewriterConfig_Toggle) String

func (x RewriterConfig_Toggle) String() string

func (RewriterConfig_Toggle) Type

type RunMetadata

type RunMetadata struct {

	// Statistics traced for this step. Populated if tracing is turned on via the
	// "RunOptions" proto.
	// EXPERIMENTAL: The format and set of events may change in future versions.
	StepStats *framework.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"`
	// The cost graph for the computation defined by the run call.
	CostGraph *framework.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph,proto3" json:"cost_graph,omitempty"`
	// Graphs of the partitions executed by executors.
	PartitionGraphs []*framework.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	// This is only populated for graphs that are run as functions in TensorFlow
	// V2. There will be an entry below for each function that is traced.
	// The main use cases of the post_optimization_graph and the partition_graphs
	// is to give the caller insight into the graphs that were actually run by the
	// runtime. Additional information (such as those in step_stats) will match
	// these graphs.
	// We also include the pre_optimization_graph since it is usually easier to
	// read, and is helpful in situations where the caller wants to get a high
	// level idea of what the built graph looks like (since the various graph
	// optimization passes might change the structure of the graph significantly).
	FunctionGraphs []*RunMetadata_FunctionGraphs `protobuf:"bytes,4,rep,name=function_graphs,json=functionGraphs,proto3" json:"function_graphs,omitempty"`
	// contains filtered or unexported fields
}

Metadata output (i.e., non-Tensor) for a single Run() call.

func (*RunMetadata) Descriptor deprecated

func (*RunMetadata) Descriptor() ([]byte, []int)

Deprecated: Use RunMetadata.ProtoReflect.Descriptor instead.

func (*RunMetadata) GetCostGraph

func (x *RunMetadata) GetCostGraph() *framework.CostGraphDef

func (*RunMetadata) GetFunctionGraphs

func (x *RunMetadata) GetFunctionGraphs() []*RunMetadata_FunctionGraphs

func (*RunMetadata) GetPartitionGraphs

func (x *RunMetadata) GetPartitionGraphs() []*framework.GraphDef

func (*RunMetadata) GetStepStats

func (x *RunMetadata) GetStepStats() *framework.StepStats

func (*RunMetadata) ProtoMessage

func (*RunMetadata) ProtoMessage()

func (*RunMetadata) ProtoReflect

func (x *RunMetadata) ProtoReflect() protoreflect.Message

func (*RunMetadata) Reset

func (x *RunMetadata) Reset()

func (*RunMetadata) String

func (x *RunMetadata) String() string

type RunMetadata_FunctionGraphs

type RunMetadata_FunctionGraphs struct {

	// TODO(nareshmodi): Include some sort of function/cache-key identifier?
	PartitionGraphs       []*framework.GraphDef `protobuf:"bytes,1,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	PreOptimizationGraph  *framework.GraphDef   `protobuf:"bytes,2,opt,name=pre_optimization_graph,json=preOptimizationGraph,proto3" json:"pre_optimization_graph,omitempty"`
	PostOptimizationGraph *framework.GraphDef   `` /* 126-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*RunMetadata_FunctionGraphs) Descriptor deprecated

func (*RunMetadata_FunctionGraphs) Descriptor() ([]byte, []int)

Deprecated: Use RunMetadata_FunctionGraphs.ProtoReflect.Descriptor instead.

func (*RunMetadata_FunctionGraphs) GetPartitionGraphs

func (x *RunMetadata_FunctionGraphs) GetPartitionGraphs() []*framework.GraphDef

func (*RunMetadata_FunctionGraphs) GetPostOptimizationGraph

func (x *RunMetadata_FunctionGraphs) GetPostOptimizationGraph() *framework.GraphDef

func (*RunMetadata_FunctionGraphs) GetPreOptimizationGraph

func (x *RunMetadata_FunctionGraphs) GetPreOptimizationGraph() *framework.GraphDef

func (*RunMetadata_FunctionGraphs) ProtoMessage

func (*RunMetadata_FunctionGraphs) ProtoMessage()

func (*RunMetadata_FunctionGraphs) ProtoReflect

func (*RunMetadata_FunctionGraphs) Reset

func (x *RunMetadata_FunctionGraphs) Reset()

func (*RunMetadata_FunctionGraphs) String

func (x *RunMetadata_FunctionGraphs) String() string

type RunOptions

type RunOptions struct {
	TraceLevel RunOptions_TraceLevel `` /* 130-byte string literal not displayed */
	// Time to wait for operation to complete in milliseconds.
	TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs,proto3" json:"timeout_in_ms,omitempty"`
	// The thread pool to use, if session_inter_op_thread_pool is configured.
	// To use the caller thread set this to -1 - this uses the caller thread
	// to execute Session::Run() and thus avoids a context switch. Using the
	// caller thread to execute Session::Run() should be done ONLY for simple
	// graphs, where the overhead of an additional context switch is
	// comparable with the overhead of Session::Run().
	InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool,proto3" json:"inter_op_thread_pool,omitempty"`
	// Whether the partition graph(s) executed by the executor(s) should be
	// output via RunMetadata.
	OutputPartitionGraphs bool `` /* 127-byte string literal not displayed */
	// EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
	DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions,proto3" json:"debug_options,omitempty"`
	// When enabled, causes tensor allocation information to be included in
	// the error message when the Run() call fails because the allocator ran
	// out of memory (OOM).
	//
	// Enabling this option can slow down the Run() call.
	ReportTensorAllocationsUponOom bool                     `` /* 158-byte string literal not displayed */
	Experimental                   *RunOptions_Experimental `protobuf:"bytes,8,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}

Options for a single Run() call.

func (*RunOptions) Descriptor deprecated

func (*RunOptions) Descriptor() ([]byte, []int)

Deprecated: Use RunOptions.ProtoReflect.Descriptor instead.

func (*RunOptions) GetDebugOptions

func (x *RunOptions) GetDebugOptions() *DebugOptions

func (*RunOptions) GetExperimental

func (x *RunOptions) GetExperimental() *RunOptions_Experimental

func (*RunOptions) GetInterOpThreadPool

func (x *RunOptions) GetInterOpThreadPool() int32

func (*RunOptions) GetOutputPartitionGraphs

func (x *RunOptions) GetOutputPartitionGraphs() bool

func (*RunOptions) GetReportTensorAllocationsUponOom

func (x *RunOptions) GetReportTensorAllocationsUponOom() bool

func (*RunOptions) GetTimeoutInMs

func (x *RunOptions) GetTimeoutInMs() int64

func (*RunOptions) GetTraceLevel

func (x *RunOptions) GetTraceLevel() RunOptions_TraceLevel

func (*RunOptions) ProtoMessage

func (*RunOptions) ProtoMessage()

func (*RunOptions) ProtoReflect

func (x *RunOptions) ProtoReflect() protoreflect.Message

func (*RunOptions) Reset

func (x *RunOptions) Reset()

func (*RunOptions) String

func (x *RunOptions) String() string

type RunOptions_Experimental

type RunOptions_Experimental struct {

	// If non-zero, declares that this graph is going to use collective
	// ops and must synchronize step_ids with any other graph with this
	// same group_key value (in a distributed computation where tasks
	// run disjoint graphs).
	CollectiveGraphKey int64 `protobuf:"varint,1,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"`
	// If true, then operations (using the inter-op pool) across all
	// session::run() calls will be centrally scheduled, optimizing for (median
	// and tail) latency.
	// Consider using this option for CPU-bound workloads like inference.
	UseRunHandlerPool     bool                                           `protobuf:"varint,2,opt,name=use_run_handler_pool,json=useRunHandlerPool,proto3" json:"use_run_handler_pool,omitempty"`
	RunHandlerPoolOptions *RunOptions_Experimental_RunHandlerPoolOptions `` /* 128-byte string literal not displayed */
	// contains filtered or unexported fields
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*RunOptions_Experimental) Descriptor deprecated

func (*RunOptions_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use RunOptions_Experimental.ProtoReflect.Descriptor instead.

func (*RunOptions_Experimental) GetCollectiveGraphKey

func (x *RunOptions_Experimental) GetCollectiveGraphKey() int64

func (*RunOptions_Experimental) GetRunHandlerPoolOptions

func (*RunOptions_Experimental) GetUseRunHandlerPool

func (x *RunOptions_Experimental) GetUseRunHandlerPool() bool

func (*RunOptions_Experimental) ProtoMessage

func (*RunOptions_Experimental) ProtoMessage()

func (*RunOptions_Experimental) ProtoReflect

func (x *RunOptions_Experimental) ProtoReflect() protoreflect.Message

func (*RunOptions_Experimental) Reset

func (x *RunOptions_Experimental) Reset()

func (*RunOptions_Experimental) String

func (x *RunOptions_Experimental) String() string

type RunOptions_Experimental_RunHandlerPoolOptions

type RunOptions_Experimental_RunHandlerPoolOptions struct {

	// Priority of the request. The run handler thread pool will schedule ops
	// based on the priority number. The larger number means higher priority.
	Priority int64 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"`
	// contains filtered or unexported fields
}

Options for run handler thread pool.

func (*RunOptions_Experimental_RunHandlerPoolOptions) Descriptor deprecated

Deprecated: Use RunOptions_Experimental_RunHandlerPoolOptions.ProtoReflect.Descriptor instead.

func (*RunOptions_Experimental_RunHandlerPoolOptions) GetPriority

func (*RunOptions_Experimental_RunHandlerPoolOptions) ProtoMessage

func (*RunOptions_Experimental_RunHandlerPoolOptions) ProtoReflect

func (*RunOptions_Experimental_RunHandlerPoolOptions) Reset

func (*RunOptions_Experimental_RunHandlerPoolOptions) String

type RunOptions_TraceLevel

type RunOptions_TraceLevel int32

TODO(pbar) Turn this into a TraceOptions proto which allows tracing to be controlled in a more orthogonal manner?

const (
	RunOptions_NO_TRACE       RunOptions_TraceLevel = 0
	RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
	RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
	RunOptions_FULL_TRACE     RunOptions_TraceLevel = 3
)

func (RunOptions_TraceLevel) Descriptor

func (RunOptions_TraceLevel) Enum

func (RunOptions_TraceLevel) EnumDescriptor deprecated

func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int)

Deprecated: Use RunOptions_TraceLevel.Descriptor instead.

func (RunOptions_TraceLevel) Number

func (RunOptions_TraceLevel) String

func (x RunOptions_TraceLevel) String() string

func (RunOptions_TraceLevel) Type

type SaveableObject

type SaveableObject struct {

	// Node ids of concrete functions for saving and loading from a checkpoint.
	SaveFunction    int32 `protobuf:"varint,2,opt,name=save_function,json=saveFunction,proto3" json:"save_function,omitempty"`
	RestoreFunction int32 `protobuf:"varint,3,opt,name=restore_function,json=restoreFunction,proto3" json:"restore_function,omitempty"`
	// contains filtered or unexported fields
}

func (*SaveableObject) Descriptor deprecated

func (*SaveableObject) Descriptor() ([]byte, []int)

Deprecated: Use SaveableObject.ProtoReflect.Descriptor instead.

func (*SaveableObject) GetRestoreFunction

func (x *SaveableObject) GetRestoreFunction() int32

func (*SaveableObject) GetSaveFunction

func (x *SaveableObject) GetSaveFunction() int32

func (*SaveableObject) ProtoMessage

func (*SaveableObject) ProtoMessage()

func (*SaveableObject) ProtoReflect

func (x *SaveableObject) ProtoReflect() protoreflect.Message

func (*SaveableObject) Reset

func (x *SaveableObject) Reset()

func (*SaveableObject) String

func (x *SaveableObject) String() string

type SavedAsset

type SavedAsset struct {

	// Index into `MetaGraphDef.asset_file_def[]` that describes the Asset.
	//
	// Only the field `AssetFileDef.filename` is used. Other fields, such as
	// `AssetFileDef.tensor_info`, MUST be ignored.
	AssetFileDefIndex int32 `protobuf:"varint,1,opt,name=asset_file_def_index,json=assetFileDefIndex,proto3" json:"asset_file_def_index,omitempty"`
	// contains filtered or unexported fields
}

A SavedAsset points to an asset in the MetaGraph.

When bound to a function this object evaluates to a tensor with the absolute filename. Users should not depend on a particular part of the filename to remain stable (e.g. basename could be changed).

func (*SavedAsset) Descriptor deprecated

func (*SavedAsset) Descriptor() ([]byte, []int)

Deprecated: Use SavedAsset.ProtoReflect.Descriptor instead.

func (*SavedAsset) GetAssetFileDefIndex

func (x *SavedAsset) GetAssetFileDefIndex() int32

func (*SavedAsset) ProtoMessage

func (*SavedAsset) ProtoMessage()

func (*SavedAsset) ProtoReflect

func (x *SavedAsset) ProtoReflect() protoreflect.Message

func (*SavedAsset) Reset

func (x *SavedAsset) Reset()

func (*SavedAsset) String

func (x *SavedAsset) String() string

type SavedBareConcreteFunction

type SavedBareConcreteFunction struct {

	// Identifies a SavedConcreteFunction.
	ConcreteFunctionName string `protobuf:"bytes,1,opt,name=concrete_function_name,json=concreteFunctionName,proto3" json:"concrete_function_name,omitempty"`
	// A sequence of unique strings, one per Tensor argument.
	ArgumentKeywords []string `protobuf:"bytes,2,rep,name=argument_keywords,json=argumentKeywords,proto3" json:"argument_keywords,omitempty"`
	// The prefix of `argument_keywords` which may be identified by position.
	AllowedPositionalArguments int64 `` /* 142-byte string literal not displayed */
	// The spec of the function that this ConcreteFunction is traced from. This
	// allows the ConcreteFunction to be called with nest structure inputs. This
	// field may not be populated. If this field is absent, the concrete function
	// can only be called with flat inputs.
	// TODO(b/169361281): support calling saved ConcreteFunction with structured
	// inputs in C++ SavedModel API.
	FunctionSpec *FunctionSpec `protobuf:"bytes,4,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"`
	// contains filtered or unexported fields
}

func (*SavedBareConcreteFunction) Descriptor deprecated

func (*SavedBareConcreteFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedBareConcreteFunction.ProtoReflect.Descriptor instead.

func (*SavedBareConcreteFunction) GetAllowedPositionalArguments

func (x *SavedBareConcreteFunction) GetAllowedPositionalArguments() int64

func (*SavedBareConcreteFunction) GetArgumentKeywords

func (x *SavedBareConcreteFunction) GetArgumentKeywords() []string

func (*SavedBareConcreteFunction) GetConcreteFunctionName

func (x *SavedBareConcreteFunction) GetConcreteFunctionName() string

func (*SavedBareConcreteFunction) GetFunctionSpec

func (x *SavedBareConcreteFunction) GetFunctionSpec() *FunctionSpec

func (*SavedBareConcreteFunction) ProtoMessage

func (*SavedBareConcreteFunction) ProtoMessage()

func (*SavedBareConcreteFunction) ProtoReflect

func (*SavedBareConcreteFunction) Reset

func (x *SavedBareConcreteFunction) Reset()

func (*SavedBareConcreteFunction) String

func (x *SavedBareConcreteFunction) String() string

type SavedConcreteFunction

type SavedConcreteFunction struct {
	BoundInputs []int32 `protobuf:"varint,2,rep,packed,name=bound_inputs,json=boundInputs,proto3" json:"bound_inputs,omitempty"`
	// Input in canonicalized form that was received to create this concrete
	// function.
	CanonicalizedInputSignature *StructuredValue `` /* 144-byte string literal not displayed */
	// Output that was the return value of this function after replacing all
	// Tensors with TensorSpecs. This can be an arbitrary nested structure and will
	// be used to reconstruct the full structure from pure tensors.
	OutputSignature *StructuredValue `protobuf:"bytes,4,opt,name=output_signature,json=outputSignature,proto3" json:"output_signature,omitempty"`
	// contains filtered or unexported fields
}

Stores low-level information about a concrete function. Referenced in either a SavedFunction or a SavedBareConcreteFunction.

func (*SavedConcreteFunction) Descriptor deprecated

func (*SavedConcreteFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedConcreteFunction.ProtoReflect.Descriptor instead.

func (*SavedConcreteFunction) GetBoundInputs

func (x *SavedConcreteFunction) GetBoundInputs() []int32

func (*SavedConcreteFunction) GetCanonicalizedInputSignature

func (x *SavedConcreteFunction) GetCanonicalizedInputSignature() *StructuredValue

func (*SavedConcreteFunction) GetOutputSignature

func (x *SavedConcreteFunction) GetOutputSignature() *StructuredValue

func (*SavedConcreteFunction) ProtoMessage

func (*SavedConcreteFunction) ProtoMessage()

func (*SavedConcreteFunction) ProtoReflect

func (x *SavedConcreteFunction) ProtoReflect() protoreflect.Message

func (*SavedConcreteFunction) Reset

func (x *SavedConcreteFunction) Reset()

func (*SavedConcreteFunction) String

func (x *SavedConcreteFunction) String() string

type SavedConstant

type SavedConstant struct {

	// An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph.
	Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
	// contains filtered or unexported fields
}

func (*SavedConstant) Descriptor deprecated

func (*SavedConstant) Descriptor() ([]byte, []int)

Deprecated: Use SavedConstant.ProtoReflect.Descriptor instead.

func (*SavedConstant) GetOperation

func (x *SavedConstant) GetOperation() string

func (*SavedConstant) ProtoMessage

func (*SavedConstant) ProtoMessage()

func (*SavedConstant) ProtoReflect

func (x *SavedConstant) ProtoReflect() protoreflect.Message

func (*SavedConstant) Reset

func (x *SavedConstant) Reset()

func (*SavedConstant) String

func (x *SavedConstant) String() string

type SavedFunction

type SavedFunction struct {
	ConcreteFunctions []string      `protobuf:"bytes,1,rep,name=concrete_functions,json=concreteFunctions,proto3" json:"concrete_functions,omitempty"`
	FunctionSpec      *FunctionSpec `protobuf:"bytes,2,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"`
	// contains filtered or unexported fields
}

A function with multiple signatures, possibly with non-Tensor arguments.

func (*SavedFunction) Descriptor deprecated

func (*SavedFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedFunction.ProtoReflect.Descriptor instead.

func (*SavedFunction) GetConcreteFunctions

func (x *SavedFunction) GetConcreteFunctions() []string

func (*SavedFunction) GetFunctionSpec

func (x *SavedFunction) GetFunctionSpec() *FunctionSpec

func (*SavedFunction) ProtoMessage

func (*SavedFunction) ProtoMessage()

func (*SavedFunction) ProtoReflect

func (x *SavedFunction) ProtoReflect() protoreflect.Message

func (*SavedFunction) Reset

func (x *SavedFunction) Reset()

func (*SavedFunction) String

func (x *SavedFunction) String() string

type SavedObject

type SavedObject struct {

	// Objects which this object depends on: named edges in the dependency
	// graph.
	//
	// Note: currently only valid if kind == "user_object" or "resource".
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Slot variables owned by this object. This describes the three-way
	// (optimizer, variable, slot variable) relationship; none of the three
	// depend on the others directly.
	//
	// Note: currently only valid if kind == "user_object".
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
	// Types that are assignable to Kind:
	//	*SavedObject_UserObject
	//	*SavedObject_Asset
	//	*SavedObject_Function
	//	*SavedObject_Variable
	//	*SavedObject_BareConcreteFunction
	//	*SavedObject_Constant
	//	*SavedObject_Resource
	//	*SavedObject_CapturedTensor
	Kind            isSavedObject_Kind         `protobuf_oneof:"kind"`
	SaveableObjects map[string]*SaveableObject `` /* 195-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*SavedObject) Descriptor deprecated

func (*SavedObject) Descriptor() ([]byte, []int)

Deprecated: Use SavedObject.ProtoReflect.Descriptor instead.

func (*SavedObject) GetAsset

func (x *SavedObject) GetAsset() *SavedAsset

func (*SavedObject) GetBareConcreteFunction

func (x *SavedObject) GetBareConcreteFunction() *SavedBareConcreteFunction

func (*SavedObject) GetCapturedTensor

func (x *SavedObject) GetCapturedTensor() *CapturedTensor

func (*SavedObject) GetChildren

func (*SavedObject) GetConstant

func (x *SavedObject) GetConstant() *SavedConstant

func (*SavedObject) GetFunction

func (x *SavedObject) GetFunction() *SavedFunction

func (*SavedObject) GetKind

func (m *SavedObject) GetKind() isSavedObject_Kind

func (*SavedObject) GetResource

func (x *SavedObject) GetResource() *SavedResource

func (*SavedObject) GetSaveableObjects

func (x *SavedObject) GetSaveableObjects() map[string]*SaveableObject

func (*SavedObject) GetUserObject

func (x *SavedObject) GetUserObject() *SavedUserObject

func (*SavedObject) GetVariable

func (x *SavedObject) GetVariable() *SavedVariable

func (*SavedObject) ProtoMessage

func (*SavedObject) ProtoMessage()

func (*SavedObject) ProtoReflect

func (x *SavedObject) ProtoReflect() protoreflect.Message

func (*SavedObject) Reset

func (x *SavedObject) Reset()

func (*SavedObject) String

func (x *SavedObject) String() string

type SavedObjectGraph

type SavedObjectGraph struct {

	// Flattened list of objects in the object graph.
	//
	// The position of the object in this list indicates its id.
	// Nodes[0] is considered the root node.
	Nodes []*SavedObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// Information about captures and output structures in concrete functions.
	// Referenced from SavedBareConcreteFunction and SavedFunction.
	ConcreteFunctions map[string]*SavedConcreteFunction `` /* 200-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*SavedObjectGraph) Descriptor deprecated

func (*SavedObjectGraph) Descriptor() ([]byte, []int)

Deprecated: Use SavedObjectGraph.ProtoReflect.Descriptor instead.

func (*SavedObjectGraph) GetConcreteFunctions

func (x *SavedObjectGraph) GetConcreteFunctions() map[string]*SavedConcreteFunction

func (*SavedObjectGraph) GetNodes

func (x *SavedObjectGraph) GetNodes() []*SavedObject

func (*SavedObjectGraph) ProtoMessage

func (*SavedObjectGraph) ProtoMessage()

func (*SavedObjectGraph) ProtoReflect

func (x *SavedObjectGraph) ProtoReflect() protoreflect.Message

func (*SavedObjectGraph) Reset

func (x *SavedObjectGraph) Reset()

func (*SavedObjectGraph) String

func (x *SavedObjectGraph) String() string

type SavedObject_Asset

type SavedObject_Asset struct {
	Asset *SavedAsset `protobuf:"bytes,5,opt,name=asset,proto3,oneof"`
}

type SavedObject_BareConcreteFunction

type SavedObject_BareConcreteFunction struct {
	BareConcreteFunction *SavedBareConcreteFunction `protobuf:"bytes,8,opt,name=bare_concrete_function,json=bareConcreteFunction,proto3,oneof"`
}

type SavedObject_CapturedTensor

type SavedObject_CapturedTensor struct {
	CapturedTensor *CapturedTensor `protobuf:"bytes,12,opt,name=captured_tensor,json=capturedTensor,proto3,oneof"`
}

type SavedObject_Constant

type SavedObject_Constant struct {
	Constant *SavedConstant `protobuf:"bytes,9,opt,name=constant,proto3,oneof"`
}

type SavedObject_Function

type SavedObject_Function struct {
	Function *SavedFunction `protobuf:"bytes,6,opt,name=function,proto3,oneof"`
}

type SavedObject_Resource

type SavedObject_Resource struct {
	Resource *SavedResource `protobuf:"bytes,10,opt,name=resource,proto3,oneof"`
}

type SavedObject_UserObject

type SavedObject_UserObject struct {
	UserObject *SavedUserObject `protobuf:"bytes,4,opt,name=user_object,json=userObject,proto3,oneof"`
}

type SavedObject_Variable

type SavedObject_Variable struct {
	Variable *SavedVariable `protobuf:"bytes,7,opt,name=variable,proto3,oneof"`
}

type SavedResource

type SavedResource struct {

	// A device specification indicating a required placement for the resource
	// creation function, e.g. "CPU". An empty string allows the user to select a
	// device.
	Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
	// contains filtered or unexported fields
}

A SavedResource represents a TF object that holds state during its lifetime. An object of this type can have a reference to a create_resource() and an initialize() function.

func (*SavedResource) Descriptor deprecated

func (*SavedResource) Descriptor() ([]byte, []int)

Deprecated: Use SavedResource.ProtoReflect.Descriptor instead.

func (*SavedResource) GetDevice

func (x *SavedResource) GetDevice() string

func (*SavedResource) ProtoMessage

func (*SavedResource) ProtoMessage()

func (*SavedResource) ProtoReflect

func (x *SavedResource) ProtoReflect() protoreflect.Message

func (*SavedResource) Reset

func (x *SavedResource) Reset()

func (*SavedResource) String

func (x *SavedResource) String() string

type SavedUserObject

type SavedUserObject struct {

	// Corresponds to a registration of the type to use in the loading program.
	Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
	// Version information from the producer of this SavedUserObject.
	Version *framework.VersionDef `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Metadata for deserializing this object.
	//
	// Deprecated! At the time of deprecation, Keras was the only user of this
	// field, and its saving and loading code will be updated shortly.
	// Please save your application-specific metadata to a separate file.
	//
	// Deprecated: Do not use.
	Metadata string `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// contains filtered or unexported fields
}

A SavedUserObject is an object (in the object-oriented language of the TensorFlow program) of some user- or framework-defined class other than those handled specifically by the other kinds of SavedObjects.

This object cannot be evaluated as a tensor, and therefore cannot be bound to an input of a function.

func (*SavedUserObject) Descriptor deprecated

func (*SavedUserObject) Descriptor() ([]byte, []int)

Deprecated: Use SavedUserObject.ProtoReflect.Descriptor instead.

func (*SavedUserObject) GetIdentifier

func (x *SavedUserObject) GetIdentifier() string

func (*SavedUserObject) GetMetadata deprecated

func (x *SavedUserObject) GetMetadata() string

Deprecated: Do not use.

func (*SavedUserObject) GetVersion

func (x *SavedUserObject) GetVersion() *framework.VersionDef

func (*SavedUserObject) ProtoMessage

func (*SavedUserObject) ProtoMessage()

func (*SavedUserObject) ProtoReflect

func (x *SavedUserObject) ProtoReflect() protoreflect.Message

func (*SavedUserObject) Reset

func (x *SavedUserObject) Reset()

func (*SavedUserObject) String

func (x *SavedUserObject) String() string

type SavedVariable

type SavedVariable struct {
	Dtype           framework.DataType                `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape           *framework.TensorShapeProto       `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Trainable       bool                              `protobuf:"varint,3,opt,name=trainable,proto3" json:"trainable,omitempty"`
	Synchronization framework.VariableSynchronization `protobuf:"varint,4,opt,name=synchronization,proto3,enum=tensorflow.VariableSynchronization" json:"synchronization,omitempty"`
	Aggregation     framework.VariableAggregation     `protobuf:"varint,5,opt,name=aggregation,proto3,enum=tensorflow.VariableAggregation" json:"aggregation,omitempty"`
	Name            string                            `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
	Device          string                            `protobuf:"bytes,7,opt,name=device,proto3" json:"device,omitempty"`
	// List of component variables for a distributed variable.
	//
	// When this field is non-empty, the SavedVariable will be assumed
	// to be a distributed variable defined by the components listed here.
	//
	// This is only supported by experimental loaders at the moment.
	ExperimentalDistributedVariableComponents []*SavedVariable `` /* 188-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents a Variable that is initialized by loading the contents from the checkpoint.

func (*SavedVariable) Descriptor deprecated

func (*SavedVariable) Descriptor() ([]byte, []int)

Deprecated: Use SavedVariable.ProtoReflect.Descriptor instead.

func (*SavedVariable) GetAggregation

func (x *SavedVariable) GetAggregation() framework.VariableAggregation

func (*SavedVariable) GetDevice

func (x *SavedVariable) GetDevice() string

func (*SavedVariable) GetDtype

func (x *SavedVariable) GetDtype() framework.DataType

func (*SavedVariable) GetExperimentalDistributedVariableComponents

func (x *SavedVariable) GetExperimentalDistributedVariableComponents() []*SavedVariable

func (*SavedVariable) GetName

func (x *SavedVariable) GetName() string

func (*SavedVariable) GetShape

func (x *SavedVariable) GetShape() *framework.TensorShapeProto

func (*SavedVariable) GetSynchronization

func (x *SavedVariable) GetSynchronization() framework.VariableSynchronization

func (*SavedVariable) GetTrainable

func (x *SavedVariable) GetTrainable() bool

func (*SavedVariable) ProtoMessage

func (*SavedVariable) ProtoMessage()

func (*SavedVariable) ProtoReflect

func (x *SavedVariable) ProtoReflect() protoreflect.Message

func (*SavedVariable) Reset

func (x *SavedVariable) Reset()

func (*SavedVariable) String

func (x *SavedVariable) String() string

type SaverDef

type SaverDef struct {

	// The name of the tensor in which to specify the filename when saving or
	// restoring a model checkpoint.
	FilenameTensorName string `protobuf:"bytes,1,opt,name=filename_tensor_name,json=filenameTensorName,proto3" json:"filename_tensor_name,omitempty"`
	// The operation to run when saving a model checkpoint.
	SaveTensorName string `protobuf:"bytes,2,opt,name=save_tensor_name,json=saveTensorName,proto3" json:"save_tensor_name,omitempty"`
	// The operation to run when restoring a model checkpoint.
	RestoreOpName string `protobuf:"bytes,3,opt,name=restore_op_name,json=restoreOpName,proto3" json:"restore_op_name,omitempty"`
	// Maximum number of checkpoints to keep.  If 0, no checkpoints are deleted.
	MaxToKeep int32 `protobuf:"varint,4,opt,name=max_to_keep,json=maxToKeep,proto3" json:"max_to_keep,omitempty"`
	// Shard the save files, one per device that has Variable nodes.
	Sharded bool `protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"`
	// How often to keep an additional checkpoint. If not specified, only the last
	// "max_to_keep" checkpoints are kept; if specified, in addition to keeping
	// the last "max_to_keep" checkpoints, an additional checkpoint will be kept
	// for every n hours of training.
	KeepCheckpointEveryNHours float32                          `` /* 144-byte string literal not displayed */
	Version                   SaverDef_CheckpointFormatVersion `protobuf:"varint,7,opt,name=version,proto3,enum=tensorflow.SaverDef_CheckpointFormatVersion" json:"version,omitempty"`
	// contains filtered or unexported fields
}

Protocol buffer representing the configuration of a Saver.

func (*SaverDef) Descriptor deprecated

func (*SaverDef) Descriptor() ([]byte, []int)

Deprecated: Use SaverDef.ProtoReflect.Descriptor instead.

func (*SaverDef) GetFilenameTensorName

func (x *SaverDef) GetFilenameTensorName() string

func (*SaverDef) GetKeepCheckpointEveryNHours

func (x *SaverDef) GetKeepCheckpointEveryNHours() float32

func (*SaverDef) GetMaxToKeep

func (x *SaverDef) GetMaxToKeep() int32

func (*SaverDef) GetRestoreOpName

func (x *SaverDef) GetRestoreOpName() string

func (*SaverDef) GetSaveTensorName

func (x *SaverDef) GetSaveTensorName() string

func (*SaverDef) GetSharded

func (x *SaverDef) GetSharded() bool

func (*SaverDef) GetVersion

func (*SaverDef) ProtoMessage

func (*SaverDef) ProtoMessage()

func (*SaverDef) ProtoReflect

func (x *SaverDef) ProtoReflect() protoreflect.Message

func (*SaverDef) Reset

func (x *SaverDef) Reset()

func (*SaverDef) String

func (x *SaverDef) String() string

type SaverDef_CheckpointFormatVersion

type SaverDef_CheckpointFormatVersion int32

A version number that identifies a different on-disk checkpoint format. Usually, each subclass of BaseSaverBuilder works with a particular version/format. However, it is possible that the same builder may be upgraded to support a newer checkpoint format in the future.

const (
	// Internal legacy format.
	SaverDef_LEGACY SaverDef_CheckpointFormatVersion = 0
	// Deprecated format: tf.Saver() which works with tensorflow::table::Table.
	SaverDef_V1 SaverDef_CheckpointFormatVersion = 1
	// Current format: more efficient.
	SaverDef_V2 SaverDef_CheckpointFormatVersion = 2
)

func (SaverDef_CheckpointFormatVersion) Descriptor

func (SaverDef_CheckpointFormatVersion) Enum

func (SaverDef_CheckpointFormatVersion) EnumDescriptor deprecated

func (SaverDef_CheckpointFormatVersion) EnumDescriptor() ([]byte, []int)

Deprecated: Use SaverDef_CheckpointFormatVersion.Descriptor instead.

func (SaverDef_CheckpointFormatVersion) Number

func (SaverDef_CheckpointFormatVersion) String

func (SaverDef_CheckpointFormatVersion) Type

type ScopedAllocatorOptions

type ScopedAllocatorOptions struct {

	// If present, only perform optimization for these ops.
	EnableOp []string `protobuf:"bytes,1,rep,name=enable_op,json=enableOp,proto3" json:"enable_op,omitempty"`
	// contains filtered or unexported fields
}

func (*ScopedAllocatorOptions) Descriptor deprecated

func (*ScopedAllocatorOptions) Descriptor() ([]byte, []int)

Deprecated: Use ScopedAllocatorOptions.ProtoReflect.Descriptor instead.

func (*ScopedAllocatorOptions) GetEnableOp

func (x *ScopedAllocatorOptions) GetEnableOp() []string

func (*ScopedAllocatorOptions) ProtoMessage

func (*ScopedAllocatorOptions) ProtoMessage()

func (*ScopedAllocatorOptions) ProtoReflect

func (x *ScopedAllocatorOptions) ProtoReflect() protoreflect.Message

func (*ScopedAllocatorOptions) Reset

func (x *ScopedAllocatorOptions) Reset()

func (*ScopedAllocatorOptions) String

func (x *ScopedAllocatorOptions) String() string

type SessionMetadata

type SessionMetadata struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The version is optional. If set, needs to be >= 0.
	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

Metadata about the session.

This can be used by the runtime and the Ops for debugging, monitoring, etc.

The (name, version) tuple is expected to be a unique identifier for sessions within the same process.

NOTE: This is currently used and propagated only by the direct session.

func (*SessionMetadata) Descriptor deprecated

func (*SessionMetadata) Descriptor() ([]byte, []int)

Deprecated: Use SessionMetadata.ProtoReflect.Descriptor instead.

func (*SessionMetadata) GetName

func (x *SessionMetadata) GetName() string

func (*SessionMetadata) GetVersion

func (x *SessionMetadata) GetVersion() int64

func (*SessionMetadata) ProtoMessage

func (*SessionMetadata) ProtoMessage()

func (*SessionMetadata) ProtoReflect

func (x *SessionMetadata) ProtoReflect() protoreflect.Message

func (*SessionMetadata) Reset

func (x *SessionMetadata) Reset()

func (*SessionMetadata) String

func (x *SessionMetadata) String() string

type SignatureDef

type SignatureDef struct {

	// Named input parameters.
	Inputs map[string]*TensorInfo `` /* 153-byte string literal not displayed */
	// Named output parameters.
	Outputs map[string]*TensorInfo `` /* 155-byte string literal not displayed */
	// Extensible method_name information enabling third-party users to mark a
	// SignatureDef as supporting a particular method. This enables producers and
	// consumers of SignatureDefs, e.g. a model definition library and a serving
	// library to have a clear hand-off regarding the semantics of a computation.
	//
	// Note that multiple SignatureDefs in a single MetaGraphDef may have the same
	// method_name. This is commonly used to support multi-headed computation,
	// where a single graph computation may return multiple results.
	MethodName string `protobuf:"bytes,3,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
	// contains filtered or unexported fields
}

SignatureDef defines the signature of a computation supported by a TensorFlow graph.

For example, a model with two loss computations, sharing a single input, might have the following signature_def map, in a MetaGraphDef message.

Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, output key, and method_name are identical, and will be used by system(s) that implement or rely upon this particular loss method. The output tensor names differ, demonstrating how different outputs can exist for the same method.

signature_def {
  key: "loss_A"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_A:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
    method_name: "some/package/compute_loss"
  }
  ...
}
signature_def {
  key: "loss_B"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_B:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
    method_name: "some/package/compute_loss"
  }
  ...
}

func (*SignatureDef) Descriptor deprecated

func (*SignatureDef) Descriptor() ([]byte, []int)

Deprecated: Use SignatureDef.ProtoReflect.Descriptor instead.

func (*SignatureDef) GetInputs

func (x *SignatureDef) GetInputs() map[string]*TensorInfo

func (*SignatureDef) GetMethodName

func (x *SignatureDef) GetMethodName() string

func (*SignatureDef) GetOutputs

func (x *SignatureDef) GetOutputs() map[string]*TensorInfo

func (*SignatureDef) ProtoMessage

func (*SignatureDef) ProtoMessage()

func (*SignatureDef) ProtoReflect

func (x *SignatureDef) ProtoReflect() protoreflect.Message

func (*SignatureDef) Reset

func (x *SignatureDef) Reset()

func (*SignatureDef) String

func (x *SignatureDef) String() string

type StructuredValue

type StructuredValue struct {

	// The kind of value.
	//
	// Types that are assignable to Kind:
	//	*StructuredValue_NoneValue
	//	*StructuredValue_Float64Value
	//	*StructuredValue_Int64Value
	//	*StructuredValue_StringValue
	//	*StructuredValue_BoolValue
	//	*StructuredValue_TensorShapeValue
	//	*StructuredValue_TensorDtypeValue
	//	*StructuredValue_TensorSpecValue
	//	*StructuredValue_TypeSpecValue
	//	*StructuredValue_BoundedTensorSpecValue
	//	*StructuredValue_ListValue
	//	*StructuredValue_TupleValue
	//	*StructuredValue_DictValue
	//	*StructuredValue_NamedTupleValue
	Kind isStructuredValue_Kind `protobuf_oneof:"kind"`
	// contains filtered or unexported fields
}

`StructuredValue` represents a dynamically typed value representing various data structures that are inspired by Python data structures typically used in TensorFlow functions as inputs and outputs.

For example when saving a Layer there may be a `training` argument. If the user passes a boolean True/False, that switches between two concrete TensorFlow functions. In order to switch between them in the same way after loading the SavedModel, we need to represent "True" and "False".

A more advanced example might be a function which takes a list of dictionaries mapping from strings to Tensors. In order to map from user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]` after load to the right saved TensorFlow function, we need to represent the nested structure and the strings, recording that we have a trace for anything matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([], tf.float64)}]` as an example.

Likewise functions may return nested structures of Tensors, for example returning a dictionary mapping from strings to Tensors. In order for the loaded function to return the same structure we need to serialize it.

This is an ergonomic aid for working with loaded SavedModels, not a promise to serialize all possible function signatures. For example we do not expect to pickle generic Python objects, and ideally we'd stay language-agnostic.

func (*StructuredValue) Descriptor deprecated

func (*StructuredValue) Descriptor() ([]byte, []int)

Deprecated: Use StructuredValue.ProtoReflect.Descriptor instead.

func (*StructuredValue) GetBoolValue

func (x *StructuredValue) GetBoolValue() bool

func (*StructuredValue) GetBoundedTensorSpecValue

func (x *StructuredValue) GetBoundedTensorSpecValue() *BoundedTensorSpecProto

func (*StructuredValue) GetDictValue

func (x *StructuredValue) GetDictValue() *DictValue

func (*StructuredValue) GetFloat64Value

func (x *StructuredValue) GetFloat64Value() float64

func (*StructuredValue) GetInt64Value

func (x *StructuredValue) GetInt64Value() int64

func (*StructuredValue) GetKind

func (m *StructuredValue) GetKind() isStructuredValue_Kind

func (*StructuredValue) GetListValue

func (x *StructuredValue) GetListValue() *ListValue

func (*StructuredValue) GetNamedTupleValue

func (x *StructuredValue) GetNamedTupleValue() *NamedTupleValue

func (*StructuredValue) GetNoneValue

func (x *StructuredValue) GetNoneValue() *NoneValue

func (*StructuredValue) GetStringValue

func (x *StructuredValue) GetStringValue() string

func (*StructuredValue) GetTensorDtypeValue

func (x *StructuredValue) GetTensorDtypeValue() framework.DataType

func (*StructuredValue) GetTensorShapeValue

func (x *StructuredValue) GetTensorShapeValue() *framework.TensorShapeProto

func (*StructuredValue) GetTensorSpecValue

func (x *StructuredValue) GetTensorSpecValue() *TensorSpecProto

func (*StructuredValue) GetTupleValue

func (x *StructuredValue) GetTupleValue() *TupleValue

func (*StructuredValue) GetTypeSpecValue

func (x *StructuredValue) GetTypeSpecValue() *TypeSpecProto

func (*StructuredValue) ProtoMessage

func (*StructuredValue) ProtoMessage()

func (*StructuredValue) ProtoReflect

func (x *StructuredValue) ProtoReflect() protoreflect.Message

func (*StructuredValue) Reset

func (x *StructuredValue) Reset()

func (*StructuredValue) String

func (x *StructuredValue) String() string

type StructuredValue_BoolValue

type StructuredValue_BoolValue struct {
	// Represents a boolean value.
	BoolValue bool `protobuf:"varint,14,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

type StructuredValue_BoundedTensorSpecValue

type StructuredValue_BoundedTensorSpecValue struct {
	// Represents a value for tf.BoundedTensorSpec.
	BoundedTensorSpecValue *BoundedTensorSpecProto `protobuf:"bytes,35,opt,name=bounded_tensor_spec_value,json=boundedTensorSpecValue,proto3,oneof"`
}

type StructuredValue_DictValue

type StructuredValue_DictValue struct {
	// Represents a dict `Value`.
	DictValue *DictValue `protobuf:"bytes,53,opt,name=dict_value,json=dictValue,proto3,oneof"`
}

type StructuredValue_Float64Value

type StructuredValue_Float64Value struct {
	// Represents a double-precision floating-point value (a Python `float`).
	Float64Value float64 `protobuf:"fixed64,11,opt,name=float64_value,json=float64Value,proto3,oneof"`
}

type StructuredValue_Int64Value

type StructuredValue_Int64Value struct {
	// Represents a signed integer value, limited to 64 bits.
	// Larger values from Python's arbitrary-precision integers are unsupported.
	Int64Value int64 `protobuf:"zigzag64,12,opt,name=int64_value,json=int64Value,proto3,oneof"`
}

type StructuredValue_ListValue

type StructuredValue_ListValue struct {
	// Represents a list of `Value`.
	ListValue *ListValue `protobuf:"bytes,51,opt,name=list_value,json=listValue,proto3,oneof"`
}

type StructuredValue_NamedTupleValue

type StructuredValue_NamedTupleValue struct {
	// Represents Python's namedtuple.
	NamedTupleValue *NamedTupleValue `protobuf:"bytes,54,opt,name=named_tuple_value,json=namedTupleValue,proto3,oneof"`
}

type StructuredValue_NoneValue

type StructuredValue_NoneValue struct {
	// Represents None.
	NoneValue *NoneValue `protobuf:"bytes,1,opt,name=none_value,json=noneValue,proto3,oneof"`
}

type StructuredValue_StringValue

type StructuredValue_StringValue struct {
	// Represents a string of Unicode characters stored in a Python `str`.
	// In Python 3, this is exactly what type `str` is.
	// In Python 2, this is the UTF-8 encoding of the characters.
	// For strings with ASCII characters only (as often used in TensorFlow code)
	// there is effectively no difference between the language versions.
	// The obsolescent `unicode` type of Python 2 is not supported here.
	StringValue string `protobuf:"bytes,13,opt,name=string_value,json=stringValue,proto3,oneof"`
}

type StructuredValue_TensorDtypeValue

type StructuredValue_TensorDtypeValue struct {
	// Represents an enum value for dtype.
	TensorDtypeValue framework.DataType `protobuf:"varint,32,opt,name=tensor_dtype_value,json=tensorDtypeValue,proto3,enum=tensorflow.DataType,oneof"`
}

type StructuredValue_TensorShapeValue

type StructuredValue_TensorShapeValue struct {
	// Represents a TensorShape.
	TensorShapeValue *framework.TensorShapeProto `protobuf:"bytes,31,opt,name=tensor_shape_value,json=tensorShapeValue,proto3,oneof"`
}

type StructuredValue_TensorSpecValue

type StructuredValue_TensorSpecValue struct {
	// Represents a value for tf.TensorSpec.
	TensorSpecValue *TensorSpecProto `protobuf:"bytes,33,opt,name=tensor_spec_value,json=tensorSpecValue,proto3,oneof"`
}

type StructuredValue_TupleValue

type StructuredValue_TupleValue struct {
	// Represents a tuple of `Value`.
	TupleValue *TupleValue `protobuf:"bytes,52,opt,name=tuple_value,json=tupleValue,proto3,oneof"`
}

type StructuredValue_TypeSpecValue

type StructuredValue_TypeSpecValue struct {
	// Represents a value for tf.TypeSpec.
	TypeSpecValue *TypeSpecProto `protobuf:"bytes,34,opt,name=type_spec_value,json=typeSpecValue,proto3,oneof"`
}

type TensorConnection

type TensorConnection struct {

	// A tensor name. The value of this tensor will be substituted for
	// the tensor named in `to_tensor`.
	FromTensor string `protobuf:"bytes,1,opt,name=from_tensor,json=fromTensor,proto3" json:"from_tensor,omitempty"`
	// A tensor name. The value of this tensor will be bound to the
	// value of the tensor named in `from_tensor`.
	ToTensor string `protobuf:"bytes,2,opt,name=to_tensor,json=toTensor,proto3" json:"to_tensor,omitempty"`
	// contains filtered or unexported fields
}

Defines a connection between two tensors in a `GraphDef`.

func (*TensorConnection) Descriptor deprecated

func (*TensorConnection) Descriptor() ([]byte, []int)

Deprecated: Use TensorConnection.ProtoReflect.Descriptor instead.

func (*TensorConnection) GetFromTensor

func (x *TensorConnection) GetFromTensor() string

func (*TensorConnection) GetToTensor

func (x *TensorConnection) GetToTensor() string

func (*TensorConnection) ProtoMessage

func (*TensorConnection) ProtoMessage()

func (*TensorConnection) ProtoReflect

func (x *TensorConnection) ProtoReflect() protoreflect.Message

func (*TensorConnection) Reset

func (x *TensorConnection) Reset()

func (*TensorConnection) String

func (x *TensorConnection) String() string

type TensorInfo

type TensorInfo struct {

	// Types that are assignable to Encoding:
	//	*TensorInfo_Name
	//	*TensorInfo_CooSparse_
	//	*TensorInfo_CompositeTensor_
	Encoding isTensorInfo_Encoding `protobuf_oneof:"encoding"`
	Dtype    framework.DataType    `protobuf:"varint,2,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// The static shape should be recorded here, to the extent that it can
	// be known in advance.  In the case of a SparseTensor, this field describes
	// the logical shape of the represented tensor (aka dense_shape).
	TensorShape *framework.TensorShapeProto `protobuf:"bytes,3,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"`
	// contains filtered or unexported fields
}

Information about a Tensor necessary for feeding or retrieval.

func (*TensorInfo) Descriptor deprecated

func (*TensorInfo) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo.ProtoReflect.Descriptor instead.

func (*TensorInfo) GetCompositeTensor

func (x *TensorInfo) GetCompositeTensor() *TensorInfo_CompositeTensor

func (*TensorInfo) GetCooSparse

func (x *TensorInfo) GetCooSparse() *TensorInfo_CooSparse

func (*TensorInfo) GetDtype

func (x *TensorInfo) GetDtype() framework.DataType

func (*TensorInfo) GetEncoding

func (m *TensorInfo) GetEncoding() isTensorInfo_Encoding

func (*TensorInfo) GetName

func (x *TensorInfo) GetName() string

func (*TensorInfo) GetTensorShape

func (x *TensorInfo) GetTensorShape() *framework.TensorShapeProto

func (*TensorInfo) ProtoMessage

func (*TensorInfo) ProtoMessage()

func (*TensorInfo) ProtoReflect

func (x *TensorInfo) ProtoReflect() protoreflect.Message

func (*TensorInfo) Reset

func (x *TensorInfo) Reset()

func (*TensorInfo) String

func (x *TensorInfo) String() string

type TensorInfo_CompositeTensor

type TensorInfo_CompositeTensor struct {

	// The serialized TypeSpec for the composite tensor.
	TypeSpec *TypeSpecProto `protobuf:"bytes,1,opt,name=type_spec,json=typeSpec,proto3" json:"type_spec,omitempty"`
	// A TensorInfo for each flattened component tensor.
	Components []*TensorInfo `protobuf:"bytes,2,rep,name=components,proto3" json:"components,omitempty"`
	// contains filtered or unexported fields
}

Generic encoding for composite tensors.

func (*TensorInfo_CompositeTensor) Descriptor deprecated

func (*TensorInfo_CompositeTensor) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo_CompositeTensor.ProtoReflect.Descriptor instead.

func (*TensorInfo_CompositeTensor) GetComponents

func (x *TensorInfo_CompositeTensor) GetComponents() []*TensorInfo

func (*TensorInfo_CompositeTensor) GetTypeSpec

func (x *TensorInfo_CompositeTensor) GetTypeSpec() *TypeSpecProto

func (*TensorInfo_CompositeTensor) ProtoMessage

func (*TensorInfo_CompositeTensor) ProtoMessage()

func (*TensorInfo_CompositeTensor) ProtoReflect

func (*TensorInfo_CompositeTensor) Reset

func (x *TensorInfo_CompositeTensor) Reset()

func (*TensorInfo_CompositeTensor) String

func (x *TensorInfo_CompositeTensor) String() string

type TensorInfo_CompositeTensor_

type TensorInfo_CompositeTensor_ struct {
	// Generic encoding for CompositeTensors.
	CompositeTensor *TensorInfo_CompositeTensor `protobuf:"bytes,5,opt,name=composite_tensor,json=compositeTensor,proto3,oneof"`
}

type TensorInfo_CooSparse

type TensorInfo_CooSparse struct {

	// The shape of the values Tensor is [?].  Its dtype must be the dtype of
	// the SparseTensor as a whole, given in the enclosing TensorInfo.
	ValuesTensorName string `protobuf:"bytes,1,opt,name=values_tensor_name,json=valuesTensorName,proto3" json:"values_tensor_name,omitempty"`
	// The indices Tensor must have dtype int64 and shape [?, ?].
	IndicesTensorName string `protobuf:"bytes,2,opt,name=indices_tensor_name,json=indicesTensorName,proto3" json:"indices_tensor_name,omitempty"`
	// The dynamic logical shape represented by the SparseTensor is recorded in
	// the Tensor referenced here.  It must have dtype int64 and shape [?].
	DenseShapeTensorName string `protobuf:"bytes,3,opt,name=dense_shape_tensor_name,json=denseShapeTensorName,proto3" json:"dense_shape_tensor_name,omitempty"`
	// contains filtered or unexported fields
}

For sparse tensors, the COO encoding stores a triple of values, indices, and shape.

func (*TensorInfo_CooSparse) Descriptor deprecated

func (*TensorInfo_CooSparse) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo_CooSparse.ProtoReflect.Descriptor instead.

func (*TensorInfo_CooSparse) GetDenseShapeTensorName

func (x *TensorInfo_CooSparse) GetDenseShapeTensorName() string

func (*TensorInfo_CooSparse) GetIndicesTensorName

func (x *TensorInfo_CooSparse) GetIndicesTensorName() string

func (*TensorInfo_CooSparse) GetValuesTensorName

func (x *TensorInfo_CooSparse) GetValuesTensorName() string

func (*TensorInfo_CooSparse) ProtoMessage

func (*TensorInfo_CooSparse) ProtoMessage()

func (*TensorInfo_CooSparse) ProtoReflect

func (x *TensorInfo_CooSparse) ProtoReflect() protoreflect.Message

func (*TensorInfo_CooSparse) Reset

func (x *TensorInfo_CooSparse) Reset()

func (*TensorInfo_CooSparse) String

func (x *TensorInfo_CooSparse) String() string

type TensorInfo_CooSparse_

type TensorInfo_CooSparse_ struct {
	// There are many possible encodings of sparse matrices
	// (https://en.wikipedia.org/wiki/Sparse_matrix).  Currently, TensorFlow
	// uses only the COO encoding.  This is supported and documented in the
	// SparseTensor Python class.
	CooSparse *TensorInfo_CooSparse `protobuf:"bytes,4,opt,name=coo_sparse,json=cooSparse,proto3,oneof"`
}

type TensorInfo_Name

type TensorInfo_Name struct {
	// For dense `Tensor`s, the name of the tensor in the graph.
	Name string `protobuf:"bytes,1,opt,name=name,proto3,oneof"`
}

type TensorSpecProto

type TensorSpecProto struct {
	Name  string                      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype framework.DataType          `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// contains filtered or unexported fields
}

A protobuf to represent tf.TensorSpec.

func (*TensorSpecProto) Descriptor deprecated

func (*TensorSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use TensorSpecProto.ProtoReflect.Descriptor instead.

func (*TensorSpecProto) GetDtype

func (x *TensorSpecProto) GetDtype() framework.DataType

func (*TensorSpecProto) GetName

func (x *TensorSpecProto) GetName() string

func (*TensorSpecProto) GetShape

func (*TensorSpecProto) ProtoMessage

func (*TensorSpecProto) ProtoMessage()

func (*TensorSpecProto) ProtoReflect

func (x *TensorSpecProto) ProtoReflect() protoreflect.Message

func (*TensorSpecProto) Reset

func (x *TensorSpecProto) Reset()

func (*TensorSpecProto) String

func (x *TensorSpecProto) String() string

type ThreadPoolOptionProto

type ThreadPoolOptionProto struct {

	// The number of threads in the pool.
	//
	// 0 means the system picks a value based on where this option proto is used
	// (see the declaration of the specific field for more info).
	NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"`
	// The global name of the threadpool.
	//
	// If empty, then the threadpool is made and used according to the scope it's
	// in - e.g., for a session threadpool, it is used by that session only.
	//
	// If non-empty, then:
	// - a global threadpool associated with this name is looked
	//   up or created. This allows, for example, sharing one threadpool across
	//   many sessions (e.g., like the default behavior, if
	//   inter_op_parallelism_threads is not configured), but still partitioning
	//   into a large and small pool.
	// - if the threadpool for this global_name already exists, then it is an
	//   error if the existing pool was created using a different num_threads
	//   value as is specified on this call.
	// - threadpools created this way are never garbage collected.
	GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"`
	// contains filtered or unexported fields
}

func (*ThreadPoolOptionProto) Descriptor deprecated

func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int)

Deprecated: Use ThreadPoolOptionProto.ProtoReflect.Descriptor instead.

func (*ThreadPoolOptionProto) GetGlobalName

func (x *ThreadPoolOptionProto) GetGlobalName() string

func (*ThreadPoolOptionProto) GetNumThreads

func (x *ThreadPoolOptionProto) GetNumThreads() int32

func (*ThreadPoolOptionProto) ProtoMessage

func (*ThreadPoolOptionProto) ProtoMessage()

func (*ThreadPoolOptionProto) ProtoReflect

func (x *ThreadPoolOptionProto) ProtoReflect() protoreflect.Message

func (*ThreadPoolOptionProto) Reset

func (x *ThreadPoolOptionProto) Reset()

func (*ThreadPoolOptionProto) String

func (x *ThreadPoolOptionProto) String() string

type TrackableObjectGraph

type TrackableObjectGraph struct {
	Nodes []*TrackableObjectGraph_TrackableObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph) Descriptor deprecated

func (*TrackableObjectGraph) Descriptor() ([]byte, []int)

Deprecated: Use TrackableObjectGraph.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph) GetNodes

func (*TrackableObjectGraph) ProtoMessage

func (*TrackableObjectGraph) ProtoMessage()

func (*TrackableObjectGraph) ProtoReflect

func (x *TrackableObjectGraph) ProtoReflect() protoreflect.Message

func (*TrackableObjectGraph) Reset

func (x *TrackableObjectGraph) Reset()

func (*TrackableObjectGraph) String

func (x *TrackableObjectGraph) String() string

type TrackableObjectGraph_TrackableObject

type TrackableObjectGraph_TrackableObject struct {

	// Objects which this object depends on.
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Serialized data specific to this object.
	Attributes []*TrackableObjectGraph_TrackableObject_SerializedTensor `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// Slot variables owned by this object.
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject) Descriptor deprecated

func (*TrackableObjectGraph_TrackableObject) Descriptor() ([]byte, []int)

Deprecated: Use TrackableObjectGraph_TrackableObject.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject) GetAttributes

func (*TrackableObjectGraph_TrackableObject) GetChildren

func (*TrackableObjectGraph_TrackableObject) GetSlotVariables

func (*TrackableObjectGraph_TrackableObject) ProtoMessage

func (*TrackableObjectGraph_TrackableObject) ProtoMessage()

func (*TrackableObjectGraph_TrackableObject) ProtoReflect

func (*TrackableObjectGraph_TrackableObject) Reset

func (*TrackableObjectGraph_TrackableObject) String

type TrackableObjectGraph_TrackableObject_ObjectReference

type TrackableObjectGraph_TrackableObject_ObjectReference struct {

	// An index into `TrackableObjectGraph.nodes`, indicating the object
	// being referenced.
	NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
	// A user-provided name for the edge.
	LocalName string `protobuf:"bytes,2,opt,name=local_name,json=localName,proto3" json:"local_name,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_ObjectReference.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetLocalName

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetNodeId

func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Reset

func (*TrackableObjectGraph_TrackableObject_ObjectReference) String

type TrackableObjectGraph_TrackableObject_SerializedTensor

type TrackableObjectGraph_TrackableObject_SerializedTensor struct {

	// A name for the Tensor. Simple variables have only one
	// `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may
	// be restored on object creation as an optimization.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The full name of the variable/tensor, if applicable. Used to allow
	// name-based loading of checkpoints which were saved using an
	// object-based API. Should match the checkpoint key which would have been
	// assigned by tf.train.Saver.
	FullName string `protobuf:"bytes,2,opt,name=full_name,json=fullName,proto3" json:"full_name,omitempty"`
	// The generated name of the Tensor in the checkpoint.
	CheckpointKey string `protobuf:"bytes,3,opt,name=checkpoint_key,json=checkpointKey,proto3" json:"checkpoint_key,omitempty"`
	// Whether checkpoints should be considered as matching even without this
	// value restored. Used for non-critical values which don't affect the
	// TensorFlow graph, such as layer configurations.
	OptionalRestore bool `protobuf:"varint,4,opt,name=optional_restore,json=optionalRestore,proto3" json:"optional_restore,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_SerializedTensor.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetCheckpointKey

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetFullName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetOptionalRestore

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Reset

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) String

type TrackableObjectGraph_TrackableObject_SlotVariableReference

type TrackableObjectGraph_TrackableObject_SlotVariableReference struct {

	// An index into `TrackableObjectGraph.nodes`, indicating the
	// variable object this slot was created for.
	OriginalVariableNodeId int32 `` /* 132-byte string literal not displayed */
	// The name of the slot (e.g. "m"/"v").
	SlotName string `protobuf:"bytes,2,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"`
	// An index into `TrackableObjectGraph.nodes`, indicating the
	// `Object` with the value of the slot variable.
	SlotVariableNodeId int32 `protobuf:"varint,3,opt,name=slot_variable_node_id,json=slotVariableNodeId,proto3" json:"slot_variable_node_id,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_SlotVariableReference.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetOriginalVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotName

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Reset

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) String

type TupleValue

type TupleValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents a Python tuple.

func (*TupleValue) Descriptor deprecated

func (*TupleValue) Descriptor() ([]byte, []int)

Deprecated: Use TupleValue.ProtoReflect.Descriptor instead.

func (*TupleValue) GetValues

func (x *TupleValue) GetValues() []*StructuredValue

func (*TupleValue) ProtoMessage

func (*TupleValue) ProtoMessage()

func (*TupleValue) ProtoReflect

func (x *TupleValue) ProtoReflect() protoreflect.Message

func (*TupleValue) Reset

func (x *TupleValue) Reset()

func (*TupleValue) String

func (x *TupleValue) String() string

type TypeSpecProto

type TypeSpecProto struct {
	TypeSpecClass TypeSpecProto_TypeSpecClass `` /* 147-byte string literal not displayed */
	// The value returned by TypeSpec._serialize().
	TypeState *StructuredValue `protobuf:"bytes,2,opt,name=type_state,json=typeState,proto3" json:"type_state,omitempty"`
	// The name of the TypeSpec class.
	//  * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is
	//    the one registered under this name. For types registered outside
	//    core TensorFlow by an add-on library, that library must be loaded
	//    before this value can be deserialized by StructureCoder.
	//  * If type_spec_class specifies a particular TypeSpec class, this field is
	//    redundant with the type_spec_class enum, and is only used for error
	//    reporting in older binaries that do not know the type_spec_class enum.
	TypeSpecClassName string `protobuf:"bytes,3,opt,name=type_spec_class_name,json=typeSpecClassName,proto3" json:"type_spec_class_name,omitempty"`
	// contains filtered or unexported fields
}

Represents a tf.TypeSpec.

func (*TypeSpecProto) Descriptor deprecated

func (*TypeSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use TypeSpecProto.ProtoReflect.Descriptor instead.

func (*TypeSpecProto) GetTypeSpecClass

func (x *TypeSpecProto) GetTypeSpecClass() TypeSpecProto_TypeSpecClass

func (*TypeSpecProto) GetTypeSpecClassName

func (x *TypeSpecProto) GetTypeSpecClassName() string

func (*TypeSpecProto) GetTypeState

func (x *TypeSpecProto) GetTypeState() *StructuredValue

func (*TypeSpecProto) ProtoMessage

func (*TypeSpecProto) ProtoMessage()

func (*TypeSpecProto) ProtoReflect

func (x *TypeSpecProto) ProtoReflect() protoreflect.Message

func (*TypeSpecProto) Reset

func (x *TypeSpecProto) Reset()

func (*TypeSpecProto) String

func (x *TypeSpecProto) String() string

type TypeSpecProto_TypeSpecClass

type TypeSpecProto_TypeSpecClass int32
const (
	TypeSpecProto_UNKNOWN              TypeSpecProto_TypeSpecClass = 0
	TypeSpecProto_SPARSE_TENSOR_SPEC   TypeSpecProto_TypeSpecClass = 1  // tf.SparseTensorSpec
	TypeSpecProto_INDEXED_SLICES_SPEC  TypeSpecProto_TypeSpecClass = 2  // tf.IndexedSlicesSpec
	TypeSpecProto_RAGGED_TENSOR_SPEC   TypeSpecProto_TypeSpecClass = 3  // tf.RaggedTensorSpec
	TypeSpecProto_TENSOR_ARRAY_SPEC    TypeSpecProto_TypeSpecClass = 4  // tf.TensorArraySpec
	TypeSpecProto_DATA_DATASET_SPEC    TypeSpecProto_TypeSpecClass = 5  // tf.data.DatasetSpec
	TypeSpecProto_DATA_ITERATOR_SPEC   TypeSpecProto_TypeSpecClass = 6  // IteratorSpec from data/ops/iterator_ops.py
	TypeSpecProto_OPTIONAL_SPEC        TypeSpecProto_TypeSpecClass = 7  // tf.OptionalSpec
	TypeSpecProto_PER_REPLICA_SPEC     TypeSpecProto_TypeSpecClass = 8  // PerReplicaSpec from distribute/values.py
	TypeSpecProto_VARIABLE_SPEC        TypeSpecProto_TypeSpecClass = 9  // tf.VariableSpec
	TypeSpecProto_ROW_PARTITION_SPEC   TypeSpecProto_TypeSpecClass = 10 // RowPartitionSpec from ragged/row_partition.py
	TypeSpecProto_REGISTERED_TYPE_SPEC TypeSpecProto_TypeSpecClass = 12 // The type registered as type_spec_class_name.
	TypeSpecProto_EXTENSION_TYPE_SPEC  TypeSpecProto_TypeSpecClass = 13 // Subclasses of tf.ExtensionType
)

func (TypeSpecProto_TypeSpecClass) Descriptor

func (TypeSpecProto_TypeSpecClass) Enum

func (TypeSpecProto_TypeSpecClass) EnumDescriptor deprecated

func (TypeSpecProto_TypeSpecClass) EnumDescriptor() ([]byte, []int)

Deprecated: Use TypeSpecProto_TypeSpecClass.Descriptor instead.

func (TypeSpecProto_TypeSpecClass) Number

func (TypeSpecProto_TypeSpecClass) String

func (TypeSpecProto_TypeSpecClass) Type

type VerifierConfig

type VerifierConfig struct {

	// Deadline for completion of all verification, i.e., all the Toggle ON
	// verifiers must complete execution within this time.
	VerificationTimeoutInMs int64 `` /* 135-byte string literal not displayed */
	// Perform structural validation on a tensorflow graph. Default is OFF.
	StructureVerifier VerifierConfig_Toggle `` /* 151-byte string literal not displayed */
	// contains filtered or unexported fields
}

The config for graph verifiers.

func (*VerifierConfig) Descriptor deprecated

func (*VerifierConfig) Descriptor() ([]byte, []int)

Deprecated: Use VerifierConfig.ProtoReflect.Descriptor instead.

func (*VerifierConfig) GetStructureVerifier

func (x *VerifierConfig) GetStructureVerifier() VerifierConfig_Toggle

func (*VerifierConfig) GetVerificationTimeoutInMs

func (x *VerifierConfig) GetVerificationTimeoutInMs() int64

func (*VerifierConfig) ProtoMessage

func (*VerifierConfig) ProtoMessage()

func (*VerifierConfig) ProtoReflect

func (x *VerifierConfig) ProtoReflect() protoreflect.Message

func (*VerifierConfig) Reset

func (x *VerifierConfig) Reset()

func (*VerifierConfig) String

func (x *VerifierConfig) String() string

type VerifierConfig_Toggle

type VerifierConfig_Toggle int32
const (
	VerifierConfig_DEFAULT VerifierConfig_Toggle = 0
	VerifierConfig_ON      VerifierConfig_Toggle = 1
	VerifierConfig_OFF     VerifierConfig_Toggle = 2
)

func (VerifierConfig_Toggle) Descriptor

func (VerifierConfig_Toggle) Enum

func (VerifierConfig_Toggle) EnumDescriptor deprecated

func (VerifierConfig_Toggle) EnumDescriptor() ([]byte, []int)

Deprecated: Use VerifierConfig_Toggle.Descriptor instead.

func (VerifierConfig_Toggle) Number

func (VerifierConfig_Toggle) String

func (x VerifierConfig_Toggle) String() string

func (VerifierConfig_Toggle) Type

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL