protobuf

package
v0.0.0-...-018471a Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 16, 2020 License: MIT Imports: 8 Imported by: 3

Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	ErrInvalidLengthCluster        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowCluster          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupCluster = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthConfig        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowConfig          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupConfig = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthDebug        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowDebug          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupDebug = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthMetaGraph        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowMetaGraph          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupMetaGraph = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthNamedTensor        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowNamedTensor          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupNamedTensor = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthRewriterConfig        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowRewriterConfig          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupRewriterConfig = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthSavedModel        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowSavedModel          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupSavedModel = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthSavedObjectGraph        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowSavedObjectGraph          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupSavedObjectGraph = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthSaver        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowSaver          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupSaver = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthStruct        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowStruct          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthTrackableObjectGraph        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowTrackableObjectGraph          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupTrackableObjectGraph = fmt.Errorf("proto: unexpected end of group")
)
View Source
var (
	ErrInvalidLengthVerifierConfig        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowVerifierConfig          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupVerifierConfig = fmt.Errorf("proto: unexpected end of group")
)
View Source
var Code_name = map[int32]string{
	0:  "OK",
	1:  "CANCELLED",
	2:  "UNKNOWN",
	3:  "INVALID_ARGUMENT",
	4:  "DEADLINE_EXCEEDED",
	5:  "NOT_FOUND",
	6:  "ALREADY_EXISTS",
	7:  "PERMISSION_DENIED",
	16: "UNAUTHENTICATED",
	8:  "RESOURCE_EXHAUSTED",
	9:  "FAILED_PRECONDITION",
	10: "ABORTED",
	11: "OUT_OF_RANGE",
	12: "UNIMPLEMENTED",
	13: "INTERNAL",
	14: "UNAVAILABLE",
	15: "DATA_LOSS",
	20: "DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_",
}
View Source
var Code_value = map[string]int32{
	"OK":                  0,
	"CANCELLED":           1,
	"UNKNOWN":             2,
	"INVALID_ARGUMENT":    3,
	"DEADLINE_EXCEEDED":   4,
	"NOT_FOUND":           5,
	"ALREADY_EXISTS":      6,
	"PERMISSION_DENIED":   7,
	"UNAUTHENTICATED":     16,
	"RESOURCE_EXHAUSTED":  8,
	"FAILED_PRECONDITION": 9,
	"ABORTED":             10,
	"OUT_OF_RANGE":        11,
	"UNIMPLEMENTED":       12,
	"INTERNAL":            13,
	"UNAVAILABLE":         14,
	"DATA_LOSS":           15,
	"DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_": 20,
}
View Source
var OptimizerOptions_GlobalJitLevel_name = map[int32]string{
	0:  "DEFAULT",
	-1: "OFF",
	1:  "ON_1",
	2:  "ON_2",
}
View Source
var OptimizerOptions_GlobalJitLevel_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     -1,
	"ON_1":    1,
	"ON_2":    2,
}
View Source
var OptimizerOptions_Level_name = map[int32]string{
	0:  "L1",
	-1: "L0",
}
View Source
var OptimizerOptions_Level_value = map[string]int32{
	"L1": 0,
	"L0": -1,
}
View Source
var RewriterConfig_MemOptType_name = map[int32]string{
	0: "DEFAULT_MEM_OPT",
	1: "NO_MEM_OPT",
	2: "MANUAL",
	4: "SWAPPING_HEURISTICS",
	5: "RECOMPUTATION_HEURISTICS",
	6: "SCHEDULING_HEURISTICS",
	3: "HEURISTICS",
}
View Source
var RewriterConfig_MemOptType_value = map[string]int32{
	"DEFAULT_MEM_OPT":          0,
	"NO_MEM_OPT":               1,
	"MANUAL":                   2,
	"SWAPPING_HEURISTICS":      4,
	"RECOMPUTATION_HEURISTICS": 5,
	"SCHEDULING_HEURISTICS":    6,
	"HEURISTICS":               3,
}
View Source
var RewriterConfig_NumIterationsType_name = map[int32]string{
	0: "DEFAULT_NUM_ITERS",
	1: "ONE",
	2: "TWO",
}
View Source
var RewriterConfig_NumIterationsType_value = map[string]int32{
	"DEFAULT_NUM_ITERS": 0,
	"ONE":               1,
	"TWO":               2,
}
View Source
var RewriterConfig_Toggle_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
	3: "AGGRESSIVE",
}
View Source
var RewriterConfig_Toggle_value = map[string]int32{
	"DEFAULT":    0,
	"ON":         1,
	"OFF":        2,
	"AGGRESSIVE": 3,
}
View Source
var RunOptions_TraceLevel_name = map[int32]string{
	0: "NO_TRACE",
	1: "SOFTWARE_TRACE",
	2: "HARDWARE_TRACE",
	3: "FULL_TRACE",
}
View Source
var RunOptions_TraceLevel_value = map[string]int32{
	"NO_TRACE":       0,
	"SOFTWARE_TRACE": 1,
	"HARDWARE_TRACE": 2,
	"FULL_TRACE":     3,
}
View Source
var SaverDef_CheckpointFormatVersion_name = map[int32]string{
	0: "LEGACY",
	1: "V1",
	2: "V2",
}
View Source
var SaverDef_CheckpointFormatVersion_value = map[string]int32{
	"LEGACY": 0,
	"V1":     1,
	"V2":     2,
}
View Source
var TypeSpecProto_TypeSpecClass_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "SPARSE_TENSOR_SPEC",
	2:  "INDEXED_SLICES_SPEC",
	3:  "RAGGED_TENSOR_SPEC",
	4:  "TENSOR_ARRAY_SPEC",
	5:  "DATA_DATASET_SPEC",
	6:  "DATA_ITERATOR_SPEC",
	7:  "OPTIONAL_SPEC",
	8:  "PER_REPLICA_SPEC",
	9:  "VARIABLE_SPEC",
	10: "ROW_PARTITION_SPEC",
}
View Source
var TypeSpecProto_TypeSpecClass_value = map[string]int32{
	"UNKNOWN":             0,
	"SPARSE_TENSOR_SPEC":  1,
	"INDEXED_SLICES_SPEC": 2,
	"RAGGED_TENSOR_SPEC":  3,
	"TENSOR_ARRAY_SPEC":   4,
	"DATA_DATASET_SPEC":   5,
	"DATA_ITERATOR_SPEC":  6,
	"OPTIONAL_SPEC":       7,
	"PER_REPLICA_SPEC":    8,
	"VARIABLE_SPEC":       9,
	"ROW_PARTITION_SPEC":  10,
}
View Source
var VerifierConfig_Toggle_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
}
View Source
var VerifierConfig_Toggle_value = map[string]int32{
	"DEFAULT": 0,
	"ON":      1,
	"OFF":     2,
}

Functions

This section is empty.

Types

type AssetFileDef

type AssetFileDef struct {
	// The tensor to bind the asset filename to.
	TensorInfo *TensorInfo `protobuf:"bytes,1,opt,name=tensor_info,json=tensorInfo,proto3" json:"tensor_info,omitempty"`
	// The filename within an assets directory. Note: does not include the path
	// prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename
	// would be "vocab.txt".
	Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"`
}

An asset file def for a single file or a set of sharded files with the same name.

func (*AssetFileDef) Descriptor

func (*AssetFileDef) Descriptor() ([]byte, []int)

func (*AssetFileDef) GetFilename

func (m *AssetFileDef) GetFilename() string

func (*AssetFileDef) GetTensorInfo

func (m *AssetFileDef) GetTensorInfo() *TensorInfo

func (*AssetFileDef) Marshal

func (m *AssetFileDef) Marshal() (dAtA []byte, err error)

func (*AssetFileDef) MarshalTo

func (m *AssetFileDef) MarshalTo(dAtA []byte) (int, error)

func (*AssetFileDef) MarshalToSizedBuffer

func (m *AssetFileDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*AssetFileDef) ProtoMessage

func (*AssetFileDef) ProtoMessage()

func (*AssetFileDef) Reset

func (m *AssetFileDef) Reset()

func (*AssetFileDef) Size

func (m *AssetFileDef) Size() (n int)

func (*AssetFileDef) String

func (m *AssetFileDef) String() string

func (*AssetFileDef) Unmarshal

func (m *AssetFileDef) Unmarshal(dAtA []byte) error

func (*AssetFileDef) XXX_DiscardUnknown

func (m *AssetFileDef) XXX_DiscardUnknown()

func (*AssetFileDef) XXX_Marshal

func (m *AssetFileDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AssetFileDef) XXX_Merge

func (m *AssetFileDef) XXX_Merge(src proto.Message)

func (*AssetFileDef) XXX_Size

func (m *AssetFileDef) XXX_Size() int

func (*AssetFileDef) XXX_Unmarshal

func (m *AssetFileDef) XXX_Unmarshal(b []byte) error

type AutoParallelOptions

type AutoParallelOptions struct {
	Enable      bool  `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"`
	NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas,proto3" json:"num_replicas,omitempty"`
}

func (*AutoParallelOptions) Descriptor

func (*AutoParallelOptions) Descriptor() ([]byte, []int)

func (*AutoParallelOptions) GetEnable

func (m *AutoParallelOptions) GetEnable() bool

func (*AutoParallelOptions) GetNumReplicas

func (m *AutoParallelOptions) GetNumReplicas() int32

func (*AutoParallelOptions) Marshal

func (m *AutoParallelOptions) Marshal() (dAtA []byte, err error)

func (*AutoParallelOptions) MarshalTo

func (m *AutoParallelOptions) MarshalTo(dAtA []byte) (int, error)

func (*AutoParallelOptions) MarshalToSizedBuffer

func (m *AutoParallelOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*AutoParallelOptions) ProtoMessage

func (*AutoParallelOptions) ProtoMessage()

func (*AutoParallelOptions) Reset

func (m *AutoParallelOptions) Reset()

func (*AutoParallelOptions) Size

func (m *AutoParallelOptions) Size() (n int)

func (*AutoParallelOptions) String

func (m *AutoParallelOptions) String() string

func (*AutoParallelOptions) Unmarshal

func (m *AutoParallelOptions) Unmarshal(dAtA []byte) error

func (*AutoParallelOptions) XXX_DiscardUnknown

func (m *AutoParallelOptions) XXX_DiscardUnknown()

func (*AutoParallelOptions) XXX_Marshal

func (m *AutoParallelOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AutoParallelOptions) XXX_Merge

func (m *AutoParallelOptions) XXX_Merge(src proto.Message)

func (*AutoParallelOptions) XXX_Size

func (m *AutoParallelOptions) XXX_Size() int

func (*AutoParallelOptions) XXX_Unmarshal

func (m *AutoParallelOptions) XXX_Unmarshal(b []byte) error

type BoundedTensorSpecProto

type BoundedTensorSpecProto struct {
	Name    string                      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape   *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype   framework.DataType          `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Minimum *framework.TensorProto      `protobuf:"bytes,4,opt,name=minimum,proto3" json:"minimum,omitempty"`
	Maximum *framework.TensorProto      `protobuf:"bytes,5,opt,name=maximum,proto3" json:"maximum,omitempty"`
}

A protobuf to represent tf.BoundedTensorSpec.

func (*BoundedTensorSpecProto) Descriptor

func (*BoundedTensorSpecProto) Descriptor() ([]byte, []int)

func (*BoundedTensorSpecProto) GetDtype

func (*BoundedTensorSpecProto) GetMaximum

func (m *BoundedTensorSpecProto) GetMaximum() *framework.TensorProto

func (*BoundedTensorSpecProto) GetMinimum

func (m *BoundedTensorSpecProto) GetMinimum() *framework.TensorProto

func (*BoundedTensorSpecProto) GetName

func (m *BoundedTensorSpecProto) GetName() string

func (*BoundedTensorSpecProto) GetShape

func (*BoundedTensorSpecProto) Marshal

func (m *BoundedTensorSpecProto) Marshal() (dAtA []byte, err error)

func (*BoundedTensorSpecProto) MarshalTo

func (m *BoundedTensorSpecProto) MarshalTo(dAtA []byte) (int, error)

func (*BoundedTensorSpecProto) MarshalToSizedBuffer

func (m *BoundedTensorSpecProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*BoundedTensorSpecProto) ProtoMessage

func (*BoundedTensorSpecProto) ProtoMessage()

func (*BoundedTensorSpecProto) Reset

func (m *BoundedTensorSpecProto) Reset()

func (*BoundedTensorSpecProto) Size

func (m *BoundedTensorSpecProto) Size() (n int)

func (*BoundedTensorSpecProto) String

func (m *BoundedTensorSpecProto) String() string

func (*BoundedTensorSpecProto) Unmarshal

func (m *BoundedTensorSpecProto) Unmarshal(dAtA []byte) error

func (*BoundedTensorSpecProto) XXX_DiscardUnknown

func (m *BoundedTensorSpecProto) XXX_DiscardUnknown()

func (*BoundedTensorSpecProto) XXX_Marshal

func (m *BoundedTensorSpecProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BoundedTensorSpecProto) XXX_Merge

func (m *BoundedTensorSpecProto) XXX_Merge(src proto.Message)

func (*BoundedTensorSpecProto) XXX_Size

func (m *BoundedTensorSpecProto) XXX_Size() int

func (*BoundedTensorSpecProto) XXX_Unmarshal

func (m *BoundedTensorSpecProto) XXX_Unmarshal(b []byte) error

type CallableOptions

type CallableOptions struct {
	// Tensors to be fed in the callable. Each feed is the name of a tensor.
	Feed []string `protobuf:"bytes,1,rep,name=feed,proto3" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller of the callable expects a
	// tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
	// order of specified fetches does not change the execution order.
	Fetch []string `protobuf:"bytes,2,rep,name=fetch,proto3" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run by the
	// callable but their outputs will not be returned.
	Target []string `protobuf:"bytes,3,rep,name=target,proto3" json:"target,omitempty"`
	// Options that will be applied to each run.
	RunOptions *RunOptions `protobuf:"bytes,4,opt,name=run_options,json=runOptions,proto3" json:"run_options,omitempty"`
	// Tensors to be connected in the callable. Each TensorConnection denotes
	// a pair of tensors in the graph, between which an edge will be created
	// in the callable.
	TensorConnection []*TensorConnection `protobuf:"bytes,5,rep,name=tensor_connection,json=tensorConnection,proto3" json:"tensor_connection,omitempty"`
	// The Tensor objects fed in the callable and fetched from the callable
	// are expected to be backed by host (CPU) memory by default.
	//
	// The options below allow changing that - feeding tensors backed by
	// device memory, or returning tensors that are backed by device memory.
	//
	// The maps below map the name of a feed/fetch tensor (which appears in
	// 'feed' or 'fetch' fields above), to the fully qualified name of the device
	// owning the memory backing the contents of the tensor.
	//
	// For example, creating a callable with the following options:
	//
	// CallableOptions {
	//   feed: "a:0"
	//   feed: "b:0"
	//
	//   fetch: "x:0"
	//   fetch: "y:0"
	//
	//   feed_devices: {
	//     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//   }
	//
	//   fetch_devices: {
	//     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//   }
	// }
	//
	// means that the Callable expects:
	// - The first argument ("a:0") is a Tensor backed by GPU memory.
	// - The second argument ("b:0") is a Tensor backed by host memory.
	// and of its return values:
	// - The first output ("x:0") will be backed by host memory.
	// - The second output ("y:0") will be backed by GPU memory.
	//
	// FEEDS:
	// It is the responsibility of the caller to ensure that the memory of the fed
	// tensors will be correctly initialized and synchronized before it is
	// accessed by operations executed during the call to Session::RunCallable().
	//
	// This is typically ensured by using the TensorFlow memory allocators
	// (Device::GetAllocator()) to create the Tensor to be fed.
	//
	// Alternatively, for CUDA-enabled GPU devices, this typically means that the
	// operation that produced the contents of the tensor has completed, i.e., the
	// CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
	// cuStreamSynchronize()).
	FeedDevices  map[string]string `` /* 182-byte string literal not displayed */
	FetchDevices map[string]string `` /* 185-byte string literal not displayed */
	// By default, RunCallable() will synchronize the GPU stream before returning
	// fetched tensors on a GPU device, to ensure that the values in those tensors
	// have been produced. This simplifies interacting with the tensors, but
	// potentially incurs a performance hit.
	//
	// If this options is set to true, the caller is responsible for ensuring
	// that the values in the fetched tensors have been produced before they are
	// used. The caller can do this by invoking `Device::Sync()` on the underlying
	// device(s), or by feeding the tensors back to the same Session using
	// `feed_devices` with the same corresponding device name.
	FetchSkipSync bool `protobuf:"varint,8,opt,name=fetch_skip_sync,json=fetchSkipSync,proto3" json:"fetch_skip_sync,omitempty"`
}

Defines a subgraph in another `GraphDef` as a set of feed points and nodes to be fetched or executed.

Compare with the arguments to `Session::Run()`.

func (*CallableOptions) Descriptor

func (*CallableOptions) Descriptor() ([]byte, []int)

func (*CallableOptions) GetFeed

func (m *CallableOptions) GetFeed() []string

func (*CallableOptions) GetFeedDevices

func (m *CallableOptions) GetFeedDevices() map[string]string

func (*CallableOptions) GetFetch

func (m *CallableOptions) GetFetch() []string

func (*CallableOptions) GetFetchDevices

func (m *CallableOptions) GetFetchDevices() map[string]string

func (*CallableOptions) GetFetchSkipSync

func (m *CallableOptions) GetFetchSkipSync() bool

func (*CallableOptions) GetRunOptions

func (m *CallableOptions) GetRunOptions() *RunOptions

func (*CallableOptions) GetTarget

func (m *CallableOptions) GetTarget() []string

func (*CallableOptions) GetTensorConnection

func (m *CallableOptions) GetTensorConnection() []*TensorConnection

func (*CallableOptions) Marshal

func (m *CallableOptions) Marshal() (dAtA []byte, err error)

func (*CallableOptions) MarshalTo

func (m *CallableOptions) MarshalTo(dAtA []byte) (int, error)

func (*CallableOptions) MarshalToSizedBuffer

func (m *CallableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CallableOptions) ProtoMessage

func (*CallableOptions) ProtoMessage()

func (*CallableOptions) Reset

func (m *CallableOptions) Reset()

func (*CallableOptions) Size

func (m *CallableOptions) Size() (n int)

func (*CallableOptions) String

func (m *CallableOptions) String() string

func (*CallableOptions) Unmarshal

func (m *CallableOptions) Unmarshal(dAtA []byte) error

func (*CallableOptions) XXX_DiscardUnknown

func (m *CallableOptions) XXX_DiscardUnknown()

func (*CallableOptions) XXX_Marshal

func (m *CallableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CallableOptions) XXX_Merge

func (m *CallableOptions) XXX_Merge(src proto.Message)

func (*CallableOptions) XXX_Size

func (m *CallableOptions) XXX_Size() int

func (*CallableOptions) XXX_Unmarshal

func (m *CallableOptions) XXX_Unmarshal(b []byte) error

type ClusterDef

type ClusterDef struct {
	// The jobs that comprise the cluster.
	Job []*JobDef `protobuf:"bytes,1,rep,name=job,proto3" json:"job,omitempty"`
}

Defines a TensorFlow cluster as a set of jobs.

func (*ClusterDef) Descriptor

func (*ClusterDef) Descriptor() ([]byte, []int)

func (*ClusterDef) GetJob

func (m *ClusterDef) GetJob() []*JobDef

func (*ClusterDef) Marshal

func (m *ClusterDef) Marshal() (dAtA []byte, err error)

func (*ClusterDef) MarshalTo

func (m *ClusterDef) MarshalTo(dAtA []byte) (int, error)

func (*ClusterDef) MarshalToSizedBuffer

func (m *ClusterDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ClusterDef) ProtoMessage

func (*ClusterDef) ProtoMessage()

func (*ClusterDef) Reset

func (m *ClusterDef) Reset()

func (*ClusterDef) Size

func (m *ClusterDef) Size() (n int)

func (*ClusterDef) String

func (m *ClusterDef) String() string

func (*ClusterDef) Unmarshal

func (m *ClusterDef) Unmarshal(dAtA []byte) error

func (*ClusterDef) XXX_DiscardUnknown

func (m *ClusterDef) XXX_DiscardUnknown()

func (*ClusterDef) XXX_Marshal

func (m *ClusterDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterDef) XXX_Merge

func (m *ClusterDef) XXX_Merge(src proto.Message)

func (*ClusterDef) XXX_Size

func (m *ClusterDef) XXX_Size() int

func (*ClusterDef) XXX_Unmarshal

func (m *ClusterDef) XXX_Unmarshal(b []byte) error

type Code

type Code int32

The canonical error codes for TensorFlow APIs.

Warnings:

  • Do not change any numeric assignments.
  • Changes to this list should only be made if there is a compelling need that can't be satisfied in another way. Such changes must be approved by at least two OWNERS.
  • These error codes must match gRPC and protobuf error codes (except for DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_).

Sometimes multiple error codes may apply. Services should return the most specific error code that applies. For example, prefer OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply. Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION.

const (
	// Not an error; returned on success
	Code_OK Code = 0
	// The operation was cancelled (typically by the caller).
	Code_CANCELLED Code = 1
	// Unknown error.  An example of where this error may be returned is
	// if a Status value received from another address space belongs to
	// an error-space that is not known in this address space.  Also
	// errors raised by APIs that do not return enough error information
	// may be converted to this error.
	Code_UNKNOWN Code = 2
	// Client specified an invalid argument.  Note that this differs
	// from FAILED_PRECONDITION.  INVALID_ARGUMENT indicates arguments
	// that are problematic regardless of the state of the system
	// (e.g., a malformed file name).
	Code_INVALID_ARGUMENT Code = 3
	// Deadline expired before operation could complete.  For operations
	// that change the state of the system, this error may be returned
	// even if the operation has completed successfully.  For example, a
	// successful response from a server could have been delayed long
	// enough for the deadline to expire.
	Code_DEADLINE_EXCEEDED Code = 4
	// Some requested entity (e.g., file or directory) was not found.
	// For privacy reasons, this code *may* be returned when the client
	// does not have the access right to the entity.
	Code_NOT_FOUND Code = 5
	// Some entity that we attempted to create (e.g., file or directory)
	// already exists.
	Code_ALREADY_EXISTS Code = 6
	// The caller does not have permission to execute the specified
	// operation.  PERMISSION_DENIED must not be used for rejections
	// caused by exhausting some resource (use RESOURCE_EXHAUSTED
	// instead for those errors).  PERMISSION_DENIED must not be
	// used if the caller can not be identified (use UNAUTHENTICATED
	// instead for those errors).
	Code_PERMISSION_DENIED Code = 7
	// The request does not have valid authentication credentials for the
	// operation.
	Code_UNAUTHENTICATED Code = 16
	// Some resource has been exhausted, perhaps a per-user quota, or
	// perhaps the entire file system is out of space.
	Code_RESOURCE_EXHAUSTED Code = 8
	// Operation was rejected because the system is not in a state
	// required for the operation's execution.  For example, directory
	// to be deleted may be non-empty, an rmdir operation is applied to
	// a non-directory, etc.
	//
	// A litmus test that may help a service implementor in deciding
	// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
	//  (a) Use UNAVAILABLE if the client can retry just the failing call.
	//  (b) Use ABORTED if the client should retry at a higher-level
	//      (e.g., restarting a read-modify-write sequence).
	//  (c) Use FAILED_PRECONDITION if the client should not retry until
	//      the system state has been explicitly fixed.  E.g., if an "rmdir"
	//      fails because the directory is non-empty, FAILED_PRECONDITION
	//      should be returned since the client should not retry unless
	//      they have first fixed up the directory by deleting files from it.
	//  (d) Use FAILED_PRECONDITION if the client performs conditional
	//      REST Get/Update/Delete on a resource and the resource on the
	//      server does not match the condition. E.g., conflicting
	//      read-modify-write on the same resource.
	Code_FAILED_PRECONDITION Code = 9
	// The operation was aborted, typically due to a concurrency issue
	// like sequencer check failures, transaction aborts, etc.
	//
	// See litmus test above for deciding between FAILED_PRECONDITION,
	// ABORTED, and UNAVAILABLE.
	Code_ABORTED Code = 10
	// Operation tried to iterate past the valid input range.  E.g., seeking or
	// reading past end of file.
	//
	// Unlike INVALID_ARGUMENT, this error indicates a problem that may
	// be fixed if the system state changes. For example, a 32-bit file
	// system will generate INVALID_ARGUMENT if asked to read at an
	// offset that is not in the range [0,2^32-1], but it will generate
	// OUT_OF_RANGE if asked to read from an offset past the current
	// file size.
	//
	// There is a fair bit of overlap between FAILED_PRECONDITION and
	// OUT_OF_RANGE.  We recommend using OUT_OF_RANGE (the more specific
	// error) when it applies so that callers who are iterating through
	// a space can easily look for an OUT_OF_RANGE error to detect when
	// they are done.
	Code_OUT_OF_RANGE Code = 11
	// Operation is not implemented or not supported/enabled in this service.
	Code_UNIMPLEMENTED Code = 12
	// Internal errors.  Means some invariant expected by the underlying
	// system has been broken.  If you see one of these errors,
	// something is very broken.
	Code_INTERNAL Code = 13
	// The service is currently unavailable.  This is most likely a
	// transient condition and may be corrected by retrying with
	// a backoff.
	//
	// See litmus test above for deciding between FAILED_PRECONDITION,
	// ABORTED, and UNAVAILABLE.
	Code_UNAVAILABLE Code = 14
	// Unrecoverable data loss or corruption.
	Code_DATA_LOSS Code = 15
	// An extra enum entry to prevent people from writing code that
	// fails to compile when a new code is added.
	//
	// Nobody should ever reference this enumeration entry. In particular,
	// if you write C++ code that switches on this enumeration, add a default:
	// case instead of a case that mentions this enumeration entry.
	//
	// Nobody should rely on the value (currently 20) listed here.  It
	// may change in the future.
	Code_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ Code = 20
)

func (Code) EnumDescriptor

func (Code) EnumDescriptor() ([]byte, []int)

func (Code) String

func (x Code) String() string

type CollectionDef

type CollectionDef struct {
	// Types that are valid to be assigned to Kind:
	//	*CollectionDef_NodeList_
	//	*CollectionDef_BytesList_
	//	*CollectionDef_Int64List_
	//	*CollectionDef_FloatList_
	//	*CollectionDef_AnyList_
	Kind isCollectionDef_Kind `protobuf_oneof:"kind"`
}

CollectionDef should cover most collections. To add a user-defined collection, do one of the following:

  1. For simple data types, such as string, int, float: tf.add_to_collection("your_collection_name", your_simple_value) strings will be stored as bytes_list.

2. For Protobuf types, there are three ways to add them:

  1. tf.add_to_collection("your_collection_name", your_proto.SerializeToString())

    collection_def { key: "user_defined_bytes_collection" value { bytes_list { value: "queue_name: \"test_queue\"\n" } } }

    or

  2. tf.add_to_collection("your_collection_name", str(your_proto))

    collection_def { key: "user_defined_string_collection" value { bytes_list { value: "\n\ntest_queue" } } }

    or

  3. any_buf = any_pb2.Any() tf.add_to_collection("your_collection_name", any_buf.Pack(your_proto))

    collection_def { key: "user_defined_any_collection" value { any_list { value { type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" value: "\n\ntest_queue" } } } }

  3. For Python objects, implement to_proto() and from_proto(), and register them in the following manner: ops.register_proto_function("your_collection_name", proto_type, to_proto=YourPythonObject.to_proto, from_proto=YourPythonObject.from_proto) These functions will be invoked to serialize and de-serialize the collection. For example, ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=Variable.to_proto, from_proto=Variable.from_proto)

func (*CollectionDef) Descriptor

func (*CollectionDef) Descriptor() ([]byte, []int)

func (*CollectionDef) GetAnyList

func (m *CollectionDef) GetAnyList() *CollectionDef_AnyList

func (*CollectionDef) GetBytesList

func (m *CollectionDef) GetBytesList() *CollectionDef_BytesList

func (*CollectionDef) GetFloatList

func (m *CollectionDef) GetFloatList() *CollectionDef_FloatList

func (*CollectionDef) GetInt64List

func (m *CollectionDef) GetInt64List() *CollectionDef_Int64List

func (*CollectionDef) GetKind

func (m *CollectionDef) GetKind() isCollectionDef_Kind

func (*CollectionDef) GetNodeList

func (m *CollectionDef) GetNodeList() *CollectionDef_NodeList

func (*CollectionDef) Marshal

func (m *CollectionDef) Marshal() (dAtA []byte, err error)

func (*CollectionDef) MarshalTo

func (m *CollectionDef) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef) MarshalToSizedBuffer

func (m *CollectionDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef) ProtoMessage

func (*CollectionDef) ProtoMessage()

func (*CollectionDef) Reset

func (m *CollectionDef) Reset()

func (*CollectionDef) Size

func (m *CollectionDef) Size() (n int)

func (*CollectionDef) String

func (m *CollectionDef) String() string

func (*CollectionDef) Unmarshal

func (m *CollectionDef) Unmarshal(dAtA []byte) error

func (*CollectionDef) XXX_DiscardUnknown

func (m *CollectionDef) XXX_DiscardUnknown()

func (*CollectionDef) XXX_Marshal

func (m *CollectionDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef) XXX_Merge

func (m *CollectionDef) XXX_Merge(src proto.Message)

func (*CollectionDef) XXX_OneofWrappers

func (*CollectionDef) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*CollectionDef) XXX_Size

func (m *CollectionDef) XXX_Size() int

func (*CollectionDef) XXX_Unmarshal

func (m *CollectionDef) XXX_Unmarshal(b []byte) error

type CollectionDef_AnyList

type CollectionDef_AnyList struct {
	Value []*types.Any `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}

AnyList is used for collecting Any protos.

func (*CollectionDef_AnyList) Descriptor

func (*CollectionDef_AnyList) Descriptor() ([]byte, []int)

func (*CollectionDef_AnyList) GetValue

func (m *CollectionDef_AnyList) GetValue() []*types.Any

func (*CollectionDef_AnyList) Marshal

func (m *CollectionDef_AnyList) Marshal() (dAtA []byte, err error)

func (*CollectionDef_AnyList) MarshalTo

func (m *CollectionDef_AnyList) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_AnyList) MarshalToSizedBuffer

func (m *CollectionDef_AnyList) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_AnyList) ProtoMessage

func (*CollectionDef_AnyList) ProtoMessage()

func (*CollectionDef_AnyList) Reset

func (m *CollectionDef_AnyList) Reset()

func (*CollectionDef_AnyList) Size

func (m *CollectionDef_AnyList) Size() (n int)

func (*CollectionDef_AnyList) String

func (m *CollectionDef_AnyList) String() string

func (*CollectionDef_AnyList) Unmarshal

func (m *CollectionDef_AnyList) Unmarshal(dAtA []byte) error

func (*CollectionDef_AnyList) XXX_DiscardUnknown

func (m *CollectionDef_AnyList) XXX_DiscardUnknown()

func (*CollectionDef_AnyList) XXX_Marshal

func (m *CollectionDef_AnyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef_AnyList) XXX_Merge

func (m *CollectionDef_AnyList) XXX_Merge(src proto.Message)

func (*CollectionDef_AnyList) XXX_Size

func (m *CollectionDef_AnyList) XXX_Size() int

func (*CollectionDef_AnyList) XXX_Unmarshal

func (m *CollectionDef_AnyList) XXX_Unmarshal(b []byte) error

type CollectionDef_AnyList_

type CollectionDef_AnyList_ struct {
	AnyList *CollectionDef_AnyList `protobuf:"bytes,5,opt,name=any_list,json=anyList,proto3,oneof" json:"any_list,omitempty"`
}

func (*CollectionDef_AnyList_) MarshalTo

func (m *CollectionDef_AnyList_) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_AnyList_) MarshalToSizedBuffer

func (m *CollectionDef_AnyList_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_AnyList_) Size

func (m *CollectionDef_AnyList_) Size() (n int)

type CollectionDef_BytesList

type CollectionDef_BytesList struct {
	Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}

BytesList is used for collecting strings and serialized protobufs. For example:

collection_def {
  key: "trainable_variables"
  value {
    bytes_list {
      value: "\n\017conv1/weights:0\022\024conv1/weights/Assign
             \032\024conv1/weights/read:0"
      value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032
             \023conv1/biases/read:0"
    }
  }
}

func (*CollectionDef_BytesList) Descriptor

func (*CollectionDef_BytesList) Descriptor() ([]byte, []int)

func (*CollectionDef_BytesList) GetValue

func (m *CollectionDef_BytesList) GetValue() [][]byte

func (*CollectionDef_BytesList) Marshal

func (m *CollectionDef_BytesList) Marshal() (dAtA []byte, err error)

func (*CollectionDef_BytesList) MarshalTo

func (m *CollectionDef_BytesList) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_BytesList) MarshalToSizedBuffer

func (m *CollectionDef_BytesList) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_BytesList) ProtoMessage

func (*CollectionDef_BytesList) ProtoMessage()

func (*CollectionDef_BytesList) Reset

func (m *CollectionDef_BytesList) Reset()

func (*CollectionDef_BytesList) Size

func (m *CollectionDef_BytesList) Size() (n int)

func (*CollectionDef_BytesList) String

func (m *CollectionDef_BytesList) String() string

func (*CollectionDef_BytesList) Unmarshal

func (m *CollectionDef_BytesList) Unmarshal(dAtA []byte) error

func (*CollectionDef_BytesList) XXX_DiscardUnknown

func (m *CollectionDef_BytesList) XXX_DiscardUnknown()

func (*CollectionDef_BytesList) XXX_Marshal

func (m *CollectionDef_BytesList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef_BytesList) XXX_Merge

func (m *CollectionDef_BytesList) XXX_Merge(src proto.Message)

func (*CollectionDef_BytesList) XXX_Size

func (m *CollectionDef_BytesList) XXX_Size() int

func (*CollectionDef_BytesList) XXX_Unmarshal

func (m *CollectionDef_BytesList) XXX_Unmarshal(b []byte) error

type CollectionDef_BytesList_

type CollectionDef_BytesList_ struct {
	BytesList *CollectionDef_BytesList `protobuf:"bytes,2,opt,name=bytes_list,json=bytesList,proto3,oneof" json:"bytes_list,omitempty"`
}

func (*CollectionDef_BytesList_) MarshalTo

func (m *CollectionDef_BytesList_) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_BytesList_) MarshalToSizedBuffer

func (m *CollectionDef_BytesList_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_BytesList_) Size

func (m *CollectionDef_BytesList_) Size() (n int)

type CollectionDef_FloatList

type CollectionDef_FloatList struct {
	Value []float32 `protobuf:"fixed32,1,rep,packed,name=value,proto3" json:"value,omitempty"`
}

FloatList is used for collecting float values.

func (*CollectionDef_FloatList) Descriptor

func (*CollectionDef_FloatList) Descriptor() ([]byte, []int)

func (*CollectionDef_FloatList) GetValue

func (m *CollectionDef_FloatList) GetValue() []float32

func (*CollectionDef_FloatList) Marshal

func (m *CollectionDef_FloatList) Marshal() (dAtA []byte, err error)

func (*CollectionDef_FloatList) MarshalTo

func (m *CollectionDef_FloatList) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_FloatList) MarshalToSizedBuffer

func (m *CollectionDef_FloatList) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_FloatList) ProtoMessage

func (*CollectionDef_FloatList) ProtoMessage()

func (*CollectionDef_FloatList) Reset

func (m *CollectionDef_FloatList) Reset()

func (*CollectionDef_FloatList) Size

func (m *CollectionDef_FloatList) Size() (n int)

func (*CollectionDef_FloatList) String

func (m *CollectionDef_FloatList) String() string

func (*CollectionDef_FloatList) Unmarshal

func (m *CollectionDef_FloatList) Unmarshal(dAtA []byte) error

func (*CollectionDef_FloatList) XXX_DiscardUnknown

func (m *CollectionDef_FloatList) XXX_DiscardUnknown()

func (*CollectionDef_FloatList) XXX_Marshal

func (m *CollectionDef_FloatList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef_FloatList) XXX_Merge

func (m *CollectionDef_FloatList) XXX_Merge(src proto.Message)

func (*CollectionDef_FloatList) XXX_Size

func (m *CollectionDef_FloatList) XXX_Size() int

func (*CollectionDef_FloatList) XXX_Unmarshal

func (m *CollectionDef_FloatList) XXX_Unmarshal(b []byte) error

type CollectionDef_FloatList_

type CollectionDef_FloatList_ struct {
	FloatList *CollectionDef_FloatList `protobuf:"bytes,4,opt,name=float_list,json=floatList,proto3,oneof" json:"float_list,omitempty"`
}

func (*CollectionDef_FloatList_) MarshalTo

func (m *CollectionDef_FloatList_) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_FloatList_) MarshalToSizedBuffer

func (m *CollectionDef_FloatList_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_FloatList_) Size

func (m *CollectionDef_FloatList_) Size() (n int)

type CollectionDef_Int64List

type CollectionDef_Int64List struct {
	Value []int64 `protobuf:"varint,1,rep,packed,name=value,proto3" json:"value,omitempty"`
}

Int64List is used for collecting int, int64 and long values.

func (*CollectionDef_Int64List) Descriptor

func (*CollectionDef_Int64List) Descriptor() ([]byte, []int)

func (*CollectionDef_Int64List) GetValue

func (m *CollectionDef_Int64List) GetValue() []int64

func (*CollectionDef_Int64List) Marshal

func (m *CollectionDef_Int64List) Marshal() (dAtA []byte, err error)

func (*CollectionDef_Int64List) MarshalTo

func (m *CollectionDef_Int64List) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_Int64List) MarshalToSizedBuffer

func (m *CollectionDef_Int64List) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_Int64List) ProtoMessage

func (*CollectionDef_Int64List) ProtoMessage()

func (*CollectionDef_Int64List) Reset

func (m *CollectionDef_Int64List) Reset()

func (*CollectionDef_Int64List) Size

func (m *CollectionDef_Int64List) Size() (n int)

func (*CollectionDef_Int64List) String

func (m *CollectionDef_Int64List) String() string

func (*CollectionDef_Int64List) Unmarshal

func (m *CollectionDef_Int64List) Unmarshal(dAtA []byte) error

func (*CollectionDef_Int64List) XXX_DiscardUnknown

func (m *CollectionDef_Int64List) XXX_DiscardUnknown()

func (*CollectionDef_Int64List) XXX_Marshal

func (m *CollectionDef_Int64List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef_Int64List) XXX_Merge

func (m *CollectionDef_Int64List) XXX_Merge(src proto.Message)

func (*CollectionDef_Int64List) XXX_Size

func (m *CollectionDef_Int64List) XXX_Size() int

func (*CollectionDef_Int64List) XXX_Unmarshal

func (m *CollectionDef_Int64List) XXX_Unmarshal(b []byte) error

type CollectionDef_Int64List_

type CollectionDef_Int64List_ struct {
	Int64List *CollectionDef_Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,proto3,oneof" json:"int64_list,omitempty"`
}

func (*CollectionDef_Int64List_) MarshalTo

func (m *CollectionDef_Int64List_) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_Int64List_) MarshalToSizedBuffer

func (m *CollectionDef_Int64List_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_Int64List_) Size

func (m *CollectionDef_Int64List_) Size() (n int)

type CollectionDef_NodeList

type CollectionDef_NodeList struct {
	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}

NodeList is used for collecting nodes in a graph. For example:

collection_def {
  key: "summaries"
  value {
    node_list {
      value: "input_producer/ScalarSummary:0"
      value: "shuffle_batch/ScalarSummary:0"
      value: "ImageSummary:0"
    }
  }
}

func (*CollectionDef_NodeList) Descriptor

func (*CollectionDef_NodeList) Descriptor() ([]byte, []int)

func (*CollectionDef_NodeList) GetValue

func (m *CollectionDef_NodeList) GetValue() []string

func (*CollectionDef_NodeList) Marshal

func (m *CollectionDef_NodeList) Marshal() (dAtA []byte, err error)

func (*CollectionDef_NodeList) MarshalTo

func (m *CollectionDef_NodeList) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_NodeList) MarshalToSizedBuffer

func (m *CollectionDef_NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_NodeList) ProtoMessage

func (*CollectionDef_NodeList) ProtoMessage()

func (*CollectionDef_NodeList) Reset

func (m *CollectionDef_NodeList) Reset()

func (*CollectionDef_NodeList) Size

func (m *CollectionDef_NodeList) Size() (n int)

func (*CollectionDef_NodeList) String

func (m *CollectionDef_NodeList) String() string

func (*CollectionDef_NodeList) Unmarshal

func (m *CollectionDef_NodeList) Unmarshal(dAtA []byte) error

func (*CollectionDef_NodeList) XXX_DiscardUnknown

func (m *CollectionDef_NodeList) XXX_DiscardUnknown()

func (*CollectionDef_NodeList) XXX_Marshal

func (m *CollectionDef_NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CollectionDef_NodeList) XXX_Merge

func (m *CollectionDef_NodeList) XXX_Merge(src proto.Message)

func (*CollectionDef_NodeList) XXX_Size

func (m *CollectionDef_NodeList) XXX_Size() int

func (*CollectionDef_NodeList) XXX_Unmarshal

func (m *CollectionDef_NodeList) XXX_Unmarshal(b []byte) error

type CollectionDef_NodeList_

type CollectionDef_NodeList_ struct {
	NodeList *CollectionDef_NodeList `protobuf:"bytes,1,opt,name=node_list,json=nodeList,proto3,oneof" json:"node_list,omitempty"`
}

func (*CollectionDef_NodeList_) MarshalTo

func (m *CollectionDef_NodeList_) MarshalTo(dAtA []byte) (int, error)

func (*CollectionDef_NodeList_) MarshalToSizedBuffer

func (m *CollectionDef_NodeList_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CollectionDef_NodeList_) Size

func (m *CollectionDef_NodeList_) Size() (n int)

type ConfigProto

type ConfigProto struct {
	// Map from device type name (e.g., "CPU" or "GPU" ) to maximum
	// number of devices of that type to use.  If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 183-byte string literal not displayed */
	// The execution of an individual op (for some op types) can be
	// parallelized on a pool of intra_op_parallelism_threads.
	// 0 means the system picks an appropriate number.
	//
	// If you create an ordinary session, e.g., from Python or C++,
	// then there is exactly one intra op thread pool per process.
	// The first session created determines the number of threads in this pool.
	// All subsequent sessions reuse/share this one global pool.
	//
	// There are notable exceptions to the default behavior described above:
	// 1. There is an environment variable for overriding this thread pool,
	//    named TF_OVERRIDE_GLOBAL_THREADPOOL.
	// 2. When connecting to a server, such as a remote `tf.train.Server`
	//    instance, then this option will be ignored altogether.
	IntraOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// Nodes that perform blocking operations are enqueued on a pool of
	// inter_op_parallelism_threads available in each process.
	//
	// 0 means the system picks an appropriate number.
	// Negative means all operations are performed in caller's thread.
	//
	// Note that the first Session created in the process sets the
	// number of threads for all future sessions unless use_per_session_threads is
	// true or session_inter_op_thread_pool is configured.
	InterOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// If true, use a new set of threads for this session rather than the global
	// pool of threads. Only supported by direct sessions.
	//
	// If false, use the global threads created by the first session, or the
	// per-session thread pools configured by session_inter_op_thread_pool.
	//
	// This option is deprecated. The same effect can be achieved by setting
	// session_inter_op_thread_pool to have one element, whose num_threads equals
	// inter_op_parallelism_threads.
	UsePerSessionThreads bool `` /* 126-byte string literal not displayed */
	// This option is experimental - it may be replaced with a different mechanism
	// in the future.
	//
	// Configures session thread pools. If this is configured, then RunOptions for
	// a Run call can select the thread pool to use.
	//
	// The intended use is for when some session invocations need to run in a
	// background pool limited to a small number of threads:
	// - For example, a session may be configured to have one large pool (for
	// regular compute) and one small pool (for periodic, low priority work);
	// using the small pool is currently the mechanism for limiting the inter-op
	// parallelism of the low priority work.  Note that it does not limit the
	// parallelism of work spawned by a single op kernel implementation.
	// - Using this setting is normally not needed in training, but may help some
	// serving use cases.
	// - It is also generally recommended to set the global_name field of this
	// proto, to avoid creating multiple large pools. It is typically better to
	// run the non-low-priority work, even across sessions, in a single large
	// pool.
	SessionInterOpThreadPool []*ThreadPoolOptionProto `` /* 140-byte string literal not displayed */
	// Assignment of Nodes to Devices is recomputed every placement_period
	// steps until the system warms up (at which point the recomputation
	// typically slows down automatically).
	PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"`
	// When any filters are present sessions will ignore all devices which do not
	// match the filters. Each filter can be partially specified, e.g. "/job:ps"
	// "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"`
	// Options that apply to all GPUs.
	GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"`
	// Whether soft placement is allowed. If allow_soft_placement is true,
	// an op will be placed on CPU if
	//   1. there's no GPU implementation for the OP
	// or
	//   2. no GPU devices are known or registered
	// or
	//   3. need to co-locate with reftype input(s) which are from CPU.
	AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"`
	// Whether device placements should be logged.
	LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"`
	// Options that apply to all graphs.
	GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"`
	// Global timeout for all blocking operations in this session.  If non-zero,
	// and not overridden on a per-operation basis, this value will be used as the
	// deadline for all blocking operations.
	OperationTimeoutInMs int64 `` /* 127-byte string literal not displayed */
	// Options that apply when this session uses the distributed runtime.
	RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions,proto3" json:"rpc_options,omitempty"`
	// Optional list of all workers to use in this session.
	ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef,proto3" json:"cluster_def,omitempty"`
	// If true, any resources such as Variables used in the session will not be
	// shared with other sessions. However, when clusterspec propagation is
	// enabled, this field is ignored and sessions are always isolated.
	IsolateSessionState bool `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"`
	// When true, WorkerSessions are created with device attributes from the
	// full cluster.
	// This is helpful when a worker wants to partition a graph
	// (for example during a PartitionedCallOp).
	ShareClusterDevicesInSession bool                      `` /* 153-byte string literal not displayed */
	Experimental                 *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental,proto3" json:"experimental,omitempty"`
}

Session configuration parameters. The system picks appropriate values for fields that are not set.

func (*ConfigProto) Descriptor

func (*ConfigProto) Descriptor() ([]byte, []int)

func (*ConfigProto) GetAllowSoftPlacement

func (m *ConfigProto) GetAllowSoftPlacement() bool

func (*ConfigProto) GetClusterDef

func (m *ConfigProto) GetClusterDef() *ClusterDef

func (*ConfigProto) GetDeviceCount

func (m *ConfigProto) GetDeviceCount() map[string]int32

func (*ConfigProto) GetDeviceFilters

func (m *ConfigProto) GetDeviceFilters() []string

func (*ConfigProto) GetExperimental

func (m *ConfigProto) GetExperimental() *ConfigProto_Experimental

func (*ConfigProto) GetGpuOptions

func (m *ConfigProto) GetGpuOptions() *GPUOptions

func (*ConfigProto) GetGraphOptions

func (m *ConfigProto) GetGraphOptions() *GraphOptions

func (*ConfigProto) GetInterOpParallelismThreads

func (m *ConfigProto) GetInterOpParallelismThreads() int32

func (*ConfigProto) GetIntraOpParallelismThreads

func (m *ConfigProto) GetIntraOpParallelismThreads() int32

func (*ConfigProto) GetIsolateSessionState

func (m *ConfigProto) GetIsolateSessionState() bool

func (*ConfigProto) GetLogDevicePlacement

func (m *ConfigProto) GetLogDevicePlacement() bool

func (*ConfigProto) GetOperationTimeoutInMs

func (m *ConfigProto) GetOperationTimeoutInMs() int64

func (*ConfigProto) GetPlacementPeriod

func (m *ConfigProto) GetPlacementPeriod() int32

func (*ConfigProto) GetRpcOptions

func (m *ConfigProto) GetRpcOptions() *RPCOptions

func (*ConfigProto) GetSessionInterOpThreadPool

func (m *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto

func (*ConfigProto) GetShareClusterDevicesInSession

func (m *ConfigProto) GetShareClusterDevicesInSession() bool

func (*ConfigProto) GetUsePerSessionThreads

func (m *ConfigProto) GetUsePerSessionThreads() bool

func (*ConfigProto) Marshal

func (m *ConfigProto) Marshal() (dAtA []byte, err error)

func (*ConfigProto) MarshalTo

func (m *ConfigProto) MarshalTo(dAtA []byte) (int, error)

func (*ConfigProto) MarshalToSizedBuffer

func (m *ConfigProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ConfigProto) ProtoMessage

func (*ConfigProto) ProtoMessage()

func (*ConfigProto) Reset

func (m *ConfigProto) Reset()

func (*ConfigProto) Size

func (m *ConfigProto) Size() (n int)

func (*ConfigProto) String

func (m *ConfigProto) String() string

func (*ConfigProto) Unmarshal

func (m *ConfigProto) Unmarshal(dAtA []byte) error

func (*ConfigProto) XXX_DiscardUnknown

func (m *ConfigProto) XXX_DiscardUnknown()

func (*ConfigProto) XXX_Marshal

func (m *ConfigProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConfigProto) XXX_Merge

func (m *ConfigProto) XXX_Merge(src proto.Message)

func (*ConfigProto) XXX_Size

func (m *ConfigProto) XXX_Size() int

func (*ConfigProto) XXX_Unmarshal

func (m *ConfigProto) XXX_Unmarshal(b []byte) error

type ConfigProto_Experimental

type ConfigProto_Experimental struct {
	// Task name for group resolution.
	CollectiveGroupLeader string `` /* 126-byte string literal not displayed */
	// Which executor to use, the default executor will be used
	// if it is an empty string or "DEFAULT"
	ExecutorType string `protobuf:"bytes,3,opt,name=executor_type,json=executorType,proto3" json:"executor_type,omitempty"`
	// Guidance to formatting of large RecvBuf fields for transfer.
	// Any positive value sets the max chunk size.  0 defaults to 4096.
	// Any negative value indicates no max, i.e. one chunk only.
	RecvBufMaxChunk int32 `protobuf:"varint,4,opt,name=recv_buf_max_chunk,json=recvBufMaxChunk,proto3" json:"recv_buf_max_chunk,omitempty"`
	// If true, and supported by the platform, the runtime will attempt to
	// use NUMA affinity where applicable.  One consequence will be the
	// existence of as many CPU devices as there are available NUMA nodes.
	UseNumaAffinity bool `protobuf:"varint,5,opt,name=use_numa_affinity,json=useNumaAffinity,proto3" json:"use_numa_affinity,omitempty"`
	// If true, make collective op execution order sequential and deterministic
	// for potentially concurrent collective instances.
	CollectiveDeterministicSequentialExecution bool `` /* 192-byte string literal not displayed */
	// If true, use NCCL for CollectiveOps.  This feature is highly
	// experimental.
	CollectiveNccl bool `protobuf:"varint,7,opt,name=collective_nccl,json=collectiveNccl,proto3" json:"collective_nccl,omitempty"`
	// In the following, session state means the value of a variable, elements
	// in a hash table, or any other resource, accessible by worker sessions
	// held by a TF server.
	//
	// When ClusterSpec propagation is enabled, the value of
	// isolate_session_state is ignored when deciding whether to share session
	// states in a TF server (for backwards compatibility reasons).
	// - If share_session_state_in_clusterspec_propagation is true, the session
	// states are shared.
	// - If share_session_state_in_clusterspec_propagation is false, session
	// states are isolated.
	//
	// When clusterspec propagation is not used, the value of
	// share_session_state_in_clusterspec_propagation is ignored when deciding
	// whether to share session states in a TF server.
	// - If isolate_session_state is true, session states are isolated.
	// - If isolate_session_state is false, session states are shared.
	//
	// TODO(b/129330037): Add a single API that consistently treats
	// isolate_session_state and ClusterSpec propagation.
	ShareSessionStateInClusterspecPropagation bool `` /* 193-byte string literal not displayed */
	// If using a direct session, disable spinning while waiting for work in
	// the thread pool. This may result in higher latency for completing ops,
	// but in the case where there is a lot of spinning may result in lower
	// CPU usage.
	DisableThreadSpinning bool `` /* 127-byte string literal not displayed */
	// This was promoted to a non-experimental API. Please use
	// ConfigProto.share_cluster_devices_in_session instead.
	ShareClusterDevicesInSession bool `` /* 153-byte string literal not displayed */
	// Metadata about the session.
	//
	// If set, this can be used by the runtime and the Ops for debugging,
	// monitoring, etc.
	//
	// NOTE: This is currently used and propagated only by the direct session.
	SessionMetadata *SessionMetadata `protobuf:"bytes,11,opt,name=session_metadata,json=sessionMetadata,proto3" json:"session_metadata,omitempty"`
	// If true, the session may treat the graph as being static for optimization
	// purposes.
	//
	// If this option is set to true when a session is created, the full
	// GraphDef must be passed in a single call to Session::Create(), and
	// Session::Extend() may not be supported.
	OptimizeForStaticGraph bool `` /* 133-byte string literal not displayed */
	// Whether to enable the MLIR-based TF->XLA bridge.
	//
	// This is a replacement to the existing bridge, and not ready for
	// production usage yet.
	// If this option is set to true when a session is created, MLIR is used to
	// perform the set of graph transformations to put the graph in a form that
	// can be executed with delegation of some computations to an accelerator.
	// This builds on the model of XLA where a subset of the graph is
	// encapsulated and attached to a "compile" operation, whose result is fed
	// to an "execute" operation. The kernel for these operations is responsible
	// to lower the encapsulated graph to a particular device.
	EnableMlirBridge bool `protobuf:"varint,13,opt,name=enable_mlir_bridge,json=enableMlirBridge,proto3" json:"enable_mlir_bridge,omitempty"`
	// Whether to enable the MLIR-based Graph optimizations.
	//
	// This will become a part of standard Tensorflow graph optimization
	// pipeline, currently this is only used for gradual migration and testing
	// new passes that are replacing existing optimizations in Grappler.
	EnableMlirGraphOptimization bool `` /* 148-byte string literal not displayed */
	// If true, the session will not store an additional copy of the graph for
	// each subgraph.
	//
	// If this option is set to true when a session is created, the
	// `RunOptions.output_partition_graphs` options must not be set.
	DisableOutputPartitionGraphs bool `` /* 151-byte string literal not displayed */
	// Minimum number of batches run through the XLA graph before XLA fusion
	// autotuner is enabled. Default value of zero disables the autotuner.
	//
	// The XLA fusion autotuner can improve performance by executing a heuristic
	// search on the compiler parameters.
	XlaFusionAutotunerThresh int64 `` /* 139-byte string literal not displayed */
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*ConfigProto_Experimental) Descriptor

func (*ConfigProto_Experimental) Descriptor() ([]byte, []int)

func (*ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution

func (m *ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution() bool

func (*ConfigProto_Experimental) GetCollectiveGroupLeader

func (m *ConfigProto_Experimental) GetCollectiveGroupLeader() string

func (*ConfigProto_Experimental) GetCollectiveNccl

func (m *ConfigProto_Experimental) GetCollectiveNccl() bool

func (*ConfigProto_Experimental) GetDisableOutputPartitionGraphs

func (m *ConfigProto_Experimental) GetDisableOutputPartitionGraphs() bool

func (*ConfigProto_Experimental) GetDisableThreadSpinning

func (m *ConfigProto_Experimental) GetDisableThreadSpinning() bool

func (*ConfigProto_Experimental) GetEnableMlirBridge

func (m *ConfigProto_Experimental) GetEnableMlirBridge() bool

func (*ConfigProto_Experimental) GetEnableMlirGraphOptimization

func (m *ConfigProto_Experimental) GetEnableMlirGraphOptimization() bool

func (*ConfigProto_Experimental) GetExecutorType

func (m *ConfigProto_Experimental) GetExecutorType() string

func (*ConfigProto_Experimental) GetOptimizeForStaticGraph

func (m *ConfigProto_Experimental) GetOptimizeForStaticGraph() bool

func (*ConfigProto_Experimental) GetRecvBufMaxChunk

func (m *ConfigProto_Experimental) GetRecvBufMaxChunk() int32

func (*ConfigProto_Experimental) GetSessionMetadata

func (m *ConfigProto_Experimental) GetSessionMetadata() *SessionMetadata

func (*ConfigProto_Experimental) GetShareClusterDevicesInSession

func (m *ConfigProto_Experimental) GetShareClusterDevicesInSession() bool

func (*ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation

func (m *ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation() bool

func (*ConfigProto_Experimental) GetUseNumaAffinity

func (m *ConfigProto_Experimental) GetUseNumaAffinity() bool

func (*ConfigProto_Experimental) GetXlaFusionAutotunerThresh

func (m *ConfigProto_Experimental) GetXlaFusionAutotunerThresh() int64

func (*ConfigProto_Experimental) Marshal

func (m *ConfigProto_Experimental) Marshal() (dAtA []byte, err error)

func (*ConfigProto_Experimental) MarshalTo

func (m *ConfigProto_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*ConfigProto_Experimental) MarshalToSizedBuffer

func (m *ConfigProto_Experimental) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ConfigProto_Experimental) ProtoMessage

func (*ConfigProto_Experimental) ProtoMessage()

func (*ConfigProto_Experimental) Reset

func (m *ConfigProto_Experimental) Reset()

func (*ConfigProto_Experimental) Size

func (m *ConfigProto_Experimental) Size() (n int)

func (*ConfigProto_Experimental) String

func (m *ConfigProto_Experimental) String() string

func (*ConfigProto_Experimental) Unmarshal

func (m *ConfigProto_Experimental) Unmarshal(dAtA []byte) error

func (*ConfigProto_Experimental) XXX_DiscardUnknown

func (m *ConfigProto_Experimental) XXX_DiscardUnknown()

func (*ConfigProto_Experimental) XXX_Marshal

func (m *ConfigProto_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConfigProto_Experimental) XXX_Merge

func (m *ConfigProto_Experimental) XXX_Merge(src proto.Message)

func (*ConfigProto_Experimental) XXX_Size

func (m *ConfigProto_Experimental) XXX_Size() int

func (*ConfigProto_Experimental) XXX_Unmarshal

func (m *ConfigProto_Experimental) XXX_Unmarshal(b []byte) error

type DebugOptions

type DebugOptions struct {
	// Debugging options
	DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts,proto3" json:"debug_tensor_watch_opts,omitempty"`
	// Caller-specified global step count.
	// Note that this is distinct from the session run count and the executor
	// step count.
	GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep,proto3" json:"global_step,omitempty"`
	// Whether the total disk usage of tfdbg is to be reset to zero
	// in this Session.run call. This is used by wrappers and hooks
	// such as the local CLI ones to indicate that the dumped tensors
	// are cleaned up from the disk after each Session.run.
	ResetDiskByteUsage bool `protobuf:"varint,11,opt,name=reset_disk_byte_usage,json=resetDiskByteUsage,proto3" json:"reset_disk_byte_usage,omitempty"`
}

Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).

func (*DebugOptions) Descriptor

func (*DebugOptions) Descriptor() ([]byte, []int)

func (*DebugOptions) GetDebugTensorWatchOpts

func (m *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch

func (*DebugOptions) GetGlobalStep

func (m *DebugOptions) GetGlobalStep() int64

func (*DebugOptions) GetResetDiskByteUsage

func (m *DebugOptions) GetResetDiskByteUsage() bool

func (*DebugOptions) Marshal

func (m *DebugOptions) Marshal() (dAtA []byte, err error)

func (*DebugOptions) MarshalTo

func (m *DebugOptions) MarshalTo(dAtA []byte) (int, error)

func (*DebugOptions) MarshalToSizedBuffer

func (m *DebugOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DebugOptions) ProtoMessage

func (*DebugOptions) ProtoMessage()

func (*DebugOptions) Reset

func (m *DebugOptions) Reset()

func (*DebugOptions) Size

func (m *DebugOptions) Size() (n int)

func (*DebugOptions) String

func (m *DebugOptions) String() string

func (*DebugOptions) Unmarshal

func (m *DebugOptions) Unmarshal(dAtA []byte) error

func (*DebugOptions) XXX_DiscardUnknown

func (m *DebugOptions) XXX_DiscardUnknown()

func (*DebugOptions) XXX_Marshal

func (m *DebugOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DebugOptions) XXX_Merge

func (m *DebugOptions) XXX_Merge(src proto.Message)

func (*DebugOptions) XXX_Size

func (m *DebugOptions) XXX_Size() int

func (*DebugOptions) XXX_Unmarshal

func (m *DebugOptions) XXX_Unmarshal(b []byte) error

type DebugTensorWatch

type DebugTensorWatch struct {
	// Name of the node to watch.
	// Use "*" for wildcard. But note: currently, regex is not supported in
	// general.
	NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Output slot to watch.
	// The semantics of output_slot == -1 is that all outputs of the node
	// will be watched (i.e., a wildcard).
	// Other negative values of output_slot are invalid and will lead to
	// errors currently.
	OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"`
	// Name(s) of the debugging op(s).
	// One or more probes on a tensor.
	// e.g., {"DebugIdentity", "DebugNanCount"}
	DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps,proto3" json:"debug_ops,omitempty"`
	// URL(s) for debug target(s).
	//
	// Supported URL formats are:
	//   - file:///foo/tfdbg_dump: Writes out Event content to file
	//     /foo/tfdbg_dump.  Assumes all directories can be created if they don't
	//     already exist.
	//   - grpc://localhost:11011: Sends an RPC request to an EventListener
	//     service running at localhost:11011 with the event.
	//   - memcbk:///event_key: Routes tensors to clients using the
	//     callback registered with the DebugCallbackRegistry for event_key.
	//
	// Each debug op listed in debug_ops will publish its output tensor (debug
	// signal) to all URLs in debug_urls.
	//
	// N.B. Session::Run() supports concurrent invocations of the same inputs
	// (feed keys), outputs and target nodes. If such concurrent invocations
	// are to be debugged, the callers of Session::Run() must use distinct
	// debug_urls to make sure that the streamed or dumped events do not overlap
	// among the invocations.
	// TODO(cais): More visible documentation of this in g3docs.
	DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls,proto3" json:"debug_urls,omitempty"`
	// Do not error out if debug op creation fails (e.g., due to dtype
	// incompatibility). Instead, just log the failure.
	TolerateDebugOpCreationFailures bool `` /* 161-byte string literal not displayed */
}

Option for watching a node in TensorFlow Debugger (tfdbg).

func (*DebugTensorWatch) Descriptor

func (*DebugTensorWatch) Descriptor() ([]byte, []int)

func (*DebugTensorWatch) GetDebugOps

func (m *DebugTensorWatch) GetDebugOps() []string

func (*DebugTensorWatch) GetDebugUrls

func (m *DebugTensorWatch) GetDebugUrls() []string

func (*DebugTensorWatch) GetNodeName

func (m *DebugTensorWatch) GetNodeName() string

func (*DebugTensorWatch) GetOutputSlot

func (m *DebugTensorWatch) GetOutputSlot() int32

func (*DebugTensorWatch) GetTolerateDebugOpCreationFailures

func (m *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool

func (*DebugTensorWatch) Marshal

func (m *DebugTensorWatch) Marshal() (dAtA []byte, err error)

func (*DebugTensorWatch) MarshalTo

func (m *DebugTensorWatch) MarshalTo(dAtA []byte) (int, error)

func (*DebugTensorWatch) MarshalToSizedBuffer

func (m *DebugTensorWatch) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DebugTensorWatch) ProtoMessage

func (*DebugTensorWatch) ProtoMessage()

func (*DebugTensorWatch) Reset

func (m *DebugTensorWatch) Reset()

func (*DebugTensorWatch) Size

func (m *DebugTensorWatch) Size() (n int)

func (*DebugTensorWatch) String

func (m *DebugTensorWatch) String() string

func (*DebugTensorWatch) Unmarshal

func (m *DebugTensorWatch) Unmarshal(dAtA []byte) error

func (*DebugTensorWatch) XXX_DiscardUnknown

func (m *DebugTensorWatch) XXX_DiscardUnknown()

func (*DebugTensorWatch) XXX_Marshal

func (m *DebugTensorWatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DebugTensorWatch) XXX_Merge

func (m *DebugTensorWatch) XXX_Merge(src proto.Message)

func (*DebugTensorWatch) XXX_Size

func (m *DebugTensorWatch) XXX_Size() int

func (*DebugTensorWatch) XXX_Unmarshal

func (m *DebugTensorWatch) XXX_Unmarshal(b []byte) error

type DebuggedSourceFile

type DebuggedSourceFile struct {
	// The host name on which a source code file is located.
	Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
	// Path to the source code file.
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	// The timestamp at which the source code file is last modified.
	LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
	// Byte size of the file.
	Bytes int64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"`
	// Line-by-line content of the source code file.
	Lines []string `protobuf:"bytes,5,rep,name=lines,proto3" json:"lines,omitempty"`
}

func (*DebuggedSourceFile) Descriptor

func (*DebuggedSourceFile) Descriptor() ([]byte, []int)

func (*DebuggedSourceFile) GetBytes

func (m *DebuggedSourceFile) GetBytes() int64

func (*DebuggedSourceFile) GetFilePath

func (m *DebuggedSourceFile) GetFilePath() string

func (*DebuggedSourceFile) GetHost

func (m *DebuggedSourceFile) GetHost() string

func (*DebuggedSourceFile) GetLastModified

func (m *DebuggedSourceFile) GetLastModified() int64

func (*DebuggedSourceFile) GetLines

func (m *DebuggedSourceFile) GetLines() []string

func (*DebuggedSourceFile) Marshal

func (m *DebuggedSourceFile) Marshal() (dAtA []byte, err error)

func (*DebuggedSourceFile) MarshalTo

func (m *DebuggedSourceFile) MarshalTo(dAtA []byte) (int, error)

func (*DebuggedSourceFile) MarshalToSizedBuffer

func (m *DebuggedSourceFile) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DebuggedSourceFile) ProtoMessage

func (*DebuggedSourceFile) ProtoMessage()

func (*DebuggedSourceFile) Reset

func (m *DebuggedSourceFile) Reset()

func (*DebuggedSourceFile) Size

func (m *DebuggedSourceFile) Size() (n int)

func (*DebuggedSourceFile) String

func (m *DebuggedSourceFile) String() string

func (*DebuggedSourceFile) Unmarshal

func (m *DebuggedSourceFile) Unmarshal(dAtA []byte) error

func (*DebuggedSourceFile) XXX_DiscardUnknown

func (m *DebuggedSourceFile) XXX_DiscardUnknown()

func (*DebuggedSourceFile) XXX_Marshal

func (m *DebuggedSourceFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DebuggedSourceFile) XXX_Merge

func (m *DebuggedSourceFile) XXX_Merge(src proto.Message)

func (*DebuggedSourceFile) XXX_Size

func (m *DebuggedSourceFile) XXX_Size() int

func (*DebuggedSourceFile) XXX_Unmarshal

func (m *DebuggedSourceFile) XXX_Unmarshal(b []byte) error

type DebuggedSourceFiles

type DebuggedSourceFiles struct {
	// A collection of source code files.
	SourceFiles []*DebuggedSourceFile `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles,proto3" json:"source_files,omitempty"`
}

func (*DebuggedSourceFiles) Descriptor

func (*DebuggedSourceFiles) Descriptor() ([]byte, []int)

func (*DebuggedSourceFiles) GetSourceFiles

func (m *DebuggedSourceFiles) GetSourceFiles() []*DebuggedSourceFile

func (*DebuggedSourceFiles) Marshal

func (m *DebuggedSourceFiles) Marshal() (dAtA []byte, err error)

func (*DebuggedSourceFiles) MarshalTo

func (m *DebuggedSourceFiles) MarshalTo(dAtA []byte) (int, error)

func (*DebuggedSourceFiles) MarshalToSizedBuffer

func (m *DebuggedSourceFiles) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DebuggedSourceFiles) ProtoMessage

func (*DebuggedSourceFiles) ProtoMessage()

func (*DebuggedSourceFiles) Reset

func (m *DebuggedSourceFiles) Reset()

func (*DebuggedSourceFiles) Size

func (m *DebuggedSourceFiles) Size() (n int)

func (*DebuggedSourceFiles) String

func (m *DebuggedSourceFiles) String() string

func (*DebuggedSourceFiles) Unmarshal

func (m *DebuggedSourceFiles) Unmarshal(dAtA []byte) error

func (*DebuggedSourceFiles) XXX_DiscardUnknown

func (m *DebuggedSourceFiles) XXX_DiscardUnknown()

func (*DebuggedSourceFiles) XXX_Marshal

func (m *DebuggedSourceFiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DebuggedSourceFiles) XXX_Merge

func (m *DebuggedSourceFiles) XXX_Merge(src proto.Message)

func (*DebuggedSourceFiles) XXX_Size

func (m *DebuggedSourceFiles) XXX_Size() int

func (*DebuggedSourceFiles) XXX_Unmarshal

func (m *DebuggedSourceFiles) XXX_Unmarshal(b []byte) error

type DictValue

type DictValue struct {
	Fields map[string]*StructuredValue `` /* 153-byte string literal not displayed */
}

Represents a Python dict keyed by `str`. The comment on Unicode from Value.string_value applies analogously.

func (*DictValue) Descriptor

func (*DictValue) Descriptor() ([]byte, []int)

func (*DictValue) GetFields

func (m *DictValue) GetFields() map[string]*StructuredValue

func (*DictValue) Marshal

func (m *DictValue) Marshal() (dAtA []byte, err error)

func (*DictValue) MarshalTo

func (m *DictValue) MarshalTo(dAtA []byte) (int, error)

func (*DictValue) MarshalToSizedBuffer

func (m *DictValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DictValue) ProtoMessage

func (*DictValue) ProtoMessage()

func (*DictValue) Reset

func (m *DictValue) Reset()

func (*DictValue) Size

func (m *DictValue) Size() (n int)

func (*DictValue) String

func (m *DictValue) String() string

func (*DictValue) Unmarshal

func (m *DictValue) Unmarshal(dAtA []byte) error

func (*DictValue) XXX_DiscardUnknown

func (m *DictValue) XXX_DiscardUnknown()

func (*DictValue) XXX_Marshal

func (m *DictValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DictValue) XXX_Merge

func (m *DictValue) XXX_Merge(src proto.Message)

func (*DictValue) XXX_Size

func (m *DictValue) XXX_Size() int

func (*DictValue) XXX_Unmarshal

func (m *DictValue) XXX_Unmarshal(b []byte) error

type FunctionSpec

type FunctionSpec struct {
	// Full arg spec from inspect.getfullargspec().
	Fullargspec *StructuredValue `protobuf:"bytes,1,opt,name=fullargspec,proto3" json:"fullargspec,omitempty"`
	// Whether this represents a class method.
	IsMethod bool `protobuf:"varint,2,opt,name=is_method,json=isMethod,proto3" json:"is_method,omitempty"`
	// The input signature, if specified.
	InputSignature *StructuredValue `protobuf:"bytes,5,opt,name=input_signature,json=inputSignature,proto3" json:"input_signature,omitempty"`
}

Represents `FunctionSpec` used in `Function`. This represents a function that has been wrapped as a TensorFlow `Function`.

func (*FunctionSpec) Descriptor

func (*FunctionSpec) Descriptor() ([]byte, []int)

func (*FunctionSpec) GetFullargspec

func (m *FunctionSpec) GetFullargspec() *StructuredValue

func (*FunctionSpec) GetInputSignature

func (m *FunctionSpec) GetInputSignature() *StructuredValue

func (*FunctionSpec) GetIsMethod

func (m *FunctionSpec) GetIsMethod() bool

func (*FunctionSpec) Marshal

func (m *FunctionSpec) Marshal() (dAtA []byte, err error)

func (*FunctionSpec) MarshalTo

func (m *FunctionSpec) MarshalTo(dAtA []byte) (int, error)

func (*FunctionSpec) MarshalToSizedBuffer

func (m *FunctionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*FunctionSpec) ProtoMessage

func (*FunctionSpec) ProtoMessage()

func (*FunctionSpec) Reset

func (m *FunctionSpec) Reset()

func (*FunctionSpec) Size

func (m *FunctionSpec) Size() (n int)

func (*FunctionSpec) String

func (m *FunctionSpec) String() string

func (*FunctionSpec) Unmarshal

func (m *FunctionSpec) Unmarshal(dAtA []byte) error

func (*FunctionSpec) XXX_DiscardUnknown

func (m *FunctionSpec) XXX_DiscardUnknown()

func (*FunctionSpec) XXX_Marshal

func (m *FunctionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*FunctionSpec) XXX_Merge

func (m *FunctionSpec) XXX_Merge(src proto.Message)

func (*FunctionSpec) XXX_Size

func (m *FunctionSpec) XXX_Size() int

func (*FunctionSpec) XXX_Unmarshal

func (m *FunctionSpec) XXX_Unmarshal(b []byte) error

type GPUOptions

type GPUOptions struct {
	// Fraction of the available GPU memory to allocate for each process.
	// 1 means to allocate all of the GPU memory, 0.5 means the process
	// allocates up to ~50% of the available GPU memory.
	//
	// GPU memory is pre-allocated unless the allow_growth option is enabled.
	//
	// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
	// the amount of memory available on the GPU device by using host memory as a
	// swap space. Accessing memory not available on the device will be
	// significantly slower as that would require memory transfer between the host
	// and the device. Options to reduce the memory requirement should be
	// considered before enabling this option as this may come with a negative
	// performance impact. Oversubscription using the unified memory requires
	// Pascal class or newer GPUs and it is currently only supported on the Linux
	// operating system. See
	// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
	// for the detailed requirements.
	PerProcessGpuMemoryFraction float64 `` /* 150-byte string literal not displayed */
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	// "": The empty string (default) uses a system-chosen default
	//     which may change over time.
	//
	// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//        version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with gpu driver code.  If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `` /* 127-byte string literal not displayed */
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices.  For example, if TensorFlow
	// can see 8 GPU devices in the process, and one wanted to map
	// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
	// then one would specify this field as "5,3".  This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE:
	// 1. The GPU driver provides the process with the visible GPUs
	//    in an order which is not guaranteed to have any correlation to
	//    the *physical* GPU id in the machine.  This field is used for
	//    remapping "visible" to "virtual", which means this operates only
	//    after the process starts.  Users are required to use vendor
	//    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	//    physical to visible device mapping prior to invoking TensorFlow.
	// 2. In the code, the ids in this list are also called "platform GPU id"s,
	//    and the 'virtual' ids of GPU devices (i.e. the ids in the device
	//    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
	//    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
	//    for more information.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
	// In the event polling loop sleep this many microseconds between
	// PollEvents calls, when the queue is not empty.  If value is not
	// set or set to 0, gets set to a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 135-byte string literal not displayed */
	// This field is deprecated and ignored.
	PollingInactiveDelayMsecs int32 `` /* 141-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with Cuda
	// pinned memory. Normally, TensorFlow will infer which tensors should be
	// allocated as the pinned memory. But in case where the inference is
	// incomplete, this option can significantly speed up the cross-device memory
	// copy performance as long as it fits the memory.
	// Note that this option is not something that should be
	// enabled by default for unknown or very large models, since all Cuda pinned
	// memory is unpageable, having too much pinned memory might negatively impact
	// the overall host system performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
	// Everything inside experimental is subject to change and is not subject
	// to API stability guarantees in
	// https://www.tensorflow.org/guide/version_compat.
	Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"`
}

func (*GPUOptions) Descriptor

func (*GPUOptions) Descriptor() ([]byte, []int)

func (*GPUOptions) GetAllocatorType

func (m *GPUOptions) GetAllocatorType() string

func (*GPUOptions) GetAllowGrowth

func (m *GPUOptions) GetAllowGrowth() bool

func (*GPUOptions) GetDeferredDeletionBytes

func (m *GPUOptions) GetDeferredDeletionBytes() int64

func (*GPUOptions) GetExperimental

func (m *GPUOptions) GetExperimental() *GPUOptions_Experimental

func (*GPUOptions) GetForceGpuCompatible

func (m *GPUOptions) GetForceGpuCompatible() bool

func (*GPUOptions) GetPerProcessGpuMemoryFraction

func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64

func (*GPUOptions) GetPollingActiveDelayUsecs

func (m *GPUOptions) GetPollingActiveDelayUsecs() int32

func (*GPUOptions) GetPollingInactiveDelayMsecs

func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32

func (*GPUOptions) GetVisibleDeviceList

func (m *GPUOptions) GetVisibleDeviceList() string

func (*GPUOptions) Marshal

func (m *GPUOptions) Marshal() (dAtA []byte, err error)

func (*GPUOptions) MarshalTo

func (m *GPUOptions) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions) MarshalToSizedBuffer

func (m *GPUOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*GPUOptions) ProtoMessage

func (*GPUOptions) ProtoMessage()

func (*GPUOptions) Reset

func (m *GPUOptions) Reset()

func (*GPUOptions) Size

func (m *GPUOptions) Size() (n int)

func (*GPUOptions) String

func (m *GPUOptions) String() string

func (*GPUOptions) Unmarshal

func (m *GPUOptions) Unmarshal(dAtA []byte) error

func (*GPUOptions) XXX_DiscardUnknown

func (m *GPUOptions) XXX_DiscardUnknown()

func (*GPUOptions) XXX_Marshal

func (m *GPUOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GPUOptions) XXX_Merge

func (m *GPUOptions) XXX_Merge(src proto.Message)

func (*GPUOptions) XXX_Size

func (m *GPUOptions) XXX_Size() int

func (*GPUOptions) XXX_Unmarshal

func (m *GPUOptions) XXX_Unmarshal(b []byte) error

type GPUOptions_Experimental

type GPUOptions_Experimental struct {
	// The multi virtual device settings. If empty (not set), it will create
	// single virtual device on each visible GPU, according to the settings
	// in "visible_device_list" above. Otherwise, the number of elements in the
	// list must be the same as the number of visible GPUs (after
	// "visible_device_list" filtering if it is set), and the string represented
	// device names (e.g. /device:GPU:<id>) will refer to the virtual
	// devices and have the <id> field assigned sequentially starting from 0,
	// according to the order they appear in this list and the "memory_limit"
	// list inside each element. For example,
	//   visible_device_list = "1,0"
	//   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
	//   virtual_devices {}
	// will create three virtual devices as:
	//   /device:GPU:0 -> visible GPU 1 with 1GB memory
	//   /device:GPU:1 -> visible GPU 1 with 2GB memory
	//   /device:GPU:2 -> visible GPU 0 with all available memory
	//
	// NOTE:
	// 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
	//    at the same time.
	// 2. Currently this setting is per-process, not per-session. Using
	//    different settings in different sessions within same process will
	//    result in undefined behavior.
	VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices,proto3" json:"virtual_devices,omitempty"`
	// If true, uses CUDA unified memory for memory allocations. If
	// per_process_gpu_memory_fraction option is greater than 1.0, then unified
	// memory is used regardless of the value for this field. See comments for
	// per_process_gpu_memory_fraction field for more details and requirements
	// of the unified memory. This option is useful to oversubscribe memory if
	// multiple processes are sharing a single GPU while individually using less
	// than 1.0 per process memory fraction.
	UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"`
	// If > 1, the number of device-to-device copy streams to create
	// for each GPUDevice.  Default value is 0, which is automatically
	// converted to 1.
	NumDevToDevCopyStreams int32 `` /* 136-byte string literal not displayed */
	// If non-empty, defines a good GPU ring order on a single worker based on
	// device interconnect.  This assumes that all workers have the same GPU
	// topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
	// This ring order is used by the RingReducer implementation of
	// CollectiveReduce, and serves as an override to automatic ring order
	// generation in OrderTaskDeviceMap() during CollectiveParam resolution.
	CollectiveRingOrder string `protobuf:"bytes,4,opt,name=collective_ring_order,json=collectiveRingOrder,proto3" json:"collective_ring_order,omitempty"`
	// If true then extra work is done by GPUDevice and GPUBFCAllocator to
	// keep track of when GPU memory is freed and when kernels actually
	// complete so that we can know when a nominally free memory chunk
	// is really not subject to pending use.
	TimestampedAllocator bool `protobuf:"varint,5,opt,name=timestamped_allocator,json=timestampedAllocator,proto3" json:"timestamped_allocator,omitempty"`
	// Parameters for GPUKernelTracker.  By default no kernel tracking is done.
	// Note that timestamped_allocator is only effective if some tracking is
	// specified.
	//
	// If kernel_tracker_max_interval = n > 0, then a tracking event
	// is inserted after every n kernels without an event.
	KernelTrackerMaxInterval int32 `` /* 138-byte string literal not displayed */
	// If kernel_tracker_max_bytes = n > 0, then a tracking event is
	// inserted after every series of kernels allocating a sum of
	// memory >= n.  If one kernel allocates b * n bytes, then one
	// event will be inserted after it, but it will count as b against
	// the pending limit.
	KernelTrackerMaxBytes int32 `` /* 129-byte string literal not displayed */
	// If kernel_tracker_max_pending > 0 then no more than this many
	// tracking events can be outstanding at a time.  An attempt to
	// launch an additional kernel will stall until an event
	// completes.
	KernelTrackerMaxPending int32 `` /* 135-byte string literal not displayed */
}

func (*GPUOptions_Experimental) Descriptor

func (*GPUOptions_Experimental) Descriptor() ([]byte, []int)

func (*GPUOptions_Experimental) GetCollectiveRingOrder

func (m *GPUOptions_Experimental) GetCollectiveRingOrder() string

func (*GPUOptions_Experimental) GetKernelTrackerMaxBytes

func (m *GPUOptions_Experimental) GetKernelTrackerMaxBytes() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxInterval

func (m *GPUOptions_Experimental) GetKernelTrackerMaxInterval() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxPending

func (m *GPUOptions_Experimental) GetKernelTrackerMaxPending() int32

func (*GPUOptions_Experimental) GetNumDevToDevCopyStreams

func (m *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32

func (*GPUOptions_Experimental) GetTimestampedAllocator

func (m *GPUOptions_Experimental) GetTimestampedAllocator() bool

func (*GPUOptions_Experimental) GetUseUnifiedMemory

func (m *GPUOptions_Experimental) GetUseUnifiedMemory() bool

func (*GPUOptions_Experimental) GetVirtualDevices

func (*GPUOptions_Experimental) Marshal

func (m *GPUOptions_Experimental) Marshal() (dAtA []byte, err error)

func (*GPUOptions_Experimental) MarshalTo

func (m *GPUOptions_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions_Experimental) MarshalToSizedBuffer

func (m *GPUOptions_Experimental) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*GPUOptions_Experimental) ProtoMessage

func (*GPUOptions_Experimental) ProtoMessage()

func (*GPUOptions_Experimental) Reset

func (m *GPUOptions_Experimental) Reset()

func (*GPUOptions_Experimental) Size

func (m *GPUOptions_Experimental) Size() (n int)

func (*GPUOptions_Experimental) String

func (m *GPUOptions_Experimental) String() string

func (*GPUOptions_Experimental) Unmarshal

func (m *GPUOptions_Experimental) Unmarshal(dAtA []byte) error

func (*GPUOptions_Experimental) XXX_DiscardUnknown

func (m *GPUOptions_Experimental) XXX_DiscardUnknown()

func (*GPUOptions_Experimental) XXX_Marshal

func (m *GPUOptions_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GPUOptions_Experimental) XXX_Merge

func (m *GPUOptions_Experimental) XXX_Merge(src proto.Message)

func (*GPUOptions_Experimental) XXX_Size

func (m *GPUOptions_Experimental) XXX_Size() int

func (*GPUOptions_Experimental) XXX_Unmarshal

func (m *GPUOptions_Experimental) XXX_Unmarshal(b []byte) error

type GPUOptions_Experimental_VirtualDevices

type GPUOptions_Experimental_VirtualDevices struct {
	// Per "virtual" device memory limit, in MB. The number of elements in
	// the list is the number of virtual devices to create on the
	// corresponding visible GPU (see "virtual_devices" below).
	// If empty, it will create single virtual device taking all available
	// memory from the device.
	//
	// For the concept of "visible" and "virtual" GPU, see the comments for
	// "visible_device_list" above for more information.
	MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"`
}

Configuration for breaking down a visible GPU into multiple "virtual" devices.

func (*GPUOptions_Experimental_VirtualDevices) Descriptor

func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int)

func (*GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb

func (m *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32

func (*GPUOptions_Experimental_VirtualDevices) Marshal

func (m *GPUOptions_Experimental_VirtualDevices) Marshal() (dAtA []byte, err error)

func (*GPUOptions_Experimental_VirtualDevices) MarshalTo

func (m *GPUOptions_Experimental_VirtualDevices) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions_Experimental_VirtualDevices) MarshalToSizedBuffer

func (m *GPUOptions_Experimental_VirtualDevices) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage

func (*GPUOptions_Experimental_VirtualDevices) Reset

func (*GPUOptions_Experimental_VirtualDevices) Size

func (*GPUOptions_Experimental_VirtualDevices) String

func (*GPUOptions_Experimental_VirtualDevices) Unmarshal

func (m *GPUOptions_Experimental_VirtualDevices) Unmarshal(dAtA []byte) error

func (*GPUOptions_Experimental_VirtualDevices) XXX_DiscardUnknown

func (m *GPUOptions_Experimental_VirtualDevices) XXX_DiscardUnknown()

func (*GPUOptions_Experimental_VirtualDevices) XXX_Marshal

func (m *GPUOptions_Experimental_VirtualDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GPUOptions_Experimental_VirtualDevices) XXX_Merge

func (*GPUOptions_Experimental_VirtualDevices) XXX_Size

func (*GPUOptions_Experimental_VirtualDevices) XXX_Unmarshal

func (m *GPUOptions_Experimental_VirtualDevices) XXX_Unmarshal(b []byte) error

type GraphOptions

type GraphOptions struct {
	// If true, use control flow to schedule the activation of Recv nodes.
	// (Currently ignored.)
	EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"`
	// Options controlling how graph is optimized.
	OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions,proto3" json:"optimizer_options,omitempty"`
	// The number of steps to run before returning a cost model detailing
	// the memory usage and performance of each node of the graph. 0 means
	// no cost model.
	BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"`
	// The number of steps to skip before collecting statistics for the
	// cost model.
	BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"`
	// Annotate each Node with Op output shape data, to the extent it can
	// be statically inferred.
	InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"`
	// Only place the subgraphs that are run, rather than the entire graph.
	//
	// This is useful for interactive graph building, where one might
	// produce graphs that cannot be placed during the debugging
	// process.  In particular, it allows the client to continue work in
	// a session after adding a node to a graph whose placement
	// constraints are unsatisfiable.
	PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"`
	// If true, transfer float values between processes as bfloat16.
	EnableBfloat16Sendrecv bool `` /* 130-byte string literal not displayed */
	// If > 0, record a timeline every this many steps.
	// EXPERIMENTAL: This currently has no effect in MasterSession.
	TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"`
	// Options that control the type and amount of graph rewriting.
	// Not currently configurable via the public Python API (i.e. there is no API
	// stability guarantee if you import RewriterConfig explicitly).
	RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions,proto3" json:"rewrite_options,omitempty"`
}

func (*GraphOptions) Descriptor

func (*GraphOptions) Descriptor() ([]byte, []int)

func (*GraphOptions) GetBuildCostModel

func (m *GraphOptions) GetBuildCostModel() int64

func (*GraphOptions) GetBuildCostModelAfter

func (m *GraphOptions) GetBuildCostModelAfter() int64

func (*GraphOptions) GetEnableBfloat16Sendrecv

func (m *GraphOptions) GetEnableBfloat16Sendrecv() bool

func (*GraphOptions) GetEnableRecvScheduling

func (m *GraphOptions) GetEnableRecvScheduling() bool

func (*GraphOptions) GetInferShapes

func (m *GraphOptions) GetInferShapes() bool

func (*GraphOptions) GetOptimizerOptions

func (m *GraphOptions) GetOptimizerOptions() *OptimizerOptions

func (*GraphOptions) GetPlacePrunedGraph

func (m *GraphOptions) GetPlacePrunedGraph() bool

func (*GraphOptions) GetRewriteOptions

func (m *GraphOptions) GetRewriteOptions() *RewriterConfig

func (*GraphOptions) GetTimelineStep

func (m *GraphOptions) GetTimelineStep() int32

func (*GraphOptions) Marshal

func (m *GraphOptions) Marshal() (dAtA []byte, err error)

func (*GraphOptions) MarshalTo

func (m *GraphOptions) MarshalTo(dAtA []byte) (int, error)

func (*GraphOptions) MarshalToSizedBuffer

func (m *GraphOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*GraphOptions) ProtoMessage

func (*GraphOptions) ProtoMessage()

func (*GraphOptions) Reset

func (m *GraphOptions) Reset()

func (*GraphOptions) Size

func (m *GraphOptions) Size() (n int)

func (*GraphOptions) String

func (m *GraphOptions) String() string

func (*GraphOptions) Unmarshal

func (m *GraphOptions) Unmarshal(dAtA []byte) error

func (*GraphOptions) XXX_DiscardUnknown

func (m *GraphOptions) XXX_DiscardUnknown()

func (*GraphOptions) XXX_Marshal

func (m *GraphOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GraphOptions) XXX_Merge

func (m *GraphOptions) XXX_Merge(src proto.Message)

func (*GraphOptions) XXX_Size

func (m *GraphOptions) XXX_Size() int

func (*GraphOptions) XXX_Unmarshal

func (m *GraphOptions) XXX_Unmarshal(b []byte) error

type JobDef

type JobDef struct {
	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Mapping from task ID to "hostname:port" string.
	//
	// If the `name` field contains "worker", and the `tasks` map contains a
	// mapping from 7 to "example.org:2222", then the device prefix
	// "/job:worker/task:7" will be assigned to "example.org:2222".
	Tasks map[int32]string `` /* 152-byte string literal not displayed */
}

Defines a single job in a TensorFlow cluster.

func (*JobDef) Descriptor

func (*JobDef) Descriptor() ([]byte, []int)

func (*JobDef) GetName

func (m *JobDef) GetName() string

func (*JobDef) GetTasks

func (m *JobDef) GetTasks() map[int32]string

func (*JobDef) Marshal

func (m *JobDef) Marshal() (dAtA []byte, err error)

func (*JobDef) MarshalTo

func (m *JobDef) MarshalTo(dAtA []byte) (int, error)

func (*JobDef) MarshalToSizedBuffer

func (m *JobDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*JobDef) ProtoMessage

func (*JobDef) ProtoMessage()

func (*JobDef) Reset

func (m *JobDef) Reset()

func (*JobDef) Size

func (m *JobDef) Size() (n int)

func (*JobDef) String

func (m *JobDef) String() string

func (*JobDef) Unmarshal

func (m *JobDef) Unmarshal(dAtA []byte) error

func (*JobDef) XXX_DiscardUnknown

func (m *JobDef) XXX_DiscardUnknown()

func (*JobDef) XXX_Marshal

func (m *JobDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobDef) XXX_Merge

func (m *JobDef) XXX_Merge(src proto.Message)

func (*JobDef) XXX_Size

func (m *JobDef) XXX_Size() int

func (*JobDef) XXX_Unmarshal

func (m *JobDef) XXX_Unmarshal(b []byte) error

type ListValue

type ListValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

Represents a Python list.

func (*ListValue) Descriptor

func (*ListValue) Descriptor() ([]byte, []int)

func (*ListValue) GetValues

func (m *ListValue) GetValues() []*StructuredValue

func (*ListValue) Marshal

func (m *ListValue) Marshal() (dAtA []byte, err error)

func (*ListValue) MarshalTo

func (m *ListValue) MarshalTo(dAtA []byte) (int, error)

func (*ListValue) MarshalToSizedBuffer

func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ListValue) ProtoMessage

func (*ListValue) ProtoMessage()

func (*ListValue) Reset

func (m *ListValue) Reset()

func (*ListValue) Size

func (m *ListValue) Size() (n int)

func (*ListValue) String

func (m *ListValue) String() string

func (*ListValue) Unmarshal

func (m *ListValue) Unmarshal(dAtA []byte) error

func (*ListValue) XXX_DiscardUnknown

func (m *ListValue) XXX_DiscardUnknown()

func (*ListValue) XXX_Marshal

func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListValue) XXX_Merge

func (m *ListValue) XXX_Merge(src proto.Message)

func (*ListValue) XXX_Size

func (m *ListValue) XXX_Size() int

func (*ListValue) XXX_Unmarshal

func (m *ListValue) XXX_Unmarshal(b []byte) error

type MetaGraphDef

type MetaGraphDef struct {
	MetaInfoDef *MetaGraphDef_MetaInfoDef `protobuf:"bytes,1,opt,name=meta_info_def,json=metaInfoDef,proto3" json:"meta_info_def,omitempty"`
	// GraphDef.
	GraphDef *framework.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"`
	// SaverDef.
	SaverDef *SaverDef `protobuf:"bytes,3,opt,name=saver_def,json=saverDef,proto3" json:"saver_def,omitempty"`
	// collection_def: Map from collection name to collections.
	// See CollectionDef section for details.
	CollectionDef map[string]*CollectionDef `` /* 188-byte string literal not displayed */
	// signature_def: Map from user supplied key for a signature to a single
	// SignatureDef.
	SignatureDef map[string]*SignatureDef `` /* 185-byte string literal not displayed */
	// Asset file def to be used with the defined graph.
	AssetFileDef []*AssetFileDef `protobuf:"bytes,6,rep,name=asset_file_def,json=assetFileDef,proto3" json:"asset_file_def,omitempty"`
	// Extra information about the structure of functions and stateful objects.
	ObjectGraphDef *SavedObjectGraph `protobuf:"bytes,7,opt,name=object_graph_def,json=objectGraphDef,proto3" json:"object_graph_def,omitempty"`
}

NOTE: This protocol buffer is evolving, and will go through revisions in the coming months.

Protocol buffer containing the following which are necessary to restart training or run inference. It can be used to serialize/de-serialize memory objects necessary for running computation in a graph when crossing the process boundary. It can be used for long term storage of graphs, cross-language execution of graphs, etc.

MetaInfoDef
GraphDef
SaverDef
CollectionDef
TensorInfo
SignatureDef

func (*MetaGraphDef) Descriptor

func (*MetaGraphDef) Descriptor() ([]byte, []int)

func (*MetaGraphDef) GetAssetFileDef

func (m *MetaGraphDef) GetAssetFileDef() []*AssetFileDef

func (*MetaGraphDef) GetCollectionDef

func (m *MetaGraphDef) GetCollectionDef() map[string]*CollectionDef

func (*MetaGraphDef) GetGraphDef

func (m *MetaGraphDef) GetGraphDef() *framework.GraphDef

func (*MetaGraphDef) GetMetaInfoDef

func (m *MetaGraphDef) GetMetaInfoDef() *MetaGraphDef_MetaInfoDef

func (*MetaGraphDef) GetObjectGraphDef

func (m *MetaGraphDef) GetObjectGraphDef() *SavedObjectGraph

func (*MetaGraphDef) GetSaverDef

func (m *MetaGraphDef) GetSaverDef() *SaverDef

func (*MetaGraphDef) GetSignatureDef

func (m *MetaGraphDef) GetSignatureDef() map[string]*SignatureDef

func (*MetaGraphDef) Marshal

func (m *MetaGraphDef) Marshal() (dAtA []byte, err error)

func (*MetaGraphDef) MarshalTo

func (m *MetaGraphDef) MarshalTo(dAtA []byte) (int, error)

func (*MetaGraphDef) MarshalToSizedBuffer

func (m *MetaGraphDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*MetaGraphDef) ProtoMessage

func (*MetaGraphDef) ProtoMessage()

func (*MetaGraphDef) Reset

func (m *MetaGraphDef) Reset()

func (*MetaGraphDef) Size

func (m *MetaGraphDef) Size() (n int)

func (*MetaGraphDef) String

func (m *MetaGraphDef) String() string

func (*MetaGraphDef) Unmarshal

func (m *MetaGraphDef) Unmarshal(dAtA []byte) error

func (*MetaGraphDef) XXX_DiscardUnknown

func (m *MetaGraphDef) XXX_DiscardUnknown()

func (*MetaGraphDef) XXX_Marshal

func (m *MetaGraphDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetaGraphDef) XXX_Merge

func (m *MetaGraphDef) XXX_Merge(src proto.Message)

func (*MetaGraphDef) XXX_Size

func (m *MetaGraphDef) XXX_Size() int

func (*MetaGraphDef) XXX_Unmarshal

func (m *MetaGraphDef) XXX_Unmarshal(b []byte) error

type MetaGraphDef_MetaInfoDef

type MetaGraphDef_MetaInfoDef struct {
	// User specified Version string. Can be the name of the model and revision,
	// steps this model has been trained to, etc.
	MetaGraphVersion string `protobuf:"bytes,1,opt,name=meta_graph_version,json=metaGraphVersion,proto3" json:"meta_graph_version,omitempty"`
	// A copy of the OpDefs used by the producer of this graph_def.
	// Descriptions and Ops not used in graph_def are stripped out.
	StrippedOpList *framework.OpList `protobuf:"bytes,2,opt,name=stripped_op_list,json=strippedOpList,proto3" json:"stripped_op_list,omitempty"`
	// A serialized protobuf. Can be the time this meta graph is created, or
	// modified, or name of the model.
	AnyInfo *types.Any `protobuf:"bytes,3,opt,name=any_info,json=anyInfo,proto3" json:"any_info,omitempty"`
	// User supplied tag(s) on the meta_graph and included graph_def.
	//
	// MetaGraphDefs should be tagged with their capabilities or use-cases.
	// Examples: "train", "serve", "gpu", "tpu", etc.
	// These tags enable loaders to access the MetaGraph(s) appropriate for a
	// specific use-case or runtime environment.
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The __version__ string of the tensorflow build used to write this graph.
	// This will be populated by the framework, which will overwrite any user
	// supplied value.
	TensorflowVersion string `protobuf:"bytes,5,opt,name=tensorflow_version,json=tensorflowVersion,proto3" json:"tensorflow_version,omitempty"`
	// The __git_version__ string of the tensorflow build used to write this
	// graph. This will be populated by the framework, which will overwrite any
	// user supplied value.
	TensorflowGitVersion string `protobuf:"bytes,6,opt,name=tensorflow_git_version,json=tensorflowGitVersion,proto3" json:"tensorflow_git_version,omitempty"`
	// A flag to denote whether default-valued attrs have been stripped from
	// the nodes in this graph_def.
	StrippedDefaultAttrs bool `protobuf:"varint,7,opt,name=stripped_default_attrs,json=strippedDefaultAttrs,proto3" json:"stripped_default_attrs,omitempty"`
	// FunctionDef name to aliases mapping.
	FunctionAliases map[string]string `` /* 194-byte string literal not displayed */
}

Meta information regarding the graph to be exported. To be used by users of this protocol buffer to encode information regarding their meta graph.

func (*MetaGraphDef_MetaInfoDef) Descriptor

func (*MetaGraphDef_MetaInfoDef) Descriptor() ([]byte, []int)

func (*MetaGraphDef_MetaInfoDef) GetAnyInfo

func (m *MetaGraphDef_MetaInfoDef) GetAnyInfo() *types.Any

func (*MetaGraphDef_MetaInfoDef) GetFunctionAliases

func (m *MetaGraphDef_MetaInfoDef) GetFunctionAliases() map[string]string

func (*MetaGraphDef_MetaInfoDef) GetMetaGraphVersion

func (m *MetaGraphDef_MetaInfoDef) GetMetaGraphVersion() string

func (*MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs

func (m *MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs() bool

func (*MetaGraphDef_MetaInfoDef) GetStrippedOpList

func (m *MetaGraphDef_MetaInfoDef) GetStrippedOpList() *framework.OpList

func (*MetaGraphDef_MetaInfoDef) GetTags

func (m *MetaGraphDef_MetaInfoDef) GetTags() []string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion

func (m *MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion() string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowVersion

func (m *MetaGraphDef_MetaInfoDef) GetTensorflowVersion() string

func (*MetaGraphDef_MetaInfoDef) Marshal

func (m *MetaGraphDef_MetaInfoDef) Marshal() (dAtA []byte, err error)

func (*MetaGraphDef_MetaInfoDef) MarshalTo

func (m *MetaGraphDef_MetaInfoDef) MarshalTo(dAtA []byte) (int, error)

func (*MetaGraphDef_MetaInfoDef) MarshalToSizedBuffer

func (m *MetaGraphDef_MetaInfoDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*MetaGraphDef_MetaInfoDef) ProtoMessage

func (*MetaGraphDef_MetaInfoDef) ProtoMessage()

func (*MetaGraphDef_MetaInfoDef) Reset

func (m *MetaGraphDef_MetaInfoDef) Reset()

func (*MetaGraphDef_MetaInfoDef) Size

func (m *MetaGraphDef_MetaInfoDef) Size() (n int)

func (*MetaGraphDef_MetaInfoDef) String

func (m *MetaGraphDef_MetaInfoDef) String() string

func (*MetaGraphDef_MetaInfoDef) Unmarshal

func (m *MetaGraphDef_MetaInfoDef) Unmarshal(dAtA []byte) error

func (*MetaGraphDef_MetaInfoDef) XXX_DiscardUnknown

func (m *MetaGraphDef_MetaInfoDef) XXX_DiscardUnknown()

func (*MetaGraphDef_MetaInfoDef) XXX_Marshal

func (m *MetaGraphDef_MetaInfoDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetaGraphDef_MetaInfoDef) XXX_Merge

func (m *MetaGraphDef_MetaInfoDef) XXX_Merge(src proto.Message)

func (*MetaGraphDef_MetaInfoDef) XXX_Size

func (m *MetaGraphDef_MetaInfoDef) XXX_Size() int

func (*MetaGraphDef_MetaInfoDef) XXX_Unmarshal

func (m *MetaGraphDef_MetaInfoDef) XXX_Unmarshal(b []byte) error

type NamedTensorProto

type NamedTensorProto struct {
	// Name of the tensor.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The client can populate a TensorProto using a tensorflow::Tensor, or
	// directly using the protobuf field accessors.
	//
	// The client specifies whether the returned tensor values should be
	// filled tensor fields (float_val, int_val, etc.) or encoded in a
	// compact form in tensor.tensor_content.
	Tensor *framework.TensorProto `protobuf:"bytes,2,opt,name=tensor,proto3" json:"tensor,omitempty"`
}

A pair of tensor name and tensor values.

func (*NamedTensorProto) Descriptor

func (*NamedTensorProto) Descriptor() ([]byte, []int)

func (*NamedTensorProto) GetName

func (m *NamedTensorProto) GetName() string

func (*NamedTensorProto) GetTensor

func (m *NamedTensorProto) GetTensor() *framework.TensorProto

func (*NamedTensorProto) Marshal

func (m *NamedTensorProto) Marshal() (dAtA []byte, err error)

func (*NamedTensorProto) MarshalTo

func (m *NamedTensorProto) MarshalTo(dAtA []byte) (int, error)

func (*NamedTensorProto) MarshalToSizedBuffer

func (m *NamedTensorProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NamedTensorProto) ProtoMessage

func (*NamedTensorProto) ProtoMessage()

func (*NamedTensorProto) Reset

func (m *NamedTensorProto) Reset()

func (*NamedTensorProto) Size

func (m *NamedTensorProto) Size() (n int)

func (*NamedTensorProto) String

func (m *NamedTensorProto) String() string

func (*NamedTensorProto) Unmarshal

func (m *NamedTensorProto) Unmarshal(dAtA []byte) error

func (*NamedTensorProto) XXX_DiscardUnknown

func (m *NamedTensorProto) XXX_DiscardUnknown()

func (*NamedTensorProto) XXX_Marshal

func (m *NamedTensorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NamedTensorProto) XXX_Merge

func (m *NamedTensorProto) XXX_Merge(src proto.Message)

func (*NamedTensorProto) XXX_Size

func (m *NamedTensorProto) XXX_Size() int

func (*NamedTensorProto) XXX_Unmarshal

func (m *NamedTensorProto) XXX_Unmarshal(b []byte) error

type NamedTupleValue

type NamedTupleValue struct {
	Name   string       `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Values []*PairValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
}

Represents Python's namedtuple.

func (*NamedTupleValue) Descriptor

func (*NamedTupleValue) Descriptor() ([]byte, []int)

func (*NamedTupleValue) GetName

func (m *NamedTupleValue) GetName() string

func (*NamedTupleValue) GetValues

func (m *NamedTupleValue) GetValues() []*PairValue

func (*NamedTupleValue) Marshal

func (m *NamedTupleValue) Marshal() (dAtA []byte, err error)

func (*NamedTupleValue) MarshalTo

func (m *NamedTupleValue) MarshalTo(dAtA []byte) (int, error)

func (*NamedTupleValue) MarshalToSizedBuffer

func (m *NamedTupleValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NamedTupleValue) ProtoMessage

func (*NamedTupleValue) ProtoMessage()

func (*NamedTupleValue) Reset

func (m *NamedTupleValue) Reset()

func (*NamedTupleValue) Size

func (m *NamedTupleValue) Size() (n int)

func (*NamedTupleValue) String

func (m *NamedTupleValue) String() string

func (*NamedTupleValue) Unmarshal

func (m *NamedTupleValue) Unmarshal(dAtA []byte) error

func (*NamedTupleValue) XXX_DiscardUnknown

func (m *NamedTupleValue) XXX_DiscardUnknown()

func (*NamedTupleValue) XXX_Marshal

func (m *NamedTupleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NamedTupleValue) XXX_Merge

func (m *NamedTupleValue) XXX_Merge(src proto.Message)

func (*NamedTupleValue) XXX_Size

func (m *NamedTupleValue) XXX_Size() int

func (*NamedTupleValue) XXX_Unmarshal

func (m *NamedTupleValue) XXX_Unmarshal(b []byte) error

type NoneValue

type NoneValue struct {
}

Represents None.

func (*NoneValue) Descriptor

func (*NoneValue) Descriptor() ([]byte, []int)

func (*NoneValue) Marshal

func (m *NoneValue) Marshal() (dAtA []byte, err error)

func (*NoneValue) MarshalTo

func (m *NoneValue) MarshalTo(dAtA []byte) (int, error)

func (*NoneValue) MarshalToSizedBuffer

func (m *NoneValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NoneValue) ProtoMessage

func (*NoneValue) ProtoMessage()

func (*NoneValue) Reset

func (m *NoneValue) Reset()

func (*NoneValue) Size

func (m *NoneValue) Size() (n int)

func (*NoneValue) String

func (m *NoneValue) String() string

func (*NoneValue) Unmarshal

func (m *NoneValue) Unmarshal(dAtA []byte) error

func (*NoneValue) XXX_DiscardUnknown

func (m *NoneValue) XXX_DiscardUnknown()

func (*NoneValue) XXX_Marshal

func (m *NoneValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NoneValue) XXX_Merge

func (m *NoneValue) XXX_Merge(src proto.Message)

func (*NoneValue) XXX_Size

func (m *NoneValue) XXX_Size() int

func (*NoneValue) XXX_Unmarshal

func (m *NoneValue) XXX_Unmarshal(b []byte) error

type OptimizerOptions

type OptimizerOptions struct {
	// If true, optimize the graph using common subexpression elimination.
	DoCommonSubexpressionElimination bool `` /* 162-byte string literal not displayed */
	// If true, perform constant folding optimization on the graph.
	DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"`
	// Constant folding optimization replaces tensors whose values can be
	// predetermined, with constant nodes. To avoid inserting too large constants,
	// the size of each constant created can be limited. If this value is zero, a
	// default limit of 10 MiB will be applied. If constant folding optimization
	// is disabled, this value is ignored.
	MaxFoldedConstantInBytes int64 `` /* 140-byte string literal not displayed */
	// If true, perform function inlining on the graph.
	DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"`
	// Overall optimization level. The actual optimizations applied will be the
	// logical OR of the flags that this level implies and any flags already set.
	OptLevel       OptimizerOptions_Level          `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
	GlobalJitLevel OptimizerOptions_GlobalJitLevel `` /* 154-byte string literal not displayed */
}

Options passed to the graph optimizer

func (*OptimizerOptions) Descriptor

func (*OptimizerOptions) Descriptor() ([]byte, []int)

func (*OptimizerOptions) GetDoCommonSubexpressionElimination

func (m *OptimizerOptions) GetDoCommonSubexpressionElimination() bool

func (*OptimizerOptions) GetDoConstantFolding

func (m *OptimizerOptions) GetDoConstantFolding() bool

func (*OptimizerOptions) GetDoFunctionInlining

func (m *OptimizerOptions) GetDoFunctionInlining() bool

func (*OptimizerOptions) GetGlobalJitLevel

func (m *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel

func (*OptimizerOptions) GetMaxFoldedConstantInBytes

func (m *OptimizerOptions) GetMaxFoldedConstantInBytes() int64

func (*OptimizerOptions) GetOptLevel

func (m *OptimizerOptions) GetOptLevel() OptimizerOptions_Level

func (*OptimizerOptions) Marshal

func (m *OptimizerOptions) Marshal() (dAtA []byte, err error)

func (*OptimizerOptions) MarshalTo

func (m *OptimizerOptions) MarshalTo(dAtA []byte) (int, error)

func (*OptimizerOptions) MarshalToSizedBuffer

func (m *OptimizerOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OptimizerOptions) ProtoMessage

func (*OptimizerOptions) ProtoMessage()

func (*OptimizerOptions) Reset

func (m *OptimizerOptions) Reset()

func (*OptimizerOptions) Size

func (m *OptimizerOptions) Size() (n int)

func (*OptimizerOptions) String

func (m *OptimizerOptions) String() string

func (*OptimizerOptions) Unmarshal

func (m *OptimizerOptions) Unmarshal(dAtA []byte) error

func (*OptimizerOptions) XXX_DiscardUnknown

func (m *OptimizerOptions) XXX_DiscardUnknown()

func (*OptimizerOptions) XXX_Marshal

func (m *OptimizerOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OptimizerOptions) XXX_Merge

func (m *OptimizerOptions) XXX_Merge(src proto.Message)

func (*OptimizerOptions) XXX_Size

func (m *OptimizerOptions) XXX_Size() int

func (*OptimizerOptions) XXX_Unmarshal

func (m *OptimizerOptions) XXX_Unmarshal(b []byte) error

type OptimizerOptions_GlobalJitLevel

type OptimizerOptions_GlobalJitLevel int32

Control the use of the compiler/jit. Experimental.

const (
	OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0
	OptimizerOptions_OFF     OptimizerOptions_GlobalJitLevel = -1
	// The following settings turn on compilation, with higher values being
	// more aggressive.  Higher values may reduce opportunities for parallelism
	// and may use more memory.  (At present, there is no distinction, but this
	// is expected to change.)
	OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
	OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_GlobalJitLevel) String

func (x OptimizerOptions_GlobalJitLevel) String() string

type OptimizerOptions_Level

type OptimizerOptions_Level int32

Optimization level

const (
	// L1 is the default level.
	// Optimization performed at L1 :
	// 1. Common subexpression elimination
	// 2. Constant folding
	OptimizerOptions_L1 OptimizerOptions_Level = 0
	// No optimizations
	OptimizerOptions_L0 OptimizerOptions_Level = -1
)

func (OptimizerOptions_Level) EnumDescriptor

func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_Level) String

func (x OptimizerOptions_Level) String() string

type PairValue

type PairValue struct {
	Key   string           `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value *StructuredValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

Represents a (key, value) pair.

func (*PairValue) Descriptor

func (*PairValue) Descriptor() ([]byte, []int)

func (*PairValue) GetKey

func (m *PairValue) GetKey() string

func (*PairValue) GetValue

func (m *PairValue) GetValue() *StructuredValue

func (*PairValue) Marshal

func (m *PairValue) Marshal() (dAtA []byte, err error)

func (*PairValue) MarshalTo

func (m *PairValue) MarshalTo(dAtA []byte) (int, error)

func (*PairValue) MarshalToSizedBuffer

func (m *PairValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*PairValue) ProtoMessage

func (*PairValue) ProtoMessage()

func (*PairValue) Reset

func (m *PairValue) Reset()

func (*PairValue) Size

func (m *PairValue) Size() (n int)

func (*PairValue) String

func (m *PairValue) String() string

func (*PairValue) Unmarshal

func (m *PairValue) Unmarshal(dAtA []byte) error

func (*PairValue) XXX_DiscardUnknown

func (m *PairValue) XXX_DiscardUnknown()

func (*PairValue) XXX_Marshal

func (m *PairValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PairValue) XXX_Merge

func (m *PairValue) XXX_Merge(src proto.Message)

func (*PairValue) XXX_Size

func (m *PairValue) XXX_Size() int

func (*PairValue) XXX_Unmarshal

func (m *PairValue) XXX_Unmarshal(b []byte) error

type RPCOptions

type RPCOptions struct {
	// If true, always use RPC to contact the session target.
	//
	// If false (the default option), TensorFlow may use an optimized
	// transport for client-master communication that avoids the RPC
	// stack. This option is primarily used for testing the RPC stack.
	UseRpcForInprocessMaster bool `` /* 140-byte string literal not displayed */
	// The compression algorithm to be used. One of "deflate", "gzip".
	CompressionAlgorithm string `protobuf:"bytes,2,opt,name=compression_algorithm,json=compressionAlgorithm,proto3" json:"compression_algorithm,omitempty"`
	// If compression_algorithm is set, the compression level to be used.
	// From 0 (no compression), up to 3.
	CompressionLevel int32 `protobuf:"varint,3,opt,name=compression_level,json=compressionLevel,proto3" json:"compression_level,omitempty"`
	// Setting cache_rpc_response to true will enable sender side caching of
	// response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
	// requests. This is only necessary when the network fabric is experiencing a
	// significant error rate.  Without it we'll fail a step on a network error,
	// while with it we'll be able to complete long steps (like complex
	// initializations) in the face of some network errors during RecvTensor.
	CacheRpcResponse bool `protobuf:"varint,4,opt,name=cache_rpc_response,json=cacheRpcResponse,proto3" json:"cache_rpc_response,omitempty"`
	// Disables TCP connection sharing when opening a new RPC channel.
	DisableSessionConnectionSharing bool `` /* 159-byte string literal not displayed */
}

func (*RPCOptions) Descriptor

func (*RPCOptions) Descriptor() ([]byte, []int)

func (*RPCOptions) GetCacheRpcResponse

func (m *RPCOptions) GetCacheRpcResponse() bool

func (*RPCOptions) GetCompressionAlgorithm

func (m *RPCOptions) GetCompressionAlgorithm() string

func (*RPCOptions) GetCompressionLevel

func (m *RPCOptions) GetCompressionLevel() int32

func (*RPCOptions) GetDisableSessionConnectionSharing

func (m *RPCOptions) GetDisableSessionConnectionSharing() bool

func (*RPCOptions) GetUseRpcForInprocessMaster

func (m *RPCOptions) GetUseRpcForInprocessMaster() bool

func (*RPCOptions) Marshal

func (m *RPCOptions) Marshal() (dAtA []byte, err error)

func (*RPCOptions) MarshalTo

func (m *RPCOptions) MarshalTo(dAtA []byte) (int, error)

func (*RPCOptions) MarshalToSizedBuffer

func (m *RPCOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RPCOptions) ProtoMessage

func (*RPCOptions) ProtoMessage()

func (*RPCOptions) Reset

func (m *RPCOptions) Reset()

func (*RPCOptions) Size

func (m *RPCOptions) Size() (n int)

func (*RPCOptions) String

func (m *RPCOptions) String() string

func (*RPCOptions) Unmarshal

func (m *RPCOptions) Unmarshal(dAtA []byte) error

func (*RPCOptions) XXX_DiscardUnknown

func (m *RPCOptions) XXX_DiscardUnknown()

func (*RPCOptions) XXX_Marshal

func (m *RPCOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RPCOptions) XXX_Merge

func (m *RPCOptions) XXX_Merge(src proto.Message)

func (*RPCOptions) XXX_Size

func (m *RPCOptions) XXX_Size() int

func (*RPCOptions) XXX_Unmarshal

func (m *RPCOptions) XXX_Unmarshal(b []byte) error

type RewriterConfig

type RewriterConfig struct {
	// Optimize tensor layouts (default is ON)
	// e.g. This will try to use NCHW layout on GPU which is faster.
	LayoutOptimizer RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Fold constants (default is ON)
	// Statically infer the value of tensors when possible, and materialize the
	// result using constants.
	ConstantFolding RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Shape optimizations (default is ON)
	// Simplify computations made on shapes.
	ShapeOptimization RewriterConfig_Toggle `` /* 152-byte string literal not displayed */
	// Remapping (default is ON)
	// Remap subgraphs onto more efficient implementations.
	Remapping RewriterConfig_Toggle `protobuf:"varint,14,opt,name=remapping,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"remapping,omitempty"`
	// Common subgraph elimination (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	CommonSubgraphElimination RewriterConfig_Toggle `` /* 178-byte string literal not displayed */
	// Arithmetic optimizations (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	ArithmeticOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Control dependency optimizations (default is ON).
	// Remove redundant control dependencies, which may enable other optimization.
	DependencyOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Loop optimizations (default is ON).
	LoopOptimization RewriterConfig_Toggle `` /* 148-byte string literal not displayed */
	// Function optimizations (default is ON).
	FunctionOptimization RewriterConfig_Toggle `` /* 161-byte string literal not displayed */
	// Strips debug-related nodes from the graph (off by default).
	DebugStripper RewriterConfig_Toggle `` /* 140-byte string literal not displayed */
	// If true, don't remove unnecessary ops from the graph
	DisableModelPruning bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning,proto3" json:"disable_model_pruning,omitempty"`
	// Try to allocate some independent Op outputs contiguously in order to
	// merge or eliminate downstream Ops (off by default).
	ScopedAllocatorOptimization RewriterConfig_Toggle `` /* 184-byte string literal not displayed */
	// Force small ops onto the CPU (default is OFF).
	PinToHostOptimization RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Enable the swap of kernel implementations based on the device placement
	// (default is ON).
	ImplementationSelector RewriterConfig_Toggle `` /* 167-byte string literal not displayed */
	// Optimize data types (default is OFF).
	// e.g., This will try to use float16 on GPU which is faster.
	// Note that this can change the numerical stability of the graph and may
	// require the use of loss scaling to maintain model convergence.
	AutoMixedPrecision RewriterConfig_Toggle `` /* 157-byte string literal not displayed */
	// Disable the entire meta optimizer (off by default).
	DisableMetaOptimizer bool `protobuf:"varint,19,opt,name=disable_meta_optimizer,json=disableMetaOptimizer,proto3" json:"disable_meta_optimizer,omitempty"`
	// Controls how many times we run the optimizers in meta optimizer (default
	// is once).
	MetaOptimizerIterations RewriterConfig_NumIterationsType `` /* 183-byte string literal not displayed */
	// The minimum number of nodes in a graph to optimize. For smaller graphs,
	// optimization is skipped.
	// 0 means the system picks an appropriate number.
	// < 0 means do not skip optimization.
	MinGraphNodes int32 `protobuf:"varint,17,opt,name=min_graph_nodes,json=minGraphNodes,proto3" json:"min_graph_nodes,omitempty"`
	// Configures memory optimization passes through the meta-optimizer. Has no
	// effect on manually requested memory optimization passes in the optimizers
	// field.
	MemoryOptimization RewriterConfig_MemOptType `` /* 158-byte string literal not displayed */
	// A node name scope for node names which are valid outputs of recomputations.
	// Inputs to nodes that match this scope may be recomputed (subject either to
	// manual annotation of those input nodes or to manual annotation and
	// heuristics depending on memory_optimization), but the nodes themselves will
	// not be recomputed. This matches any sub-scopes as well, meaning the scope
	// can appear not just as a top-level scope. For example, if the value is
	// "gradients/", the default, it will match node name "gradients/foo",
	// "foo/gradients/bar", but not "foo_gradients/"
	MemoryOptimizerTargetNodeNameScope string `` /* 171-byte string literal not displayed */
	// Maximum number of milliseconds to spend optimizing a single graph before
	// timing out. If equal to 0 the system picks a default (currently 5 minutes).
	// If less than 0 the optimizer will never time out.
	MetaOptimizerTimeoutMs int64 `` /* 133-byte string literal not displayed */
	// Configures AutoParallel optimization passes either through the
	// meta-optimizer or when manually specified through the optimizers field.
	AutoParallel *AutoParallelOptions `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel,proto3" json:"auto_parallel,omitempty"`
	// If true, any optimization pass failing will cause the MetaOptimizer to
	// stop with an error. By default - or when set to false, failing passes are
	// skipped silently.
	FailOnOptimizerErrors bool                    `` /* 130-byte string literal not displayed */
	ScopedAllocatorOpts   *ScopedAllocatorOptions `protobuf:"bytes,16,opt,name=scoped_allocator_opts,json=scopedAllocatorOpts,proto3" json:"scoped_allocator_opts,omitempty"`
	// If non-empty, will use this as an alternative way to specify a list of
	// optimizations to turn on and the order of the optimizations (replacing the
	// meta-optimizer).
	//
	// Of the RewriterConfig options, only the AutoParallel configuration options
	// (the auto_parallel field) apply to manually requested optimization passes
	// ("autoparallel"). Memory optimization passes ("memory") invoked here are
	// not configurable (in contrast to memory optimization passes through the
	// meta-optimizer) and act only on manual op annotations.
	//
	// Custom optimizers (see custom_optimizers) that are not part of this
	// schedule will be run after - in the order that they were specified.
	Optimizers []string `protobuf:"bytes,100,rep,name=optimizers,proto3" json:"optimizers,omitempty"`
	// list of CustomGraphOptimizers to apply.
	CustomOptimizers []*RewriterConfig_CustomGraphOptimizer `protobuf:"bytes,200,rep,name=custom_optimizers,json=customOptimizers,proto3" json:"custom_optimizers,omitempty"`
	// VerifierConfig specifying the verifiers to be run after every optimizer.
	InterOptimizerVerifierConfig *VerifierConfig `` /* 151-byte string literal not displayed */
	// VerifierConfig specifying the verifiers to be run at the end, after all
	// optimizers have run.
	PostOptimizationVerifierConfig *VerifierConfig `` /* 157-byte string literal not displayed */
}

func (*RewriterConfig) Descriptor

func (*RewriterConfig) Descriptor() ([]byte, []int)

func (*RewriterConfig) GetArithmeticOptimization

func (m *RewriterConfig) GetArithmeticOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecision

func (m *RewriterConfig) GetAutoMixedPrecision() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoParallel

func (m *RewriterConfig) GetAutoParallel() *AutoParallelOptions

func (*RewriterConfig) GetCommonSubgraphElimination

func (m *RewriterConfig) GetCommonSubgraphElimination() RewriterConfig_Toggle

func (*RewriterConfig) GetConstantFolding

func (m *RewriterConfig) GetConstantFolding() RewriterConfig_Toggle

func (*RewriterConfig) GetCustomOptimizers

func (m *RewriterConfig) GetCustomOptimizers() []*RewriterConfig_CustomGraphOptimizer

func (*RewriterConfig) GetDebugStripper

func (m *RewriterConfig) GetDebugStripper() RewriterConfig_Toggle

func (*RewriterConfig) GetDependencyOptimization

func (m *RewriterConfig) GetDependencyOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetDisableMetaOptimizer

func (m *RewriterConfig) GetDisableMetaOptimizer() bool

func (*RewriterConfig) GetDisableModelPruning

func (m *RewriterConfig) GetDisableModelPruning() bool

func (*RewriterConfig) GetFailOnOptimizerErrors

func (m *RewriterConfig) GetFailOnOptimizerErrors() bool

func (*RewriterConfig) GetFunctionOptimization

func (m *RewriterConfig) GetFunctionOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetImplementationSelector

func (m *RewriterConfig) GetImplementationSelector() RewriterConfig_Toggle

func (*RewriterConfig) GetInterOptimizerVerifierConfig

func (m *RewriterConfig) GetInterOptimizerVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetLayoutOptimizer

func (m *RewriterConfig) GetLayoutOptimizer() RewriterConfig_Toggle

func (*RewriterConfig) GetLoopOptimization

func (m *RewriterConfig) GetLoopOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetMemoryOptimization

func (m *RewriterConfig) GetMemoryOptimization() RewriterConfig_MemOptType

func (*RewriterConfig) GetMemoryOptimizerTargetNodeNameScope

func (m *RewriterConfig) GetMemoryOptimizerTargetNodeNameScope() string

func (*RewriterConfig) GetMetaOptimizerIterations

func (m *RewriterConfig) GetMetaOptimizerIterations() RewriterConfig_NumIterationsType

func (*RewriterConfig) GetMetaOptimizerTimeoutMs

func (m *RewriterConfig) GetMetaOptimizerTimeoutMs() int64

func (*RewriterConfig) GetMinGraphNodes

func (m *RewriterConfig) GetMinGraphNodes() int32

func (*RewriterConfig) GetOptimizers

func (m *RewriterConfig) GetOptimizers() []string

func (*RewriterConfig) GetPinToHostOptimization

func (m *RewriterConfig) GetPinToHostOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetPostOptimizationVerifierConfig

func (m *RewriterConfig) GetPostOptimizationVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetRemapping

func (m *RewriterConfig) GetRemapping() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOptimization

func (m *RewriterConfig) GetScopedAllocatorOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOpts

func (m *RewriterConfig) GetScopedAllocatorOpts() *ScopedAllocatorOptions

func (*RewriterConfig) GetShapeOptimization

func (m *RewriterConfig) GetShapeOptimization() RewriterConfig_Toggle

func (*RewriterConfig) Marshal

func (m *RewriterConfig) Marshal() (dAtA []byte, err error)

func (*RewriterConfig) MarshalTo

func (m *RewriterConfig) MarshalTo(dAtA []byte) (int, error)

func (*RewriterConfig) MarshalToSizedBuffer

func (m *RewriterConfig) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RewriterConfig) ProtoMessage

func (*RewriterConfig) ProtoMessage()

func (*RewriterConfig) Reset

func (m *RewriterConfig) Reset()

func (*RewriterConfig) Size

func (m *RewriterConfig) Size() (n int)

func (*RewriterConfig) String

func (m *RewriterConfig) String() string

func (*RewriterConfig) Unmarshal

func (m *RewriterConfig) Unmarshal(dAtA []byte) error

func (*RewriterConfig) XXX_DiscardUnknown

func (m *RewriterConfig) XXX_DiscardUnknown()

func (*RewriterConfig) XXX_Marshal

func (m *RewriterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RewriterConfig) XXX_Merge

func (m *RewriterConfig) XXX_Merge(src proto.Message)

func (*RewriterConfig) XXX_Size

func (m *RewriterConfig) XXX_Size() int

func (*RewriterConfig) XXX_Unmarshal

func (m *RewriterConfig) XXX_Unmarshal(b []byte) error

type RewriterConfig_CustomGraphOptimizer

type RewriterConfig_CustomGraphOptimizer struct {
	Name         string                          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	ParameterMap map[string]*framework.AttrValue `` /* 185-byte string literal not displayed */
}

Message to describe custom graph optimizer and its parameters

func (*RewriterConfig_CustomGraphOptimizer) Descriptor

func (*RewriterConfig_CustomGraphOptimizer) Descriptor() ([]byte, []int)

func (*RewriterConfig_CustomGraphOptimizer) GetName

func (*RewriterConfig_CustomGraphOptimizer) GetParameterMap

func (*RewriterConfig_CustomGraphOptimizer) Marshal

func (m *RewriterConfig_CustomGraphOptimizer) Marshal() (dAtA []byte, err error)

func (*RewriterConfig_CustomGraphOptimizer) MarshalTo

func (m *RewriterConfig_CustomGraphOptimizer) MarshalTo(dAtA []byte) (int, error)

func (*RewriterConfig_CustomGraphOptimizer) MarshalToSizedBuffer

func (m *RewriterConfig_CustomGraphOptimizer) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage()

func (*RewriterConfig_CustomGraphOptimizer) Reset

func (*RewriterConfig_CustomGraphOptimizer) Size

func (*RewriterConfig_CustomGraphOptimizer) String

func (*RewriterConfig_CustomGraphOptimizer) Unmarshal

func (m *RewriterConfig_CustomGraphOptimizer) Unmarshal(dAtA []byte) error

func (*RewriterConfig_CustomGraphOptimizer) XXX_DiscardUnknown

func (m *RewriterConfig_CustomGraphOptimizer) XXX_DiscardUnknown()

func (*RewriterConfig_CustomGraphOptimizer) XXX_Marshal

func (m *RewriterConfig_CustomGraphOptimizer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RewriterConfig_CustomGraphOptimizer) XXX_Merge

func (*RewriterConfig_CustomGraphOptimizer) XXX_Size

func (*RewriterConfig_CustomGraphOptimizer) XXX_Unmarshal

func (m *RewriterConfig_CustomGraphOptimizer) XXX_Unmarshal(b []byte) error

type RewriterConfig_MemOptType

type RewriterConfig_MemOptType int32
const (
	// The default setting (SCHEDULING and SWAPPING HEURISTICS only)
	RewriterConfig_DEFAULT_MEM_OPT RewriterConfig_MemOptType = 0
	// Disabled in the meta-optimizer.
	RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 1
	// Driven by manual op-level annotations.
	RewriterConfig_MANUAL RewriterConfig_MemOptType = 2
	// Swapping heuristic will move a tensor from the GPU to the CPU and move
	// it back when needed to reduce peak memory usage.
	RewriterConfig_SWAPPING_HEURISTICS RewriterConfig_MemOptType = 4
	// Recomputation heuristics will recompute ops (such as Relu activation)
	// during backprop instead of storing them, reducing peak memory usage.
	RewriterConfig_RECOMPUTATION_HEURISTICS RewriterConfig_MemOptType = 5
	// Scheduling will split big ops such as AddN and try to enforce a schedule
	// of the new computations that decreases peak memory usage.
	RewriterConfig_SCHEDULING_HEURISTICS RewriterConfig_MemOptType = 6
	// Use any combination of swapping and recomputation heuristics.
	RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 3
)

func (RewriterConfig_MemOptType) EnumDescriptor

func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_MemOptType) String

func (x RewriterConfig_MemOptType) String() string

type RewriterConfig_NumIterationsType

type RewriterConfig_NumIterationsType int32

Enum controlling the number of times to run optimizers. The default is to run them twice.

const (
	RewriterConfig_DEFAULT_NUM_ITERS RewriterConfig_NumIterationsType = 0
	RewriterConfig_ONE               RewriterConfig_NumIterationsType = 1
	RewriterConfig_TWO               RewriterConfig_NumIterationsType = 2
)

func (RewriterConfig_NumIterationsType) EnumDescriptor

func (RewriterConfig_NumIterationsType) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_NumIterationsType) String

type RewriterConfig_Toggle

type RewriterConfig_Toggle int32
const (
	RewriterConfig_DEFAULT RewriterConfig_Toggle = 0
	RewriterConfig_ON      RewriterConfig_Toggle = 1
	RewriterConfig_OFF     RewriterConfig_Toggle = 2
	// Enable some aggressive optimizations that use assumptions that TF graphs
	// may break. For example, assume the shape of a placeholder matches its
	// actual feed.
	RewriterConfig_AGGRESSIVE RewriterConfig_Toggle = 3
)

func (RewriterConfig_Toggle) EnumDescriptor

func (RewriterConfig_Toggle) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_Toggle) String

func (x RewriterConfig_Toggle) String() string

type RunMetadata

type RunMetadata struct {
	// Statistics traced for this step. Populated if tracing is turned on via the
	// "RunOptions" proto.
	// EXPERIMENTAL: The format and set of events may change in future versions.
	StepStats *framework.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"`
	// The cost graph for the computation defined by the run call.
	CostGraph *framework.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph,proto3" json:"cost_graph,omitempty"`
	// Graphs of the partitions executed by executors.
	PartitionGraphs []*framework.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	// This is only populated for graphs that are run as functions in TensorFlow
	// V2. There will be an entry below for each function that is traced.
	// The main use cases of the post_optimization_graph and the partition_graphs
	// is to give the caller insight into the graphs that were actually run by the
	// runtime. Additional information (such as those in step_stats) will match
	// these graphs.
	// We also include the pre_optimization_graph since it is usually easier to
	// read, and is helpful in situations where the caller wants to get a high
	// level idea of what the built graph looks like (since the various graph
	// optimization passes might change the structure of the graph significantly).
	FunctionGraphs []*RunMetadata_FunctionGraphs `protobuf:"bytes,4,rep,name=function_graphs,json=functionGraphs,proto3" json:"function_graphs,omitempty"`
}

Metadata output (i.e., non-Tensor) for a single Run() call.

func (*RunMetadata) Descriptor

func (*RunMetadata) Descriptor() ([]byte, []int)

func (*RunMetadata) GetCostGraph

func (m *RunMetadata) GetCostGraph() *framework.CostGraphDef

func (*RunMetadata) GetFunctionGraphs

func (m *RunMetadata) GetFunctionGraphs() []*RunMetadata_FunctionGraphs

func (*RunMetadata) GetPartitionGraphs

func (m *RunMetadata) GetPartitionGraphs() []*framework.GraphDef

func (*RunMetadata) GetStepStats

func (m *RunMetadata) GetStepStats() *framework.StepStats

func (*RunMetadata) Marshal

func (m *RunMetadata) Marshal() (dAtA []byte, err error)

func (*RunMetadata) MarshalTo

func (m *RunMetadata) MarshalTo(dAtA []byte) (int, error)

func (*RunMetadata) MarshalToSizedBuffer

func (m *RunMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RunMetadata) ProtoMessage

func (*RunMetadata) ProtoMessage()

func (*RunMetadata) Reset

func (m *RunMetadata) Reset()

func (*RunMetadata) Size

func (m *RunMetadata) Size() (n int)

func (*RunMetadata) String

func (m *RunMetadata) String() string

func (*RunMetadata) Unmarshal

func (m *RunMetadata) Unmarshal(dAtA []byte) error

func (*RunMetadata) XXX_DiscardUnknown

func (m *RunMetadata) XXX_DiscardUnknown()

func (*RunMetadata) XXX_Marshal

func (m *RunMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RunMetadata) XXX_Merge

func (m *RunMetadata) XXX_Merge(src proto.Message)

func (*RunMetadata) XXX_Size

func (m *RunMetadata) XXX_Size() int

func (*RunMetadata) XXX_Unmarshal

func (m *RunMetadata) XXX_Unmarshal(b []byte) error

type RunMetadata_FunctionGraphs

type RunMetadata_FunctionGraphs struct {
	// TODO(nareshmodi): Include some sort of function/cache-key identifier?
	PartitionGraphs       []*framework.GraphDef `protobuf:"bytes,1,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	PreOptimizationGraph  *framework.GraphDef   `protobuf:"bytes,2,opt,name=pre_optimization_graph,json=preOptimizationGraph,proto3" json:"pre_optimization_graph,omitempty"`
	PostOptimizationGraph *framework.GraphDef   `` /* 126-byte string literal not displayed */
}

func (*RunMetadata_FunctionGraphs) Descriptor

func (*RunMetadata_FunctionGraphs) Descriptor() ([]byte, []int)

func (*RunMetadata_FunctionGraphs) GetPartitionGraphs

func (m *RunMetadata_FunctionGraphs) GetPartitionGraphs() []*framework.GraphDef

func (*RunMetadata_FunctionGraphs) GetPostOptimizationGraph

func (m *RunMetadata_FunctionGraphs) GetPostOptimizationGraph() *framework.GraphDef

func (*RunMetadata_FunctionGraphs) GetPreOptimizationGraph

func (m *RunMetadata_FunctionGraphs) GetPreOptimizationGraph() *framework.GraphDef

func (*RunMetadata_FunctionGraphs) Marshal

func (m *RunMetadata_FunctionGraphs) Marshal() (dAtA []byte, err error)

func (*RunMetadata_FunctionGraphs) MarshalTo

func (m *RunMetadata_FunctionGraphs) MarshalTo(dAtA []byte) (int, error)

func (*RunMetadata_FunctionGraphs) MarshalToSizedBuffer

func (m *RunMetadata_FunctionGraphs) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RunMetadata_FunctionGraphs) ProtoMessage

func (*RunMetadata_FunctionGraphs) ProtoMessage()

func (*RunMetadata_FunctionGraphs) Reset

func (m *RunMetadata_FunctionGraphs) Reset()

func (*RunMetadata_FunctionGraphs) Size

func (m *RunMetadata_FunctionGraphs) Size() (n int)

func (*RunMetadata_FunctionGraphs) String

func (m *RunMetadata_FunctionGraphs) String() string

func (*RunMetadata_FunctionGraphs) Unmarshal

func (m *RunMetadata_FunctionGraphs) Unmarshal(dAtA []byte) error

func (*RunMetadata_FunctionGraphs) XXX_DiscardUnknown

func (m *RunMetadata_FunctionGraphs) XXX_DiscardUnknown()

func (*RunMetadata_FunctionGraphs) XXX_Marshal

func (m *RunMetadata_FunctionGraphs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RunMetadata_FunctionGraphs) XXX_Merge

func (m *RunMetadata_FunctionGraphs) XXX_Merge(src proto.Message)

func (*RunMetadata_FunctionGraphs) XXX_Size

func (m *RunMetadata_FunctionGraphs) XXX_Size() int

func (*RunMetadata_FunctionGraphs) XXX_Unmarshal

func (m *RunMetadata_FunctionGraphs) XXX_Unmarshal(b []byte) error

type RunOptions

type RunOptions struct {
	TraceLevel RunOptions_TraceLevel `` /* 130-byte string literal not displayed */
	// Time to wait for operation to complete in milliseconds.
	TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs,proto3" json:"timeout_in_ms,omitempty"`
	// The thread pool to use, if session_inter_op_thread_pool is configured.
	// To use the caller thread, set this to -1; this uses the caller thread
	// to execute Session::Run() and thus avoids a context switch. Using the
	// caller thread to execute Session::Run() should be done ONLY for simple
	// graphs, where the overhead of an additional context switch is
	// comparable with the overhead of Session::Run().
	InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool,proto3" json:"inter_op_thread_pool,omitempty"`
	// Whether the partition graph(s) executed by the executor(s) should be
	// output via RunMetadata.
	OutputPartitionGraphs bool `` /* 127-byte string literal not displayed */
	// EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
	DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions,proto3" json:"debug_options,omitempty"`
	// When enabled, causes tensor allocation information to be included in
	// the error message when the Run() call fails because the allocator ran
	// out of memory (OOM).
	//
	// Enabling this option can slow down the Run() call.
	ReportTensorAllocationsUponOom bool                     `` /* 158-byte string literal not displayed */
	Experimental                   *RunOptions_Experimental `protobuf:"bytes,8,opt,name=experimental,proto3" json:"experimental,omitempty"`
}

Options for a single Run() call.

func (*RunOptions) Descriptor

func (*RunOptions) Descriptor() ([]byte, []int)

func (*RunOptions) GetDebugOptions

func (m *RunOptions) GetDebugOptions() *DebugOptions

func (*RunOptions) GetExperimental

func (m *RunOptions) GetExperimental() *RunOptions_Experimental

func (*RunOptions) GetInterOpThreadPool

func (m *RunOptions) GetInterOpThreadPool() int32

func (*RunOptions) GetOutputPartitionGraphs

func (m *RunOptions) GetOutputPartitionGraphs() bool

func (*RunOptions) GetReportTensorAllocationsUponOom

func (m *RunOptions) GetReportTensorAllocationsUponOom() bool

func (*RunOptions) GetTimeoutInMs

func (m *RunOptions) GetTimeoutInMs() int64

func (*RunOptions) GetTraceLevel

func (m *RunOptions) GetTraceLevel() RunOptions_TraceLevel

func (*RunOptions) Marshal

func (m *RunOptions) Marshal() (dAtA []byte, err error)

func (*RunOptions) MarshalTo

func (m *RunOptions) MarshalTo(dAtA []byte) (int, error)

func (*RunOptions) MarshalToSizedBuffer

func (m *RunOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RunOptions) ProtoMessage

func (*RunOptions) ProtoMessage()

func (*RunOptions) Reset

func (m *RunOptions) Reset()

func (*RunOptions) Size

func (m *RunOptions) Size() (n int)

func (*RunOptions) String

func (m *RunOptions) String() string

func (*RunOptions) Unmarshal

func (m *RunOptions) Unmarshal(dAtA []byte) error

func (*RunOptions) XXX_DiscardUnknown

func (m *RunOptions) XXX_DiscardUnknown()

func (*RunOptions) XXX_Marshal

func (m *RunOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RunOptions) XXX_Merge

func (m *RunOptions) XXX_Merge(src proto.Message)

func (*RunOptions) XXX_Size

func (m *RunOptions) XXX_Size() int

func (*RunOptions) XXX_Unmarshal

func (m *RunOptions) XXX_Unmarshal(b []byte) error

type RunOptions_Experimental

type RunOptions_Experimental struct {
	// If non-zero, declares that this graph is going to use collective
	// ops and must synchronize step_ids with any other graph with this
	// same group_key value (in a distributed computation where tasks
	// run disjoint graphs).
	CollectiveGraphKey int64 `protobuf:"varint,1,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"`
	// If true, then operations (using the inter-op pool) across all
	// session::run() calls will be centrally scheduled, optimizing for (median
	// and tail) latency.
	// Consider using this option for CPU-bound workloads like inference.
	UseRunHandlerPool     bool                                           `protobuf:"varint,2,opt,name=use_run_handler_pool,json=useRunHandlerPool,proto3" json:"use_run_handler_pool,omitempty"`
	RunHandlerPoolOptions *RunOptions_Experimental_RunHandlerPoolOptions `` /* 128-byte string literal not displayed */
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*RunOptions_Experimental) Descriptor

func (*RunOptions_Experimental) Descriptor() ([]byte, []int)

func (*RunOptions_Experimental) GetCollectiveGraphKey

func (m *RunOptions_Experimental) GetCollectiveGraphKey() int64

func (*RunOptions_Experimental) GetRunHandlerPoolOptions

func (*RunOptions_Experimental) GetUseRunHandlerPool

func (m *RunOptions_Experimental) GetUseRunHandlerPool() bool

func (*RunOptions_Experimental) Marshal

func (m *RunOptions_Experimental) Marshal() (dAtA []byte, err error)

func (*RunOptions_Experimental) MarshalTo

func (m *RunOptions_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*RunOptions_Experimental) MarshalToSizedBuffer

func (m *RunOptions_Experimental) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RunOptions_Experimental) ProtoMessage

func (*RunOptions_Experimental) ProtoMessage()

func (*RunOptions_Experimental) Reset

func (m *RunOptions_Experimental) Reset()

func (*RunOptions_Experimental) Size

func (m *RunOptions_Experimental) Size() (n int)

func (*RunOptions_Experimental) String

func (m *RunOptions_Experimental) String() string

func (*RunOptions_Experimental) Unmarshal

func (m *RunOptions_Experimental) Unmarshal(dAtA []byte) error

func (*RunOptions_Experimental) XXX_DiscardUnknown

func (m *RunOptions_Experimental) XXX_DiscardUnknown()

func (*RunOptions_Experimental) XXX_Marshal

func (m *RunOptions_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RunOptions_Experimental) XXX_Merge

func (m *RunOptions_Experimental) XXX_Merge(src proto.Message)

func (*RunOptions_Experimental) XXX_Size

func (m *RunOptions_Experimental) XXX_Size() int

func (*RunOptions_Experimental) XXX_Unmarshal

func (m *RunOptions_Experimental) XXX_Unmarshal(b []byte) error

type RunOptions_Experimental_RunHandlerPoolOptions

type RunOptions_Experimental_RunHandlerPoolOptions struct {
	// Priority of the request. The run handler thread pool will schedule ops
	// based on the priority number. The larger number means higher priority.
	Priority int64 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"`
}

Options for run handler thread pool.

func (*RunOptions_Experimental_RunHandlerPoolOptions) Descriptor

func (*RunOptions_Experimental_RunHandlerPoolOptions) GetPriority

func (*RunOptions_Experimental_RunHandlerPoolOptions) Marshal

func (m *RunOptions_Experimental_RunHandlerPoolOptions) Marshal() (dAtA []byte, err error)

func (*RunOptions_Experimental_RunHandlerPoolOptions) MarshalTo

func (*RunOptions_Experimental_RunHandlerPoolOptions) MarshalToSizedBuffer

func (m *RunOptions_Experimental_RunHandlerPoolOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RunOptions_Experimental_RunHandlerPoolOptions) ProtoMessage

func (*RunOptions_Experimental_RunHandlerPoolOptions) Reset

func (*RunOptions_Experimental_RunHandlerPoolOptions) Size

func (*RunOptions_Experimental_RunHandlerPoolOptions) String

func (*RunOptions_Experimental_RunHandlerPoolOptions) Unmarshal

func (*RunOptions_Experimental_RunHandlerPoolOptions) XXX_DiscardUnknown

func (m *RunOptions_Experimental_RunHandlerPoolOptions) XXX_DiscardUnknown()

func (*RunOptions_Experimental_RunHandlerPoolOptions) XXX_Marshal

func (m *RunOptions_Experimental_RunHandlerPoolOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RunOptions_Experimental_RunHandlerPoolOptions) XXX_Merge

func (*RunOptions_Experimental_RunHandlerPoolOptions) XXX_Size

func (*RunOptions_Experimental_RunHandlerPoolOptions) XXX_Unmarshal

type RunOptions_TraceLevel

type RunOptions_TraceLevel int32

TODO(pbar) Turn this into a TraceOptions proto which allows tracing to be controlled in a more orthogonal manner?

const (
	RunOptions_NO_TRACE       RunOptions_TraceLevel = 0
	RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
	RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
	RunOptions_FULL_TRACE     RunOptions_TraceLevel = 3
)

func (RunOptions_TraceLevel) EnumDescriptor

func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int)

func (RunOptions_TraceLevel) String

func (x RunOptions_TraceLevel) String() string

type SavedAsset

type SavedAsset struct {
	// Index into `MetaGraphDef.asset_file_def[]` that describes the Asset.
	//
	// Only the field `AssetFileDef.filename` is used. Other fields, such as
	// `AssetFileDef.tensor_info`, MUST be ignored.
	AssetFileDefIndex int32 `protobuf:"varint,1,opt,name=asset_file_def_index,json=assetFileDefIndex,proto3" json:"asset_file_def_index,omitempty"`
}

A SavedAsset points to an asset in the MetaGraph.

When bound to a function this object evaluates to a tensor with the absolute filename. Users should not depend on a particular part of the filename to remain stable (e.g. basename could be changed).

func (*SavedAsset) Descriptor

func (*SavedAsset) Descriptor() ([]byte, []int)

func (*SavedAsset) GetAssetFileDefIndex

func (m *SavedAsset) GetAssetFileDefIndex() int32

func (*SavedAsset) Marshal

func (m *SavedAsset) Marshal() (dAtA []byte, err error)

func (*SavedAsset) MarshalTo

func (m *SavedAsset) MarshalTo(dAtA []byte) (int, error)

func (*SavedAsset) MarshalToSizedBuffer

func (m *SavedAsset) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedAsset) ProtoMessage

func (*SavedAsset) ProtoMessage()

func (*SavedAsset) Reset

func (m *SavedAsset) Reset()

func (*SavedAsset) Size

func (m *SavedAsset) Size() (n int)

func (*SavedAsset) String

func (m *SavedAsset) String() string

func (*SavedAsset) Unmarshal

func (m *SavedAsset) Unmarshal(dAtA []byte) error

func (*SavedAsset) XXX_DiscardUnknown

func (m *SavedAsset) XXX_DiscardUnknown()

func (*SavedAsset) XXX_Marshal

func (m *SavedAsset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedAsset) XXX_Merge

func (m *SavedAsset) XXX_Merge(src proto.Message)

func (*SavedAsset) XXX_Size

func (m *SavedAsset) XXX_Size() int

func (*SavedAsset) XXX_Unmarshal

func (m *SavedAsset) XXX_Unmarshal(b []byte) error

type SavedBareConcreteFunction

type SavedBareConcreteFunction struct {
	// Identifies a SavedConcreteFunction.
	ConcreteFunctionName string `protobuf:"bytes,1,opt,name=concrete_function_name,json=concreteFunctionName,proto3" json:"concrete_function_name,omitempty"`
	// A sequence of unique strings, one per Tensor argument.
	ArgumentKeywords []string `protobuf:"bytes,2,rep,name=argument_keywords,json=argumentKeywords,proto3" json:"argument_keywords,omitempty"`
	// The prefix of `argument_keywords` which may be identified by position.
	AllowedPositionalArguments int64 `` /* 142-byte string literal not displayed */
}

func (*SavedBareConcreteFunction) Descriptor

func (*SavedBareConcreteFunction) Descriptor() ([]byte, []int)

func (*SavedBareConcreteFunction) GetAllowedPositionalArguments

func (m *SavedBareConcreteFunction) GetAllowedPositionalArguments() int64

func (*SavedBareConcreteFunction) GetArgumentKeywords

func (m *SavedBareConcreteFunction) GetArgumentKeywords() []string

func (*SavedBareConcreteFunction) GetConcreteFunctionName

func (m *SavedBareConcreteFunction) GetConcreteFunctionName() string

func (*SavedBareConcreteFunction) Marshal

func (m *SavedBareConcreteFunction) Marshal() (dAtA []byte, err error)

func (*SavedBareConcreteFunction) MarshalTo

func (m *SavedBareConcreteFunction) MarshalTo(dAtA []byte) (int, error)

func (*SavedBareConcreteFunction) MarshalToSizedBuffer

func (m *SavedBareConcreteFunction) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedBareConcreteFunction) ProtoMessage

func (*SavedBareConcreteFunction) ProtoMessage()

func (*SavedBareConcreteFunction) Reset

func (m *SavedBareConcreteFunction) Reset()

func (*SavedBareConcreteFunction) Size

func (m *SavedBareConcreteFunction) Size() (n int)

func (*SavedBareConcreteFunction) String

func (m *SavedBareConcreteFunction) String() string

func (*SavedBareConcreteFunction) Unmarshal

func (m *SavedBareConcreteFunction) Unmarshal(dAtA []byte) error

func (*SavedBareConcreteFunction) XXX_DiscardUnknown

func (m *SavedBareConcreteFunction) XXX_DiscardUnknown()

func (*SavedBareConcreteFunction) XXX_Marshal

func (m *SavedBareConcreteFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedBareConcreteFunction) XXX_Merge

func (m *SavedBareConcreteFunction) XXX_Merge(src proto.Message)

func (*SavedBareConcreteFunction) XXX_Size

func (m *SavedBareConcreteFunction) XXX_Size() int

func (*SavedBareConcreteFunction) XXX_Unmarshal

func (m *SavedBareConcreteFunction) XXX_Unmarshal(b []byte) error

type SavedConcreteFunction

type SavedConcreteFunction struct {
	// Bound inputs to the function. The SavedObjects identified by the node ids
	// given here are appended as extra inputs to the caller-supplied inputs.
	// The only types of SavedObjects valid here are SavedVariable, SavedResource
	// and SavedAsset.
	BoundInputs []int32 `protobuf:"varint,2,rep,packed,name=bound_inputs,json=boundInputs,proto3" json:"bound_inputs,omitempty"`
	// Input in canonicalized form that was received to create this concrete
	// function.
	CanonicalizedInputSignature *StructuredValue `` /* 144-byte string literal not displayed */
	// Output that was the return value of this function after replacing all
	// Tensors with TensorSpecs. This can be an arbitrary nested function and will
	// be used to reconstruct the full structure from pure tensors.
	OutputSignature *StructuredValue `protobuf:"bytes,4,opt,name=output_signature,json=outputSignature,proto3" json:"output_signature,omitempty"`
}

Stores low-level information about a concrete function. Referenced in either a SavedFunction or a SavedBareConcreteFunction.

func (*SavedConcreteFunction) Descriptor

func (*SavedConcreteFunction) Descriptor() ([]byte, []int)

func (*SavedConcreteFunction) GetBoundInputs

func (m *SavedConcreteFunction) GetBoundInputs() []int32

func (*SavedConcreteFunction) GetCanonicalizedInputSignature

func (m *SavedConcreteFunction) GetCanonicalizedInputSignature() *StructuredValue

func (*SavedConcreteFunction) GetOutputSignature

func (m *SavedConcreteFunction) GetOutputSignature() *StructuredValue

func (*SavedConcreteFunction) Marshal

func (m *SavedConcreteFunction) Marshal() (dAtA []byte, err error)

func (*SavedConcreteFunction) MarshalTo

func (m *SavedConcreteFunction) MarshalTo(dAtA []byte) (int, error)

func (*SavedConcreteFunction) MarshalToSizedBuffer

func (m *SavedConcreteFunction) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedConcreteFunction) ProtoMessage

func (*SavedConcreteFunction) ProtoMessage()

func (*SavedConcreteFunction) Reset

func (m *SavedConcreteFunction) Reset()

func (*SavedConcreteFunction) Size

func (m *SavedConcreteFunction) Size() (n int)

func (*SavedConcreteFunction) String

func (m *SavedConcreteFunction) String() string

func (*SavedConcreteFunction) Unmarshal

func (m *SavedConcreteFunction) Unmarshal(dAtA []byte) error

func (*SavedConcreteFunction) XXX_DiscardUnknown

func (m *SavedConcreteFunction) XXX_DiscardUnknown()

func (*SavedConcreteFunction) XXX_Marshal

func (m *SavedConcreteFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedConcreteFunction) XXX_Merge

func (m *SavedConcreteFunction) XXX_Merge(src proto.Message)

func (*SavedConcreteFunction) XXX_Size

func (m *SavedConcreteFunction) XXX_Size() int

func (*SavedConcreteFunction) XXX_Unmarshal

func (m *SavedConcreteFunction) XXX_Unmarshal(b []byte) error

type SavedConstant

type SavedConstant struct {
	// An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph.
	Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
}

func (*SavedConstant) Descriptor

func (*SavedConstant) Descriptor() ([]byte, []int)

func (*SavedConstant) GetOperation

func (m *SavedConstant) GetOperation() string

func (*SavedConstant) Marshal

func (m *SavedConstant) Marshal() (dAtA []byte, err error)

func (*SavedConstant) MarshalTo

func (m *SavedConstant) MarshalTo(dAtA []byte) (int, error)

func (*SavedConstant) MarshalToSizedBuffer

func (m *SavedConstant) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedConstant) ProtoMessage

func (*SavedConstant) ProtoMessage()

func (*SavedConstant) Reset

func (m *SavedConstant) Reset()

func (*SavedConstant) Size

func (m *SavedConstant) Size() (n int)

func (*SavedConstant) String

func (m *SavedConstant) String() string

func (*SavedConstant) Unmarshal

func (m *SavedConstant) Unmarshal(dAtA []byte) error

func (*SavedConstant) XXX_DiscardUnknown

func (m *SavedConstant) XXX_DiscardUnknown()

func (*SavedConstant) XXX_Marshal

func (m *SavedConstant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedConstant) XXX_Merge

func (m *SavedConstant) XXX_Merge(src proto.Message)

func (*SavedConstant) XXX_Size

func (m *SavedConstant) XXX_Size() int

func (*SavedConstant) XXX_Unmarshal

func (m *SavedConstant) XXX_Unmarshal(b []byte) error

type SavedFunction

type SavedFunction struct {
	ConcreteFunctions []string      `protobuf:"bytes,1,rep,name=concrete_functions,json=concreteFunctions,proto3" json:"concrete_functions,omitempty"`
	FunctionSpec      *FunctionSpec `protobuf:"bytes,2,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"`
}

A function with multiple signatures, possibly with non-Tensor arguments.

func (*SavedFunction) Descriptor

func (*SavedFunction) Descriptor() ([]byte, []int)

func (*SavedFunction) GetConcreteFunctions

func (m *SavedFunction) GetConcreteFunctions() []string

func (*SavedFunction) GetFunctionSpec

func (m *SavedFunction) GetFunctionSpec() *FunctionSpec

func (*SavedFunction) Marshal

func (m *SavedFunction) Marshal() (dAtA []byte, err error)

func (*SavedFunction) MarshalTo

func (m *SavedFunction) MarshalTo(dAtA []byte) (int, error)

func (*SavedFunction) MarshalToSizedBuffer

func (m *SavedFunction) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedFunction) ProtoMessage

func (*SavedFunction) ProtoMessage()

func (*SavedFunction) Reset

func (m *SavedFunction) Reset()

func (*SavedFunction) Size

func (m *SavedFunction) Size() (n int)

func (*SavedFunction) String

func (m *SavedFunction) String() string

func (*SavedFunction) Unmarshal

func (m *SavedFunction) Unmarshal(dAtA []byte) error

func (*SavedFunction) XXX_DiscardUnknown

func (m *SavedFunction) XXX_DiscardUnknown()

func (*SavedFunction) XXX_Marshal

func (m *SavedFunction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedFunction) XXX_Merge

func (m *SavedFunction) XXX_Merge(src proto.Message)

func (*SavedFunction) XXX_Size

func (m *SavedFunction) XXX_Size() int

func (*SavedFunction) XXX_Unmarshal

func (m *SavedFunction) XXX_Unmarshal(b []byte) error

type SavedModel

type SavedModel struct {
	// The schema version of the SavedModel instance. Used for versioning when
	// making future changes to the specification/implementation. Initial value
	// at release will be 1.
	SavedModelSchemaVersion int64 `` /* 135-byte string literal not displayed */
	// One or more MetaGraphs.
	MetaGraphs []*MetaGraphDef `protobuf:"bytes,2,rep,name=meta_graphs,json=metaGraphs,proto3" json:"meta_graphs,omitempty"`
}

SavedModel is the high-level serialization format for TensorFlow models. See [todo: doc links, similar to session_bundle] for more information.

func (*SavedModel) Descriptor

func (*SavedModel) Descriptor() ([]byte, []int)

func (*SavedModel) GetMetaGraphs

func (m *SavedModel) GetMetaGraphs() []*MetaGraphDef

func (*SavedModel) GetSavedModelSchemaVersion

func (m *SavedModel) GetSavedModelSchemaVersion() int64

func (*SavedModel) Marshal

func (m *SavedModel) Marshal() (dAtA []byte, err error)

func (*SavedModel) MarshalTo

func (m *SavedModel) MarshalTo(dAtA []byte) (int, error)

func (*SavedModel) MarshalToSizedBuffer

func (m *SavedModel) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedModel) ProtoMessage

func (*SavedModel) ProtoMessage()

func (*SavedModel) Reset

func (m *SavedModel) Reset()

func (*SavedModel) Size

func (m *SavedModel) Size() (n int)

func (*SavedModel) String

func (m *SavedModel) String() string

func (*SavedModel) Unmarshal

func (m *SavedModel) Unmarshal(dAtA []byte) error

func (*SavedModel) XXX_DiscardUnknown

func (m *SavedModel) XXX_DiscardUnknown()

func (*SavedModel) XXX_Marshal

func (m *SavedModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedModel) XXX_Merge

func (m *SavedModel) XXX_Merge(src proto.Message)

func (*SavedModel) XXX_Size

func (m *SavedModel) XXX_Size() int

func (*SavedModel) XXX_Unmarshal

func (m *SavedModel) XXX_Unmarshal(b []byte) error

type SavedObject

type SavedObject struct {
	// Objects which this object depends on: named edges in the dependency
	// graph.
	//
	// Note: currently only valid if kind == "user_object".
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Slot variables owned by this object. This describes the three-way
	// (optimizer, variable, slot variable) relationship; none of the three
	// depend on the others directly.
	//
	// Note: currently only valid if kind == "user_object".
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
	// Types that are valid to be assigned to Kind:
	//	*SavedObject_UserObject
	//	*SavedObject_Asset
	//	*SavedObject_Function
	//	*SavedObject_Variable
	//	*SavedObject_BareConcreteFunction
	//	*SavedObject_Constant
	//	*SavedObject_Resource
	Kind isSavedObject_Kind `protobuf_oneof:"kind"`
}

func (*SavedObject) Descriptor

func (*SavedObject) Descriptor() ([]byte, []int)

func (*SavedObject) GetAsset

func (m *SavedObject) GetAsset() *SavedAsset

func (*SavedObject) GetBareConcreteFunction

func (m *SavedObject) GetBareConcreteFunction() *SavedBareConcreteFunction

func (*SavedObject) GetChildren

func (*SavedObject) GetConstant

func (m *SavedObject) GetConstant() *SavedConstant

func (*SavedObject) GetFunction

func (m *SavedObject) GetFunction() *SavedFunction

func (*SavedObject) GetKind

func (m *SavedObject) GetKind() isSavedObject_Kind

func (*SavedObject) GetResource

func (m *SavedObject) GetResource() *SavedResource

func (*SavedObject) GetUserObject

func (m *SavedObject) GetUserObject() *SavedUserObject

func (*SavedObject) GetVariable

func (m *SavedObject) GetVariable() *SavedVariable

func (*SavedObject) Marshal

func (m *SavedObject) Marshal() (dAtA []byte, err error)

func (*SavedObject) MarshalTo

func (m *SavedObject) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject) MarshalToSizedBuffer

func (m *SavedObject) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject) ProtoMessage

func (*SavedObject) ProtoMessage()

func (*SavedObject) Reset

func (m *SavedObject) Reset()

func (*SavedObject) Size

func (m *SavedObject) Size() (n int)

func (*SavedObject) String

func (m *SavedObject) String() string

func (*SavedObject) Unmarshal

func (m *SavedObject) Unmarshal(dAtA []byte) error

func (*SavedObject) XXX_DiscardUnknown

func (m *SavedObject) XXX_DiscardUnknown()

func (*SavedObject) XXX_Marshal

func (m *SavedObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedObject) XXX_Merge

func (m *SavedObject) XXX_Merge(src proto.Message)

func (*SavedObject) XXX_OneofWrappers

func (*SavedObject) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*SavedObject) XXX_Size

func (m *SavedObject) XXX_Size() int

func (*SavedObject) XXX_Unmarshal

func (m *SavedObject) XXX_Unmarshal(b []byte) error

type SavedObjectGraph

type SavedObjectGraph struct {
	// Flattened list of objects in the object graph.
	//
	// The position of the object in this list indicates its id.
	// Nodes[0] is considered the root node.
	Nodes []*SavedObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// Information about captures and output structures in concrete functions.
	// Referenced from SavedBareConcreteFunction and SavedFunction.
	ConcreteFunctions map[string]*SavedConcreteFunction `` /* 200-byte string literal not displayed */
}

func (*SavedObjectGraph) Descriptor

func (*SavedObjectGraph) Descriptor() ([]byte, []int)

func (*SavedObjectGraph) GetConcreteFunctions

func (m *SavedObjectGraph) GetConcreteFunctions() map[string]*SavedConcreteFunction

func (*SavedObjectGraph) GetNodes

func (m *SavedObjectGraph) GetNodes() []*SavedObject

func (*SavedObjectGraph) Marshal

func (m *SavedObjectGraph) Marshal() (dAtA []byte, err error)

func (*SavedObjectGraph) MarshalTo

func (m *SavedObjectGraph) MarshalTo(dAtA []byte) (int, error)

func (*SavedObjectGraph) MarshalToSizedBuffer

func (m *SavedObjectGraph) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObjectGraph) ProtoMessage

func (*SavedObjectGraph) ProtoMessage()

func (*SavedObjectGraph) Reset

func (m *SavedObjectGraph) Reset()

func (*SavedObjectGraph) Size

func (m *SavedObjectGraph) Size() (n int)

func (*SavedObjectGraph) String

func (m *SavedObjectGraph) String() string

func (*SavedObjectGraph) Unmarshal

func (m *SavedObjectGraph) Unmarshal(dAtA []byte) error

func (*SavedObjectGraph) XXX_DiscardUnknown

func (m *SavedObjectGraph) XXX_DiscardUnknown()

func (*SavedObjectGraph) XXX_Marshal

func (m *SavedObjectGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedObjectGraph) XXX_Merge

func (m *SavedObjectGraph) XXX_Merge(src proto.Message)

func (*SavedObjectGraph) XXX_Size

func (m *SavedObjectGraph) XXX_Size() int

func (*SavedObjectGraph) XXX_Unmarshal

func (m *SavedObjectGraph) XXX_Unmarshal(b []byte) error

type SavedObject_Asset

type SavedObject_Asset struct {
	Asset *SavedAsset `protobuf:"bytes,5,opt,name=asset,proto3,oneof" json:"asset,omitempty"`
}

func (*SavedObject_Asset) MarshalTo

func (m *SavedObject_Asset) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_Asset) MarshalToSizedBuffer

func (m *SavedObject_Asset) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_Asset) Size

func (m *SavedObject_Asset) Size() (n int)

type SavedObject_BareConcreteFunction

type SavedObject_BareConcreteFunction struct {
	BareConcreteFunction *SavedBareConcreteFunction `` /* 129-byte string literal not displayed */
}

func (*SavedObject_BareConcreteFunction) MarshalTo

func (m *SavedObject_BareConcreteFunction) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_BareConcreteFunction) MarshalToSizedBuffer

func (m *SavedObject_BareConcreteFunction) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_BareConcreteFunction) Size

func (m *SavedObject_BareConcreteFunction) Size() (n int)

type SavedObject_Constant

type SavedObject_Constant struct {
	Constant *SavedConstant `protobuf:"bytes,9,opt,name=constant,proto3,oneof" json:"constant,omitempty"`
}

func (*SavedObject_Constant) MarshalTo

func (m *SavedObject_Constant) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_Constant) MarshalToSizedBuffer

func (m *SavedObject_Constant) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_Constant) Size

func (m *SavedObject_Constant) Size() (n int)

type SavedObject_Function

type SavedObject_Function struct {
	Function *SavedFunction `protobuf:"bytes,6,opt,name=function,proto3,oneof" json:"function,omitempty"`
}

func (*SavedObject_Function) MarshalTo

func (m *SavedObject_Function) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_Function) MarshalToSizedBuffer

func (m *SavedObject_Function) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_Function) Size

func (m *SavedObject_Function) Size() (n int)

type SavedObject_Resource

type SavedObject_Resource struct {
	Resource *SavedResource `protobuf:"bytes,10,opt,name=resource,proto3,oneof" json:"resource,omitempty"`
}

func (*SavedObject_Resource) MarshalTo

func (m *SavedObject_Resource) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_Resource) MarshalToSizedBuffer

func (m *SavedObject_Resource) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_Resource) Size

func (m *SavedObject_Resource) Size() (n int)

type SavedObject_UserObject

type SavedObject_UserObject struct {
	UserObject *SavedUserObject `protobuf:"bytes,4,opt,name=user_object,json=userObject,proto3,oneof" json:"user_object,omitempty"`
}

func (*SavedObject_UserObject) MarshalTo

func (m *SavedObject_UserObject) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_UserObject) MarshalToSizedBuffer

func (m *SavedObject_UserObject) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_UserObject) Size

func (m *SavedObject_UserObject) Size() (n int)

type SavedObject_Variable

type SavedObject_Variable struct {
	Variable *SavedVariable `protobuf:"bytes,7,opt,name=variable,proto3,oneof" json:"variable,omitempty"`
}

func (*SavedObject_Variable) MarshalTo

func (m *SavedObject_Variable) MarshalTo(dAtA []byte) (int, error)

func (*SavedObject_Variable) MarshalToSizedBuffer

func (m *SavedObject_Variable) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedObject_Variable) Size

func (m *SavedObject_Variable) Size() (n int)

type SavedResource

type SavedResource struct {
	// A device specification indicating a required placement for the resource
	// creation function, e.g. "CPU". An empty string allows the user to select a
	// device.
	Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
}

A SavedResource represents a TF object that holds state during its lifetime. An object of this type can have a reference to a create_resource() function and an initialize() function.

func (*SavedResource) Descriptor

func (*SavedResource) Descriptor() ([]byte, []int)

func (*SavedResource) GetDevice

func (m *SavedResource) GetDevice() string

func (*SavedResource) Marshal

func (m *SavedResource) Marshal() (dAtA []byte, err error)

func (*SavedResource) MarshalTo

func (m *SavedResource) MarshalTo(dAtA []byte) (int, error)

func (*SavedResource) MarshalToSizedBuffer

func (m *SavedResource) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedResource) ProtoMessage

func (*SavedResource) ProtoMessage()

func (*SavedResource) Reset

func (m *SavedResource) Reset()

func (*SavedResource) Size

func (m *SavedResource) Size() (n int)

func (*SavedResource) String

func (m *SavedResource) String() string

func (*SavedResource) Unmarshal

func (m *SavedResource) Unmarshal(dAtA []byte) error

func (*SavedResource) XXX_DiscardUnknown

func (m *SavedResource) XXX_DiscardUnknown()

func (*SavedResource) XXX_Marshal

func (m *SavedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedResource) XXX_Merge

func (m *SavedResource) XXX_Merge(src proto.Message)

func (*SavedResource) XXX_Size

func (m *SavedResource) XXX_Size() int

func (*SavedResource) XXX_Unmarshal

func (m *SavedResource) XXX_Unmarshal(b []byte) error

type SavedUserObject

type SavedUserObject struct {
	// Corresponds to a registration of the type to use in the loading program.
	Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
	// Version information from the producer of this SavedUserObject.
	Version *framework.VersionDef `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Initialization-related metadata.
	Metadata string `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
}

A SavedUserObject is an object (in the object-oriented language of the TensorFlow program) of some user- or framework-defined class other than those handled specifically by the other kinds of SavedObjects.

This object cannot be evaluated as a tensor, and therefore cannot be bound to an input of a function.

func (*SavedUserObject) Descriptor

func (*SavedUserObject) Descriptor() ([]byte, []int)

func (*SavedUserObject) GetIdentifier

func (m *SavedUserObject) GetIdentifier() string

func (*SavedUserObject) GetMetadata

func (m *SavedUserObject) GetMetadata() string

func (*SavedUserObject) GetVersion

func (m *SavedUserObject) GetVersion() *framework.VersionDef

func (*SavedUserObject) Marshal

func (m *SavedUserObject) Marshal() (dAtA []byte, err error)

func (*SavedUserObject) MarshalTo

func (m *SavedUserObject) MarshalTo(dAtA []byte) (int, error)

func (*SavedUserObject) MarshalToSizedBuffer

func (m *SavedUserObject) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedUserObject) ProtoMessage

func (*SavedUserObject) ProtoMessage()

func (*SavedUserObject) Reset

func (m *SavedUserObject) Reset()

func (*SavedUserObject) Size

func (m *SavedUserObject) Size() (n int)

func (*SavedUserObject) String

func (m *SavedUserObject) String() string

func (*SavedUserObject) Unmarshal

func (m *SavedUserObject) Unmarshal(dAtA []byte) error

func (*SavedUserObject) XXX_DiscardUnknown

func (m *SavedUserObject) XXX_DiscardUnknown()

func (*SavedUserObject) XXX_Marshal

func (m *SavedUserObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedUserObject) XXX_Merge

func (m *SavedUserObject) XXX_Merge(src proto.Message)

func (*SavedUserObject) XXX_Size

func (m *SavedUserObject) XXX_Size() int

func (*SavedUserObject) XXX_Unmarshal

func (m *SavedUserObject) XXX_Unmarshal(b []byte) error

type SavedVariable

type SavedVariable struct {
	Dtype           framework.DataType                `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape           *framework.TensorShapeProto       `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Trainable       bool                              `protobuf:"varint,3,opt,name=trainable,proto3" json:"trainable,omitempty"`
	Synchronization framework.VariableSynchronization `protobuf:"varint,4,opt,name=synchronization,proto3,enum=tensorflow.VariableSynchronization" json:"synchronization,omitempty"`
	Aggregation     framework.VariableAggregation     `protobuf:"varint,5,opt,name=aggregation,proto3,enum=tensorflow.VariableAggregation" json:"aggregation,omitempty"`
	Name            string                            `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
}

Represents a Variable that is initialized by loading the contents from the checkpoint.

func (*SavedVariable) Descriptor

func (*SavedVariable) Descriptor() ([]byte, []int)

func (*SavedVariable) GetAggregation

func (m *SavedVariable) GetAggregation() framework.VariableAggregation

func (*SavedVariable) GetDtype

func (m *SavedVariable) GetDtype() framework.DataType

func (*SavedVariable) GetName

func (m *SavedVariable) GetName() string

func (*SavedVariable) GetShape

func (m *SavedVariable) GetShape() *framework.TensorShapeProto

func (*SavedVariable) GetSynchronization

func (m *SavedVariable) GetSynchronization() framework.VariableSynchronization

func (*SavedVariable) GetTrainable

func (m *SavedVariable) GetTrainable() bool

func (*SavedVariable) Marshal

func (m *SavedVariable) Marshal() (dAtA []byte, err error)

func (*SavedVariable) MarshalTo

func (m *SavedVariable) MarshalTo(dAtA []byte) (int, error)

func (*SavedVariable) MarshalToSizedBuffer

func (m *SavedVariable) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SavedVariable) ProtoMessage

func (*SavedVariable) ProtoMessage()

func (*SavedVariable) Reset

func (m *SavedVariable) Reset()

func (*SavedVariable) Size

func (m *SavedVariable) Size() (n int)

func (*SavedVariable) String

func (m *SavedVariable) String() string

func (*SavedVariable) Unmarshal

func (m *SavedVariable) Unmarshal(dAtA []byte) error

func (*SavedVariable) XXX_DiscardUnknown

func (m *SavedVariable) XXX_DiscardUnknown()

func (*SavedVariable) XXX_Marshal

func (m *SavedVariable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SavedVariable) XXX_Merge

func (m *SavedVariable) XXX_Merge(src proto.Message)

func (*SavedVariable) XXX_Size

func (m *SavedVariable) XXX_Size() int

func (*SavedVariable) XXX_Unmarshal

func (m *SavedVariable) XXX_Unmarshal(b []byte) error

type SaverDef

type SaverDef struct {
	// The name of the tensor in which to specify the filename when saving or
	// restoring a model checkpoint.
	FilenameTensorName string `protobuf:"bytes,1,opt,name=filename_tensor_name,json=filenameTensorName,proto3" json:"filename_tensor_name,omitempty"`
	// The operation to run when saving a model checkpoint.
	SaveTensorName string `protobuf:"bytes,2,opt,name=save_tensor_name,json=saveTensorName,proto3" json:"save_tensor_name,omitempty"`
	// The operation to run when restoring a model checkpoint.
	RestoreOpName string `protobuf:"bytes,3,opt,name=restore_op_name,json=restoreOpName,proto3" json:"restore_op_name,omitempty"`
	// Maximum number of checkpoints to keep.  If 0, no checkpoints are deleted.
	MaxToKeep int32 `protobuf:"varint,4,opt,name=max_to_keep,json=maxToKeep,proto3" json:"max_to_keep,omitempty"`
	// Shard the save files, one per device that has Variable nodes.
	Sharded bool `protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"`
	// How often to keep an additional checkpoint. If not specified, only the last
	// "max_to_keep" checkpoints are kept; if specified, in addition to keeping
	// the last "max_to_keep" checkpoints, an additional checkpoint will be kept
	// for every n hours of training.
	KeepCheckpointEveryNHours float32                          `` /* 144-byte string literal not displayed */
	Version                   SaverDef_CheckpointFormatVersion `protobuf:"varint,7,opt,name=version,proto3,enum=tensorflow.SaverDef_CheckpointFormatVersion" json:"version,omitempty"`
}

Protocol buffer representing the configuration of a Saver.

func (*SaverDef) Descriptor

func (*SaverDef) Descriptor() ([]byte, []int)

func (*SaverDef) GetFilenameTensorName

func (m *SaverDef) GetFilenameTensorName() string

func (*SaverDef) GetKeepCheckpointEveryNHours

func (m *SaverDef) GetKeepCheckpointEveryNHours() float32

func (*SaverDef) GetMaxToKeep

func (m *SaverDef) GetMaxToKeep() int32

func (*SaverDef) GetRestoreOpName

func (m *SaverDef) GetRestoreOpName() string

func (*SaverDef) GetSaveTensorName

func (m *SaverDef) GetSaveTensorName() string

func (*SaverDef) GetSharded

func (m *SaverDef) GetSharded() bool

func (*SaverDef) GetVersion

func (*SaverDef) Marshal

func (m *SaverDef) Marshal() (dAtA []byte, err error)

func (*SaverDef) MarshalTo

func (m *SaverDef) MarshalTo(dAtA []byte) (int, error)

func (*SaverDef) MarshalToSizedBuffer

func (m *SaverDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SaverDef) ProtoMessage

func (*SaverDef) ProtoMessage()

func (*SaverDef) Reset

func (m *SaverDef) Reset()

func (*SaverDef) Size

func (m *SaverDef) Size() (n int)

func (*SaverDef) String

func (m *SaverDef) String() string

func (*SaverDef) Unmarshal

func (m *SaverDef) Unmarshal(dAtA []byte) error

func (*SaverDef) XXX_DiscardUnknown

func (m *SaverDef) XXX_DiscardUnknown()

func (*SaverDef) XXX_Marshal

func (m *SaverDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SaverDef) XXX_Merge

func (m *SaverDef) XXX_Merge(src proto.Message)

func (*SaverDef) XXX_Size

func (m *SaverDef) XXX_Size() int

func (*SaverDef) XXX_Unmarshal

func (m *SaverDef) XXX_Unmarshal(b []byte) error

type SaverDef_CheckpointFormatVersion

type SaverDef_CheckpointFormatVersion int32

A version number that identifies a different on-disk checkpoint format. Usually, each subclass of BaseSaverBuilder works with a particular version/format. However, it is possible that the same builder may be upgraded to support a newer checkpoint format in the future.

const (
	// Internal legacy format.
	SaverDef_LEGACY SaverDef_CheckpointFormatVersion = 0
	// Deprecated format: tf.Saver() which works with tensorflow::table::Table.
	SaverDef_V1 SaverDef_CheckpointFormatVersion = 1
	// Current format: more efficient.
	SaverDef_V2 SaverDef_CheckpointFormatVersion = 2
)

func (SaverDef_CheckpointFormatVersion) EnumDescriptor

func (SaverDef_CheckpointFormatVersion) EnumDescriptor() ([]byte, []int)

func (SaverDef_CheckpointFormatVersion) String

type ScopedAllocatorOptions

type ScopedAllocatorOptions struct {
	// If present, only perform optimization for these ops.
	EnableOp []string `protobuf:"bytes,1,rep,name=enable_op,json=enableOp,proto3" json:"enable_op,omitempty"`
}

func (*ScopedAllocatorOptions) Descriptor

func (*ScopedAllocatorOptions) Descriptor() ([]byte, []int)

func (*ScopedAllocatorOptions) GetEnableOp

func (m *ScopedAllocatorOptions) GetEnableOp() []string

func (*ScopedAllocatorOptions) Marshal

func (m *ScopedAllocatorOptions) Marshal() (dAtA []byte, err error)

func (*ScopedAllocatorOptions) MarshalTo

func (m *ScopedAllocatorOptions) MarshalTo(dAtA []byte) (int, error)

func (*ScopedAllocatorOptions) MarshalToSizedBuffer

func (m *ScopedAllocatorOptions) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ScopedAllocatorOptions) ProtoMessage

func (*ScopedAllocatorOptions) ProtoMessage()

func (*ScopedAllocatorOptions) Reset

func (m *ScopedAllocatorOptions) Reset()

func (*ScopedAllocatorOptions) Size

func (m *ScopedAllocatorOptions) Size() (n int)

func (*ScopedAllocatorOptions) String

func (m *ScopedAllocatorOptions) String() string

func (*ScopedAllocatorOptions) Unmarshal

func (m *ScopedAllocatorOptions) Unmarshal(dAtA []byte) error

func (*ScopedAllocatorOptions) XXX_DiscardUnknown

func (m *ScopedAllocatorOptions) XXX_DiscardUnknown()

func (*ScopedAllocatorOptions) XXX_Marshal

func (m *ScopedAllocatorOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ScopedAllocatorOptions) XXX_Merge

func (m *ScopedAllocatorOptions) XXX_Merge(src proto.Message)

func (*ScopedAllocatorOptions) XXX_Size

func (m *ScopedAllocatorOptions) XXX_Size() int

func (*ScopedAllocatorOptions) XXX_Unmarshal

func (m *ScopedAllocatorOptions) XXX_Unmarshal(b []byte) error

type SessionMetadata

type SessionMetadata struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The version is optional. If set, needs to be >= 0.
	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
}

Metadata about the session.

This can be used by the runtime and the Ops for debugging, monitoring, etc.

The (name, version) tuple is expected to be a unique identifier for sessions within the same process.

NOTE: This is currently used and propagated only by the direct session.

func (*SessionMetadata) Descriptor

func (*SessionMetadata) Descriptor() ([]byte, []int)

func (*SessionMetadata) GetName

func (m *SessionMetadata) GetName() string

func (*SessionMetadata) GetVersion

func (m *SessionMetadata) GetVersion() int64

func (*SessionMetadata) Marshal

func (m *SessionMetadata) Marshal() (dAtA []byte, err error)

func (*SessionMetadata) MarshalTo

func (m *SessionMetadata) MarshalTo(dAtA []byte) (int, error)

func (*SessionMetadata) MarshalToSizedBuffer

func (m *SessionMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SessionMetadata) ProtoMessage

func (*SessionMetadata) ProtoMessage()

func (*SessionMetadata) Reset

func (m *SessionMetadata) Reset()

func (*SessionMetadata) Size

func (m *SessionMetadata) Size() (n int)

func (*SessionMetadata) String

func (m *SessionMetadata) String() string

func (*SessionMetadata) Unmarshal

func (m *SessionMetadata) Unmarshal(dAtA []byte) error

func (*SessionMetadata) XXX_DiscardUnknown

func (m *SessionMetadata) XXX_DiscardUnknown()

func (*SessionMetadata) XXX_Marshal

func (m *SessionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SessionMetadata) XXX_Merge

func (m *SessionMetadata) XXX_Merge(src proto.Message)

func (*SessionMetadata) XXX_Size

func (m *SessionMetadata) XXX_Size() int

func (*SessionMetadata) XXX_Unmarshal

func (m *SessionMetadata) XXX_Unmarshal(b []byte) error

type SignatureDef

type SignatureDef struct {
	// Named input parameters.
	Inputs map[string]*TensorInfo `` /* 153-byte string literal not displayed */
	// Named output parameters.
	Outputs map[string]*TensorInfo `` /* 155-byte string literal not displayed */
	// Extensible method_name information enabling third-party users to mark a
	// SignatureDef as supporting a particular method. This enables producers and
	// consumers of SignatureDefs, e.g. a model definition library and a serving
	// library to have a clear hand-off regarding the semantics of a computation.
	//
	// Note that multiple SignatureDefs in a single MetaGraphDef may have the same
	// method_name. This is commonly used to support multi-headed computation,
	// where a single graph computation may return multiple results.
	MethodName string `protobuf:"bytes,3,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
}

SignatureDef defines the signature of a computation supported by a TensorFlow graph.

For example, a model with two loss computations, sharing a single input, might have the following signature_def map.

Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, output key, and method_name are identical, and will be used by system(s) that implement or rely upon this particular loss method. The output tensor names differ, demonstrating how different outputs can exist for the same method.

signature_def {
  key: "loss_A"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_A:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
  }
  ...
  method_name: "some/package/compute_loss"
}
signature_def {
  key: "loss_B"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_B:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
  }
  ...
  method_name: "some/package/compute_loss"
}

func (*SignatureDef) Descriptor

func (*SignatureDef) Descriptor() ([]byte, []int)

func (*SignatureDef) GetInputs

func (m *SignatureDef) GetInputs() map[string]*TensorInfo

func (*SignatureDef) GetMethodName

func (m *SignatureDef) GetMethodName() string

func (*SignatureDef) GetOutputs

func (m *SignatureDef) GetOutputs() map[string]*TensorInfo

func (*SignatureDef) Marshal

func (m *SignatureDef) Marshal() (dAtA []byte, err error)

func (*SignatureDef) MarshalTo

func (m *SignatureDef) MarshalTo(dAtA []byte) (int, error)

func (*SignatureDef) MarshalToSizedBuffer

func (m *SignatureDef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SignatureDef) ProtoMessage

func (*SignatureDef) ProtoMessage()

func (*SignatureDef) Reset

func (m *SignatureDef) Reset()

func (*SignatureDef) Size

func (m *SignatureDef) Size() (n int)

func (*SignatureDef) String

func (m *SignatureDef) String() string

func (*SignatureDef) Unmarshal

func (m *SignatureDef) Unmarshal(dAtA []byte) error

func (*SignatureDef) XXX_DiscardUnknown

func (m *SignatureDef) XXX_DiscardUnknown()

func (*SignatureDef) XXX_Marshal

func (m *SignatureDef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SignatureDef) XXX_Merge

func (m *SignatureDef) XXX_Merge(src proto.Message)

func (*SignatureDef) XXX_Size

func (m *SignatureDef) XXX_Size() int

func (*SignatureDef) XXX_Unmarshal

func (m *SignatureDef) XXX_Unmarshal(b []byte) error

type StructuredValue

type StructuredValue struct {
	// The kind of value.
	//
	// Types that are valid to be assigned to Kind:
	//	*StructuredValue_NoneValue
	//	*StructuredValue_Float64Value
	//	*StructuredValue_Int64Value
	//	*StructuredValue_StringValue
	//	*StructuredValue_BoolValue
	//	*StructuredValue_TensorShapeValue
	//	*StructuredValue_TensorDtypeValue
	//	*StructuredValue_TensorSpecValue
	//	*StructuredValue_TypeSpecValue
	//	*StructuredValue_BoundedTensorSpecValue
	//	*StructuredValue_ListValue
	//	*StructuredValue_TupleValue
	//	*StructuredValue_DictValue
	//	*StructuredValue_NamedTupleValue
	Kind isStructuredValue_Kind `protobuf_oneof:"kind"`
}

`StructuredValue` represents a dynamically typed value representing various data structures that are inspired by Python data structures typically used in TensorFlow functions as inputs and outputs.

For example when saving a Layer there may be a `training` argument. If the user passes a boolean True/False, that switches between two concrete TensorFlow functions. In order to switch between them in the same way after loading the SavedModel, we need to represent "True" and "False".

A more advanced example might be a function which takes a list of dictionaries mapping from strings to Tensors. In order to map from user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]` after load to the right saved TensorFlow function, we need to represent the nested structure and the strings, recording that we have a trace for anything matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([], tf.float64)}]` as an example.

Likewise functions may return nested structures of Tensors, for example returning a dictionary mapping from strings to Tensors. In order for the loaded function to return the same structure we need to serialize it.

This is an ergonomic aid for working with loaded SavedModels, not a promise to serialize all possible function signatures. For example we do not expect to pickle generic Python objects, and ideally we'd stay language-agnostic.

func (*StructuredValue) Descriptor

func (*StructuredValue) Descriptor() ([]byte, []int)

func (*StructuredValue) GetBoolValue

func (m *StructuredValue) GetBoolValue() bool

func (*StructuredValue) GetBoundedTensorSpecValue

func (m *StructuredValue) GetBoundedTensorSpecValue() *BoundedTensorSpecProto

func (*StructuredValue) GetDictValue

func (m *StructuredValue) GetDictValue() *DictValue

func (*StructuredValue) GetFloat64Value

func (m *StructuredValue) GetFloat64Value() float64

func (*StructuredValue) GetInt64Value

func (m *StructuredValue) GetInt64Value() int64

func (*StructuredValue) GetKind

func (m *StructuredValue) GetKind() isStructuredValue_Kind

func (*StructuredValue) GetListValue

func (m *StructuredValue) GetListValue() *ListValue

func (*StructuredValue) GetNamedTupleValue

func (m *StructuredValue) GetNamedTupleValue() *NamedTupleValue

func (*StructuredValue) GetNoneValue

func (m *StructuredValue) GetNoneValue() *NoneValue

func (*StructuredValue) GetStringValue

func (m *StructuredValue) GetStringValue() string

func (*StructuredValue) GetTensorDtypeValue

func (m *StructuredValue) GetTensorDtypeValue() framework.DataType

func (*StructuredValue) GetTensorShapeValue

func (m *StructuredValue) GetTensorShapeValue() *framework.TensorShapeProto

func (*StructuredValue) GetTensorSpecValue

func (m *StructuredValue) GetTensorSpecValue() *TensorSpecProto

func (*StructuredValue) GetTupleValue

func (m *StructuredValue) GetTupleValue() *TupleValue

func (*StructuredValue) GetTypeSpecValue

func (m *StructuredValue) GetTypeSpecValue() *TypeSpecProto

func (*StructuredValue) Marshal

func (m *StructuredValue) Marshal() (dAtA []byte, err error)

func (*StructuredValue) MarshalTo

func (m *StructuredValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue) MarshalToSizedBuffer

func (m *StructuredValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue) ProtoMessage

func (*StructuredValue) ProtoMessage()

func (*StructuredValue) Reset

func (m *StructuredValue) Reset()

func (*StructuredValue) Size

func (m *StructuredValue) Size() (n int)

func (*StructuredValue) String

func (m *StructuredValue) String() string

func (*StructuredValue) Unmarshal

func (m *StructuredValue) Unmarshal(dAtA []byte) error

func (*StructuredValue) XXX_DiscardUnknown

func (m *StructuredValue) XXX_DiscardUnknown()

func (*StructuredValue) XXX_Marshal

func (m *StructuredValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StructuredValue) XXX_Merge

func (m *StructuredValue) XXX_Merge(src proto.Message)

func (*StructuredValue) XXX_OneofWrappers

func (*StructuredValue) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*StructuredValue) XXX_Size

func (m *StructuredValue) XXX_Size() int

func (*StructuredValue) XXX_Unmarshal

func (m *StructuredValue) XXX_Unmarshal(b []byte) error

type StructuredValue_BoolValue

type StructuredValue_BoolValue struct {
	BoolValue bool `protobuf:"varint,14,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
}

func (*StructuredValue_BoolValue) MarshalTo

func (m *StructuredValue_BoolValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_BoolValue) MarshalToSizedBuffer

func (m *StructuredValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_BoolValue) Size

func (m *StructuredValue_BoolValue) Size() (n int)

type StructuredValue_BoundedTensorSpecValue

type StructuredValue_BoundedTensorSpecValue struct {
	BoundedTensorSpecValue *BoundedTensorSpecProto `` /* 138-byte string literal not displayed */
}

func (*StructuredValue_BoundedTensorSpecValue) MarshalTo

func (m *StructuredValue_BoundedTensorSpecValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_BoundedTensorSpecValue) MarshalToSizedBuffer

func (m *StructuredValue_BoundedTensorSpecValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_BoundedTensorSpecValue) Size

type StructuredValue_DictValue

type StructuredValue_DictValue struct {
	DictValue *DictValue `protobuf:"bytes,53,opt,name=dict_value,json=dictValue,proto3,oneof" json:"dict_value,omitempty"`
}

func (*StructuredValue_DictValue) MarshalTo

func (m *StructuredValue_DictValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_DictValue) MarshalToSizedBuffer

func (m *StructuredValue_DictValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_DictValue) Size

func (m *StructuredValue_DictValue) Size() (n int)

type StructuredValue_Float64Value

type StructuredValue_Float64Value struct {
	Float64Value float64 `protobuf:"fixed64,11,opt,name=float64_value,json=float64Value,proto3,oneof" json:"float64_value,omitempty"`
}

func (*StructuredValue_Float64Value) MarshalTo

func (m *StructuredValue_Float64Value) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_Float64Value) MarshalToSizedBuffer

func (m *StructuredValue_Float64Value) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_Float64Value) Size

func (m *StructuredValue_Float64Value) Size() (n int)

type StructuredValue_Int64Value

type StructuredValue_Int64Value struct {
	Int64Value int64 `protobuf:"zigzag64,12,opt,name=int64_value,json=int64Value,proto3,oneof" json:"int64_value,omitempty"`
}

func (*StructuredValue_Int64Value) MarshalTo

func (m *StructuredValue_Int64Value) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_Int64Value) MarshalToSizedBuffer

func (m *StructuredValue_Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_Int64Value) Size

func (m *StructuredValue_Int64Value) Size() (n int)

type StructuredValue_ListValue

type StructuredValue_ListValue struct {
	ListValue *ListValue `protobuf:"bytes,51,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"`
}

func (*StructuredValue_ListValue) MarshalTo

func (m *StructuredValue_ListValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_ListValue) MarshalToSizedBuffer

func (m *StructuredValue_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_ListValue) Size

func (m *StructuredValue_ListValue) Size() (n int)

type StructuredValue_NamedTupleValue

type StructuredValue_NamedTupleValue struct {
	NamedTupleValue *NamedTupleValue `protobuf:"bytes,54,opt,name=named_tuple_value,json=namedTupleValue,proto3,oneof" json:"named_tuple_value,omitempty"`
}

func (*StructuredValue_NamedTupleValue) MarshalTo

func (m *StructuredValue_NamedTupleValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_NamedTupleValue) MarshalToSizedBuffer

func (m *StructuredValue_NamedTupleValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_NamedTupleValue) Size

func (m *StructuredValue_NamedTupleValue) Size() (n int)

type StructuredValue_NoneValue

type StructuredValue_NoneValue struct {
	NoneValue *NoneValue `protobuf:"bytes,1,opt,name=none_value,json=noneValue,proto3,oneof" json:"none_value,omitempty"`
}

func (*StructuredValue_NoneValue) MarshalTo

func (m *StructuredValue_NoneValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_NoneValue) MarshalToSizedBuffer

func (m *StructuredValue_NoneValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_NoneValue) Size

func (m *StructuredValue_NoneValue) Size() (n int)

type StructuredValue_StringValue

type StructuredValue_StringValue struct {
	StringValue string `protobuf:"bytes,13,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
}

func (*StructuredValue_StringValue) MarshalTo

func (m *StructuredValue_StringValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_StringValue) MarshalToSizedBuffer

func (m *StructuredValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_StringValue) Size

func (m *StructuredValue_StringValue) Size() (n int)

type StructuredValue_TensorDtypeValue

type StructuredValue_TensorDtypeValue struct {
	TensorDtypeValue framework.DataType `` /* 144-byte string literal not displayed */
}

func (*StructuredValue_TensorDtypeValue) MarshalTo

func (m *StructuredValue_TensorDtypeValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_TensorDtypeValue) MarshalToSizedBuffer

func (m *StructuredValue_TensorDtypeValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_TensorDtypeValue) Size

func (m *StructuredValue_TensorDtypeValue) Size() (n int)

type StructuredValue_TensorShapeValue

type StructuredValue_TensorShapeValue struct {
	TensorShapeValue *framework.TensorShapeProto `protobuf:"bytes,31,opt,name=tensor_shape_value,json=tensorShapeValue,proto3,oneof" json:"tensor_shape_value,omitempty"`
}

func (*StructuredValue_TensorShapeValue) MarshalTo

func (m *StructuredValue_TensorShapeValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_TensorShapeValue) MarshalToSizedBuffer

func (m *StructuredValue_TensorShapeValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_TensorShapeValue) Size

func (m *StructuredValue_TensorShapeValue) Size() (n int)

type StructuredValue_TensorSpecValue

type StructuredValue_TensorSpecValue struct {
	TensorSpecValue *TensorSpecProto `protobuf:"bytes,33,opt,name=tensor_spec_value,json=tensorSpecValue,proto3,oneof" json:"tensor_spec_value,omitempty"`
}

func (*StructuredValue_TensorSpecValue) MarshalTo

func (m *StructuredValue_TensorSpecValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_TensorSpecValue) MarshalToSizedBuffer

func (m *StructuredValue_TensorSpecValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_TensorSpecValue) Size

func (m *StructuredValue_TensorSpecValue) Size() (n int)

type StructuredValue_TupleValue

type StructuredValue_TupleValue struct {
	TupleValue *TupleValue `protobuf:"bytes,52,opt,name=tuple_value,json=tupleValue,proto3,oneof" json:"tuple_value,omitempty"`
}

func (*StructuredValue_TupleValue) MarshalTo

func (m *StructuredValue_TupleValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_TupleValue) MarshalToSizedBuffer

func (m *StructuredValue_TupleValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_TupleValue) Size

func (m *StructuredValue_TupleValue) Size() (n int)

type StructuredValue_TypeSpecValue

type StructuredValue_TypeSpecValue struct {
	TypeSpecValue *TypeSpecProto `protobuf:"bytes,34,opt,name=type_spec_value,json=typeSpecValue,proto3,oneof" json:"type_spec_value,omitempty"`
}

func (*StructuredValue_TypeSpecValue) MarshalTo

func (m *StructuredValue_TypeSpecValue) MarshalTo(dAtA []byte) (int, error)

func (*StructuredValue_TypeSpecValue) MarshalToSizedBuffer

func (m *StructuredValue_TypeSpecValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StructuredValue_TypeSpecValue) Size

func (m *StructuredValue_TypeSpecValue) Size() (n int)

type TensorConnection

type TensorConnection struct {
	// A tensor name. The value of this tensor will be substituted for
	// the tensor named in `to_tensor`.
	FromTensor string `protobuf:"bytes,1,opt,name=from_tensor,json=fromTensor,proto3" json:"from_tensor,omitempty"`
	// A tensor name. The value of this tensor will be bound to the
	// value of the tensor named in `from_tensor`.
	ToTensor string `protobuf:"bytes,2,opt,name=to_tensor,json=toTensor,proto3" json:"to_tensor,omitempty"`
}

Defines a connection between two tensors in a `GraphDef`.

func (*TensorConnection) Descriptor

func (*TensorConnection) Descriptor() ([]byte, []int)

func (*TensorConnection) GetFromTensor

func (m *TensorConnection) GetFromTensor() string

func (*TensorConnection) GetToTensor

func (m *TensorConnection) GetToTensor() string

func (*TensorConnection) Marshal

func (m *TensorConnection) Marshal() (dAtA []byte, err error)

func (*TensorConnection) MarshalTo

func (m *TensorConnection) MarshalTo(dAtA []byte) (int, error)

func (*TensorConnection) MarshalToSizedBuffer

func (m *TensorConnection) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorConnection) ProtoMessage

func (*TensorConnection) ProtoMessage()

func (*TensorConnection) Reset

func (m *TensorConnection) Reset()

func (*TensorConnection) Size

func (m *TensorConnection) Size() (n int)

func (*TensorConnection) String

func (m *TensorConnection) String() string

func (*TensorConnection) Unmarshal

func (m *TensorConnection) Unmarshal(dAtA []byte) error

func (*TensorConnection) XXX_DiscardUnknown

func (m *TensorConnection) XXX_DiscardUnknown()

func (*TensorConnection) XXX_Marshal

func (m *TensorConnection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TensorConnection) XXX_Merge

func (m *TensorConnection) XXX_Merge(src proto.Message)

func (*TensorConnection) XXX_Size

func (m *TensorConnection) XXX_Size() int

func (*TensorConnection) XXX_Unmarshal

func (m *TensorConnection) XXX_Unmarshal(b []byte) error

type TensorInfo

type TensorInfo struct {
	// Types that are valid to be assigned to Encoding:
	//	*TensorInfo_Name
	//	*TensorInfo_CooSparse_
	//	*TensorInfo_CompositeTensor_
	Encoding isTensorInfo_Encoding `protobuf_oneof:"encoding"`
	Dtype    framework.DataType    `protobuf:"varint,2,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// The static shape should be recorded here, to the extent that it can
	// be known in advance.  In the case of a SparseTensor, this field describes
	// the logical shape of the represented tensor (aka dense_shape).
	TensorShape *framework.TensorShapeProto `protobuf:"bytes,3,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"`
}

Information about a Tensor necessary for feeding or retrieval.

func (*TensorInfo) Descriptor

func (*TensorInfo) Descriptor() ([]byte, []int)

func (*TensorInfo) GetCompositeTensor

func (m *TensorInfo) GetCompositeTensor() *TensorInfo_CompositeTensor

func (*TensorInfo) GetCooSparse

func (m *TensorInfo) GetCooSparse() *TensorInfo_CooSparse

func (*TensorInfo) GetDtype

func (m *TensorInfo) GetDtype() framework.DataType

func (*TensorInfo) GetEncoding

func (m *TensorInfo) GetEncoding() isTensorInfo_Encoding

func (*TensorInfo) GetName

func (m *TensorInfo) GetName() string

func (*TensorInfo) GetTensorShape

func (m *TensorInfo) GetTensorShape() *framework.TensorShapeProto

func (*TensorInfo) Marshal

func (m *TensorInfo) Marshal() (dAtA []byte, err error)

func (*TensorInfo) MarshalTo

func (m *TensorInfo) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo) MarshalToSizedBuffer

func (m *TensorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo) ProtoMessage

func (*TensorInfo) ProtoMessage()

func (*TensorInfo) Reset

func (m *TensorInfo) Reset()

func (*TensorInfo) Size

func (m *TensorInfo) Size() (n int)

func (*TensorInfo) String

func (m *TensorInfo) String() string

func (*TensorInfo) Unmarshal

func (m *TensorInfo) Unmarshal(dAtA []byte) error

func (*TensorInfo) XXX_DiscardUnknown

func (m *TensorInfo) XXX_DiscardUnknown()

func (*TensorInfo) XXX_Marshal

func (m *TensorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TensorInfo) XXX_Merge

func (m *TensorInfo) XXX_Merge(src proto.Message)

func (*TensorInfo) XXX_OneofWrappers

func (*TensorInfo) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*TensorInfo) XXX_Size

func (m *TensorInfo) XXX_Size() int

func (*TensorInfo) XXX_Unmarshal

func (m *TensorInfo) XXX_Unmarshal(b []byte) error

type TensorInfo_CompositeTensor

type TensorInfo_CompositeTensor struct {
	// The serialized TypeSpec for the composite tensor.
	TypeSpec *TypeSpecProto `protobuf:"bytes,1,opt,name=type_spec,json=typeSpec,proto3" json:"type_spec,omitempty"`
	// A TensorInfo for each flattened component tensor.
	Components []*TensorInfo `protobuf:"bytes,2,rep,name=components,proto3" json:"components,omitempty"`
}

Generic encoding for composite tensors.

func (*TensorInfo_CompositeTensor) Descriptor

func (*TensorInfo_CompositeTensor) Descriptor() ([]byte, []int)

func (*TensorInfo_CompositeTensor) GetComponents

func (m *TensorInfo_CompositeTensor) GetComponents() []*TensorInfo

func (*TensorInfo_CompositeTensor) GetTypeSpec

func (m *TensorInfo_CompositeTensor) GetTypeSpec() *TypeSpecProto

func (*TensorInfo_CompositeTensor) Marshal

func (m *TensorInfo_CompositeTensor) Marshal() (dAtA []byte, err error)

func (*TensorInfo_CompositeTensor) MarshalTo

func (m *TensorInfo_CompositeTensor) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo_CompositeTensor) MarshalToSizedBuffer

func (m *TensorInfo_CompositeTensor) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo_CompositeTensor) ProtoMessage

func (*TensorInfo_CompositeTensor) ProtoMessage()

func (*TensorInfo_CompositeTensor) Reset

func (m *TensorInfo_CompositeTensor) Reset()

func (*TensorInfo_CompositeTensor) Size

func (m *TensorInfo_CompositeTensor) Size() (n int)

func (*TensorInfo_CompositeTensor) String

func (m *TensorInfo_CompositeTensor) String() string

func (*TensorInfo_CompositeTensor) Unmarshal

func (m *TensorInfo_CompositeTensor) Unmarshal(dAtA []byte) error

func (*TensorInfo_CompositeTensor) XXX_DiscardUnknown

func (m *TensorInfo_CompositeTensor) XXX_DiscardUnknown()

func (*TensorInfo_CompositeTensor) XXX_Marshal

func (m *TensorInfo_CompositeTensor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TensorInfo_CompositeTensor) XXX_Merge

func (m *TensorInfo_CompositeTensor) XXX_Merge(src proto.Message)

func (*TensorInfo_CompositeTensor) XXX_Size

func (m *TensorInfo_CompositeTensor) XXX_Size() int

func (*TensorInfo_CompositeTensor) XXX_Unmarshal

func (m *TensorInfo_CompositeTensor) XXX_Unmarshal(b []byte) error

type TensorInfo_CompositeTensor_

type TensorInfo_CompositeTensor_ struct {
	CompositeTensor *TensorInfo_CompositeTensor `protobuf:"bytes,5,opt,name=composite_tensor,json=compositeTensor,proto3,oneof" json:"composite_tensor,omitempty"`
}

func (*TensorInfo_CompositeTensor_) MarshalTo

func (m *TensorInfo_CompositeTensor_) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo_CompositeTensor_) MarshalToSizedBuffer

func (m *TensorInfo_CompositeTensor_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo_CompositeTensor_) Size

func (m *TensorInfo_CompositeTensor_) Size() (n int)

type TensorInfo_CooSparse

type TensorInfo_CooSparse struct {
	// The shape of the values Tensor is [?].  Its dtype must be the dtype of
	// the SparseTensor as a whole, given in the enclosing TensorInfo.
	ValuesTensorName string `protobuf:"bytes,1,opt,name=values_tensor_name,json=valuesTensorName,proto3" json:"values_tensor_name,omitempty"`
	// The indices Tensor must have dtype int64 and shape [?, ?].
	IndicesTensorName string `protobuf:"bytes,2,opt,name=indices_tensor_name,json=indicesTensorName,proto3" json:"indices_tensor_name,omitempty"`
	// The dynamic logical shape represented by the SparseTensor is recorded in
	// the Tensor referenced here.  It must have dtype int64 and shape [?].
	DenseShapeTensorName string `protobuf:"bytes,3,opt,name=dense_shape_tensor_name,json=denseShapeTensorName,proto3" json:"dense_shape_tensor_name,omitempty"`
}

For sparse tensors, the COO encoding stores a triple of values, indices, and shape.

func (*TensorInfo_CooSparse) Descriptor

func (*TensorInfo_CooSparse) Descriptor() ([]byte, []int)

func (*TensorInfo_CooSparse) GetDenseShapeTensorName

func (m *TensorInfo_CooSparse) GetDenseShapeTensorName() string

func (*TensorInfo_CooSparse) GetIndicesTensorName

func (m *TensorInfo_CooSparse) GetIndicesTensorName() string

func (*TensorInfo_CooSparse) GetValuesTensorName

func (m *TensorInfo_CooSparse) GetValuesTensorName() string

func (*TensorInfo_CooSparse) Marshal

func (m *TensorInfo_CooSparse) Marshal() (dAtA []byte, err error)

func (*TensorInfo_CooSparse) MarshalTo

func (m *TensorInfo_CooSparse) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo_CooSparse) MarshalToSizedBuffer

func (m *TensorInfo_CooSparse) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo_CooSparse) ProtoMessage

func (*TensorInfo_CooSparse) ProtoMessage()

func (*TensorInfo_CooSparse) Reset

func (m *TensorInfo_CooSparse) Reset()

func (*TensorInfo_CooSparse) Size

func (m *TensorInfo_CooSparse) Size() (n int)

func (*TensorInfo_CooSparse) String

func (m *TensorInfo_CooSparse) String() string

func (*TensorInfo_CooSparse) Unmarshal

func (m *TensorInfo_CooSparse) Unmarshal(dAtA []byte) error

func (*TensorInfo_CooSparse) XXX_DiscardUnknown

func (m *TensorInfo_CooSparse) XXX_DiscardUnknown()

func (*TensorInfo_CooSparse) XXX_Marshal

func (m *TensorInfo_CooSparse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TensorInfo_CooSparse) XXX_Merge

func (m *TensorInfo_CooSparse) XXX_Merge(src proto.Message)

func (*TensorInfo_CooSparse) XXX_Size

func (m *TensorInfo_CooSparse) XXX_Size() int

func (*TensorInfo_CooSparse) XXX_Unmarshal

func (m *TensorInfo_CooSparse) XXX_Unmarshal(b []byte) error

type TensorInfo_CooSparse_

type TensorInfo_CooSparse_ struct {
	CooSparse *TensorInfo_CooSparse `protobuf:"bytes,4,opt,name=coo_sparse,json=cooSparse,proto3,oneof" json:"coo_sparse,omitempty"`
}

func (*TensorInfo_CooSparse_) MarshalTo

func (m *TensorInfo_CooSparse_) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo_CooSparse_) MarshalToSizedBuffer

func (m *TensorInfo_CooSparse_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo_CooSparse_) Size

func (m *TensorInfo_CooSparse_) Size() (n int)

type TensorInfo_Name

type TensorInfo_Name struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3,oneof" json:"name,omitempty"`
}

func (*TensorInfo_Name) MarshalTo

func (m *TensorInfo_Name) MarshalTo(dAtA []byte) (int, error)

func (*TensorInfo_Name) MarshalToSizedBuffer

func (m *TensorInfo_Name) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorInfo_Name) Size

func (m *TensorInfo_Name) Size() (n int)

type TensorSpecProto

type TensorSpecProto struct {
	Name  string                      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape *framework.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype framework.DataType          `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
}

A protobuf to represent tf.TensorSpec.

func (*TensorSpecProto) Descriptor

func (*TensorSpecProto) Descriptor() ([]byte, []int)

func (*TensorSpecProto) GetDtype

func (m *TensorSpecProto) GetDtype() framework.DataType

func (*TensorSpecProto) GetName

func (m *TensorSpecProto) GetName() string

func (*TensorSpecProto) GetShape

func (*TensorSpecProto) Marshal

func (m *TensorSpecProto) Marshal() (dAtA []byte, err error)

func (*TensorSpecProto) MarshalTo

func (m *TensorSpecProto) MarshalTo(dAtA []byte) (int, error)

func (*TensorSpecProto) MarshalToSizedBuffer

func (m *TensorSpecProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TensorSpecProto) ProtoMessage

func (*TensorSpecProto) ProtoMessage()

func (*TensorSpecProto) Reset

func (m *TensorSpecProto) Reset()

func (*TensorSpecProto) Size

func (m *TensorSpecProto) Size() (n int)

func (*TensorSpecProto) String

func (m *TensorSpecProto) String() string

func (*TensorSpecProto) Unmarshal

func (m *TensorSpecProto) Unmarshal(dAtA []byte) error

func (*TensorSpecProto) XXX_DiscardUnknown

func (m *TensorSpecProto) XXX_DiscardUnknown()

func (*TensorSpecProto) XXX_Marshal

func (m *TensorSpecProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TensorSpecProto) XXX_Merge

func (m *TensorSpecProto) XXX_Merge(src proto.Message)

func (*TensorSpecProto) XXX_Size

func (m *TensorSpecProto) XXX_Size() int

func (*TensorSpecProto) XXX_Unmarshal

func (m *TensorSpecProto) XXX_Unmarshal(b []byte) error

type ThreadPoolOptionProto

type ThreadPoolOptionProto struct {
	// The number of threads in the pool.
	//
	// 0 means the system picks a value based on where this option proto is used
	// (see the declaration of the specific field for more info).
	NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"`
	// The global name of the threadpool.
	//
	// If empty, then the threadpool is made and used according to the scope it's
	// in - e.g., for a session threadpool, it is used by that session only.
	//
	// If non-empty, then:
	// - a global threadpool associated with this name is looked
	//   up or created. This allows, for example, sharing one threadpool across
	//   many sessions (e.g., like the default behavior, if
	//   inter_op_parallelism_threads is not configured), but still partitioning
	//   into a large and small pool.
	// - if the threadpool for this global_name already exists, then it is an
	//   error if the existing pool was created using a different num_threads
	//   value as is specified on this call.
	// - threadpools created this way are never garbage collected.
	GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"`
}

func (*ThreadPoolOptionProto) Descriptor

func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int)

func (*ThreadPoolOptionProto) GetGlobalName

func (m *ThreadPoolOptionProto) GetGlobalName() string

func (*ThreadPoolOptionProto) GetNumThreads

func (m *ThreadPoolOptionProto) GetNumThreads() int32

func (*ThreadPoolOptionProto) Marshal

func (m *ThreadPoolOptionProto) Marshal() (dAtA []byte, err error)

func (*ThreadPoolOptionProto) MarshalTo

func (m *ThreadPoolOptionProto) MarshalTo(dAtA []byte) (int, error)

func (*ThreadPoolOptionProto) MarshalToSizedBuffer

func (m *ThreadPoolOptionProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ThreadPoolOptionProto) ProtoMessage

func (*ThreadPoolOptionProto) ProtoMessage()

func (*ThreadPoolOptionProto) Reset

func (m *ThreadPoolOptionProto) Reset()

func (*ThreadPoolOptionProto) Size

func (m *ThreadPoolOptionProto) Size() (n int)

func (*ThreadPoolOptionProto) String

func (m *ThreadPoolOptionProto) String() string

func (*ThreadPoolOptionProto) Unmarshal

func (m *ThreadPoolOptionProto) Unmarshal(dAtA []byte) error

func (*ThreadPoolOptionProto) XXX_DiscardUnknown

func (m *ThreadPoolOptionProto) XXX_DiscardUnknown()

func (*ThreadPoolOptionProto) XXX_Marshal

func (m *ThreadPoolOptionProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ThreadPoolOptionProto) XXX_Merge

func (m *ThreadPoolOptionProto) XXX_Merge(src proto.Message)

func (*ThreadPoolOptionProto) XXX_Size

func (m *ThreadPoolOptionProto) XXX_Size() int

func (*ThreadPoolOptionProto) XXX_Unmarshal

func (m *ThreadPoolOptionProto) XXX_Unmarshal(b []byte) error

type TrackableObjectGraph

type TrackableObjectGraph struct {
	Nodes []*TrackableObjectGraph_TrackableObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
}

func (*TrackableObjectGraph) Descriptor

func (*TrackableObjectGraph) Descriptor() ([]byte, []int)

func (*TrackableObjectGraph) GetNodes

func (*TrackableObjectGraph) Marshal

func (m *TrackableObjectGraph) Marshal() (dAtA []byte, err error)

func (*TrackableObjectGraph) MarshalTo

func (m *TrackableObjectGraph) MarshalTo(dAtA []byte) (int, error)

func (*TrackableObjectGraph) MarshalToSizedBuffer

func (m *TrackableObjectGraph) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TrackableObjectGraph) ProtoMessage

func (*TrackableObjectGraph) ProtoMessage()

func (*TrackableObjectGraph) Reset

func (m *TrackableObjectGraph) Reset()

func (*TrackableObjectGraph) Size

func (m *TrackableObjectGraph) Size() (n int)

func (*TrackableObjectGraph) String

func (m *TrackableObjectGraph) String() string

func (*TrackableObjectGraph) Unmarshal

func (m *TrackableObjectGraph) Unmarshal(dAtA []byte) error

func (*TrackableObjectGraph) XXX_DiscardUnknown

func (m *TrackableObjectGraph) XXX_DiscardUnknown()

func (*TrackableObjectGraph) XXX_Marshal

func (m *TrackableObjectGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TrackableObjectGraph) XXX_Merge

func (m *TrackableObjectGraph) XXX_Merge(src proto.Message)

func (*TrackableObjectGraph) XXX_Size

func (m *TrackableObjectGraph) XXX_Size() int

func (*TrackableObjectGraph) XXX_Unmarshal

func (m *TrackableObjectGraph) XXX_Unmarshal(b []byte) error

type TrackableObjectGraph_TrackableObject

type TrackableObjectGraph_TrackableObject struct {
	// Objects which this object depends on.
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Serialized data specific to this object.
	Attributes []*TrackableObjectGraph_TrackableObject_SerializedTensor `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// Slot variables owned by this object.
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
}

func (*TrackableObjectGraph_TrackableObject) Descriptor

func (*TrackableObjectGraph_TrackableObject) Descriptor() ([]byte, []int)

func (*TrackableObjectGraph_TrackableObject) GetAttributes

func (*TrackableObjectGraph_TrackableObject) GetChildren

func (*TrackableObjectGraph_TrackableObject) GetSlotVariables

func (*TrackableObjectGraph_TrackableObject) Marshal

func (m *TrackableObjectGraph_TrackableObject) Marshal() (dAtA []byte, err error)

func (*TrackableObjectGraph_TrackableObject) MarshalTo

func (m *TrackableObjectGraph_TrackableObject) MarshalTo(dAtA []byte) (int, error)

func (*TrackableObjectGraph_TrackableObject) MarshalToSizedBuffer

func (m *TrackableObjectGraph_TrackableObject) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TrackableObjectGraph_TrackableObject) ProtoMessage

func (*TrackableObjectGraph_TrackableObject) ProtoMessage()

func (*TrackableObjectGraph_TrackableObject) Reset

func (*TrackableObjectGraph_TrackableObject) Size

func (*TrackableObjectGraph_TrackableObject) String

func (*TrackableObjectGraph_TrackableObject) Unmarshal

func (m *TrackableObjectGraph_TrackableObject) Unmarshal(dAtA []byte) error

func (*TrackableObjectGraph_TrackableObject) XXX_DiscardUnknown

func (m *TrackableObjectGraph_TrackableObject) XXX_DiscardUnknown()

func (*TrackableObjectGraph_TrackableObject) XXX_Marshal

func (m *TrackableObjectGraph_TrackableObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TrackableObjectGraph_TrackableObject) XXX_Merge

func (*TrackableObjectGraph_TrackableObject) XXX_Size

func (*TrackableObjectGraph_TrackableObject) XXX_Unmarshal

func (m *TrackableObjectGraph_TrackableObject) XXX_Unmarshal(b []byte) error

type TrackableObjectGraph_TrackableObject_ObjectReference

type TrackableObjectGraph_TrackableObject_ObjectReference struct {
	// An index into `TrackableObjectGraph.nodes`, indicating the object
	// being referenced.
	NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
	// A user-provided name for the edge.
	LocalName string `protobuf:"bytes,2,opt,name=local_name,json=localName,proto3" json:"local_name,omitempty"`
}

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Descriptor

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetLocalName

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetNodeId

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Marshal

func (*TrackableObjectGraph_TrackableObject_ObjectReference) MarshalTo

func (*TrackableObjectGraph_TrackableObject_ObjectReference) MarshalToSizedBuffer

func (m *TrackableObjectGraph_TrackableObject_ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Reset

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Size

func (*TrackableObjectGraph_TrackableObject_ObjectReference) String

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Unmarshal

func (*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_DiscardUnknown

func (*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Marshal

func (m *TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Merge

func (*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Size

func (*TrackableObjectGraph_TrackableObject_ObjectReference) XXX_Unmarshal

type TrackableObjectGraph_TrackableObject_SerializedTensor

type TrackableObjectGraph_TrackableObject_SerializedTensor struct {
	// A name for the Tensor. Simple variables have only one
	// `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may
	// be restored on object creation as an optimization.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The full name of the variable/tensor, if applicable. Used to allow
	// name-based loading of checkpoints which were saved using an
	// object-based API. Should match the checkpoint key which would have been
	// assigned by tf.train.Saver.
	FullName string `protobuf:"bytes,2,opt,name=full_name,json=fullName,proto3" json:"full_name,omitempty"`
	// The generated name of the Tensor in the checkpoint.
	CheckpointKey string `protobuf:"bytes,3,opt,name=checkpoint_key,json=checkpointKey,proto3" json:"checkpoint_key,omitempty"`
	// Whether checkpoints should be considered as matching even without this
	// value restored. Used for non-critical values which don't affect the
	// TensorFlow graph, such as layer configurations.
	OptionalRestore bool `protobuf:"varint,4,opt,name=optional_restore,json=optionalRestore,proto3" json:"optional_restore,omitempty"`
}

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Descriptor

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetCheckpointKey

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetFullName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetOptionalRestore

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Marshal

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) MarshalTo

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) MarshalToSizedBuffer

func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Reset

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Size

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) String

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Unmarshal

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_DiscardUnknown

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Marshal

func (m *TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Merge

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Size

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) XXX_Unmarshal

type TrackableObjectGraph_TrackableObject_SlotVariableReference

type TrackableObjectGraph_TrackableObject_SlotVariableReference struct {
	// An index into `TrackableObjectGraph.nodes`, indicating the
	// variable object this slot was created for.
	OriginalVariableNodeId int32 `` /* 132-byte string literal not displayed */
	// The name of the slot (e.g. "m"/"v").
	SlotName string `protobuf:"bytes,2,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"`
	// An index into `TrackableObjectGraph.nodes`, indicating the
	// `Object` with the value of the slot variable.
	SlotVariableNodeId int32 `protobuf:"varint,3,opt,name=slot_variable_node_id,json=slotVariableNodeId,proto3" json:"slot_variable_node_id,omitempty"`
}

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Descriptor

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetOriginalVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotName

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Marshal

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) MarshalTo

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) MarshalToSizedBuffer

func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Reset

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Size

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) String

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Unmarshal

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_DiscardUnknown

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Marshal

func (m *TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Merge

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Size

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) XXX_Unmarshal

type TupleValue

type TupleValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

Represents a Python tuple.

func (*TupleValue) Descriptor

func (*TupleValue) Descriptor() ([]byte, []int)

func (*TupleValue) GetValues

func (m *TupleValue) GetValues() []*StructuredValue

func (*TupleValue) Marshal

func (m *TupleValue) Marshal() (dAtA []byte, err error)

func (*TupleValue) MarshalTo

func (m *TupleValue) MarshalTo(dAtA []byte) (int, error)

func (*TupleValue) MarshalToSizedBuffer

func (m *TupleValue) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TupleValue) ProtoMessage

func (*TupleValue) ProtoMessage()

func (*TupleValue) Reset

func (m *TupleValue) Reset()

func (*TupleValue) Size

func (m *TupleValue) Size() (n int)

func (*TupleValue) String

func (m *TupleValue) String() string

func (*TupleValue) Unmarshal

func (m *TupleValue) Unmarshal(dAtA []byte) error

func (*TupleValue) XXX_DiscardUnknown

func (m *TupleValue) XXX_DiscardUnknown()

func (*TupleValue) XXX_Marshal

func (m *TupleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TupleValue) XXX_Merge

func (m *TupleValue) XXX_Merge(src proto.Message)

func (*TupleValue) XXX_Size

func (m *TupleValue) XXX_Size() int

func (*TupleValue) XXX_Unmarshal

func (m *TupleValue) XXX_Unmarshal(b []byte) error

type TypeSpecProto

type TypeSpecProto struct {
	TypeSpecClass TypeSpecProto_TypeSpecClass `` /* 147-byte string literal not displayed */
	// The value returned by TypeSpec._serialize().
	TypeState *StructuredValue `protobuf:"bytes,2,opt,name=type_state,json=typeState,proto3" json:"type_state,omitempty"`
	// This is currently redundant with the type_spec_class enum, and is only
	// used for error reporting.  In particular, if you use an older binary to
	// load a newer model, and the model uses a TypeSpecClass that the older
	// binary doesn't support, then this lets us display a useful error message.
	TypeSpecClassName string `protobuf:"bytes,3,opt,name=type_spec_class_name,json=typeSpecClassName,proto3" json:"type_spec_class_name,omitempty"`
}

Represents a tf.TypeSpec.

func (*TypeSpecProto) Descriptor

func (*TypeSpecProto) Descriptor() ([]byte, []int)

func (*TypeSpecProto) GetTypeSpecClass

func (m *TypeSpecProto) GetTypeSpecClass() TypeSpecProto_TypeSpecClass

func (*TypeSpecProto) GetTypeSpecClassName

func (m *TypeSpecProto) GetTypeSpecClassName() string

func (*TypeSpecProto) GetTypeState

func (m *TypeSpecProto) GetTypeState() *StructuredValue

func (*TypeSpecProto) Marshal

func (m *TypeSpecProto) Marshal() (dAtA []byte, err error)

func (*TypeSpecProto) MarshalTo

func (m *TypeSpecProto) MarshalTo(dAtA []byte) (int, error)

func (*TypeSpecProto) MarshalToSizedBuffer

func (m *TypeSpecProto) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TypeSpecProto) ProtoMessage

func (*TypeSpecProto) ProtoMessage()

func (*TypeSpecProto) Reset

func (m *TypeSpecProto) Reset()

func (*TypeSpecProto) Size

func (m *TypeSpecProto) Size() (n int)

func (*TypeSpecProto) String

func (m *TypeSpecProto) String() string

func (*TypeSpecProto) Unmarshal

func (m *TypeSpecProto) Unmarshal(dAtA []byte) error

func (*TypeSpecProto) XXX_DiscardUnknown

func (m *TypeSpecProto) XXX_DiscardUnknown()

func (*TypeSpecProto) XXX_Marshal

func (m *TypeSpecProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TypeSpecProto) XXX_Merge

func (m *TypeSpecProto) XXX_Merge(src proto.Message)

func (*TypeSpecProto) XXX_Size

func (m *TypeSpecProto) XXX_Size() int

func (*TypeSpecProto) XXX_Unmarshal

func (m *TypeSpecProto) XXX_Unmarshal(b []byte) error

type TypeSpecProto_TypeSpecClass

type TypeSpecProto_TypeSpecClass int32
const (
	TypeSpecProto_UNKNOWN             TypeSpecProto_TypeSpecClass = 0
	TypeSpecProto_SPARSE_TENSOR_SPEC  TypeSpecProto_TypeSpecClass = 1
	TypeSpecProto_INDEXED_SLICES_SPEC TypeSpecProto_TypeSpecClass = 2
	TypeSpecProto_RAGGED_TENSOR_SPEC  TypeSpecProto_TypeSpecClass = 3
	TypeSpecProto_TENSOR_ARRAY_SPEC   TypeSpecProto_TypeSpecClass = 4
	TypeSpecProto_DATA_DATASET_SPEC   TypeSpecProto_TypeSpecClass = 5
	TypeSpecProto_DATA_ITERATOR_SPEC  TypeSpecProto_TypeSpecClass = 6
	TypeSpecProto_OPTIONAL_SPEC       TypeSpecProto_TypeSpecClass = 7
	TypeSpecProto_PER_REPLICA_SPEC    TypeSpecProto_TypeSpecClass = 8
	TypeSpecProto_VARIABLE_SPEC       TypeSpecProto_TypeSpecClass = 9
	TypeSpecProto_ROW_PARTITION_SPEC  TypeSpecProto_TypeSpecClass = 10
)

func (TypeSpecProto_TypeSpecClass) EnumDescriptor

func (TypeSpecProto_TypeSpecClass) EnumDescriptor() ([]byte, []int)

func (TypeSpecProto_TypeSpecClass) String

type VerifierConfig

type VerifierConfig struct {
	// Deadline for completion of all verification i.e. all the Toggle ON
	// verifiers must complete execution within this time.
	VerificationTimeoutInMs int64 `` /* 135-byte string literal not displayed */
	// Perform structural validation on a tensorflow graph. Default is OFF.
	StructureVerifier VerifierConfig_Toggle `` /* 151-byte string literal not displayed */
}

The config for graph verifiers.

func (*VerifierConfig) Descriptor

func (*VerifierConfig) Descriptor() ([]byte, []int)

func (*VerifierConfig) GetStructureVerifier

func (m *VerifierConfig) GetStructureVerifier() VerifierConfig_Toggle

func (*VerifierConfig) GetVerificationTimeoutInMs

func (m *VerifierConfig) GetVerificationTimeoutInMs() int64

func (*VerifierConfig) Marshal

func (m *VerifierConfig) Marshal() (dAtA []byte, err error)

func (*VerifierConfig) MarshalTo

func (m *VerifierConfig) MarshalTo(dAtA []byte) (int, error)

func (*VerifierConfig) MarshalToSizedBuffer

func (m *VerifierConfig) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*VerifierConfig) ProtoMessage

func (*VerifierConfig) ProtoMessage()

func (*VerifierConfig) Reset

func (m *VerifierConfig) Reset()

func (*VerifierConfig) Size

func (m *VerifierConfig) Size() (n int)

func (*VerifierConfig) String

func (m *VerifierConfig) String() string

func (*VerifierConfig) Unmarshal

func (m *VerifierConfig) Unmarshal(dAtA []byte) error

func (*VerifierConfig) XXX_DiscardUnknown

func (m *VerifierConfig) XXX_DiscardUnknown()

func (*VerifierConfig) XXX_Marshal

func (m *VerifierConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*VerifierConfig) XXX_Merge

func (m *VerifierConfig) XXX_Merge(src proto.Message)

func (*VerifierConfig) XXX_Size

func (m *VerifierConfig) XXX_Size() int

func (*VerifierConfig) XXX_Unmarshal

func (m *VerifierConfig) XXX_Unmarshal(b []byte) error

type VerifierConfig_Toggle

type VerifierConfig_Toggle int32
const (
	VerifierConfig_DEFAULT VerifierConfig_Toggle = 0
	VerifierConfig_ON      VerifierConfig_Toggle = 1
	VerifierConfig_OFF     VerifierConfig_Toggle = 2
)

func (VerifierConfig_Toggle) EnumDescriptor

func (VerifierConfig_Toggle) EnumDescriptor() ([]byte, []int)

func (VerifierConfig_Toggle) String

func (x VerifierConfig_Toggle) String() string

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL