protobuf

package v1.0.0
Published: Aug 16, 2018 License: UPL-1.0 Imports: 7 Imported by: 0

Documentation

Overview

Package protobuf is a generated protocol buffer package.

It is generated from these files:

tensorflow/core/protobuf/config.proto
tensorflow/core/protobuf/debug.proto
tensorflow/core/protobuf/cluster.proto
tensorflow/core/protobuf/rewriter_config.proto

It has these top-level messages:

GPUOptions
OptimizerOptions
GraphOptions
ThreadPoolOptionProto
RPCOptions
ConfigProto
RunOptions
RunMetadata
TensorConnection
CallableOptions
DebugTensorWatch
DebugOptions
DebuggedSourceFile
DebuggedSourceFiles
JobDef
ClusterDef
AutoParallelOptions
ScopedAllocatorOptions
RewriterConfig
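
All of these messages are plain Go structs generated with gogo/protobuf, so the usual pattern is to populate them with struct literals and serialize them with the generated Marshal/Unmarshal methods. A minimal sketch follows; the import path is a placeholder for wherever this package lives in your module, and the field values are arbitrary.

package main

import (
	"fmt"
	"log"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// Build a session configuration and serialize it to the protobuf wire format.
	cfg := &pb.ConfigProto{
		IntraOpParallelismThreads: 4,
		AllowSoftPlacement:        true,
	}
	raw, err := cfg.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	// Decode the bytes back into a fresh message.
	var decoded pb.ConfigProto
	if err := decoded.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetIntraOpParallelismThreads()) // 4
}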

Constants

This section is empty.

Variables

var (
	ErrInvalidLengthCluster = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowCluster   = fmt.Errorf("proto: integer overflow")
)
var (
	ErrInvalidLengthConfig = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowConfig   = fmt.Errorf("proto: integer overflow")
)
var (
	ErrInvalidLengthDebug = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowDebug   = fmt.Errorf("proto: integer overflow")
)
var (
	ErrInvalidLengthRewriterConfig = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowRewriterConfig   = fmt.Errorf("proto: integer overflow")
)
var OptimizerOptions_GlobalJitLevel_name = map[int32]string{
	0:  "DEFAULT",
	-1: "OFF",
	1:  "ON_1",
	2:  "ON_2",
}
var OptimizerOptions_GlobalJitLevel_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     -1,
	"ON_1":    1,
	"ON_2":    2,
}
var OptimizerOptions_Level_name = map[int32]string{
	0:  "L1",
	-1: "L0",
}
var OptimizerOptions_Level_value = map[string]int32{
	"L1": 0,
	"L0": -1,
}
var RewriterConfig_MemOptType_name = map[int32]string{
	0: "DEFAULT_MEM_OPT",
	1: "NO_MEM_OPT",
	2: "MANUAL",
	4: "SWAPPING_HEURISTICS",
	5: "RECOMPUTATION_HEURISTICS",
	6: "SCHEDULING_HEURISTICS",
	3: "HEURISTICS",
}
var RewriterConfig_MemOptType_value = map[string]int32{
	"DEFAULT_MEM_OPT":          0,
	"NO_MEM_OPT":               1,
	"MANUAL":                   2,
	"SWAPPING_HEURISTICS":      4,
	"RECOMPUTATION_HEURISTICS": 5,
	"SCHEDULING_HEURISTICS":    6,
	"HEURISTICS":               3,
}
var RewriterConfig_NumIterationsType_name = map[int32]string{
	0: "DEFAULT_NUM_ITERS",
	1: "ONE",
	2: "TWO",
}
var RewriterConfig_NumIterationsType_value = map[string]int32{
	"DEFAULT_NUM_ITERS": 0,
	"ONE":               1,
	"TWO":               2,
}
var RewriterConfig_Toggle_name = map[int32]string{
	0: "DEFAULT",
	1: "ON",
	2: "OFF",
	3: "AGGRESSIVE",
}
var RewriterConfig_Toggle_value = map[string]int32{
	"DEFAULT":    0,
	"ON":         1,
	"OFF":        2,
	"AGGRESSIVE": 3,
}
var RunOptions_TraceLevel_name = map[int32]string{
	0: "NO_TRACE",
	1: "SOFTWARE_TRACE",
	2: "HARDWARE_TRACE",
	3: "FULL_TRACE",
}
var RunOptions_TraceLevel_value = map[string]int32{
	"NO_TRACE":       0,
	"SOFTWARE_TRACE": 1,
	"HARDWARE_TRACE": 2,
	"FULL_TRACE":     3,
}
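
The generated _name and _value maps above can be used to translate between enum numbers and their proto names, for example when parsing a textual configuration. A small sketch, again with a placeholder import path:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// Number -> proto name, via the generated _name map.
	fmt.Println(pb.RewriterConfig_MemOptType_name[4]) // SWAPPING_HEURISTICS

	// Proto name -> typed enum value, via the generated _value map.
	jit := pb.OptimizerOptions_GlobalJitLevel(pb.OptimizerOptions_GlobalJitLevel_value["ON_1"])
	fmt.Println(jit == pb.OptimizerOptions_ON_1) // true
}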

Functions

This section is empty.

Types

type AutoParallelOptions

type AutoParallelOptions struct {
	Enable      bool  `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"`
	NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas,proto3" json:"num_replicas,omitempty"`
}

func (*AutoParallelOptions) Descriptor

func (*AutoParallelOptions) Descriptor() ([]byte, []int)

func (*AutoParallelOptions) GetEnable

func (m *AutoParallelOptions) GetEnable() bool

func (*AutoParallelOptions) GetNumReplicas

func (m *AutoParallelOptions) GetNumReplicas() int32

func (*AutoParallelOptions) Marshal

func (m *AutoParallelOptions) Marshal() (dAtA []byte, err error)

func (*AutoParallelOptions) MarshalTo

func (m *AutoParallelOptions) MarshalTo(dAtA []byte) (int, error)

func (*AutoParallelOptions) ProtoMessage

func (*AutoParallelOptions) ProtoMessage()

func (*AutoParallelOptions) Reset

func (m *AutoParallelOptions) Reset()

func (*AutoParallelOptions) Size

func (m *AutoParallelOptions) Size() (n int)

func (*AutoParallelOptions) String

func (m *AutoParallelOptions) String() string

func (*AutoParallelOptions) Unmarshal

func (m *AutoParallelOptions) Unmarshal(dAtA []byte) error

type CallableOptions

type CallableOptions struct {
	// Tensors to be fed in the callable. Each feed is the name of a tensor.
	Feed []string `protobuf:"bytes,1,rep,name=feed" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller of the callable expects a
	// tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
	// order of specified fetches does not change the execution order.
	Fetch []string `protobuf:"bytes,2,rep,name=fetch" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run by the
	// callable but their outputs will not be returned.
	Target []string `protobuf:"bytes,3,rep,name=target" json:"target,omitempty"`
	// Options that will be applied to each run.
	RunOptions *RunOptions `protobuf:"bytes,4,opt,name=run_options,json=runOptions" json:"run_options,omitempty"`
	// Tensors to be connected in the callable. Each TensorConnection denotes
	// a pair of tensors in the graph, between which an edge will be created
	// in the callable.
	TensorConnection []*TensorConnection `protobuf:"bytes,5,rep,name=tensor_connection,json=tensorConnection" json:"tensor_connection,omitempty"`
	// The Tensor objects fed in the callable and fetched from the callable
	// are expected to be backed by host (CPU) memory by default.
	//
	// The options below allow changing that - feeding tensors backed by
	// device memory, or returning tensors that are backed by device memory.
	//
	// The maps below map the name of a feed/fetch tensor (which appears in
	// 'feed' or 'fetch' fields above), to the fully qualified name of the device
	// owning the memory backing the contents of the tensor.
	//
	// For example, creating a callable with the following options:
	//
	// CallableOptions {
	//   feed: "a:0"
	//   feed: "b:0"
	//
	//   fetch: "x:0"
	//   fetch: "y:0"
	//
	//   feed_devices: {
	//     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//   }
	//
	//   fetch_devices: {
	//     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//  }
	// }
	//
	// means that the Callable expects:
	// - The first argument ("a:0") is a Tensor backed by GPU memory.
	// - The second argument ("b:0") is a Tensor backed by host memory.
	// and of its return values:
	// - The first output ("x:0") will be backed by host memory.
	// - The second output ("y:0") will be backed by GPU memory.
	//
	// FEEDS:
	// It is the responsibility of the caller to ensure that the memory of the fed
	// tensors will be correctly initialized and synchronized before it is
	// accessed by operations executed during the call to Session::RunCallable().
	//
	// This is typically ensured by using the TensorFlow memory allocators
	// (Device::GetAllocator()) to create the Tensor to be fed.
	//
	// Alternatively, for CUDA-enabled GPU devices, this typically means that the
	// operation that produced the contents of the tensor has completed, i.e., the
	// CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
	// cuStreamSynchronize()).
	FeedDevices  map[string]string `` /* 175-byte string literal not displayed */
	FetchDevices map[string]string `` /* 178-byte string literal not displayed */
	// By default, RunCallable() will synchronize the GPU stream before returning
	// fetched tensors on a GPU device, to ensure that the values in those tensors
	// have been produced. This simplifies interacting with the tensors, but
	// potentially incurs a performance hit.
	//
	// If this option is set to true, the caller is responsible for ensuring
	// that the values in the fetched tensors have been produced before they are
	// used. The caller can do this by invoking `Device::Sync()` on the underlying
	// device(s), or by feeding the tensors back to the same Session using
	// `feed_devices` with the same corresponding device name.
	FetchSkipSync bool `protobuf:"varint,8,opt,name=fetch_skip_sync,json=fetchSkipSync,proto3" json:"fetch_skip_sync,omitempty"`
}

Defines a subgraph in another `GraphDef` as a set of feed points and nodes to be fetched or executed.

Compare with the arguments to `Session::Run()`.
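
A rough sketch of building a CallableOptions message that mirrors the feed_devices example in the field comments above; the tensor and device names are illustrative only and the import path is a placeholder:

package main

import (
	"fmt"
	"log"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	gpu0 := "/job:localhost/replica:0/task:0/device:GPU:0"
	opts := &pb.CallableOptions{
		Feed:  []string{"a:0", "b:0"},
		Fetch: []string{"x:0", "y:0"},
		// Feed "a:0" from GPU memory and fetch "y:0" into GPU memory;
		// everything else stays backed by host memory.
		FeedDevices:  map[string]string{"a:0": gpu0},
		FetchDevices: map[string]string{"y:0": gpu0},
	}
	raw, err := opts.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("CallableOptions encodes to %d bytes\n", len(raw))
}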

func (*CallableOptions) Descriptor

func (*CallableOptions) Descriptor() ([]byte, []int)

func (*CallableOptions) GetFeed

func (m *CallableOptions) GetFeed() []string

func (*CallableOptions) GetFeedDevices

func (m *CallableOptions) GetFeedDevices() map[string]string

func (*CallableOptions) GetFetch

func (m *CallableOptions) GetFetch() []string

func (*CallableOptions) GetFetchDevices

func (m *CallableOptions) GetFetchDevices() map[string]string

func (*CallableOptions) GetFetchSkipSync

func (m *CallableOptions) GetFetchSkipSync() bool

func (*CallableOptions) GetRunOptions

func (m *CallableOptions) GetRunOptions() *RunOptions

func (*CallableOptions) GetTarget

func (m *CallableOptions) GetTarget() []string

func (*CallableOptions) GetTensorConnection

func (m *CallableOptions) GetTensorConnection() []*TensorConnection

func (*CallableOptions) Marshal

func (m *CallableOptions) Marshal() (dAtA []byte, err error)

func (*CallableOptions) MarshalTo

func (m *CallableOptions) MarshalTo(dAtA []byte) (int, error)

func (*CallableOptions) ProtoMessage

func (*CallableOptions) ProtoMessage()

func (*CallableOptions) Reset

func (m *CallableOptions) Reset()

func (*CallableOptions) Size

func (m *CallableOptions) Size() (n int)

func (*CallableOptions) String

func (m *CallableOptions) String() string

func (*CallableOptions) Unmarshal

func (m *CallableOptions) Unmarshal(dAtA []byte) error

type ClusterDef

type ClusterDef struct {
	// The jobs that comprise the cluster.
	Job []*JobDef `protobuf:"bytes,1,rep,name=job" json:"job,omitempty"`
}

Defines a TensorFlow cluster as a set of jobs.
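
A sketch of assembling a small cluster out of JobDef messages; the job names and addresses are made up and the import path is a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// One parameter-server job and one worker job, with tasks keyed by task ID.
	cluster := &pb.ClusterDef{
		Job: []*pb.JobDef{
			{Name: "ps", Tasks: map[int32]string{0: "ps0.example.org:2222"}},
			{Name: "worker", Tasks: map[int32]string{
				0: "worker0.example.org:2222",
				1: "worker1.example.org:2222",
			}},
		},
	}
	fmt.Println(len(cluster.GetJob())) // 2
}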

func (*ClusterDef) Descriptor

func (*ClusterDef) Descriptor() ([]byte, []int)

func (*ClusterDef) GetJob

func (m *ClusterDef) GetJob() []*JobDef

func (*ClusterDef) Marshal

func (m *ClusterDef) Marshal() (dAtA []byte, err error)

func (*ClusterDef) MarshalTo

func (m *ClusterDef) MarshalTo(dAtA []byte) (int, error)

func (*ClusterDef) ProtoMessage

func (*ClusterDef) ProtoMessage()

func (*ClusterDef) Reset

func (m *ClusterDef) Reset()

func (*ClusterDef) Size

func (m *ClusterDef) Size() (n int)

func (*ClusterDef) String

func (m *ClusterDef) String() string

func (*ClusterDef) Unmarshal

func (m *ClusterDef) Unmarshal(dAtA []byte) error

type ConfigProto

type ConfigProto struct {
	// Map from device type name (e.g., "CPU" or "GPU" ) to maximum
	// number of devices of that type to use.  If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 176-byte string literal not displayed */
	// The execution of an individual op (for some op types) can be
	// parallelized on a pool of intra_op_parallelism_threads.
	// 0 means the system picks an appropriate number.
	IntraOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// Nodes that perform blocking operations are enqueued on a pool of
	// inter_op_parallelism_threads available in each process.
	//
	// 0 means the system picks an appropriate number.
	//
	// Note that the first Session created in the process sets the
	// number of threads for all future sessions unless use_per_session_threads is
	// true or session_inter_op_thread_pool is configured.
	InterOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// If true, use a new set of threads for this session rather than the global
	// pool of threads. Only supported by direct sessions.
	//
	// If false, use the global threads created by the first session, or the
	// per-session thread pools configured by session_inter_op_thread_pool.
	//
	// This option is deprecated. The same effect can be achieved by setting
	// session_inter_op_thread_pool to have one element, whose num_threads equals
	// inter_op_parallelism_threads.
	UsePerSessionThreads bool `` /* 126-byte string literal not displayed */
	// This option is experimental - it may be replaced with a different mechanism
	// in the future.
	//
	// Configures session thread pools. If this is configured, then RunOptions for
	// a Run call can select the thread pool to use.
	//
	// The intended use is for when some session invocations need to run in a
	// background pool limited to a small number of threads:
	// - For example, a session may be configured to have one large pool (for
	// regular compute) and one small pool (for periodic, low priority work);
	// using the small pool is currently the mechanism for limiting the inter-op
	// parallelism of the low priority work.  Note that it does not limit the
	// parallelism of work spawned by a single op kernel implementation.
	// - Using this setting is normally not needed in training, but may help some
	// serving use cases.
	// - It is also generally recommended to set the global_name field of this
	// proto, to avoid creating multiple large pools. It is typically better to
	// run the non-low-priority work, even across sessions, in a single large
	// pool.
	SessionInterOpThreadPool []*ThreadPoolOptionProto `` /* 133-byte string literal not displayed */
	// Assignment of Nodes to Devices is recomputed every placement_period
	// steps until the system warms up (at which point the recomputation
	// typically slows down automatically).
	PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"`
	// When any filters are present, sessions will ignore all devices that do not
	// match the filters. Each filter can be partially specified, e.g. "/job:ps"
	// "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters" json:"device_filters,omitempty"`
	// Options that apply to all GPUs.
	GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions" json:"gpu_options,omitempty"`
	// Whether soft placement is allowed. If allow_soft_placement is true,
	// an op will be placed on CPU if
	//   1. there's no GPU implementation for the OP
	// or
	//   2. no GPU devices are known or registered
	// or
	//   3. need to co-locate with reftype input(s) which are from CPU.
	AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"`
	// Whether device placements should be logged.
	LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"`
	// Options that apply to all graphs.
	GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions" json:"graph_options,omitempty"`
	// Global timeout for all blocking operations in this session.  If non-zero,
	// and not overridden on a per-operation basis, this value will be used as the
	// deadline for all blocking operations.
	OperationTimeoutInMs int64 `` /* 127-byte string literal not displayed */
	// Options that apply when this session uses the distributed runtime.
	RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions" json:"rpc_options,omitempty"`
	// Optional list of all workers to use in this session.
	ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef" json:"cluster_def,omitempty"`
	// If true, any resources such as Variables used in the session will not be
	// shared with other sessions.
	IsolateSessionState bool                      `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"`
	Experimental        *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental" json:"experimental,omitempty"`
}

Session configuration parameters. The system picks appropriate values for fields that are not set.
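
A sketch of a typical session configuration; the values are illustrative, the import path is a placeholder, and how the serialized bytes are consumed depends on the TensorFlow binding in use:

package main

import (
	"log"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	cfg := &pb.ConfigProto{
		// Use at most one GPU device in this session.
		DeviceCount:               map[string]int32{"GPU": 1},
		IntraOpParallelismThreads: 8,
		InterOpParallelismThreads: 2,
		AllowSoftPlacement:        true,
		LogDevicePlacement:        true,
		GpuOptions:                &pb.GPUOptions{AllowGrowth: true},
	}
	raw, err := cfg.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// raw now holds a serialized ConfigProto, ready to hand to whatever
	// consumes session options in your TensorFlow binding.
	_ = raw
}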

func (*ConfigProto) Descriptor

func (*ConfigProto) Descriptor() ([]byte, []int)

func (*ConfigProto) GetAllowSoftPlacement

func (m *ConfigProto) GetAllowSoftPlacement() bool

func (*ConfigProto) GetClusterDef

func (m *ConfigProto) GetClusterDef() *ClusterDef

func (*ConfigProto) GetDeviceCount

func (m *ConfigProto) GetDeviceCount() map[string]int32

func (*ConfigProto) GetDeviceFilters

func (m *ConfigProto) GetDeviceFilters() []string

func (*ConfigProto) GetExperimental

func (m *ConfigProto) GetExperimental() *ConfigProto_Experimental

func (*ConfigProto) GetGpuOptions

func (m *ConfigProto) GetGpuOptions() *GPUOptions

func (*ConfigProto) GetGraphOptions

func (m *ConfigProto) GetGraphOptions() *GraphOptions

func (*ConfigProto) GetInterOpParallelismThreads

func (m *ConfigProto) GetInterOpParallelismThreads() int32

func (*ConfigProto) GetIntraOpParallelismThreads

func (m *ConfigProto) GetIntraOpParallelismThreads() int32

func (*ConfigProto) GetIsolateSessionState

func (m *ConfigProto) GetIsolateSessionState() bool

func (*ConfigProto) GetLogDevicePlacement

func (m *ConfigProto) GetLogDevicePlacement() bool

func (*ConfigProto) GetOperationTimeoutInMs

func (m *ConfigProto) GetOperationTimeoutInMs() int64

func (*ConfigProto) GetPlacementPeriod

func (m *ConfigProto) GetPlacementPeriod() int32

func (*ConfigProto) GetRpcOptions

func (m *ConfigProto) GetRpcOptions() *RPCOptions

func (*ConfigProto) GetSessionInterOpThreadPool

func (m *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto

func (*ConfigProto) GetUsePerSessionThreads

func (m *ConfigProto) GetUsePerSessionThreads() bool

func (*ConfigProto) Marshal

func (m *ConfigProto) Marshal() (dAtA []byte, err error)

func (*ConfigProto) MarshalTo

func (m *ConfigProto) MarshalTo(dAtA []byte) (int, error)

func (*ConfigProto) ProtoMessage

func (*ConfigProto) ProtoMessage()

func (*ConfigProto) Reset

func (m *ConfigProto) Reset()

func (*ConfigProto) Size

func (m *ConfigProto) Size() (n int)

func (*ConfigProto) String

func (m *ConfigProto) String() string

func (*ConfigProto) Unmarshal

func (m *ConfigProto) Unmarshal(dAtA []byte) error

type ConfigProto_Experimental

type ConfigProto_Experimental struct {
	// Task name for group resolution.
	CollectiveGroupLeader string `` /* 126-byte string literal not displayed */
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*ConfigProto_Experimental) Descriptor

func (*ConfigProto_Experimental) Descriptor() ([]byte, []int)

func (*ConfigProto_Experimental) GetCollectiveGroupLeader

func (m *ConfigProto_Experimental) GetCollectiveGroupLeader() string

func (*ConfigProto_Experimental) Marshal

func (m *ConfigProto_Experimental) Marshal() (dAtA []byte, err error)

func (*ConfigProto_Experimental) MarshalTo

func (m *ConfigProto_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*ConfigProto_Experimental) ProtoMessage

func (*ConfigProto_Experimental) ProtoMessage()

func (*ConfigProto_Experimental) Reset

func (m *ConfigProto_Experimental) Reset()

func (*ConfigProto_Experimental) Size

func (m *ConfigProto_Experimental) Size() (n int)

func (*ConfigProto_Experimental) String

func (m *ConfigProto_Experimental) String() string

func (*ConfigProto_Experimental) Unmarshal

func (m *ConfigProto_Experimental) Unmarshal(dAtA []byte) error

type DebugOptions

type DebugOptions struct {
	// Debugging options
	DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts" json:"debug_tensor_watch_opts,omitempty"`
	// Caller-specified global step count.
	// Note that this is distinct from the session run count and the executor
	// step count.
	GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep,proto3" json:"global_step,omitempty"`
}

EXPERIMENTAL. Options for initializing DebuggerState.

func (*DebugOptions) Descriptor

func (*DebugOptions) Descriptor() ([]byte, []int)

func (*DebugOptions) GetDebugTensorWatchOpts

func (m *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch

func (*DebugOptions) GetGlobalStep

func (m *DebugOptions) GetGlobalStep() int64

func (*DebugOptions) Marshal

func (m *DebugOptions) Marshal() (dAtA []byte, err error)

func (*DebugOptions) MarshalTo

func (m *DebugOptions) MarshalTo(dAtA []byte) (int, error)

func (*DebugOptions) ProtoMessage

func (*DebugOptions) ProtoMessage()

func (*DebugOptions) Reset

func (m *DebugOptions) Reset()

func (*DebugOptions) Size

func (m *DebugOptions) Size() (n int)

func (*DebugOptions) String

func (m *DebugOptions) String() string

func (*DebugOptions) Unmarshal

func (m *DebugOptions) Unmarshal(dAtA []byte) error

type DebugTensorWatch

type DebugTensorWatch struct {
	// Name of the node to watch.
	NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Output slot to watch.
	// The semantics of output_slot == -1 is that the node is only watched for
	// completion, but not for any output tensors. See NodeCompletionCallback
	// in debug_gateway.h.
	// TODO(cais): Implement this semantics.
	OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"`
	// Name(s) of the debugging op(s).
	// One or more probes on a tensor.
	// e.g., {"DebugIdentity", "DebugNanCount"}
	DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps" json:"debug_ops,omitempty"`
	// URL(s) for debug target(s).
	//
	// Supported URL formats are:
	//   - file:///foo/tfdbg_dump: Writes out Event content to file
	//     /foo/tfdbg_dump.  Assumes all directories can be created if they don't
	//     already exist.
	//   - grpc://localhost:11011: Sends an RPC request to an EventListener
	//     service running at localhost:11011 with the event.
	//   - memcbk:///event_key: Routes tensors to clients using the
	//     callback registered with the DebugCallbackRegistry for event_key.
	//
	// Each debug op listed in debug_ops will publish its output tensor (debug
	// signal) to all URLs in debug_urls.
	//
	// N.B. Session::Run() supports concurrent invocations of the same inputs
	// (feed keys), outputs and target nodes. If such concurrent invocations
	// are to be debugged, the callers of Session::Run() must use distinct
	// debug_urls to make sure that the streamed or dumped events do not overlap
	// among the invocations.
	// TODO(cais): More visible documentation of this in g3docs.
	DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls" json:"debug_urls,omitempty"`
	// Do not error out if debug op creation fails (e.g., due to dtype
	// incompatibility). Instead, just log the failure.
	TolerateDebugOpCreationFailures bool `` /* 161-byte string literal not displayed */
}

EXPERIMENTAL. Option for watching a node.
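
A sketch of watching a single node with two debug ops, using one of the debug_urls formats listed in the field comments; the node name and dump path are illustrative and the import path is a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// Watch the first output of a (hypothetical) node with two debug ops and
	// dump the debug tensors to a local tfdbg directory.
	watch := &pb.DebugTensorWatch{
		NodeName:   "dense/BiasAdd",
		OutputSlot: 0,
		DebugOps:   []string{"DebugIdentity", "DebugNanCount"},
		DebugUrls:  []string{"file:///tmp/tfdbg_dump"},
	}
	debug := &pb.DebugOptions{
		DebugTensorWatchOpts: []*pb.DebugTensorWatch{watch},
		GlobalStep:           100,
	}
	fmt.Println(len(debug.GetDebugTensorWatchOpts())) // 1
}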

func (*DebugTensorWatch) Descriptor

func (*DebugTensorWatch) Descriptor() ([]byte, []int)

func (*DebugTensorWatch) GetDebugOps

func (m *DebugTensorWatch) GetDebugOps() []string

func (*DebugTensorWatch) GetDebugUrls

func (m *DebugTensorWatch) GetDebugUrls() []string

func (*DebugTensorWatch) GetNodeName

func (m *DebugTensorWatch) GetNodeName() string

func (*DebugTensorWatch) GetOutputSlot

func (m *DebugTensorWatch) GetOutputSlot() int32

func (*DebugTensorWatch) GetTolerateDebugOpCreationFailures

func (m *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool

func (*DebugTensorWatch) Marshal

func (m *DebugTensorWatch) Marshal() (dAtA []byte, err error)

func (*DebugTensorWatch) MarshalTo

func (m *DebugTensorWatch) MarshalTo(dAtA []byte) (int, error)

func (*DebugTensorWatch) ProtoMessage

func (*DebugTensorWatch) ProtoMessage()

func (*DebugTensorWatch) Reset

func (m *DebugTensorWatch) Reset()

func (*DebugTensorWatch) Size

func (m *DebugTensorWatch) Size() (n int)

func (*DebugTensorWatch) String

func (m *DebugTensorWatch) String() string

func (*DebugTensorWatch) Unmarshal

func (m *DebugTensorWatch) Unmarshal(dAtA []byte) error

type DebuggedSourceFile

type DebuggedSourceFile struct {
	// The host name on which a source code file is located.
	Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
	// Path to the source code file.
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	// The timestamp at which the source code file is last modified.
	LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
	// Byte size of the file.
	Bytes int64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"`
	// Line-by-line content of the source code file.
	Lines []string `protobuf:"bytes,5,rep,name=lines" json:"lines,omitempty"`
}

func (*DebuggedSourceFile) Descriptor

func (*DebuggedSourceFile) Descriptor() ([]byte, []int)

func (*DebuggedSourceFile) GetBytes

func (m *DebuggedSourceFile) GetBytes() int64

func (*DebuggedSourceFile) GetFilePath

func (m *DebuggedSourceFile) GetFilePath() string

func (*DebuggedSourceFile) GetHost

func (m *DebuggedSourceFile) GetHost() string

func (*DebuggedSourceFile) GetLastModified

func (m *DebuggedSourceFile) GetLastModified() int64

func (*DebuggedSourceFile) GetLines

func (m *DebuggedSourceFile) GetLines() []string

func (*DebuggedSourceFile) Marshal

func (m *DebuggedSourceFile) Marshal() (dAtA []byte, err error)

func (*DebuggedSourceFile) MarshalTo

func (m *DebuggedSourceFile) MarshalTo(dAtA []byte) (int, error)

func (*DebuggedSourceFile) ProtoMessage

func (*DebuggedSourceFile) ProtoMessage()

func (*DebuggedSourceFile) Reset

func (m *DebuggedSourceFile) Reset()

func (*DebuggedSourceFile) Size

func (m *DebuggedSourceFile) Size() (n int)

func (*DebuggedSourceFile) String

func (m *DebuggedSourceFile) String() string

func (*DebuggedSourceFile) Unmarshal

func (m *DebuggedSourceFile) Unmarshal(dAtA []byte) error

type DebuggedSourceFiles

type DebuggedSourceFiles struct {
	// A collection of source code files.
	SourceFiles []*DebuggedSourceFile `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles" json:"source_files,omitempty"`
}

func (*DebuggedSourceFiles) Descriptor

func (*DebuggedSourceFiles) Descriptor() ([]byte, []int)

func (*DebuggedSourceFiles) GetSourceFiles

func (m *DebuggedSourceFiles) GetSourceFiles() []*DebuggedSourceFile

func (*DebuggedSourceFiles) Marshal

func (m *DebuggedSourceFiles) Marshal() (dAtA []byte, err error)

func (*DebuggedSourceFiles) MarshalTo

func (m *DebuggedSourceFiles) MarshalTo(dAtA []byte) (int, error)

func (*DebuggedSourceFiles) ProtoMessage

func (*DebuggedSourceFiles) ProtoMessage()

func (*DebuggedSourceFiles) Reset

func (m *DebuggedSourceFiles) Reset()

func (*DebuggedSourceFiles) Size

func (m *DebuggedSourceFiles) Size() (n int)

func (*DebuggedSourceFiles) String

func (m *DebuggedSourceFiles) String() string

func (*DebuggedSourceFiles) Unmarshal

func (m *DebuggedSourceFiles) Unmarshal(dAtA []byte) error

type GPUOptions

type GPUOptions struct {
	// Fraction of the available GPU memory to allocate for each process.
	// 1 means to allocate all of the GPU memory, 0.5 means the process
	// allocates up to ~50% of the available GPU memory.
	//
	// GPU memory is pre-allocated unless the allow_growth option is enabled.
	//
	// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
	// the amount of memory available on the GPU device by using host memory as a
	// swap space. Accessing memory not available on the device will be
	// significantly slower as that would require memory transfer between the host
	// and the device. Options to reduce the memory requirement should be
	// considered before enabling this option as this may come with a negative
	// performance impact. Oversubscription using the unified memory requires
	// Pascal class or newer GPUs and it is currently only supported on the Linux
	// operating system. See
	// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
	// for the detailed requirements.
	PerProcessGpuMemoryFraction float64 `` /* 150-byte string literal not displayed */
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	// "": The empty string (default) uses a system-chosen default
	//     which may change over time.
	//
	// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//        version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with gpu driver code.  If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `` /* 127-byte string literal not displayed */
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices.  For example, if TensorFlow
	// can see 8 GPU devices in the process, and one wanted to map
	// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
	// then one would specify this field as "5,3".  This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE:
	// 1. The GPU driver provides the process with the visible GPUs
	//    in an order which is not guaranteed to have any correlation to
	//    the *physical* GPU id in the machine.  This field is used for
	//    remapping "visible" to "virtual", which means this operates only
	//    after the process starts.  Users are required to use vendor
	//    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	//    physical to visible device mapping prior to invoking TensorFlow.
	// 2. In the code, the ids in this list are also called "CUDA GPU id"s,
	//    and the 'virtual' ids of GPU devices (i.e. the ids in the device
	//    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
	//    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
	//    for more information.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
	// In the event polling loop, sleep this many microseconds between
	// PollEvents calls, when the queue is not empty.  If value is not
	// set or set to 0, gets set to a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 135-byte string literal not displayed */
	// This field is deprecated and ignored.
	PollingInactiveDelayMsecs int32 `` /* 141-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with Cuda
	// pinned memory. Normally, TensorFlow will infer which tensors should be
	// allocated as pinned memory. But in cases where the inference is
	// incomplete, this option can significantly speed up the cross-device memory
	// copy performance as long as it fits the memory.
	// Note that this option should not be enabled by default for unknown or
	// very large models, since all CUDA pinned memory is unpageable; having too
	// much pinned memory might negatively impact the overall host system
	// performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
	// Everything inside experimental is subject to change and is not subject
	// to API stability guarantees in
	// https://www.tensorflow.org/guide/version_compat.
	Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental" json:"experimental,omitempty"`
}
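
A sketch of a GPUOptions message that exercises a few of the fields documented above; the values are illustrative and the import path is a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	gpu := &pb.GPUOptions{
		// Grow allocations on demand instead of grabbing all GPU memory up front.
		AllowGrowth: true,
		// Cap the process at roughly 40% of each GPU's memory.
		PerProcessGpuMemoryFraction: 0.4,
		// Expose visible GPUs 5 and 3 as /device:GPU:0 and /device:GPU:1.
		VisibleDeviceList: "5,3",
	}
	fmt.Println(gpu.GetVisibleDeviceList()) // 5,3
}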

func (*GPUOptions) Descriptor

func (*GPUOptions) Descriptor() ([]byte, []int)

func (*GPUOptions) GetAllocatorType

func (m *GPUOptions) GetAllocatorType() string

func (*GPUOptions) GetAllowGrowth

func (m *GPUOptions) GetAllowGrowth() bool

func (*GPUOptions) GetDeferredDeletionBytes

func (m *GPUOptions) GetDeferredDeletionBytes() int64

func (*GPUOptions) GetExperimental

func (m *GPUOptions) GetExperimental() *GPUOptions_Experimental

func (*GPUOptions) GetForceGpuCompatible

func (m *GPUOptions) GetForceGpuCompatible() bool

func (*GPUOptions) GetPerProcessGpuMemoryFraction

func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64

func (*GPUOptions) GetPollingActiveDelayUsecs

func (m *GPUOptions) GetPollingActiveDelayUsecs() int32

func (*GPUOptions) GetPollingInactiveDelayMsecs

func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32

func (*GPUOptions) GetVisibleDeviceList

func (m *GPUOptions) GetVisibleDeviceList() string

func (*GPUOptions) Marshal

func (m *GPUOptions) Marshal() (dAtA []byte, err error)

func (*GPUOptions) MarshalTo

func (m *GPUOptions) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions) ProtoMessage

func (*GPUOptions) ProtoMessage()

func (*GPUOptions) Reset

func (m *GPUOptions) Reset()

func (*GPUOptions) Size

func (m *GPUOptions) Size() (n int)

func (*GPUOptions) String

func (m *GPUOptions) String() string

func (*GPUOptions) Unmarshal

func (m *GPUOptions) Unmarshal(dAtA []byte) error

type GPUOptions_Experimental

type GPUOptions_Experimental struct {
	// The multi-virtual-device settings. If empty (not set), a single virtual
	// device will be created on each visible GPU, according to the settings
	// in "visible_device_list" above. Otherwise, the number of elements in the
	// list must be the same as the number of visible GPUs (after
	// "visible_device_list" filtering if it is set), and the string represented
	// device names (e.g. /device:GPU:<id>) will refer to the virtual
	// devices and have the <id> field assigned sequentially starting from 0,
	// according to the order they appear in this list and the "memory_limit"
	// list inside each element. For example,
	//   visible_device_list = "1,0"
	//   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
	//   virtual_devices {}
	// will create three virtual devices as:
	//   /device:GPU:0 -> visible GPU 1 with 1GB memory
	//   /device:GPU:1 -> visible GPU 1 with 2GB memory
	//   /device:GPU:2 -> visible GPU 0 with all available memory
	//
	// NOTE:
	// 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
	//    at the same time.
	// 2. Currently this setting is per-process, not per-session. Using
	//    different settings in different sessions within the same process will
	//    result in undefined behavior.
	VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices" json:"virtual_devices,omitempty"`
	// If true, uses CUDA unified memory for memory allocations. If
	// per_process_gpu_memory_fraction option is greater than 1.0, then unified
	// memory is used regardless of the value for this field. See comments for
	// per_process_gpu_memory_fraction field for more details and requirements
	// of the unified memory. This option is useful to oversubscribe memory if
	// multiple processes are sharing a single GPU while individually using less
	// than 1.0 per process memory fraction.
	UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"`
	// If > 1, the number of device-to-device copy streams to create
	// for each GPUDevice.
	NumDevToDevCopyStreams int32 `` /* 136-byte string literal not displayed */
}

func (*GPUOptions_Experimental) Descriptor

func (*GPUOptions_Experimental) Descriptor() ([]byte, []int)

func (*GPUOptions_Experimental) GetNumDevToDevCopyStreams

func (m *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32

func (*GPUOptions_Experimental) GetUseUnifiedMemory

func (m *GPUOptions_Experimental) GetUseUnifiedMemory() bool

func (*GPUOptions_Experimental) GetVirtualDevices

func (m *GPUOptions_Experimental) GetVirtualDevices() []*GPUOptions_Experimental_VirtualDevices

func (*GPUOptions_Experimental) Marshal

func (m *GPUOptions_Experimental) Marshal() (dAtA []byte, err error)

func (*GPUOptions_Experimental) MarshalTo

func (m *GPUOptions_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions_Experimental) ProtoMessage

func (*GPUOptions_Experimental) ProtoMessage()

func (*GPUOptions_Experimental) Reset

func (m *GPUOptions_Experimental) Reset()

func (*GPUOptions_Experimental) Size

func (m *GPUOptions_Experimental) Size() (n int)

func (*GPUOptions_Experimental) String

func (m *GPUOptions_Experimental) String() string

func (*GPUOptions_Experimental) Unmarshal

func (m *GPUOptions_Experimental) Unmarshal(dAtA []byte) error

type GPUOptions_Experimental_VirtualDevices

type GPUOptions_Experimental_VirtualDevices struct {
	// Per "virtual" device memory limit, in MB. The number of elements in
	// the list is the number of virtual devices to create on the
	// corresponding visible GPU (see "virtual_devices" below).
	// If empty, it will create a single virtual device taking all available
	// memory from the device.
	//
	// For the concept of "visible" and "virtual" GPU, see the comments for
	// "visible_device_list" above for more information.
	MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb" json:"memory_limit_mb,omitempty"`
}

Configuration for breaking down a visible GPU into multiple "virtual" devices.
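
A sketch that reproduces the visible_device_list = "1,0" example from the VirtualDevices comments above (placeholder import path):

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// Split visible GPU 1 into two virtual devices (1 GB and 2 GB) and leave
	// visible GPU 0 as a single device with all of its memory.
	gpu := &pb.GPUOptions{
		VisibleDeviceList: "1,0",
		Experimental: &pb.GPUOptions_Experimental{
			VirtualDevices: []*pb.GPUOptions_Experimental_VirtualDevices{
				{MemoryLimitMb: []float32{1024, 2048}},
				{}, // no limit: one virtual device with all available memory
			},
		},
	}
	fmt.Println(len(gpu.GetExperimental().GetVirtualDevices())) // 2
}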

func (*GPUOptions_Experimental_VirtualDevices) Descriptor

func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int)

func (*GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb

func (m *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32

func (*GPUOptions_Experimental_VirtualDevices) Marshal

func (m *GPUOptions_Experimental_VirtualDevices) Marshal() (dAtA []byte, err error)

func (*GPUOptions_Experimental_VirtualDevices) MarshalTo

func (m *GPUOptions_Experimental_VirtualDevices) MarshalTo(dAtA []byte) (int, error)

func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage

func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage()

func (*GPUOptions_Experimental_VirtualDevices) Reset

func (m *GPUOptions_Experimental_VirtualDevices) Reset()

func (*GPUOptions_Experimental_VirtualDevices) Size

func (m *GPUOptions_Experimental_VirtualDevices) Size() (n int)

func (*GPUOptions_Experimental_VirtualDevices) String

func (m *GPUOptions_Experimental_VirtualDevices) String() string

func (*GPUOptions_Experimental_VirtualDevices) Unmarshal

func (m *GPUOptions_Experimental_VirtualDevices) Unmarshal(dAtA []byte) error

type GraphOptions

type GraphOptions struct {
	// If true, use control flow to schedule the activation of Recv nodes.
	// (Currently ignored.)
	EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"`
	// Options controlling how graph is optimized.
	OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions" json:"optimizer_options,omitempty"`
	// The number of steps to run before returning a cost model detailing
	// the memory usage and performance of each node of the graph. 0 means
	// no cost model.
	BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"`
	// The number of steps to skip before collecting statistics for the
	// cost model.
	BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"`
	// Annotate each Node with Op output shape data, to the extent it can
	// be statically inferred.
	InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"`
	// Only place the subgraphs that are run, rather than the entire graph.
	//
	// This is useful for interactive graph building, where one might
	// produce graphs that cannot be placed during the debugging
	// process.  In particular, it allows the client to continue work in
	// a session after adding a node to a graph whose placement
	// constraints are unsatisfiable.
	PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"`
	// If true, transfer float values between processes as bfloat16.
	EnableBfloat16Sendrecv bool `` /* 130-byte string literal not displayed */
	// If > 0, record a timeline every this many steps.
	// EXPERIMENTAL: This currently has no effect in MasterSession.
	TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"`
	// Options that control the type and amount of graph rewriting.
	// Not currently configurable via the public Python API (i.e. there is no API
	// stability guarantee if you import RewriterConfig explicitly).
	RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions" json:"rewrite_options,omitempty"`
}
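
A sketch of a GraphOptions message with a nested OptimizerOptions; the values are illustrative and the import path is a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	graph := &pb.GraphOptions{
		// Annotate nodes with statically inferred shapes and send floats as bfloat16.
		InferShapes:            true,
		EnableBfloat16Sendrecv: true,
		OptimizerOptions: &pb.OptimizerOptions{
			DoConstantFolding: true,
		},
	}
	fmt.Println(graph.GetOptimizerOptions().GetDoConstantFolding()) // true
}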

func (*GraphOptions) Descriptor

func (*GraphOptions) Descriptor() ([]byte, []int)

func (*GraphOptions) GetBuildCostModel

func (m *GraphOptions) GetBuildCostModel() int64

func (*GraphOptions) GetBuildCostModelAfter

func (m *GraphOptions) GetBuildCostModelAfter() int64

func (*GraphOptions) GetEnableBfloat16Sendrecv

func (m *GraphOptions) GetEnableBfloat16Sendrecv() bool

func (*GraphOptions) GetEnableRecvScheduling

func (m *GraphOptions) GetEnableRecvScheduling() bool

func (*GraphOptions) GetInferShapes

func (m *GraphOptions) GetInferShapes() bool

func (*GraphOptions) GetOptimizerOptions

func (m *GraphOptions) GetOptimizerOptions() *OptimizerOptions

func (*GraphOptions) GetPlacePrunedGraph

func (m *GraphOptions) GetPlacePrunedGraph() bool

func (*GraphOptions) GetRewriteOptions

func (m *GraphOptions) GetRewriteOptions() *RewriterConfig

func (*GraphOptions) GetTimelineStep

func (m *GraphOptions) GetTimelineStep() int32

func (*GraphOptions) Marshal

func (m *GraphOptions) Marshal() (dAtA []byte, err error)

func (*GraphOptions) MarshalTo

func (m *GraphOptions) MarshalTo(dAtA []byte) (int, error)

func (*GraphOptions) ProtoMessage

func (*GraphOptions) ProtoMessage()

func (*GraphOptions) Reset

func (m *GraphOptions) Reset()

func (*GraphOptions) Size

func (m *GraphOptions) Size() (n int)

func (*GraphOptions) String

func (m *GraphOptions) String() string

func (*GraphOptions) Unmarshal

func (m *GraphOptions) Unmarshal(dAtA []byte) error

type JobDef

type JobDef struct {
	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Mapping from task ID to "hostname:port" string.
	//
	// If the `name` field contains "worker", and the `tasks` map contains a
	// mapping from 7 to "example.org:2222", then the device prefix
	// "/job:worker/task:7" will be assigned to "example.org:2222".
	Tasks map[int32]string `` /* 145-byte string literal not displayed */
}

Defines a single job in a TensorFlow cluster.
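
A sketch matching the task-map example in the Tasks comment above (placeholder import path):

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	// Task 7 of the "worker" job is served at example.org:2222, so the device
	// prefix "/job:worker/task:7" is assigned to that address.
	job := &pb.JobDef{
		Name:  "worker",
		Tasks: map[int32]string{7: "example.org:2222"},
	}
	fmt.Println(job.GetTasks()[7]) // example.org:2222
}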

func (*JobDef) Descriptor

func (*JobDef) Descriptor() ([]byte, []int)

func (*JobDef) GetName

func (m *JobDef) GetName() string

func (*JobDef) GetTasks

func (m *JobDef) GetTasks() map[int32]string

func (*JobDef) Marshal

func (m *JobDef) Marshal() (dAtA []byte, err error)

func (*JobDef) MarshalTo

func (m *JobDef) MarshalTo(dAtA []byte) (int, error)

func (*JobDef) ProtoMessage

func (*JobDef) ProtoMessage()

func (*JobDef) Reset

func (m *JobDef) Reset()

func (*JobDef) Size

func (m *JobDef) Size() (n int)

func (*JobDef) String

func (m *JobDef) String() string

func (*JobDef) Unmarshal

func (m *JobDef) Unmarshal(dAtA []byte) error

type OptimizerOptions

type OptimizerOptions struct {
	// If true, optimize the graph using common subexpression elimination.
	DoCommonSubexpressionElimination bool `` /* 162-byte string literal not displayed */
	// If true, perform constant folding optimization on the graph.
	DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"`
	// Constant folding optimization replaces tensors whose values can be
	// predetermined, with constant nodes. To avoid inserting too large constants,
	// the size of each constant created can be limited. If this value is zero, a
	// default limit of 10 MiB will be applied. If constant folding optimization
	// is disabled, this value is ignored.
	MaxFoldedConstantInBytes int64 `` /* 140-byte string literal not displayed */
	// If true, perform function inlining on the graph.
	DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"`
	// Overall optimization level. The actual optimizations applied will be the
	// logical OR of the flags that this level implies and any flags already set.
	OptLevel       OptimizerOptions_Level          `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
	GlobalJitLevel OptimizerOptions_GlobalJitLevel `` /* 154-byte string literal not displayed */
}

Options passed to the graph optimizer.
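
A sketch that uses the Level and GlobalJitLevel constants defined below; the values are illustrative and the import path is a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	opt := &pb.OptimizerOptions{
		DoCommonSubexpressionElimination: true,
		DoConstantFolding:                true,
		// Keep folded constants below 1 MiB instead of the 10 MiB default.
		MaxFoldedConstantInBytes: 1 << 20,
		OptLevel:                 pb.OptimizerOptions_L1,
		// Turn on JIT compilation at its first level (experimental).
		GlobalJitLevel: pb.OptimizerOptions_ON_1,
	}
	fmt.Println(opt.GetGlobalJitLevel()) // ON_1
}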

func (*OptimizerOptions) Descriptor

func (*OptimizerOptions) Descriptor() ([]byte, []int)

func (*OptimizerOptions) GetDoCommonSubexpressionElimination

func (m *OptimizerOptions) GetDoCommonSubexpressionElimination() bool

func (*OptimizerOptions) GetDoConstantFolding

func (m *OptimizerOptions) GetDoConstantFolding() bool

func (*OptimizerOptions) GetDoFunctionInlining

func (m *OptimizerOptions) GetDoFunctionInlining() bool

func (*OptimizerOptions) GetGlobalJitLevel

func (m *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel

func (*OptimizerOptions) GetMaxFoldedConstantInBytes

func (m *OptimizerOptions) GetMaxFoldedConstantInBytes() int64

func (*OptimizerOptions) GetOptLevel

func (m *OptimizerOptions) GetOptLevel() OptimizerOptions_Level

func (*OptimizerOptions) Marshal

func (m *OptimizerOptions) Marshal() (dAtA []byte, err error)

func (*OptimizerOptions) MarshalTo

func (m *OptimizerOptions) MarshalTo(dAtA []byte) (int, error)

func (*OptimizerOptions) ProtoMessage

func (*OptimizerOptions) ProtoMessage()

func (*OptimizerOptions) Reset

func (m *OptimizerOptions) Reset()

func (*OptimizerOptions) Size

func (m *OptimizerOptions) Size() (n int)

func (*OptimizerOptions) String

func (m *OptimizerOptions) String() string

func (*OptimizerOptions) Unmarshal

func (m *OptimizerOptions) Unmarshal(dAtA []byte) error

type OptimizerOptions_GlobalJitLevel

type OptimizerOptions_GlobalJitLevel int32

Control the use of the compiler/jit. Experimental.

const (
	OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0
	OptimizerOptions_OFF     OptimizerOptions_GlobalJitLevel = -1
	// The following settings turn on compilation, with higher values being
	// more aggressive.  Higher values may reduce opportunities for parallelism
	// and may use more memory.  (At present, there is no distinction, but this
	// is expected to change.)
	OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
	OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_GlobalJitLevel) String

func (x OptimizerOptions_GlobalJitLevel) String() string

type OptimizerOptions_Level

type OptimizerOptions_Level int32

Optimization level

const (
	// L1 is the default level.
	// Optimization performed at L1 :
	// 1. Common subexpression elimination
	// 2. Constant folding
	OptimizerOptions_L1 OptimizerOptions_Level = 0
	// No optimizations
	OptimizerOptions_L0 OptimizerOptions_Level = -1
)

func (OptimizerOptions_Level) EnumDescriptor

func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_Level) String

func (x OptimizerOptions_Level) String() string

type RPCOptions

type RPCOptions struct {
	// If true, always use RPC to contact the session target.
	//
	// If false (the default option), TensorFlow may use an optimized
	// transport for client-master communication that avoids the RPC
	// stack. This option is primarily used for testing the RPC stack.
	UseRpcForInprocessMaster bool `` /* 140-byte string literal not displayed */
}

func (*RPCOptions) Descriptor

func (*RPCOptions) Descriptor() ([]byte, []int)

func (*RPCOptions) GetUseRpcForInprocessMaster

func (m *RPCOptions) GetUseRpcForInprocessMaster() bool

func (*RPCOptions) Marshal

func (m *RPCOptions) Marshal() (dAtA []byte, err error)

func (*RPCOptions) MarshalTo

func (m *RPCOptions) MarshalTo(dAtA []byte) (int, error)

func (*RPCOptions) ProtoMessage

func (*RPCOptions) ProtoMessage()

func (*RPCOptions) Reset

func (m *RPCOptions) Reset()

func (*RPCOptions) Size

func (m *RPCOptions) Size() (n int)

func (*RPCOptions) String

func (m *RPCOptions) String() string

func (*RPCOptions) Unmarshal

func (m *RPCOptions) Unmarshal(dAtA []byte) error

type RewriterConfig

type RewriterConfig struct {
	// Optimize tensor layouts (default is ON)
	// e.g., this will try to use the NCHW layout on GPUs, which is faster.
	LayoutOptimizer RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Fold constants (default is ON)
	// Statically infer the value of tensors when possible, and materialize the
	// result using constants.
	ConstantFolding RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Shape optimizations (default is ON)
	// Simplify computations made on shapes.
	ShapeOptimization RewriterConfig_Toggle `` /* 152-byte string literal not displayed */
	// Remapping (default is ON)
	// Remap subgraphs onto more efficient implementations.
	Remapping RewriterConfig_Toggle `protobuf:"varint,14,opt,name=remapping,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"remapping,omitempty"`
	// Arithmetic optimizations (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	ArithmeticOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Control dependency optimizations (default is ON).
	// Remove redundant control dependencies, which may enable other optimization.
	DependencyOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Loop optimizations (default is ON).
	LoopOptimization RewriterConfig_Toggle `` /* 148-byte string literal not displayed */
	// Function optimizations (default is ON).
	FunctionOptimization RewriterConfig_Toggle `` /* 161-byte string literal not displayed */
	// Strips debug-related nodes from the graph (off by default).
	DebugStripper RewriterConfig_Toggle `` /* 140-byte string literal not displayed */
	// If true, don't remove unnecessary ops from the graph
	DisableModelPruning bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning,proto3" json:"disable_model_pruning,omitempty"`
	// Try to allocate some independent Op outputs contiguously in order to
	// merge or eliminate downstream Ops (off by default).
	ScopedAllocatorOptimization RewriterConfig_Toggle `` /* 184-byte string literal not displayed */
	// Controls how many times we run the optimizers in meta optimizer (default
	// is once).
	MetaOptimizerIterations RewriterConfig_NumIterationsType `` /* 183-byte string literal not displayed */
	// The minimum number of nodes in a graph to optimize. For smaller graphs,
	// optimization is skipped.
	// 0 means the system picks an appropriate number.
	// < 0 means do not skip optimization.
	MinGraphNodes int32 `protobuf:"varint,17,opt,name=min_graph_nodes,json=minGraphNodes,proto3" json:"min_graph_nodes,omitempty"`
	// Configures memory optimization passes through the meta-optimizer. Has no
	// effect on manually requested memory optimization passes in the optimizers
	// field.
	MemoryOptimization RewriterConfig_MemOptType `` /* 158-byte string literal not displayed */
	// A node name scope for node names which are valid outputs of recomputations.
	// Inputs to nodes that match this scope may be recomputed (subject either to
	// manual annotation of those input nodes or to manual annotation and
	// heuristics depending on memory_optimization), but the nodes themselves will
	// not be recomputed. This matches any sub-scopes as well, meaning the scope
	// can appear not just as a top-level scope. For example, if the value is
	// "gradients/", the default, it will match node name "gradients/foo",
	// "foo/gradients/bar", but not "foo_gradients/"
	MemoryOptimizerTargetNodeNameScope string `` /* 171-byte string literal not displayed */
	// Configures AutoParallel optimization passes either through the
	// meta-optimizer or when manually specified through the optimizers field.
	AutoParallel        *AutoParallelOptions    `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel" json:"auto_parallel,omitempty"`
	ScopedAllocatorOpts *ScopedAllocatorOptions `protobuf:"bytes,16,opt,name=scoped_allocator_opts,json=scopedAllocatorOpts" json:"scoped_allocator_opts,omitempty"`
	// If non-empty, will use this as an alternative way to specify a list of
	// optimizations to turn on and the order of the optimizations (replacing the
	// meta-optimizer).
	//
	// Of the RewriterConfig options, only the AutoParallel configuration options
	// (the auto_parallel field) apply to manually requested optimization passes
	// ("autoparallel"). Memory optimization passes ("memory") invoked here are
	// not configurable (in contrast to memory optimization passes through the
	// meta-optimizer) and act only on manual op annotations.
	//
	// Custom registered optimizers will be run after the base optimizers, in
	// the order that they are specified.
	Optimizers []string `protobuf:"bytes,100,rep,name=optimizers" json:"optimizers,omitempty"`
	// list of CustomGraphOptimizers to apply.
	CustomOptimizers []*RewriterConfig_CustomGraphOptimizer `protobuf:"bytes,200,rep,name=custom_optimizers,json=customOptimizers" json:"custom_optimizers,omitempty"`
}
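
A sketch combining a few RewriterConfig fields, using the MemOptType and NumIterationsType constants defined later in this section (placeholder import path, illustrative values):

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // placeholder: use this package's real import path
)

func main() {
	rc := &pb.RewriterConfig{
		// Use the combined swapping/recomputation heuristics for memory optimization.
		MemoryOptimization: pb.RewriterConfig_HEURISTICS,
		// Run the meta-optimizer passes twice.
		MetaOptimizerIterations: pb.RewriterConfig_TWO,
		// A negative value means optimization is never skipped, regardless of graph size.
		MinGraphNodes: -1,
		AutoParallel: &pb.AutoParallelOptions{
			Enable:      true,
			NumReplicas: 2,
		},
	}
	fmt.Println(rc.GetMemoryOptimization()) // HEURISTICS
}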

func (*RewriterConfig) Descriptor

func (*RewriterConfig) Descriptor() ([]byte, []int)

func (*RewriterConfig) GetArithmeticOptimization

func (m *RewriterConfig) GetArithmeticOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoParallel

func (m *RewriterConfig) GetAutoParallel() *AutoParallelOptions

func (*RewriterConfig) GetConstantFolding

func (m *RewriterConfig) GetConstantFolding() RewriterConfig_Toggle

func (*RewriterConfig) GetCustomOptimizers

func (m *RewriterConfig) GetCustomOptimizers() []*RewriterConfig_CustomGraphOptimizer

func (*RewriterConfig) GetDebugStripper

func (m *RewriterConfig) GetDebugStripper() RewriterConfig_Toggle

func (*RewriterConfig) GetDependencyOptimization

func (m *RewriterConfig) GetDependencyOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetDisableModelPruning

func (m *RewriterConfig) GetDisableModelPruning() bool

func (*RewriterConfig) GetFunctionOptimization

func (m *RewriterConfig) GetFunctionOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetLayoutOptimizer

func (m *RewriterConfig) GetLayoutOptimizer() RewriterConfig_Toggle

func (*RewriterConfig) GetLoopOptimization

func (m *RewriterConfig) GetLoopOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetMemoryOptimization

func (m *RewriterConfig) GetMemoryOptimization() RewriterConfig_MemOptType

func (*RewriterConfig) GetMemoryOptimizerTargetNodeNameScope

func (m *RewriterConfig) GetMemoryOptimizerTargetNodeNameScope() string

func (*RewriterConfig) GetMetaOptimizerIterations

func (m *RewriterConfig) GetMetaOptimizerIterations() RewriterConfig_NumIterationsType

func (*RewriterConfig) GetMinGraphNodes

func (m *RewriterConfig) GetMinGraphNodes() int32

func (*RewriterConfig) GetOptimizers

func (m *RewriterConfig) GetOptimizers() []string

func (*RewriterConfig) GetRemapping

func (m *RewriterConfig) GetRemapping() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOptimization

func (m *RewriterConfig) GetScopedAllocatorOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOpts

func (m *RewriterConfig) GetScopedAllocatorOpts() *ScopedAllocatorOptions

func (*RewriterConfig) GetShapeOptimization

func (m *RewriterConfig) GetShapeOptimization() RewriterConfig_Toggle

func (*RewriterConfig) Marshal

func (m *RewriterConfig) Marshal() (dAtA []byte, err error)

func (*RewriterConfig) MarshalTo

func (m *RewriterConfig) MarshalTo(dAtA []byte) (int, error)

func (*RewriterConfig) ProtoMessage

func (*RewriterConfig) ProtoMessage()

func (*RewriterConfig) Reset

func (m *RewriterConfig) Reset()

func (*RewriterConfig) Size

func (m *RewriterConfig) Size() (n int)

func (*RewriterConfig) String

func (m *RewriterConfig) String() string

func (*RewriterConfig) Unmarshal

func (m *RewriterConfig) Unmarshal(dAtA []byte) error

type RewriterConfig_CustomGraphOptimizer

type RewriterConfig_CustomGraphOptimizer struct {
	Name         string                            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	ParameterMap map[string]*tensorflow5.AttrValue `` /* 171-byte string literal not displayed */
}

Message to describe custom graph optimizer and its parameters

func (*RewriterConfig_CustomGraphOptimizer) Descriptor

func (*RewriterConfig_CustomGraphOptimizer) Descriptor() ([]byte, []int)

func (*RewriterConfig_CustomGraphOptimizer) GetName

func (m *RewriterConfig_CustomGraphOptimizer) GetName() string

func (*RewriterConfig_CustomGraphOptimizer) GetParameterMap

func (m *RewriterConfig_CustomGraphOptimizer) GetParameterMap() map[string]*tensorflow5.AttrValue

func (*RewriterConfig_CustomGraphOptimizer) Marshal

func (m *RewriterConfig_CustomGraphOptimizer) Marshal() (dAtA []byte, err error)

func (*RewriterConfig_CustomGraphOptimizer) MarshalTo

func (m *RewriterConfig_CustomGraphOptimizer) MarshalTo(dAtA []byte) (int, error)

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage()

func (*RewriterConfig_CustomGraphOptimizer) Reset

func (m *RewriterConfig_CustomGraphOptimizer) Reset()

func (*RewriterConfig_CustomGraphOptimizer) Size

func (m *RewriterConfig_CustomGraphOptimizer) Size() (n int)

func (*RewriterConfig_CustomGraphOptimizer) String

func (m *RewriterConfig_CustomGraphOptimizer) String() string

func (*RewriterConfig_CustomGraphOptimizer) Unmarshal

func (m *RewriterConfig_CustomGraphOptimizer) Unmarshal(dAtA []byte) error

type RewriterConfig_MemOptType

type RewriterConfig_MemOptType int32
const (
	// The default setting (SCHEDULING and SWAPPING HEURISTICS only)
	RewriterConfig_DEFAULT_MEM_OPT RewriterConfig_MemOptType = 0
	// Disabled in the meta-optimizer.
	RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 1
	// Driven by manual op-level annotations.
	RewriterConfig_MANUAL RewriterConfig_MemOptType = 2
	// Swapping heuristic will move a tensor from the GPU to the CPU and move
	// it back when needed to reduce peak memory usage.
	RewriterConfig_SWAPPING_HEURISTICS RewriterConfig_MemOptType = 4
	// Recomputation heuristics will recompute ops (such as Relu activation)
	// during backprop instead of storing them, reducing peak memory usage.
	RewriterConfig_RECOMPUTATION_HEURISTICS RewriterConfig_MemOptType = 5
	// Scheduling will split big ops such as AddN and try to enforce a schedule
	// of the new computations that decreases peak memory usage.
	RewriterConfig_SCHEDULING_HEURISTICS RewriterConfig_MemOptType = 6
	// Use any combination of swapping and recomputation heuristics.
	RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 3
)
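
A small sketch of selecting one of these modes (placeholder import path; the MemoryOptimization field name is inferred from the GetMemoryOptimization accessor below):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	cfg := &protobuf.RewriterConfig{
		// Recompute cheap ops (e.g. Relu) during backprop instead of storing them.
		MemoryOptimization: protobuf.RewriterConfig_RECOMPUTATION_HEURISTICS,
	}
	// The generated String() method prints the enum name.
	fmt.Println(cfg.GetMemoryOptimization().String())
}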

func (RewriterConfig_MemOptType) EnumDescriptor

func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_MemOptType) String

func (x RewriterConfig_MemOptType) String() string

type RewriterConfig_NumIterationsType

type RewriterConfig_NumIterationsType int32

Enum controlling the number of times to run optimizers. The default is to run them once.

const (
	RewriterConfig_DEFAULT_NUM_ITERS RewriterConfig_NumIterationsType = 0
	RewriterConfig_ONE               RewriterConfig_NumIterationsType = 1
	RewriterConfig_TWO               RewriterConfig_NumIterationsType = 2
)
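
For illustration, requesting two optimizer passes instead of the default single pass (placeholder import path; the MetaOptimizerIterations field name is inferred from its accessor):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	cfg := &protobuf.RewriterConfig{
		// Run the meta-optimizer passes twice.
		MetaOptimizerIterations: protobuf.RewriterConfig_TWO,
	}
	fmt.Println(cfg.GetMetaOptimizerIterations())
}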

func (RewriterConfig_NumIterationsType) EnumDescriptor

func (RewriterConfig_NumIterationsType) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_NumIterationsType) String

func (x RewriterConfig_NumIterationsType) String() string

type RewriterConfig_Toggle

type RewriterConfig_Toggle int32
const (
	RewriterConfig_DEFAULT RewriterConfig_Toggle = 0
	RewriterConfig_ON      RewriterConfig_Toggle = 1
	RewriterConfig_OFF     RewriterConfig_Toggle = 2
	// Enable some aggressive optimizations that use assumptions that TF graphs
	// may break. For example, assume the shape of a placeholder matches its
	// actual feed.
	RewriterConfig_AGGRESSIVE RewriterConfig_Toggle = 3
)
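
Toggles are set per optimization on RewriterConfig; a sketch (placeholder import path; the ConstantFolding and ArithmeticOptimization field names are inferred from their accessors):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	cfg := &protobuf.RewriterConfig{
		// Opt out of constant folding entirely.
		ConstantFolding: protobuf.RewriterConfig_OFF,
		// Allow rewrites that assume placeholder shapes match their actual feeds.
		ArithmeticOptimization: protobuf.RewriterConfig_AGGRESSIVE,
	}
	fmt.Println(cfg.GetConstantFolding(), cfg.GetArithmeticOptimization())
}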

func (RewriterConfig_Toggle) EnumDescriptor

func (RewriterConfig_Toggle) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_Toggle) String

func (x RewriterConfig_Toggle) String() string

type RunMetadata

type RunMetadata struct {
	// Statistics traced for this step. Populated if tracing is turned on via the
	// "RunOptions" proto.
	// EXPERIMENTAL: The format and set of events may change in future versions.
	StepStats *tensorflow13.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats" json:"step_stats,omitempty"`
	// The cost graph for the computation defined by the run call.
	CostGraph *tensorflow2.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph" json:"cost_graph,omitempty"`
	// Graphs of the partitions executed by executors.
	PartitionGraphs []*tensorflow10.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs" json:"partition_graphs,omitempty"`
}

Metadata output (i.e., non-Tensor) for a single Run() call.
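
A sketch of decoding a serialized RunMetadata, e.g. bytes captured from a traced Run() call (placeholder import path; the empty input is only a stand-in for real data):

package main

import (
	"fmt"
	"log"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	raw := []byte{} // stand-in for real serialized RunMetadata bytes
	md := &protobuf.RunMetadata{}
	if err := md.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	// StepStats is only populated when tracing was enabled via RunOptions.
	fmt.Println(md.GetStepStats() == nil, len(md.GetPartitionGraphs()))
}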

func (*RunMetadata) Descriptor

func (*RunMetadata) Descriptor() ([]byte, []int)

func (*RunMetadata) GetCostGraph

func (m *RunMetadata) GetCostGraph() *tensorflow2.CostGraphDef

func (*RunMetadata) GetPartitionGraphs

func (m *RunMetadata) GetPartitionGraphs() []*tensorflow10.GraphDef

func (*RunMetadata) GetStepStats

func (m *RunMetadata) GetStepStats() *tensorflow13.StepStats

func (*RunMetadata) Marshal

func (m *RunMetadata) Marshal() (dAtA []byte, err error)

func (*RunMetadata) MarshalTo

func (m *RunMetadata) MarshalTo(dAtA []byte) (int, error)

func (*RunMetadata) ProtoMessage

func (*RunMetadata) ProtoMessage()

func (*RunMetadata) Reset

func (m *RunMetadata) Reset()

func (*RunMetadata) Size

func (m *RunMetadata) Size() (n int)

func (*RunMetadata) String

func (m *RunMetadata) String() string

func (*RunMetadata) Unmarshal

func (m *RunMetadata) Unmarshal(dAtA []byte) error

type RunOptions

type RunOptions struct {
	TraceLevel RunOptions_TraceLevel `` /* 130-byte string literal not displayed */
	// Time to wait for operation to complete in milliseconds.
	TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs,proto3" json:"timeout_in_ms,omitempty"`
	// The thread pool to use, if session_inter_op_thread_pool is configured.
	InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool,proto3" json:"inter_op_thread_pool,omitempty"`
	// Whether the partition graph(s) executed by the executor(s) should be
	// outputted via RunMetadata.
	OutputPartitionGraphs bool `` /* 127-byte string literal not displayed */
	// EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
	DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions" json:"debug_options,omitempty"`
	// When enabled, causes tensor allocation information to be included in
	// the error message when the Run() call fails because the allocator ran
	// out of memory (OOM).
	//
	// Enabling this option can slow down the Run() call.
	ReportTensorAllocationsUponOom bool                     `` /* 158-byte string literal not displayed */
	Experimental                   *RunOptions_Experimental `protobuf:"bytes,8,opt,name=experimental" json:"experimental,omitempty"`
}

Options for a single Run() call.
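
A minimal sketch of building and serializing RunOptions for a traced call (placeholder import path; the TraceLevel field name is inferred from its accessor, and the timeout value is arbitrary):

package main

import (
	"fmt"
	"log"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	opts := &protobuf.RunOptions{
		TraceLevel:  protobuf.RunOptions_FULL_TRACE, // software and hardware tracing
		TimeoutInMs: 30000,                          // fail the call after 30 seconds
		// Include allocator state in OOM errors; this can slow down Run().
		ReportTensorAllocationsUponOom: true,
	}
	wire, err := opts.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded RunOptions: %d bytes\n", len(wire))
}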

func (*RunOptions) Descriptor

func (*RunOptions) Descriptor() ([]byte, []int)

func (*RunOptions) GetDebugOptions

func (m *RunOptions) GetDebugOptions() *DebugOptions

func (*RunOptions) GetExperimental

func (m *RunOptions) GetExperimental() *RunOptions_Experimental

func (*RunOptions) GetInterOpThreadPool

func (m *RunOptions) GetInterOpThreadPool() int32

func (*RunOptions) GetOutputPartitionGraphs

func (m *RunOptions) GetOutputPartitionGraphs() bool

func (*RunOptions) GetReportTensorAllocationsUponOom

func (m *RunOptions) GetReportTensorAllocationsUponOom() bool

func (*RunOptions) GetTimeoutInMs

func (m *RunOptions) GetTimeoutInMs() int64

func (*RunOptions) GetTraceLevel

func (m *RunOptions) GetTraceLevel() RunOptions_TraceLevel

func (*RunOptions) Marshal

func (m *RunOptions) Marshal() (dAtA []byte, err error)

func (*RunOptions) MarshalTo

func (m *RunOptions) MarshalTo(dAtA []byte) (int, error)

func (*RunOptions) ProtoMessage

func (*RunOptions) ProtoMessage()

func (*RunOptions) Reset

func (m *RunOptions) Reset()

func (*RunOptions) Size

func (m *RunOptions) Size() (n int)

func (*RunOptions) String

func (m *RunOptions) String() string

func (*RunOptions) Unmarshal

func (m *RunOptions) Unmarshal(dAtA []byte) error

type RunOptions_Experimental

type RunOptions_Experimental struct {
	// If non-zero, declares that this graph is going to use collective
	// ops and must synchronize step_ids with any other graph with this
	// same group_key value (in a distributed computation where tasks
	// run disjoint graphs).
	CollectiveGraphKey int64 `protobuf:"varint,1,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"`
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.
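
For illustration, attaching a collective graph key so disjoint graphs can synchronize step_ids (placeholder import path; the key value is arbitrary):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	opts := &protobuf.RunOptions{
		Experimental: &protobuf.RunOptions_Experimental{
			// Graphs sharing this key synchronize step_ids for collective ops.
			CollectiveGraphKey: 42,
		},
	}
	fmt.Println(opts.GetExperimental().GetCollectiveGraphKey())
}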

func (*RunOptions_Experimental) Descriptor

func (*RunOptions_Experimental) Descriptor() ([]byte, []int)

func (*RunOptions_Experimental) GetCollectiveGraphKey

func (m *RunOptions_Experimental) GetCollectiveGraphKey() int64

func (*RunOptions_Experimental) Marshal

func (m *RunOptions_Experimental) Marshal() (dAtA []byte, err error)

func (*RunOptions_Experimental) MarshalTo

func (m *RunOptions_Experimental) MarshalTo(dAtA []byte) (int, error)

func (*RunOptions_Experimental) ProtoMessage

func (*RunOptions_Experimental) ProtoMessage()

func (*RunOptions_Experimental) Reset

func (m *RunOptions_Experimental) Reset()

func (*RunOptions_Experimental) Size

func (m *RunOptions_Experimental) Size() (n int)

func (*RunOptions_Experimental) String

func (m *RunOptions_Experimental) String() string

func (*RunOptions_Experimental) Unmarshal

func (m *RunOptions_Experimental) Unmarshal(dAtA []byte) error

type RunOptions_TraceLevel

type RunOptions_TraceLevel int32

TODO(pbar) Turn this into a TraceOptions proto which allows tracing to be controlled in a more orthogonal manner?

const (
	RunOptions_NO_TRACE       RunOptions_TraceLevel = 0
	RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
	RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
	RunOptions_FULL_TRACE     RunOptions_TraceLevel = 3
)

func (RunOptions_TraceLevel) EnumDescriptor

func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int)

func (RunOptions_TraceLevel) String

func (x RunOptions_TraceLevel) String() string

type ScopedAllocatorOptions

type ScopedAllocatorOptions struct {
	// If present, only perform optimization for these ops.
	EnableOp []string `protobuf:"bytes,1,rep,name=enable_op,json=enableOp" json:"enable_op,omitempty"`
}
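
A sketch of wiring these options into a RewriterConfig (placeholder import path; the ScopedAllocatorOptimization field name is inferred from its accessor, and the op name is only an example):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	cfg := &protobuf.RewriterConfig{
		ScopedAllocatorOptimization: protobuf.RewriterConfig_ON,
		ScopedAllocatorOpts: &protobuf.ScopedAllocatorOptions{
			// If present, only perform the optimization for these op types.
			EnableOp: []string{"CollectiveReduce"}, // example op name
		},
	}
	fmt.Println(cfg.GetScopedAllocatorOpts().GetEnableOp())
}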

func (*ScopedAllocatorOptions) Descriptor

func (*ScopedAllocatorOptions) Descriptor() ([]byte, []int)

func (*ScopedAllocatorOptions) GetEnableOp

func (m *ScopedAllocatorOptions) GetEnableOp() []string

func (*ScopedAllocatorOptions) Marshal

func (m *ScopedAllocatorOptions) Marshal() (dAtA []byte, err error)

func (*ScopedAllocatorOptions) MarshalTo

func (m *ScopedAllocatorOptions) MarshalTo(dAtA []byte) (int, error)

func (*ScopedAllocatorOptions) ProtoMessage

func (*ScopedAllocatorOptions) ProtoMessage()

func (*ScopedAllocatorOptions) Reset

func (m *ScopedAllocatorOptions) Reset()

func (*ScopedAllocatorOptions) Size

func (m *ScopedAllocatorOptions) Size() (n int)

func (*ScopedAllocatorOptions) String

func (m *ScopedAllocatorOptions) String() string

func (*ScopedAllocatorOptions) Unmarshal

func (m *ScopedAllocatorOptions) Unmarshal(dAtA []byte) error

type TensorConnection

type TensorConnection struct {
	// A tensor name. The value of this tensor will be substituted for
	// the tensor named in `to_tensor`.
	FromTensor string `protobuf:"bytes,1,opt,name=from_tensor,json=fromTensor,proto3" json:"from_tensor,omitempty"`
	// A tensor name. The value of this tensor will be bound to the
	// value of the tensor named in `from_tensor`.
	ToTensor string `protobuf:"bytes,2,opt,name=to_tensor,json=toTensor,proto3" json:"to_tensor,omitempty"`
}

Defines a connection between two tensors in a `GraphDef`.
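
A round-trip sketch showing the field pairing (placeholder import path; the tensor names are illustrative):

package main

import (
	"fmt"
	"log"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	conn := &protobuf.TensorConnection{
		FromTensor: "decoded_image:0", // this tensor's value...
		ToTensor:   "input_image:0",   // ...is substituted for this one
	}
	wire, err := conn.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	decoded := &protobuf.TensorConnection{}
	if err := decoded.Unmarshal(wire); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetFromTensor(), "->", decoded.GetToTensor())
}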

func (*TensorConnection) Descriptor

func (*TensorConnection) Descriptor() ([]byte, []int)

func (*TensorConnection) GetFromTensor

func (m *TensorConnection) GetFromTensor() string

func (*TensorConnection) GetToTensor

func (m *TensorConnection) GetToTensor() string

func (*TensorConnection) Marshal

func (m *TensorConnection) Marshal() (dAtA []byte, err error)

func (*TensorConnection) MarshalTo

func (m *TensorConnection) MarshalTo(dAtA []byte) (int, error)

func (*TensorConnection) ProtoMessage

func (*TensorConnection) ProtoMessage()

func (*TensorConnection) Reset

func (m *TensorConnection) Reset()

func (*TensorConnection) Size

func (m *TensorConnection) Size() (n int)

func (*TensorConnection) String

func (m *TensorConnection) String() string

func (*TensorConnection) Unmarshal

func (m *TensorConnection) Unmarshal(dAtA []byte) error

type ThreadPoolOptionProto

type ThreadPoolOptionProto struct {
	// The number of threads in the pool.
	//
	// 0 means the system picks a value based on where this option proto is used
	// (see the declaration of the specific field for more info).
	NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"`
	// The global name of the threadpool.
	//
	// If empty, then the threadpool is made and used according to the scope it's
	// in - e.g., for a session threadpool, it is used by that session only.
	//
	// If non-empty, then:
	// - a global threadpool associated with this name is looked
	//   up or created. This allows, for example, sharing one threadpool across
	//   many sessions (e.g., like the default behavior, if
	//   inter_op_parallelism_threads is not configured), but still partitioning
	//   into a large and small pool.
	// - if the threadpool for this global_name already exists, then it is an
	//   error if the existing pool was created using a different num_threads
	//   value as is specified on this call.
	// - threadpools created this way are never garbage collected.
	GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"`
}
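
For illustration, a session-local pool versus a shared global pool (placeholder import path; the pool name and sizes are arbitrary):

package main

import (
	"fmt"

	// Placeholder import path; substitute this module's real import path.
	protobuf "example.com/tensorflow/core/protobuf"
)

func main() {
	// Empty GlobalName: the pool is scoped to wherever this proto is used.
	local := &protobuf.ThreadPoolOptionProto{NumThreads: 4}
	// Non-empty GlobalName: looked up or created once and shared across sessions.
	// Per the field comment, reusing the name with a different NumThreads is an error.
	shared := &protobuf.ThreadPoolOptionProto{NumThreads: 16, GlobalName: "shared_large_pool"}
	fmt.Println(local.GetNumThreads(), shared.GetGlobalName())
}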

func (*ThreadPoolOptionProto) Descriptor

func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int)

func (*ThreadPoolOptionProto) GetGlobalName

func (m *ThreadPoolOptionProto) GetGlobalName() string

func (*ThreadPoolOptionProto) GetNumThreads

func (m *ThreadPoolOptionProto) GetNumThreads() int32

func (*ThreadPoolOptionProto) Marshal

func (m *ThreadPoolOptionProto) Marshal() (dAtA []byte, err error)

func (*ThreadPoolOptionProto) MarshalTo

func (m *ThreadPoolOptionProto) MarshalTo(dAtA []byte) (int, error)

func (*ThreadPoolOptionProto) ProtoMessage

func (*ThreadPoolOptionProto) ProtoMessage()

func (*ThreadPoolOptionProto) Reset

func (m *ThreadPoolOptionProto) Reset()

func (*ThreadPoolOptionProto) Size

func (m *ThreadPoolOptionProto) Size() (n int)

func (*ThreadPoolOptionProto) String

func (m *ThreadPoolOptionProto) String() string

func (*ThreadPoolOptionProto) Unmarshal

func (m *ThreadPoolOptionProto) Unmarshal(dAtA []byte) error
