distsqlrun

package
v0.0.0-...-b3f1699
Published: Jan 27, 2017 License: Apache-2.0 Imports: 32 Imported by: 0

Documentation

Overview

Package distsqlrun is a generated protocol buffer package.

It is generated from these files:

cockroach/pkg/sql/distsqlrun/api.proto
cockroach/pkg/sql/distsqlrun/data.proto
cockroach/pkg/sql/distsqlrun/processors.proto

It has these top-level messages:

SetupFlowRequest
SimpleResponse
Expression
Ordering
StreamEndpointSpec
InputSyncSpec
OutputRouterSpec
DatumInfo
StreamHeader
StreamData
StreamTrailer
StreamMessage
ProcessorSpec
PostProcessSpec
ProcessorCoreUnion
NoopCoreSpec
ValuesCoreSpec
TableReaderSpan
TableReaderSpec
JoinReaderSpec
SorterSpec
EvaluatorSpec
DistinctSpec
MergeJoinerSpec
HashJoinerSpec
AggregatorSpec
FlowSpec

Index

Constants

View Source
const (
	FlowNotStarted flowStatus = iota
	FlowRunning
	FlowFinished
)

Flow status indicators.

Variables

View Source
var (
	ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowApi   = fmt.Errorf("proto: integer overflow")
)
View Source
var (
	ErrInvalidLengthData = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowData   = fmt.Errorf("proto: integer overflow")
)
View Source
var (
	ErrInvalidLengthProcessors = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessors   = fmt.Errorf("proto: integer overflow")
)
View Source
var AggregatorSpec_Func_name = map[int32]string{
	0:  "IDENT",
	1:  "AVG",
	2:  "BOOL_AND",
	3:  "BOOL_OR",
	4:  "CONCAT_AGG",
	5:  "COUNT",
	7:  "MAX",
	8:  "MIN",
	9:  "STDDEV",
	10: "SUM",
	11: "SUM_INT",
	12: "VARIANCE",
}
View Source
var AggregatorSpec_Func_value = map[string]int32{
	"IDENT":      0,
	"AVG":        1,
	"BOOL_AND":   2,
	"BOOL_OR":    3,
	"CONCAT_AGG": 4,
	"COUNT":      5,
	"MAX":        7,
	"MIN":        8,
	"STDDEV":     9,
	"SUM":        10,
	"SUM_INT":    11,
	"VARIANCE":   12,
}
View Source
var InputSyncSpec_Type_name = map[int32]string{
	0: "UNORDERED",
	1: "ORDERED",
}
View Source
var InputSyncSpec_Type_value = map[string]int32{
	"UNORDERED": 0,
	"ORDERED":   1,
}
View Source
var JoinType_name = map[int32]string{
	0: "INNER",
	1: "LEFT_OUTER",
	2: "RIGHT_OUTER",
	3: "FULL_OUTER",
}
View Source
var JoinType_value = map[string]int32{
	"INNER":       0,
	"LEFT_OUTER":  1,
	"RIGHT_OUTER": 2,
	"FULL_OUTER":  3,
}
View Source
var Ordering_Column_Direction_name = map[int32]string{
	0: "ASC",
	1: "DESC",
}
View Source
var Ordering_Column_Direction_value = map[string]int32{
	"ASC":  0,
	"DESC": 1,
}
View Source
var OutputRouterSpec_Type_name = map[int32]string{
	0: "PASS_THROUGH",
	1: "MIRROR",
	2: "BY_HASH",
	3: "BY_RANGE",
}
View Source
var OutputRouterSpec_Type_value = map[string]int32{
	"PASS_THROUGH": 0,
	"MIRROR":       1,
	"BY_HASH":      2,
	"BY_RANGE":     3,
}
View Source
var StreamEndpointSpec_Type_name = map[int32]string{
	0: "LOCAL",
	1: "REMOTE",
	2: "SYNC_RESPONSE",
}
View Source
var StreamEndpointSpec_Type_value = map[string]int32{
	"LOCAL":         0,
	"REMOTE":        1,
	"SYNC_RESPONSE": 2,
}

Functions

func GeneratePlanDiagram

func GeneratePlanDiagram(flows []FlowSpec, nodeNames []string, w io.Writer) error

GeneratePlanDiagram generates the JSON data for a flow diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
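
For illustration, a minimal sketch of rendering a diagram into a buffer (writeDiagram is a hypothetical helper; the flows and nodeNames values are assumed to come from a planned query):

	// writeDiagram renders the flow diagram JSON for the given per-node
	// FlowSpecs into a string; nodeNames[i] labels flows[i].
	func writeDiagram(flows []FlowSpec, nodeNames []string) (string, error) {
		var buf bytes.Buffer
		if err := GeneratePlanDiagram(flows, nodeNames, &buf); err != nil {
			return "", err
		}
		return buf.String(), nil
	}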

func GetAggregateInfo

func GetAggregateInfo(
	fn AggregatorSpec_Func, inputType sqlbase.ColumnType,
) (aggregateConstructor func() parser.AggregateFunc, returnType sqlbase.ColumnType, err error)

GetAggregateInfo returns the aggregate constructor and the return type for the given aggregate function when applied on the given type.
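A sketch of how a caller might use this (aggregateFor is a hypothetical helper that instantiates the aggregate immediately):

	// aggregateFor looks up the constructor for fn over inputType and
	// instantiates one aggregate, returning it along with the result type.
	func aggregateFor(
		fn AggregatorSpec_Func, inputType sqlbase.ColumnType,
	) (parser.AggregateFunc, sqlbase.ColumnType, error) {
		ctor, retType, err := GetAggregateInfo(fn, inputType)
		if err != nil {
			return nil, sqlbase.ColumnType{}, err
		}
		return ctor(), retType, nil
	}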

func ProcessInboundStream

func ProcessInboundStream(
	flowCtx *FlowCtx, stream DistSQL_FlowStreamServer, firstMsg *StreamMessage, dst RowReceiver,
) error

ProcessInboundStream receives rows from a DistSQL_FlowStreamServer and sends them to a RowReceiver. Optionally processes an initial StreamMessage that was already received (because the first message contains the flow and stream IDs, it needs to be received before we can get here).

func RegisterDistSQLServer

func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)

func SetFlowRequestTrace

func SetFlowRequestTrace(ctx context.Context, req *SetupFlowRequest) error

SetFlowRequestTrace populates req.Trace with the context of the current Span in the context (if any).

Types

type AggregatorSpec

type AggregatorSpec struct {
	// The group key is a subset of the columns in the input stream schema on the
	// basis of which we define our groups.
	GroupCols    []uint32                     `protobuf:"varint,2,rep,packed,name=group_cols,json=groupCols" json:"group_cols,omitempty"`
	Aggregations []AggregatorSpec_Aggregation `protobuf:"bytes,3,rep,name=aggregations" json:"aggregations"`
}

AggregatorSpec is the specification for an "aggregator" (processor core type, not the logical plan computation stage). An aggregator performs 'aggregation' in the SQL sense in that it groups rows and computes an aggregate for each group. The group is configured using the group key. The aggregator can be configured with one or more aggregation functions.

The "internal columns" of an Aggregator map 1-1 to the aggregations.

func (*AggregatorSpec) Descriptor

func (*AggregatorSpec) Descriptor() ([]byte, []int)

func (*AggregatorSpec) Marshal

func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec) MarshalTo

func (m *AggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec) ProtoMessage

func (*AggregatorSpec) ProtoMessage()

func (*AggregatorSpec) Reset

func (m *AggregatorSpec) Reset()

func (*AggregatorSpec) Size

func (m *AggregatorSpec) Size() (n int)

func (*AggregatorSpec) String

func (m *AggregatorSpec) String() string

func (*AggregatorSpec) Unmarshal

func (m *AggregatorSpec) Unmarshal(dAtA []byte) error

type AggregatorSpec_Aggregation

type AggregatorSpec_Aggregation struct {
	Func AggregatorSpec_Func `protobuf:"varint,1,opt,name=func,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"func"`
	// If distinct is true, the aggregation function operates as
	// '<FUNC> DISTINCT' would; the default behavior is that of '<FUNC> ALL'.
	Distinct bool `protobuf:"varint,2,opt,name=distinct" json:"distinct"`
	// The column index specifies the argument to the aggregator function.
	ColIdx uint32 `protobuf:"varint,3,opt,name=col_idx,json=colIdx" json:"col_idx"`
}

func (*AggregatorSpec_Aggregation) Descriptor

func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)

func (*AggregatorSpec_Aggregation) Marshal

func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec_Aggregation) MarshalTo

func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec_Aggregation) ProtoMessage

func (*AggregatorSpec_Aggregation) ProtoMessage()

func (*AggregatorSpec_Aggregation) Reset

func (m *AggregatorSpec_Aggregation) Reset()

func (*AggregatorSpec_Aggregation) Size

func (m *AggregatorSpec_Aggregation) Size() (n int)

func (*AggregatorSpec_Aggregation) String

func (m *AggregatorSpec_Aggregation) String() string

func (*AggregatorSpec_Aggregation) Unmarshal

func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error

type AggregatorSpec_Func

type AggregatorSpec_Func int32

These mirror the aggregate functions supported by sql/parser. See sql/parser/aggregate_builtins.go.

const (
	// The identity function is set to be the default zero-value function,
	// returning the last value added.
	AggregatorSpec_IDENT      AggregatorSpec_Func = 0
	AggregatorSpec_AVG        AggregatorSpec_Func = 1
	AggregatorSpec_BOOL_AND   AggregatorSpec_Func = 2
	AggregatorSpec_BOOL_OR    AggregatorSpec_Func = 3
	AggregatorSpec_CONCAT_AGG AggregatorSpec_Func = 4
	AggregatorSpec_COUNT      AggregatorSpec_Func = 5
	AggregatorSpec_MAX        AggregatorSpec_Func = 7
	AggregatorSpec_MIN        AggregatorSpec_Func = 8
	AggregatorSpec_STDDEV     AggregatorSpec_Func = 9
	AggregatorSpec_SUM        AggregatorSpec_Func = 10
	AggregatorSpec_SUM_INT    AggregatorSpec_Func = 11
	AggregatorSpec_VARIANCE   AggregatorSpec_Func = 12
)

func (AggregatorSpec_Func) Enum

func (AggregatorSpec_Func) EnumDescriptor

func (AggregatorSpec_Func) EnumDescriptor() ([]byte, []int)

func (AggregatorSpec_Func) String

func (x AggregatorSpec_Func) String() string

func (*AggregatorSpec_Func) UnmarshalJSON

func (x *AggregatorSpec_Func) UnmarshalJSON(data []byte) error

type DatumInfo

type DatumInfo struct {
	Encoding cockroach_sql_sqlbase2.DatumEncoding `protobuf:"varint,1,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
	Type     cockroach_sql_sqlbase1.ColumnType    `protobuf:"bytes,2,opt,name=type" json:"type"`
}

func (*DatumInfo) Descriptor

func (*DatumInfo) Descriptor() ([]byte, []int)

func (*DatumInfo) Marshal

func (m *DatumInfo) Marshal() (dAtA []byte, err error)

func (*DatumInfo) MarshalTo

func (m *DatumInfo) MarshalTo(dAtA []byte) (int, error)

func (*DatumInfo) ProtoMessage

func (*DatumInfo) ProtoMessage()

func (*DatumInfo) Reset

func (m *DatumInfo) Reset()

func (*DatumInfo) Size

func (m *DatumInfo) Size() (n int)

func (*DatumInfo) String

func (m *DatumInfo) String() string

func (*DatumInfo) Unmarshal

func (m *DatumInfo) Unmarshal(dAtA []byte) error

type DistSQLClient

type DistSQLClient interface {
	// RunSyncFlow instantiates a flow and streams back results of that flow.
	// The request must contain one flow, and that flow must have a single mailbox
	// of the special sync response type.
	RunSyncFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (DistSQL_RunSyncFlowClient, error)
	// SetupFlow instantiates a flow (a subgraph of a distributed SQL
	// computation) on the receiving node.
	SetupFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
	// FlowStream is used to push a stream of messages that is part of a flow. The
	// first message will have a StreamHeader which identifies the flow and the
	// stream (mailbox).
	FlowStream(ctx context.Context, opts ...grpc.CallOption) (DistSQL_FlowStreamClient, error)
}

func NewDistSQLClient

func NewDistSQLClient(cc *grpc.ClientConn) DistSQLClient

type DistSQLServer

type DistSQLServer interface {
	// RunSyncFlow instantiates a flow and streams back results of that flow.
	// The request must contain one flow, and that flow must have a single mailbox
	// of the special sync response type.
	RunSyncFlow(*SetupFlowRequest, DistSQL_RunSyncFlowServer) error
	// SetupFlow instantiates a flow (a subgraph of a distributed SQL
	// computation) on the receiving node.
	SetupFlow(context.Context, *SetupFlowRequest) (*SimpleResponse, error)
	// FlowStream is used to push a stream of messages that is part of a flow. The
	// first message will have a StreamHeader which identifies the flow and the
	// stream (mailbox).
	FlowStream(DistSQL_FlowStreamServer) error
}

type DistSQL_FlowStreamClient

type DistSQL_FlowStreamClient interface {
	Send(*StreamMessage) error
	CloseAndRecv() (*SimpleResponse, error)
	grpc.ClientStream
}

type DistSQL_FlowStreamServer

type DistSQL_FlowStreamServer interface {
	SendAndClose(*SimpleResponse) error
	Recv() (*StreamMessage, error)
	grpc.ServerStream
}

type DistSQL_RunSyncFlowClient

type DistSQL_RunSyncFlowClient interface {
	Recv() (*StreamMessage, error)
	grpc.ClientStream
}

type DistSQL_RunSyncFlowServer

type DistSQL_RunSyncFlowServer interface {
	Send(*StreamMessage) error
	grpc.ServerStream
}

type DistinctSpec

type DistinctSpec struct {
	// The ordered columns in the input stream can be optionally specified for
	// possible optimizations. The specific ordering (ascending/descending) of
	// the column itself is not important nor is the order in which the columns
	// are specified.
	OrderedColumns []uint32 `protobuf:"varint,1,rep,name=ordered_columns,json=orderedColumns" json:"ordered_columns,omitempty"`
}

The "internal columns" of a Distict (see ProcessorSpec) are the same as the input columns.

func (*DistinctSpec) Descriptor

func (*DistinctSpec) Descriptor() ([]byte, []int)

func (*DistinctSpec) Marshal

func (m *DistinctSpec) Marshal() (dAtA []byte, err error)

func (*DistinctSpec) MarshalTo

func (m *DistinctSpec) MarshalTo(dAtA []byte) (int, error)

func (*DistinctSpec) ProtoMessage

func (*DistinctSpec) ProtoMessage()

func (*DistinctSpec) Reset

func (m *DistinctSpec) Reset()

func (*DistinctSpec) Size

func (m *DistinctSpec) Size() (n int)

func (*DistinctSpec) String

func (m *DistinctSpec) String() string

func (*DistinctSpec) Unmarshal

func (m *DistinctSpec) Unmarshal(dAtA []byte) error

type EvaluatorSpec

type EvaluatorSpec struct {
	Exprs []Expression `protobuf:"bytes,1,rep,name=exprs" json:"exprs"`
}

EvaluatorSpec is the specification for an "evaluator", a fully programmable no-grouping aggregator. It runs a 'program' on each individual row and is restricted to operating on one row of data at a time. The 'program' is a set of expressions evaluated in order; the output schema therefore consists of the results of evaluating each of these expressions on the input row.

The "internal columns" of an Evaluator (see ProcessorSpec) map 1-1 to the expressions.

func (*EvaluatorSpec) Descriptor

func (*EvaluatorSpec) Descriptor() ([]byte, []int)

func (*EvaluatorSpec) Marshal

func (m *EvaluatorSpec) Marshal() (dAtA []byte, err error)

func (*EvaluatorSpec) MarshalTo

func (m *EvaluatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*EvaluatorSpec) ProtoMessage

func (*EvaluatorSpec) ProtoMessage()

func (*EvaluatorSpec) Reset

func (m *EvaluatorSpec) Reset()

func (*EvaluatorSpec) Size

func (m *EvaluatorSpec) Size() (n int)

func (*EvaluatorSpec) String

func (m *EvaluatorSpec) String() string

func (*EvaluatorSpec) Unmarshal

func (m *EvaluatorSpec) Unmarshal(dAtA []byte) error

type Expression

type Expression struct {
	// TODO(radu): TBD how this will be used
	Version string `protobuf:"bytes,1,opt,name=version" json:"version"`
	// SQL expressions are passed as a string, with ordinal references
	// (@1, @2, @3 ..) used for "input" variables.
	Expr string `protobuf:"bytes,2,opt,name=expr" json:"expr"`
}

func (*Expression) Descriptor

func (*Expression) Descriptor() ([]byte, []int)

func (*Expression) Marshal

func (m *Expression) Marshal() (dAtA []byte, err error)

func (*Expression) MarshalTo

func (m *Expression) MarshalTo(dAtA []byte) (int, error)

func (*Expression) ProtoMessage

func (*Expression) ProtoMessage()

func (*Expression) Reset

func (m *Expression) Reset()

func (*Expression) Size

func (m *Expression) Size() (n int)

func (*Expression) String

func (m *Expression) String() string

func (*Expression) Unmarshal

func (m *Expression) Unmarshal(dAtA []byte) error

type Flow

type Flow struct {
	FlowCtx
	// contains filtered or unexported fields
}

Flow represents a flow which consists of processors and streams.

func (*Flow) Cleanup

func (f *Flow) Cleanup()

Cleanup should be called when the flow completes (after all processors and mailboxes have exited).

func (*Flow) RunSync

func (f *Flow) RunSync()

RunSync runs the processors in the flow in order (serially), in the same context (no goroutines are spawned).

func (*Flow) Start

func (f *Flow) Start(doneFn func())

Start starts the flow (each processor runs in its own goroutine).

func (*Flow) Wait

func (f *Flow) Wait()

Wait waits for all the goroutines for this flow to exit.

type FlowCtx

type FlowCtx struct {
	Context context.Context
	// contains filtered or unexported fields
}

FlowCtx encompasses the contexts needed for various flow components.

type FlowID

type FlowID struct {
	uuid.UUID
}

FlowID identifies a flow. It is most importantly used when setting up streams between nodes.

type FlowSpec

type FlowSpec struct {
	FlowID     FlowID          `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
	Processors []ProcessorSpec `protobuf:"bytes,2,rep,name=processors" json:"processors"`
}

FlowSpec describes a "flow" which is a subgraph of a distributed SQL computation consisting of processors and streams.

func (*FlowSpec) Descriptor

func (*FlowSpec) Descriptor() ([]byte, []int)

func (*FlowSpec) Marshal

func (m *FlowSpec) Marshal() (dAtA []byte, err error)

func (*FlowSpec) MarshalTo

func (m *FlowSpec) MarshalTo(dAtA []byte) (int, error)

func (*FlowSpec) ProtoMessage

func (*FlowSpec) ProtoMessage()

func (*FlowSpec) Reset

func (m *FlowSpec) Reset()

func (*FlowSpec) Size

func (m *FlowSpec) Size() (n int)

func (*FlowSpec) String

func (m *FlowSpec) String() string

func (*FlowSpec) Unmarshal

func (m *FlowSpec) Unmarshal(dAtA []byte) error

type HashJoinerSpec

type HashJoinerSpec struct {
	// The join constrains certain columns from the left stream to equal
	// corresponding columns on the right stream. These lists must have the
	// same length.
	LeftEqColumns  []uint32 `protobuf:"varint,1,rep,packed,name=left_eq_columns,json=leftEqColumns" json:"left_eq_columns,omitempty"`
	RightEqColumns []uint32 `protobuf:"varint,2,rep,packed,name=right_eq_columns,json=rightEqColumns" json:"right_eq_columns,omitempty"`
	// "ON" expression (in addition to the equality constraints captured by the
	// orderings). Assuming that the left stream has N columns and the right
	// stream has M columns, in this expression variables @1 to @N refer to
	// columns of the left stream and variables @(N+1) to @(N+M) refer to
	// columns in
	// the right stream.
	OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
	Type   JoinType   `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
}

HashJoinerSpec is the specification for a hash join processor. The processor has two inputs and one output.

The processor works by reading the entire right input and putting it in a hash table. Thus, there is no guarantee on the ordering of results that stem only from the right input (in the case of RIGHT_OUTER, FULL_OUTER). However, it is guaranteed that results that involve the left stream preserve the ordering; i.e. all results that stem from left row (i) precede results that stem from left row (i+1).

The "internal columns" of a HashJoiner (see ProcessorSpec) are the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.

func (*HashJoinerSpec) Descriptor

func (*HashJoinerSpec) Descriptor() ([]byte, []int)

func (*HashJoinerSpec) Marshal

func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)

func (*HashJoinerSpec) MarshalTo

func (m *HashJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*HashJoinerSpec) ProtoMessage

func (*HashJoinerSpec) ProtoMessage()

func (*HashJoinerSpec) Reset

func (m *HashJoinerSpec) Reset()

func (*HashJoinerSpec) Size

func (m *HashJoinerSpec) Size() (n int)

func (*HashJoinerSpec) String

func (m *HashJoinerSpec) String() string

func (*HashJoinerSpec) Unmarshal

func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error

type InputSyncSpec

type InputSyncSpec struct {
	Type     InputSyncSpec_Type   `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.InputSyncSpec_Type" json:"type"`
	Ordering Ordering             `protobuf:"bytes,2,opt,name=ordering" json:"ordering"`
	Streams  []StreamEndpointSpec `protobuf:"bytes,3,rep,name=streams" json:"streams"`
	// Schema for the streams entering this synchronizer.
	ColumnTypes []cockroach_sql_sqlbase1.ColumnType `protobuf:"bytes,4,rep,name=column_types,json=columnTypes" json:"column_types"`
}

InputSyncSpec is the specification for an input synchronizer; it decides how to interleave rows from multiple input streams.

func (*InputSyncSpec) Descriptor

func (*InputSyncSpec) Descriptor() ([]byte, []int)

func (*InputSyncSpec) Marshal

func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)

func (*InputSyncSpec) MarshalTo

func (m *InputSyncSpec) MarshalTo(dAtA []byte) (int, error)

func (*InputSyncSpec) ProtoMessage

func (*InputSyncSpec) ProtoMessage()

func (*InputSyncSpec) Reset

func (m *InputSyncSpec) Reset()

func (*InputSyncSpec) Size

func (m *InputSyncSpec) Size() (n int)

func (*InputSyncSpec) String

func (m *InputSyncSpec) String() string

func (*InputSyncSpec) Unmarshal

func (m *InputSyncSpec) Unmarshal(dAtA []byte) error

type InputSyncSpec_Type

type InputSyncSpec_Type int32
const (
	// Rows from the input streams are interleaved arbitrarily.
	InputSyncSpec_UNORDERED InputSyncSpec_Type = 0
	// The input streams are guaranteed to be ordered according to the column
	// ordering field; rows from the streams are interleaved to preserve that
	// ordering.
	InputSyncSpec_ORDERED InputSyncSpec_Type = 1
)

func (InputSyncSpec_Type) Enum

func (InputSyncSpec_Type) EnumDescriptor

func (InputSyncSpec_Type) EnumDescriptor() ([]byte, []int)

func (InputSyncSpec_Type) String

func (x InputSyncSpec_Type) String() string

func (*InputSyncSpec_Type) UnmarshalJSON

func (x *InputSyncSpec_Type) UnmarshalJSON(data []byte) error

type JoinReaderSpec

type JoinReaderSpec struct {
	Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
	// If 0, we use the primary index; each row in the input stream has a value
	// for each primary key.
	// TODO(radu): figure out the correct semantics when joining with an index.
	IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
}

JoinReaderSpec is the specification for a "join reader". A join reader performs KV operations to retrieve specific rows that correspond to the values in the input stream (join by lookup).

The "internal columns" of a JoinReader (see ProcessorSpec) are all the columns of the table. Internally, only the values for the columns needed by the post-processing stage are be populated.

func (*JoinReaderSpec) Descriptor

func (*JoinReaderSpec) Descriptor() ([]byte, []int)

func (*JoinReaderSpec) Marshal

func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)

func (*JoinReaderSpec) MarshalTo

func (m *JoinReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*JoinReaderSpec) ProtoMessage

func (*JoinReaderSpec) ProtoMessage()

func (*JoinReaderSpec) Reset

func (m *JoinReaderSpec) Reset()

func (*JoinReaderSpec) Size

func (m *JoinReaderSpec) Size() (n int)

func (*JoinReaderSpec) String

func (m *JoinReaderSpec) String() string

func (*JoinReaderSpec) Unmarshal

func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error

type JoinType

type JoinType int32
const (
	JoinType_INNER       JoinType = 0
	JoinType_LEFT_OUTER  JoinType = 1
	JoinType_RIGHT_OUTER JoinType = 2
	JoinType_FULL_OUTER  JoinType = 3
)

func (JoinType) Enum

func (x JoinType) Enum() *JoinType

func (JoinType) EnumDescriptor

func (JoinType) EnumDescriptor() ([]byte, []int)

func (JoinType) String

func (x JoinType) String() string

func (*JoinType) UnmarshalJSON

func (x *JoinType) UnmarshalJSON(data []byte) error

type MergeJoinerSpec

type MergeJoinerSpec struct {
	// The streams must be ordered according to the columns that have equality
	// constraints. The first column of the left ordering is constrained to be
	// equal to the first column in the right ordering and so on. The ordering
	// lengths and directions must match.
	// In the example above, left ordering describes C1+,C2- and right ordering
	// describes C5+,C4-.
	LeftOrdering  Ordering `protobuf:"bytes,1,opt,name=left_ordering,json=leftOrdering" json:"left_ordering"`
	RightOrdering Ordering `protobuf:"bytes,2,opt,name=right_ordering,json=rightOrdering" json:"right_ordering"`
	// "ON" expression (in addition to the equality constraints captured by the
	// orderings). Assuming that the left stream has N columns and the right
	// stream has M columns, in this expression ordinal references @1 to @N refer
	// to columns of the left stream and variables @(N+1) to @(N+M) refer to
	// columns in the right stream.
	OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
	Type   JoinType   `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
}

MergeJoinerSpec is the specification for a merge join processor. The processor has two inputs and one output. The inputs must have the same ordering on the columns that have equality constraints. For example:

SELECT * FROM T1 INNER JOIN T2 ON T1.C1 = T2.C5 AND T1.C2 = T2.C4

To perform a merge join, the streams corresponding to T1 and T2 must have the same ordering on columns C1, C2 and C5, C4 respectively. For example: C1+,C2- and C5+,C4-.

The "internal columns" of a MergeJoiner (see ProcessorSpec) are the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.

func (*MergeJoinerSpec) Descriptor

func (*MergeJoinerSpec) Descriptor() ([]byte, []int)

func (*MergeJoinerSpec) Marshal

func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)

func (*MergeJoinerSpec) MarshalTo

func (m *MergeJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*MergeJoinerSpec) ProtoMessage

func (*MergeJoinerSpec) ProtoMessage()

func (*MergeJoinerSpec) Reset

func (m *MergeJoinerSpec) Reset()

func (*MergeJoinerSpec) Size

func (m *MergeJoinerSpec) Size() (n int)

func (*MergeJoinerSpec) String

func (m *MergeJoinerSpec) String() string

func (*MergeJoinerSpec) Unmarshal

func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error

type MultiplexedRowChannel

type MultiplexedRowChannel struct {
	// contains filtered or unexported fields
}

MultiplexedRowChannel is a RowChannel wrapper which allows multiple row producers to push rows on the same channel.

func (*MultiplexedRowChannel) Close

func (mrc *MultiplexedRowChannel) Close(err error)

Close is part of the RowReceiver interface.

func (*MultiplexedRowChannel) ConsumerDone

func (mrc *MultiplexedRowChannel) ConsumerDone()

ConsumerDone is part of the RowSource interface.

func (*MultiplexedRowChannel) Init

func (mrc *MultiplexedRowChannel) Init(numSenders int, types []sqlbase.ColumnType)

Init initializes the MultiplexedRowChannel with the default buffer size.

func (*MultiplexedRowChannel) NextRow

func (mrc *MultiplexedRowChannel) NextRow() (sqlbase.EncDatumRow, error)

NextRow is part of the RowSource interface.

func (*MultiplexedRowChannel) PushRow

func (mrc *MultiplexedRowChannel) PushRow(row sqlbase.EncDatumRow) bool

PushRow is part of the RowReceiver interface.

func (*MultiplexedRowChannel) Types

func (mrc *MultiplexedRowChannel) Types() []sqlbase.ColumnType

Types is part of the RowSource interface.

type NoopCoreSpec

type NoopCoreSpec struct {
}

NoopCoreSpec indicates a "no-op" processor core. This is used when we just need post-processing or when only a synchronizer is required (e.g. at the final endpoint).

func (*NoopCoreSpec) Descriptor

func (*NoopCoreSpec) Descriptor() ([]byte, []int)

func (*NoopCoreSpec) Marshal

func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)

func (*NoopCoreSpec) MarshalTo

func (m *NoopCoreSpec) MarshalTo(dAtA []byte) (int, error)

func (*NoopCoreSpec) ProtoMessage

func (*NoopCoreSpec) ProtoMessage()

func (*NoopCoreSpec) Reset

func (m *NoopCoreSpec) Reset()

func (*NoopCoreSpec) Size

func (m *NoopCoreSpec) Size() (n int)

func (*NoopCoreSpec) String

func (m *NoopCoreSpec) String() string

func (*NoopCoreSpec) Unmarshal

func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error

type Ordering

type Ordering struct {
	Columns []Ordering_Column `protobuf:"bytes,1,rep,name=columns" json:"columns"`
}

Ordering defines an order - specifically a list of column indices and directions. See sqlbase.ColumnOrdering.
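
For instance, an ordering equivalent to ORDER BY column 0 ascending, then column 2 descending:

	ord := Ordering{Columns: []Ordering_Column{
		{ColIdx: 0, Direction: Ordering_Column_ASC},
		{ColIdx: 2, Direction: Ordering_Column_DESC},
	}}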

func (*Ordering) Descriptor

func (*Ordering) Descriptor() ([]byte, []int)

func (*Ordering) Marshal

func (m *Ordering) Marshal() (dAtA []byte, err error)

func (*Ordering) MarshalTo

func (m *Ordering) MarshalTo(dAtA []byte) (int, error)

func (*Ordering) ProtoMessage

func (*Ordering) ProtoMessage()

func (*Ordering) Reset

func (m *Ordering) Reset()

func (*Ordering) Size

func (m *Ordering) Size() (n int)

func (*Ordering) String

func (m *Ordering) String() string

func (*Ordering) Unmarshal

func (m *Ordering) Unmarshal(dAtA []byte) error

type Ordering_Column

type Ordering_Column struct {
	ColIdx    uint32                    `protobuf:"varint,1,opt,name=col_idx,json=colIdx" json:"col_idx"`
	Direction Ordering_Column_Direction `protobuf:"varint,2,opt,name=direction,enum=cockroach.sql.distsqlrun.Ordering_Column_Direction" json:"direction"`
}

func (*Ordering_Column) Descriptor

func (*Ordering_Column) Descriptor() ([]byte, []int)

func (*Ordering_Column) Marshal

func (m *Ordering_Column) Marshal() (dAtA []byte, err error)

func (*Ordering_Column) MarshalTo

func (m *Ordering_Column) MarshalTo(dAtA []byte) (int, error)

func (*Ordering_Column) ProtoMessage

func (*Ordering_Column) ProtoMessage()

func (*Ordering_Column) Reset

func (m *Ordering_Column) Reset()

func (*Ordering_Column) Size

func (m *Ordering_Column) Size() (n int)

func (*Ordering_Column) String

func (m *Ordering_Column) String() string

func (*Ordering_Column) Unmarshal

func (m *Ordering_Column) Unmarshal(dAtA []byte) error

type Ordering_Column_Direction

type Ordering_Column_Direction int32

The direction of the desired ordering for a column.

const (
	Ordering_Column_ASC  Ordering_Column_Direction = 0
	Ordering_Column_DESC Ordering_Column_Direction = 1
)

func (Ordering_Column_Direction) Enum

func (Ordering_Column_Direction) EnumDescriptor

func (Ordering_Column_Direction) EnumDescriptor() ([]byte, []int)

func (Ordering_Column_Direction) String

func (x Ordering_Column_Direction) String() string

func (*Ordering_Column_Direction) UnmarshalJSON

func (x *Ordering_Column_Direction) UnmarshalJSON(data []byte) error

type OutputRouterSpec

type OutputRouterSpec struct {
	Type    OutputRouterSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.OutputRouterSpec_Type" json:"type"`
	Streams []StreamEndpointSpec  `protobuf:"bytes,2,rep,name=streams" json:"streams"`
	// Only used for the BY_HASH type; these are the indexes of the columns we are
	// hashing.
	HashColumns []uint32 `protobuf:"varint,3,rep,name=hash_columns,json=hashColumns" json:"hash_columns,omitempty"`
}

OutputRouterSpec is the specification for the output router of a processor; it decides how to send results to multiple output streams.

func (*OutputRouterSpec) Descriptor

func (*OutputRouterSpec) Descriptor() ([]byte, []int)

func (*OutputRouterSpec) Marshal

func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec) MarshalTo

func (m *OutputRouterSpec) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec) ProtoMessage

func (*OutputRouterSpec) ProtoMessage()

func (*OutputRouterSpec) Reset

func (m *OutputRouterSpec) Reset()

func (*OutputRouterSpec) Size

func (m *OutputRouterSpec) Size() (n int)

func (*OutputRouterSpec) String

func (m *OutputRouterSpec) String() string

func (*OutputRouterSpec) Unmarshal

func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error

type OutputRouterSpec_Type

type OutputRouterSpec_Type int32
const (
	// Single output stream.
	OutputRouterSpec_PASS_THROUGH OutputRouterSpec_Type = 0
	// Each row is sent to all output streams.
	OutputRouterSpec_MIRROR OutputRouterSpec_Type = 1
	// Each row is sent to one stream, chosen by hashing certain columns of
	// the row (specified by the hash_columns field).
	OutputRouterSpec_BY_HASH OutputRouterSpec_Type = 2
	// Each row is sent to one stream, chosen according to preset boundaries
	// for the values of certain columns of the row. TODO(radu): an extra
	// optional structure below for the range details.
	OutputRouterSpec_BY_RANGE OutputRouterSpec_Type = 3
)

func (OutputRouterSpec_Type) Enum

func (OutputRouterSpec_Type) EnumDescriptor

func (OutputRouterSpec_Type) EnumDescriptor() ([]byte, []int)

func (OutputRouterSpec_Type) String

func (x OutputRouterSpec_Type) String() string

func (*OutputRouterSpec_Type) UnmarshalJSON

func (x *OutputRouterSpec_Type) UnmarshalJSON(data []byte) error

type PostProcessSpec

type PostProcessSpec struct {
	// A filtering expression which references the internal columns of the
	// processor via ordinal references (@1, @2, etc).
	Filter Expression `protobuf:"bytes,1,opt,name=filter" json:"filter"`
	// The output columns describe a projection on the internal set of columns;
	// only the columns in this list will be emitted. This can be omitted if the
	// set of output columns is a trivial identity (in which case all the internal
	// columns will be emitted).
	OutputColumns []uint32 `protobuf:"varint,2,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
}

PostProcessSpec describes the processing required to obtain the output (filtering, projection). It operates on the internal schema of the processor (see ProcessorSpec).
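
A sketch of a post-processing stage that keeps rows whose first internal column exceeds 100 and projects internal columns 0 and 2:

	post := PostProcessSpec{
		// @1 refers to internal column 0.
		Filter:        Expression{Expr: "@1 > 100"},
		OutputColumns: []uint32{0, 2},
	}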

func (*PostProcessSpec) Descriptor

func (*PostProcessSpec) Descriptor() ([]byte, []int)

func (*PostProcessSpec) Marshal

func (m *PostProcessSpec) Marshal() (dAtA []byte, err error)

func (*PostProcessSpec) MarshalTo

func (m *PostProcessSpec) MarshalTo(dAtA []byte) (int, error)

func (*PostProcessSpec) ProtoMessage

func (*PostProcessSpec) ProtoMessage()

func (*PostProcessSpec) Reset

func (m *PostProcessSpec) Reset()

func (*PostProcessSpec) Size

func (m *PostProcessSpec) Size() (n int)

func (*PostProcessSpec) String

func (m *PostProcessSpec) String() string

func (*PostProcessSpec) Unmarshal

func (m *PostProcessSpec) Unmarshal(dAtA []byte) error

type ProcessorCoreUnion

type ProcessorCoreUnion struct {
	Noop        *NoopCoreSpec    `protobuf:"bytes,1,opt,name=noop" json:"noop,omitempty"`
	TableReader *TableReaderSpec `protobuf:"bytes,2,opt,name=tableReader" json:"tableReader,omitempty"`
	JoinReader  *JoinReaderSpec  `protobuf:"bytes,3,opt,name=joinReader" json:"joinReader,omitempty"`
	Sorter      *SorterSpec      `protobuf:"bytes,4,opt,name=sorter" json:"sorter,omitempty"`
	Aggregator  *AggregatorSpec  `protobuf:"bytes,5,opt,name=aggregator" json:"aggregator,omitempty"`
	Evaluator   *EvaluatorSpec   `protobuf:"bytes,6,opt,name=evaluator" json:"evaluator,omitempty"`
	Distinct    *DistinctSpec    `protobuf:"bytes,7,opt,name=distinct" json:"distinct,omitempty"`
	MergeJoiner *MergeJoinerSpec `protobuf:"bytes,8,opt,name=mergeJoiner" json:"mergeJoiner,omitempty"`
	HashJoiner  *HashJoinerSpec  `protobuf:"bytes,9,opt,name=hashJoiner" json:"hashJoiner,omitempty"`
	Values      *ValuesCoreSpec  `protobuf:"bytes,10,opt,name=values" json:"values,omitempty"`
}

func (*ProcessorCoreUnion) Descriptor

func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)

func (*ProcessorCoreUnion) GetValue

func (this *ProcessorCoreUnion) GetValue() interface{}

func (*ProcessorCoreUnion) Marshal

func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)

func (*ProcessorCoreUnion) MarshalTo

func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorCoreUnion) ProtoMessage

func (*ProcessorCoreUnion) ProtoMessage()

func (*ProcessorCoreUnion) Reset

func (m *ProcessorCoreUnion) Reset()

func (*ProcessorCoreUnion) SetValue

func (this *ProcessorCoreUnion) SetValue(value interface{}) bool

func (*ProcessorCoreUnion) Size

func (m *ProcessorCoreUnion) Size() (n int)

func (*ProcessorCoreUnion) String

func (m *ProcessorCoreUnion) String() string

func (*ProcessorCoreUnion) Unmarshal

func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error

type ProcessorSpec

type ProcessorSpec struct {
	// In most cases, there is one input.
	Input []InputSyncSpec    `protobuf:"bytes,1,rep,name=input" json:"input"`
	Core  ProcessorCoreUnion `protobuf:"bytes,2,opt,name=core" json:"core"`
	Post  PostProcessSpec    `protobuf:"bytes,4,opt,name=post" json:"post"`
	// In most cases, there is one output.
	Output []OutputRouterSpec `protobuf:"bytes,3,rep,name=output" json:"output"`
}

Each processor has the following components:

  • one or more input synchronizers; each one merges rows between one or more input streams;

  • a processor "core" which encapsulates the inner logic of each processor;

  • a post-processing stage which allows "inline" post-processing on results (like projection or filtering);

  • one or more output routers; each one directs rows to one or more output streams.

== Internal columns ==

The core outputs rows of a certain schema to the post-processing stage. We call this the "internal schema" (or "internal columns") and it differs for each type of core. Column indices in a PostProcessSpec refer to these internal columns. Some columns may be unused by the post-processing stage; processor implementations are internally optimized to not produce values for such unneeded columns.
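
Putting the pieces together, a sketch of a trivial processor: a no-op core fed by one unordered local stream, projecting its first column and passing rows through to a sync-response stream (the stream ID is illustrative):

	proc := ProcessorSpec{
		Input: []InputSyncSpec{{
			Type: InputSyncSpec_UNORDERED,
			Streams: []StreamEndpointSpec{
				{Type: StreamEndpointSpec_LOCAL, StreamID: 1},
			},
		}},
		Core: ProcessorCoreUnion{Noop: &NoopCoreSpec{}},
		Post: PostProcessSpec{OutputColumns: []uint32{0}},
		Output: []OutputRouterSpec{{
			Type: OutputRouterSpec_PASS_THROUGH,
			Streams: []StreamEndpointSpec{
				{Type: StreamEndpointSpec_SYNC_RESPONSE},
			},
		}},
	}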

func (*ProcessorSpec) Descriptor

func (*ProcessorSpec) Descriptor() ([]byte, []int)

func (*ProcessorSpec) Marshal

func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)

func (*ProcessorSpec) MarshalTo

func (m *ProcessorSpec) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorSpec) ProtoMessage

func (*ProcessorSpec) ProtoMessage()

func (*ProcessorSpec) Reset

func (m *ProcessorSpec) Reset()

func (*ProcessorSpec) Size

func (m *ProcessorSpec) Size() (n int)

func (*ProcessorSpec) String

func (m *ProcessorSpec) String() string

func (*ProcessorSpec) Unmarshal

func (m *ProcessorSpec) Unmarshal(dAtA []byte) error

type RowBuffer

type RowBuffer struct {
	// Rows in this buffer. PushRow appends a row to the back, NextRow removes a
	// row from the front.
	Rows sqlbase.EncDatumRows

	// Err is used when the RowBuffer is used as a RowReceiver; it is the error
	// passed via Close().
	Err error

	// Closed is used when the RowBuffer is used as a RowReceiver; it is set to
	// true when the sender calls Close.
	Closed bool

	// Done is used when the RowBuffer is used as a RowSource; it is set to true
	// when the receiver has read all the rows.
	Done bool
	// contains filtered or unexported fields
}

RowBuffer is an implementation of RowReceiver that buffers (accumulates) results in memory, as well as an implementation of RowSource that returns rows from a row buffer.

func NewRowBuffer

func NewRowBuffer(types []sqlbase.ColumnType, rows sqlbase.EncDatumRows) *RowBuffer

NewRowBuffer creates a RowBuffer with the given schema and initial rows. The types are optional if there is at least one row.
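
A sketch of using a RowBuffer as a canned RowSource (rows is assumed to be an sqlbase.EncDatumRows prepared elsewhere):

	buf := NewRowBuffer(nil /* types; inferred from rows */, rows)
	for {
	    row, err := buf.NextRow()
	    if err != nil { ... }
	    if row == nil {
	        // All rows consumed.
	        break
	    }
	    // Use <row>
	}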

func (*RowBuffer) Close

func (rb *RowBuffer) Close(err error)

Close is part of the RowReceiver interface.

func (*RowBuffer) ConsumerDone

func (rb *RowBuffer) ConsumerDone()

ConsumerDone is part of the RowSource interface.

func (*RowBuffer) NextRow

func (rb *RowBuffer) NextRow() (sqlbase.EncDatumRow, error)

NextRow is part of the RowSource interface.

func (*RowBuffer) PushRow

func (rb *RowBuffer) PushRow(row sqlbase.EncDatumRow) bool

PushRow is part of the RowReceiver interface.

func (*RowBuffer) Types

func (rb *RowBuffer) Types() []sqlbase.ColumnType

Types is part of the RowSource interface.

type RowChannel

type RowChannel struct {

	// The channel on which rows are delivered.
	C <-chan StreamMsg
	// contains filtered or unexported fields
}

RowChannel is a thin layer over a StreamMsg channel, which can be used to transfer rows between goroutines.

func (*RowChannel) Close

func (rc *RowChannel) Close(err error)

Close is part of the RowReceiver interface.

func (*RowChannel) ConsumerDone

func (rc *RowChannel) ConsumerDone()

ConsumerDone is part of the RowSource interface.

func (*RowChannel) Init

func (rc *RowChannel) Init(types []sqlbase.ColumnType)

Init initializes the RowChannel with the default buffer size.

func (*RowChannel) InitWithBufSize

func (rc *RowChannel) InitWithBufSize(types []sqlbase.ColumnType, chanBufSize int)

InitWithBufSize initializes the RowChannel with a given buffer size.

func (*RowChannel) NextRow

func (rc *RowChannel) NextRow() (sqlbase.EncDatumRow, error)

NextRow is part of the RowSource interface.

func (*RowChannel) PushRow

func (rc *RowChannel) PushRow(row sqlbase.EncDatumRow) bool

PushRow is part of the RowReceiver interface.

func (*RowChannel) Types

func (rc *RowChannel) Types() []sqlbase.ColumnType

Types is part of the RowSource interface.

type RowReceiver

type RowReceiver interface {
	// PushRow sends a row to this receiver. May block.
	// Returns true if the row was sent, or false if the receiver does not need
	// any more rows. In all cases, Close() still needs to be called.
	// The sender must not modify the row after calling this function.
	PushRow(row sqlbase.EncDatumRow) bool
	// Close is called when we have no more rows; it causes the RowReceiver to
	// process all rows and clean up. If err is not nil, the error is sent to
	// the receiver (and the function may block).
	Close(err error)
}

RowReceiver is any component of a flow that receives rows from another component. It can be an input synchronizer, a router, or a mailbox.

type RowSource

type RowSource interface {
	// Types returns the schema for the rows in this source.
	Types() []sqlbase.ColumnType

	// NextRow retrieves the next row. Returns a nil row if there are no more
	// rows. Depending on the implementation, it may block.
	// The caller must not modify the received row.
	NextRow() (sqlbase.EncDatumRow, error)

	// ConsumerDone lets the source know that we will not need any more rows. May
	// block. If the consumer of the source stops consuming rows before NextRow
	// indicates that there are no more rows, this method must be called. It is ok
	// to call the method even if all the rows were consumed.
	ConsumerDone()
}

RowSource is any component of a flow that produces rows that can be consumed by another component.
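
A sketch of the canonical consumption loop (mirroring the NextRow contract above; src is any RowSource):

	for {
	    row, err := src.NextRow()
	    if err != nil { ... }
	    if row == nil {
	        // No more rows.
	        break
	    }
	    // Process <row>; the row must not be modified.
	}
	// To stop consuming early instead, call src.ConsumerDone().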

type ServerConfig

type ServerConfig struct {
	log.AmbientContext

	DB         *client.DB
	RPCContext *rpc.Context
	Stopper    *stop.Stopper
}

ServerConfig encompasses the configuration required to create a DistSQLServer.

type ServerImpl

type ServerImpl struct {
	ServerConfig
	// contains filtered or unexported fields
}

ServerImpl implements the server for the distributed SQL APIs.

func NewServer

func NewServer(cfg ServerConfig) *ServerImpl

NewServer instantiates a DistSQLServer.

func (*ServerImpl) FlowStream

func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error

FlowStream is part of the DistSQLServer interface.

func (*ServerImpl) RunSyncFlow

func (ds *ServerImpl) RunSyncFlow(req *SetupFlowRequest, stream DistSQL_RunSyncFlowServer) error

RunSyncFlow is part of the DistSQLServer interface.

func (*ServerImpl) SetupFlow

func (ds *ServerImpl) SetupFlow(_ context.Context, req *SetupFlowRequest) (*SimpleResponse, error)

SetupFlow is part of the DistSQLServer interface.

func (*ServerImpl) SetupSyncFlow

func (ds *ServerImpl) SetupSyncFlow(
	ctx context.Context, req *SetupFlowRequest, output RowReceiver,
) (*Flow, error)

SetupSyncFlow sets up a synchronous flow, connecting the sync response output stream to the given RowReceiver. The flow is not started. The flow will be associated with the given context.

func (*ServerImpl) Start

func (ds *ServerImpl) Start()

Start launches workers for the server.

type SetupFlowRequest

type SetupFlowRequest struct {
	Txn cockroach_roachpb1.Transaction `protobuf:"bytes,1,opt,name=txn" json:"txn"`
	// If set, the context of an active tracing span.
	TraceContext *cockroach_util_tracing.SpanContextCarrier `protobuf:"bytes,2,opt,name=trace_context,json=traceContext" json:"trace_context,omitempty"`
	Flow         FlowSpec                                   `protobuf:"bytes,3,opt,name=flow" json:"flow"`
}

func (*SetupFlowRequest) Descriptor

func (*SetupFlowRequest) Descriptor() ([]byte, []int)

func (*SetupFlowRequest) Marshal

func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)

func (*SetupFlowRequest) MarshalTo

func (m *SetupFlowRequest) MarshalTo(dAtA []byte) (int, error)

func (*SetupFlowRequest) ProtoMessage

func (*SetupFlowRequest) ProtoMessage()

func (*SetupFlowRequest) Reset

func (m *SetupFlowRequest) Reset()

func (*SetupFlowRequest) Size

func (m *SetupFlowRequest) Size() (n int)

func (*SetupFlowRequest) String

func (m *SetupFlowRequest) String() string

func (*SetupFlowRequest) Unmarshal

func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error

type SimpleResponse

type SimpleResponse struct {
	// TODO(radu): we should be using our own error instead of roachpb.Error.
	// One important error field for distsql is whether the error is
	// "authoritative": if a query is distributed on multiple nodes and one node
	// hits an error, the other nodes may hit errors as well as a consequence
	// (e.g. streams can't connect to the failed flow). The node that started the
	// flow needs to distinguish which errors are caused by non-availability of
	// other nodes so they don't obscure the real error.
	Error *cockroach_roachpb2.Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}

func (*SimpleResponse) Descriptor

func (*SimpleResponse) Descriptor() ([]byte, []int)

func (*SimpleResponse) Marshal

func (m *SimpleResponse) Marshal() (dAtA []byte, err error)

func (*SimpleResponse) MarshalTo

func (m *SimpleResponse) MarshalTo(dAtA []byte) (int, error)

func (*SimpleResponse) ProtoMessage

func (*SimpleResponse) ProtoMessage()

func (*SimpleResponse) Reset

func (m *SimpleResponse) Reset()

func (*SimpleResponse) Size

func (m *SimpleResponse) Size() (n int)

func (*SimpleResponse) String

func (m *SimpleResponse) String() string

func (*SimpleResponse) Unmarshal

func (m *SimpleResponse) Unmarshal(dAtA []byte) error

type SorterSpec

type SorterSpec struct {
	OutputOrdering Ordering `protobuf:"bytes,1,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
	// The ordering match length indicates that the input is already sorted on
	// the first 'n' output ordering columns; it can optionally be set to enable
	// speed-ups that take advantage of the partial ordering.
	OrderingMatchLen uint32 `protobuf:"varint,2,opt,name=ordering_match_len,json=orderingMatchLen" json:"ordering_match_len"`
	// A limit can optionally be specified to allow further optimizations that
	// take advantage of the fact that only the top 'k' results are needed.
	Limit int64 `protobuf:"varint,3,opt,name=limit" json:"limit"`
}

SorterSpec is the specification for a "sorting aggregator". A sorting aggregator sorts elements in the input stream providing a certain output order guarantee regardless of the input ordering. The output ordering is according to a configurable set of columns.

The "internal columns" of a Sorter (see ProcessorSpec) are the same as the input columns.

func (*SorterSpec) Descriptor

func (*SorterSpec) Descriptor() ([]byte, []int)

func (*SorterSpec) Marshal

func (m *SorterSpec) Marshal() (dAtA []byte, err error)

func (*SorterSpec) MarshalTo

func (m *SorterSpec) MarshalTo(dAtA []byte) (int, error)

func (*SorterSpec) ProtoMessage

func (*SorterSpec) ProtoMessage()

func (*SorterSpec) Reset

func (m *SorterSpec) Reset()

func (*SorterSpec) Size

func (m *SorterSpec) Size() (n int)

func (*SorterSpec) String

func (m *SorterSpec) String() string

func (*SorterSpec) Unmarshal

func (m *SorterSpec) Unmarshal(dAtA []byte) error

type StreamData

type StreamData struct {
	// Encodes one or more data rows. Each datum is encoded according to the
	// corresponding DatumInfo.
	RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
}

StreamData is a message that can be sent multiple times as part of a stream.

func (*StreamData) Descriptor

func (*StreamData) Descriptor() ([]byte, []int)

func (*StreamData) Marshal

func (m *StreamData) Marshal() (dAtA []byte, err error)

func (*StreamData) MarshalTo

func (m *StreamData) MarshalTo(dAtA []byte) (int, error)

func (*StreamData) ProtoMessage

func (*StreamData) ProtoMessage()

func (*StreamData) Reset

func (m *StreamData) Reset()

func (*StreamData) Size

func (m *StreamData) Size() (n int)

func (*StreamData) String

func (m *StreamData) String() string

func (*StreamData) Unmarshal

func (m *StreamData) Unmarshal(dAtA []byte) error

type StreamDecoder

type StreamDecoder struct {
	// contains filtered or unexported fields
}

StreamDecoder converts a sequence of StreamMessage to EncDatumRows.

Sample usage:

sd := StreamDecoder{}
var row sqlbase.EncDatumRow
for each message in stream {
    err := sd.AddMessage(msg)
    if err != nil { ... }
    for {
        row, err = sd.GetRow(row)
        if err != nil { ... }
        if row == nil {
            // No more rows in this message.
            break
        }
        // Use <row>
        ...
    }
}

AddMessage can be called multiple times before getting the rows, but this will cause data to accumulate internally.

func (*StreamDecoder) AddMessage

func (sd *StreamDecoder) AddMessage(msg *StreamMessage) error

AddMessage adds the data in a StreamMessage to the decoder.

The StreamDecoder may keep a reference to msg.Data.RawBytes until all the rows in the message are retrieved with GetRow.

func (*StreamDecoder) GetRow

func (sd *StreamDecoder) GetRow(rowBuf sqlbase.EncDatumRow) (sqlbase.EncDatumRow, error)

GetRow returns a row of EncDatums received in the stream. A row buffer can be provided optionally. Returns nil if there are no more rows received so far.

func (*StreamDecoder) IsDone

func (sd *StreamDecoder) IsDone() (bool, error)

IsDone returns true if all the rows were returned and the stream trailer was received (in which case any error in the trailer is returned as well).

type StreamEncoder

type StreamEncoder struct {
	// contains filtered or unexported fields
}

StreamEncoder converts EncDatum rows into a sequence of StreamMessage.

Sample usage:

se := StreamEncoder{}

for {
    for ... {
       err := se.AddRow(...)
       ...
    }
    msg := se.FormMessage(false, nil)
    // Send out message.
    ...
}
msg := se.FormMessage(true, nil)
// Send out final message
...

func (*StreamEncoder) AddRow

func (se *StreamEncoder) AddRow(row sqlbase.EncDatumRow) error

AddRow encodes a row.

func (*StreamEncoder) FormMessage

func (se *StreamEncoder) FormMessage(final bool, trailerErr error) *StreamMessage

FormMessage populates a message containing the rows added since the last call to FormMessage. The returned StreamMessage should be treated as immutable. If final is true, a message trailer is populated with the given error.

type StreamEndpointSpec

type StreamEndpointSpec struct {
	Type StreamEndpointSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.StreamEndpointSpec_Type" json:"type"`
	// The ID of this stream.
	//
	// For LOCAL streams, both ends of the stream are part of the flow on this
	// machine (and there must be a corresponding endpoint with the same ID).
	//
	// For REMOTE streams, this ID is used in the StreamHeader when connecting to
	// the other host.
	//
	// For SYNC_RESPONSE streams, the ID is unused.
	StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
	// Serving address for the target host, only used for outgoing REMOTE streams.
	TargetAddr string `protobuf:"bytes,3,opt,name=target_addr,json=targetAddr" json:"target_addr"`
}

StreamEndpointSpec describes one of the endpoints (input or output) of a physical stream.

func (*StreamEndpointSpec) Descriptor

func (*StreamEndpointSpec) Descriptor() ([]byte, []int)

func (*StreamEndpointSpec) Marshal

func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)

func (*StreamEndpointSpec) MarshalTo

func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)

func (*StreamEndpointSpec) ProtoMessage

func (*StreamEndpointSpec) ProtoMessage()

func (*StreamEndpointSpec) Reset

func (m *StreamEndpointSpec) Reset()

func (*StreamEndpointSpec) Size

func (m *StreamEndpointSpec) Size() (n int)

func (*StreamEndpointSpec) String

func (m *StreamEndpointSpec) String() string

func (*StreamEndpointSpec) Unmarshal

func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error

type StreamEndpointSpec_Type

type StreamEndpointSpec_Type int32
const (
	// Stream that is part of the local flow.
	StreamEndpointSpec_LOCAL StreamEndpointSpec_Type = 0
	// Stream that has the other endpoint on a different node.
	StreamEndpointSpec_REMOTE StreamEndpointSpec_Type = 1
	// Special stream used when in "sync flow" mode. In this mode, we return
	// results directly as part of the RPC call that set up the flow. This saves
	// overhead (extra RPCs) compared to the normal mode where the RPC just sets
	// up the flow. This type can only be used with outbound endpoints.
	StreamEndpointSpec_SYNC_RESPONSE StreamEndpointSpec_Type = 2
)

func (StreamEndpointSpec_Type) Enum

func (StreamEndpointSpec_Type) EnumDescriptor

func (StreamEndpointSpec_Type) EnumDescriptor() ([]byte, []int)

func (StreamEndpointSpec_Type) String

func (x StreamEndpointSpec_Type) String() string

func (*StreamEndpointSpec_Type) UnmarshalJSON

func (x *StreamEndpointSpec_Type) UnmarshalJSON(data []byte) error

type StreamHeader

type StreamHeader struct {
	FlowID   FlowID   `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
	StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
	// There is one DatumInfo for each element in a row.
	Info []DatumInfo `protobuf:"bytes,3,rep,name=info" json:"info"`
}

StreamHeader is a message that is sent once at the beginning of a stream.

func (*StreamHeader) Descriptor

func (*StreamHeader) Descriptor() ([]byte, []int)

func (*StreamHeader) Marshal

func (m *StreamHeader) Marshal() (dAtA []byte, err error)

func (*StreamHeader) MarshalTo

func (m *StreamHeader) MarshalTo(dAtA []byte) (int, error)

func (*StreamHeader) ProtoMessage

func (*StreamHeader) ProtoMessage()

func (*StreamHeader) Reset

func (m *StreamHeader) Reset()

func (*StreamHeader) Size

func (m *StreamHeader) Size() (n int)

func (*StreamHeader) String

func (m *StreamHeader) String() string

func (*StreamHeader) Unmarshal

func (m *StreamHeader) Unmarshal(dAtA []byte) error

type StreamID

type StreamID int

StreamID identifies a stream; it may be local to a flow or it may cross machine boundaries. The identifier can only be used in the context of a specific flow.

type StreamMessage

type StreamMessage struct {
	// Header is present in the first message.
	Header *StreamHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Data is present in all messages except possibly the first and last.
	Data StreamData `protobuf:"bytes,2,opt,name=data" json:"data"`
	// Trailer is present in the last message.
	Trailer *StreamTrailer `protobuf:"bytes,3,opt,name=trailer" json:"trailer,omitempty"`
}

func (*StreamMessage) Descriptor

func (*StreamMessage) Descriptor() ([]byte, []int)

func (*StreamMessage) Marshal

func (m *StreamMessage) Marshal() (dAtA []byte, err error)

func (*StreamMessage) MarshalTo

func (m *StreamMessage) MarshalTo(dAtA []byte) (int, error)

func (*StreamMessage) ProtoMessage

func (*StreamMessage) ProtoMessage()

func (*StreamMessage) Reset

func (m *StreamMessage) Reset()

func (*StreamMessage) Size

func (m *StreamMessage) Size() (n int)

func (*StreamMessage) String

func (m *StreamMessage) String() string

func (*StreamMessage) Unmarshal

func (m *StreamMessage) Unmarshal(dAtA []byte) error

type StreamMsg

type StreamMsg struct {
	// Only one of these fields will be set.
	Row sqlbase.EncDatumRow
	Err error
}

StreamMsg is the message used in the channels that implement local physical streams.

type StreamTrailer

type StreamTrailer struct {
	Error *cockroach_roachpb2.Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}

StreamTrailer is a message that is sent once at the end of a stream.

func (*StreamTrailer) Descriptor

func (*StreamTrailer) Descriptor() ([]byte, []int)

func (*StreamTrailer) Marshal

func (m *StreamTrailer) Marshal() (dAtA []byte, err error)

func (*StreamTrailer) MarshalTo

func (m *StreamTrailer) MarshalTo(dAtA []byte) (int, error)

func (*StreamTrailer) ProtoMessage

func (*StreamTrailer) ProtoMessage()

func (*StreamTrailer) Reset

func (m *StreamTrailer) Reset()

func (*StreamTrailer) Size

func (m *StreamTrailer) Size() (n int)

func (*StreamTrailer) String

func (m *StreamTrailer) String() string

func (*StreamTrailer) Unmarshal

func (m *StreamTrailer) Unmarshal(dAtA []byte) error

type TableReaderSpan

type TableReaderSpan struct {
	// TODO(radu): the dist_sql APIs should be agnostic to how we map tables to
	// KVs. The span should be described as starting and ending lists of values
	// for a prefix of the index columns, along with inclusive/exclusive flags.
	Span cockroach_roachpb1.Span `protobuf:"bytes,1,opt,name=span" json:"span"`
}

func (*TableReaderSpan) Descriptor

func (*TableReaderSpan) Descriptor() ([]byte, []int)

func (*TableReaderSpan) Marshal

func (m *TableReaderSpan) Marshal() (dAtA []byte, err error)

func (*TableReaderSpan) MarshalTo

func (m *TableReaderSpan) MarshalTo(dAtA []byte) (int, error)

func (*TableReaderSpan) ProtoMessage

func (*TableReaderSpan) ProtoMessage()

func (*TableReaderSpan) Reset

func (m *TableReaderSpan) Reset()

func (*TableReaderSpan) Size

func (m *TableReaderSpan) Size() (n int)

func (*TableReaderSpan) String

func (m *TableReaderSpan) String() string

func (*TableReaderSpan) Unmarshal

func (m *TableReaderSpan) Unmarshal(dAtA []byte) error

type TableReaderSpec

type TableReaderSpec struct {
	Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
	// If 0, we use the primary index. If non-zero, we use the index_idx-th index,
	// i.e. table.indexes[index_idx-1]
	IndexIdx uint32            `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
	Reverse  bool              `protobuf:"varint,3,opt,name=reverse" json:"reverse"`
	Spans    []TableReaderSpan `protobuf:"bytes,4,rep,name=spans" json:"spans"`
	// If nonzero, the table reader only needs to return this many rows.
	HardLimit int64 `protobuf:"varint,8,opt,name=hard_limit,json=hardLimit" json:"hard_limit"`
	// The soft limit is a hint for how many rows the consumer of the table reader
	// output might need. If both the hard limit and the soft limit are set, the
	// soft limit must be lower than the hard limit.
	SoftLimit int64 `protobuf:"varint,7,opt,name=soft_limit,json=softLimit" json:"soft_limit"`
}

TableReaderSpec is the specification for a "table reader". A table reader performs KV operations to retrieve rows for a table and outputs the desired columns of the rows that pass a filter expression.

The "internal columns" of a TableReader (see ProcessorSpec) are all the columns of the table. Internally, only the values for the columns needed by the post-processing stage are be populated.

func (*TableReaderSpec) Descriptor

func (*TableReaderSpec) Descriptor() ([]byte, []int)

func (*TableReaderSpec) Marshal

func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)

func (*TableReaderSpec) MarshalTo

func (m *TableReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*TableReaderSpec) ProtoMessage

func (*TableReaderSpec) ProtoMessage()

func (*TableReaderSpec) Reset

func (m *TableReaderSpec) Reset()

func (*TableReaderSpec) Size

func (m *TableReaderSpec) Size() (n int)

func (*TableReaderSpec) String

func (m *TableReaderSpec) String() string

func (*TableReaderSpec) Unmarshal

func (m *TableReaderSpec) Unmarshal(dAtA []byte) error

type ValuesCoreSpec

type ValuesCoreSpec struct {
	// There is one DatumInfo for each element in a row.
	Columns []DatumInfo `protobuf:"bytes,1,rep,name=columns" json:"columns"`
	// Each raw block encodes one or more data rows; each datum is encoded
	// according to the corresponding DatumInfo.
	RawBytes [][]byte `protobuf:"bytes,2,rep,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
}

ValuesCoreSpec is the core of a processor that has no inputs and generates "pre-canned" rows. This is not intended to be used for very large datasets.

func (*ValuesCoreSpec) Descriptor

func (*ValuesCoreSpec) Descriptor() ([]byte, []int)

func (*ValuesCoreSpec) Marshal

func (m *ValuesCoreSpec) Marshal() (dAtA []byte, err error)

func (*ValuesCoreSpec) MarshalTo

func (m *ValuesCoreSpec) MarshalTo(dAtA []byte) (int, error)

func (*ValuesCoreSpec) ProtoMessage

func (*ValuesCoreSpec) ProtoMessage()

func (*ValuesCoreSpec) Reset

func (m *ValuesCoreSpec) Reset()

func (*ValuesCoreSpec) Size

func (m *ValuesCoreSpec) Size() (n int)

func (*ValuesCoreSpec) String

func (m *ValuesCoreSpec) String() string

func (*ValuesCoreSpec) Unmarshal

func (m *ValuesCoreSpec) Unmarshal(dAtA []byte) error
