Documentation ¶
Index ¶
- Constants
- Variables
- func AddSecondsToBaseTime(baseTimeInNanoSec int64, durationInSeconds int64) int64
- func AwaitWaitGroup(wg *sync.WaitGroup, timeout time.Duration) bool
- func CheckEventBlobSizeLimit(actualSize int, warnLimit int, errorLimit int, namespaceID string, ...) error
- func ConvertIndexedValueTypeToProtoType(fieldType interface{}, logger log.Logger) enumspb.IndexedValueType
- func CreateAdminServiceRetryPolicy() backoff.RetryPolicy
- func CreateFrontendServiceRetryPolicy() backoff.RetryPolicy
- func CreateHistoryServiceRetryPolicy() backoff.RetryPolicy
- func CreateHistoryStartWorkflowRequest(namespaceID string, ...) *historyservice.StartWorkflowExecutionRequest
- func CreateKafkaOperationRetryPolicy() backoff.RetryPolicy
- func CreateMatchingPollWorkflowTaskQueueResponse(historyResponse *historyservice.RecordWorkflowTaskStartedResponse, ...) *matchingservice.PollWorkflowTaskQueueResponse
- func CreateMatchingServiceRetryPolicy() backoff.RetryPolicy
- func CreatePersistanceRetryPolicy() backoff.RetryPolicy
- func CreateReplicationServiceBusyRetryPolicy() backoff.RetryPolicy
- func CreateTaskProcessingRetryPolicy() backoff.RetryPolicy
- func DeserializeSearchAttributeValue(value *commonpb.Payload, valueType enumspb.IndexedValueType) (interface{}, error)
- func EnsureRetryPolicyDefaults(originalPolicy *commonpb.RetryPolicy, defaultSettings DefaultRetrySettings)
- func GenerateRandomString(n int) string
- func GetDefaultAdvancedVisibilityWritingMode(isAdvancedVisConfigExist bool) string
- func GetDefaultRetryPolicyConfigOptions() map[string]interface{}
- func GetPayloadsMapSize(data map[string]*commonpb.Payloads) int
- func GetWorkflowExecutionTimeout(namespace string, requestedTimeout time.Duration, ...) time.Duration
- func GetWorkflowRunTimeout(namespace string, requestedTimeout time.Duration, ...) time.Duration
- func GetWorkflowTaskTimeout(namespace string, requestedTimeout time.Duration, ...) time.Duration
- func IsDeadlineExceeded(err error) bool
- func IsJustOrderByClause(clause string) bool
- func IsKafkaTransientError(err error) bool
- func IsPersistenceTransientError(err error) bool
- func IsResourceExhausted(err error) bool
- func IsServiceNonRetryableError(err error) bool
- func IsServiceTransientError(err error) bool
- func IsValidContext(ctx context.Context) error
- func IsWhitelistServiceTransientError(err error) bool
- func MaxDuration(a, b time.Duration) time.Duration
- func MaxInt(a, b int) int
- func MaxInt64(a, b int64) int64
- func MaxTime(a, b time.Time) time.Time
- func MinDuration(a, b time.Duration) time.Duration
- func MinInt(a, b int) int
- func MinInt32(a, b int32) int32
- func MinInt64(a, b int64) int64
- func MinTime(a, b time.Time) time.Time
- func PrettyPrintHistory(history *historypb.History, logger log.Logger)
- func SortInt64Slice(slice []int64)
- func ValidateLongPollContextTimeout(ctx context.Context, handlerName string, logger log.Logger) error
- func ValidateLongPollContextTimeoutIsSet(ctx context.Context, handlerName string, logger log.Logger) (time.Time, error)
- func ValidateRetryPolicy(policy *commonpb.RetryPolicy) error
- func WorkflowIDToHistoryShard(namespaceID, workflowID string, numberOfShards int32) int32
- type ClientCache
- type Daemon
- type DefaultRetrySettings
- type PProfInitializer
- type RPCFactory
- type TaskTokenSerializer
Constants ¶
const (
    // FirstEventID is the id of the first event in the history
    FirstEventID int64 = 1
    // EmptyEventID is the id of the empty event
    EmptyEventID int64 = 0
    // EmptyVersion is used as the default value for failover version when no value is provided
    EmptyVersion int64 = 0
    // EndEventID is the id of the end event, here we use the int64 max
    EndEventID int64 = 1<<63 - 1
    // BufferedEventID is the id of the buffered event
    BufferedEventID int64 = -123
    // EmptyEventTaskID is uninitialized id of the task id within event
    EmptyEventTaskID int64 = 0
    // TransientEventID is the id of the transient event
    TransientEventID int64 = -124
    // FirstBlobPageToken is the page token identifying the first blob for each history archival
    FirstBlobPageToken = 1
    // LastBlobNextPageToken is the next page token on the last blob for each history archival
    LastBlobNextPageToken = -1
    // EndMessageID is the id of the end message, here we use the int64 max
    EndMessageID int64 = 1<<63 - 1
)
const (
    // FrontendServiceName is the name of the frontend service
    FrontendServiceName = "frontend"
    // HistoryServiceName is the name of the history service
    HistoryServiceName = "history"
    // MatchingServiceName is the name of the matching service
    MatchingServiceName = "matching"
    // WorkerServiceName is the name of the worker service
    WorkerServiceName = "worker"
)
const (
    // GetHistoryMaxPageSize is the max page size for get history
    GetHistoryMaxPageSize = 100
    // ReadDLQMessagesPageSize is the max page size for read DLQ messages
    ReadDLQMessagesPageSize = 1000
)
const (
    // SystemGlobalNamespace is global namespace name for temporal system workflows running globally
    SystemGlobalNamespace = "temporal-system-global"
    // SystemLocalNamespace is namespace name for temporal system workflows running in local cluster
    SystemLocalNamespace = "temporal-system"
    // SystemNamespaceID is namespace id for all temporal system workflows
    SystemNamespaceID = "32049b68-7872-4094-8e63-d0dd59896a83"
    // SystemNamespaceRetentionDays is retention config for all temporal system workflows
    SystemNamespaceRetentionDays = time.Hour * 24 * 7
    // DefaultAdminOperationToken is the default dynamic config value for AdminOperationToken
    DefaultAdminOperationToken = "TemporalTeamONLY"
)
This was flagged by salus as potentially hardcoded credentials. This is a false positive by the scanner and should be disregarded. #nosec
const (
    // MinLongPollTimeout is the minimum context timeout for long poll API, below which
    // the request won't be processed
    MinLongPollTimeout = time.Second * 2
    // CriticalLongPollTimeout is a threshold for the context timeout passed into long poll API,
    // below which a warning will be logged
    CriticalLongPollTimeout = time.Second * 20
    // MaxWorkflowRetentionPeriod is the maximum of workflow retention when registering namespace
    // !!! Do NOT simply decrease this number, because it is being used by history scavenger to avoid race condition against history archival.
    // Check more details in history scanner(scavenger)
    MaxWorkflowRetentionPeriod = 30 * time.Hour * 24
)
const (
    // DefaultWorkflowExecutionTimeout is the Default Workflow Execution timeout applied to a Workflow when
    // this value is not explicitly set by the user on a Start Workflow request
    // Intention is 10 years
    DefaultWorkflowExecutionTimeout = 24 * 365 * 10 * time.Hour
    // DefaultWorkflowRunTimeout is the Default Workflow Run timeout applied to a Workflow when
    // this value is not explicitly set by the user on a Start Workflow request
    // Intention is 10 years
    DefaultWorkflowRunTimeout = 24 * 365 * 10 * time.Hour
    // DefaultWorkflowTaskTimeout sets the Default Workflow Task timeout for a Workflow
    // when the value is not explicitly set by the user. Intention is 10 seconds.
    DefaultWorkflowTaskTimeout = 10 * time.Second
)
const (
    // ArchivalEnabled is the state for enabling archival
    ArchivalEnabled = "enabled"
    // ArchivalDisabled is the state for disabling archival
    ArchivalDisabled = "disabled"
    // ArchivalPaused is the state for pausing archival
    ArchivalPaused = "paused"
)
const (
    // AdvancedVisibilityWritingModeOff means do not write to advanced visibility store
    AdvancedVisibilityWritingModeOff = "off"
    // AdvancedVisibilityWritingModeOn means only write to advanced visibility store
    AdvancedVisibilityWritingModeOn = "on"
    // AdvancedVisibilityWritingModeDual means write to both normal visibility and advanced visibility store
    AdvancedVisibilityWritingModeDual = "dual"
)
Enum values for the dynamic config setting AdvancedVisibilityWritingMode.
const (
    // DaemonStatusInitialized coroutine pool initialized
    DaemonStatusInitialized int32 = 0
    // DaemonStatusStarted coroutine pool started
    DaemonStatusStarted int32 = 1
    // DaemonStatusStopped coroutine pool stopped
    DaemonStatusStopped int32 = 2
)
const (
    // FailureReasonCompleteResultExceedsLimit is failureReason for complete result exceeds limit
    FailureReasonCompleteResultExceedsLimit = "Complete result exceeds size limit."
    // FailureReasonFailureExceedsLimit is failureReason for failure details exceeds limit
    FailureReasonFailureExceedsLimit = "Failure exceeds size limit."
    // FailureReasonCancelDetailsExceedsLimit is failureReason for cancel details exceeds limit
    FailureReasonCancelDetailsExceedsLimit = "Cancel details exceed size limit."
    // FailureReasonHeartbeatExceedsLimit is failureReason for heartbeat exceeds limit
    FailureReasonHeartbeatExceedsLimit = "Heartbeat details exceed size limit."
    // FailureReasonSizeExceedsLimit is reason to fail workflow when history size or count exceed limit
    FailureReasonSizeExceedsLimit = "Workflow history size / count exceeds limit."
    // FailureReasonTransactionSizeExceedsLimit is the failureReason for when transaction cannot be committed because it exceeds size limit
    FailureReasonTransactionSizeExceedsLimit = "Transaction size exceeds limit."
)
const (
    // DefaultTransactionSizeLimit is the largest allowed transaction size to persistence
    DefaultTransactionSizeLimit = 14 * 1024 * 1024
)
const MaxTaskTimeout = MaxTaskTimeoutSeconds * time.Second
MaxTaskTimeout is the maximum task timeout allowed (366 days, expressed in seconds by MaxTaskTimeoutSeconds).
const MaxTaskTimeoutSeconds = 31622400
const (
    // VisibilityAppName is used to find kafka topics and ES indexName for visibility
    VisibilityAppName = "visibility"
)
Variables ¶
var (
    // ErrBlobSizeExceedsLimit is error for event blob size exceeds limit
    ErrBlobSizeExceedsLimit = serviceerror.NewInvalidArgument("Blob data size exceeds limit.")
    // ErrContextTimeoutTooShort is error for setting a very short context timeout when calling a long poll API
    ErrContextTimeoutTooShort = serviceerror.NewInvalidArgument("Context timeout is too short.")
    // ErrContextTimeoutNotSet is error for not setting a context timeout when calling a long poll API
    ErrContextTimeoutNotSet = serviceerror.NewInvalidArgument("Context timeout is not set.")
)
Functions ¶
func AddSecondsToBaseTime ¶
AddSecondsToBaseTime adds durationInSeconds to baseTimeInNanoSec and returns the resulting UnixNano value.
func AwaitWaitGroup ¶
AwaitWaitGroup calls Wait on the given wait group. It returns true if Wait returned before the timeout, and false otherwise.
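A minimal usage sketch; the import path go.temporal.io/server/common is an assumption based on this package's name:

package main

import (
    "fmt"
    "sync"
    "time"

    "go.temporal.io/server/common" // assumed import path for this package
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        time.Sleep(100 * time.Millisecond) // simulated work
    }()

    // Wait at most one second for the goroutine to finish.
    if common.AwaitWaitGroup(&wg, time.Second) {
        fmt.Println("work finished before the timeout")
    } else {
        fmt.Println("timed out waiting on the wait group")
    }
}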
func CheckEventBlobSizeLimit ¶ added in v0.5.0
func CheckEventBlobSizeLimit(
    actualSize int,
    warnLimit int,
    errorLimit int,
    namespaceID string,
    workflowID string,
    runID string,
    scope metrics.Scope,
    logger log.Logger,
    blobSizeViolationOperationTag tag.Tag,
) error
CheckEventBlobSizeLimit checks whether blob data exceeds the configured limits. It logs a warning if the size exceeds warnLimit and returns ErrBlobSizeExceedsLimit if it exceeds errorLimit.
func ConvertIndexedValueTypeToProtoType ¶ added in v0.27.0
func ConvertIndexedValueTypeToProtoType(fieldType interface{}, logger log.Logger) enumspb.IndexedValueType
ConvertIndexedValueTypeToProtoType takes fieldType as an interface{} and converts it to an IndexedValueType; the conversion is needed because different dynamic config client implementations may produce different underlying types.
func CreateAdminServiceRetryPolicy ¶ added in v0.5.0
func CreateAdminServiceRetryPolicy() backoff.RetryPolicy
CreateAdminServiceRetryPolicy creates a retry policy for calls to the admin service
func CreateFrontendServiceRetryPolicy ¶ added in v0.3.11
func CreateFrontendServiceRetryPolicy() backoff.RetryPolicy
CreateFrontendServiceRetryPolicy creates a retry policy for calls to frontend service
func CreateHistoryServiceRetryPolicy ¶
func CreateHistoryServiceRetryPolicy() backoff.RetryPolicy
CreateHistoryServiceRetryPolicy creates a retry policy for calls to history service
func CreateHistoryStartWorkflowRequest ¶ added in v0.4.0
func CreateHistoryStartWorkflowRequest(
    namespaceID string,
    startRequest *workflowservice.StartWorkflowExecutionRequest,
    parentExecutionInfo *workflowspb.ParentExecutionInfo,
    now time.Time,
) *historyservice.StartWorkflowExecutionRequest
CreateHistoryStartWorkflowRequest creates a StartWorkflowExecutionRequest for the history service
func CreateKafkaOperationRetryPolicy ¶ added in v0.5.2
func CreateKafkaOperationRetryPolicy() backoff.RetryPolicy
CreateKafkaOperationRetryPolicy creates a retry policy for Kafka operations
func CreateMatchingPollWorkflowTaskQueueResponse ¶ added in v0.27.0
func CreateMatchingPollWorkflowTaskQueueResponse(historyResponse *historyservice.RecordWorkflowTaskStartedResponse, workflowExecution *commonpb.WorkflowExecution, token []byte) *matchingservice.PollWorkflowTaskQueueResponse
CreateMatchingPollWorkflowTaskQueueResponse creates a response for matching's PollWorkflowTaskQueue
func CreateMatchingServiceRetryPolicy ¶ added in v0.5.0
func CreateMatchingServiceRetryPolicy() backoff.RetryPolicy
CreateMatchingServiceRetryPolicy creates a retry policy for calls to matching service
func CreatePersistanceRetryPolicy ¶
func CreatePersistanceRetryPolicy() backoff.RetryPolicy
CreatePersistanceRetryPolicy creates a retry policy for persistence layer operations
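A hedged sketch of pairing this policy with the package's transient-error predicate. The backoff import path, the backoff.Retry(op, policy, isRetryable) signature, and the persistenceClient call are assumptions and may differ between versions:

policy := common.CreatePersistanceRetryPolicy()

op := func() error {
    // Hypothetical persistence call; return its error so the retrier can inspect it.
    return persistenceClient.DoSomething(ctx)
}

// Retry the operation under the persistence retry policy, retrying only on
// transient persistence errors (assumed backoff.Retry signature).
err := backoff.Retry(op, policy, common.IsPersistenceTransientError)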
func CreateReplicationServiceBusyRetryPolicy ¶ added in v1.1.0
func CreateReplicationServiceBusyRetryPolicy() backoff.RetryPolicy
CreateReplicationServiceBusyRetryPolicy creates a retry policy for handling ServiceBusy errors during replication
func CreateTaskProcessingRetryPolicy ¶ added in v1.1.0
func CreateTaskProcessingRetryPolicy() backoff.RetryPolicy
CreateTaskProcessingRetryPolicy creates a retry policy for task processing
func DeserializeSearchAttributeValue ¶ added in v0.27.0
func DeserializeSearchAttributeValue(value *commonpb.Payload, valueType enumspb.IndexedValueType) (interface{}, error)
DeserializeSearchAttributeValue takes a JSON-encoded search attribute value and its type, unmarshals the value into a concrete type, and returns it.
func EnsureRetryPolicyDefaults ¶ added in v0.28.0
func EnsureRetryPolicyDefaults(originalPolicy *commonpb.RetryPolicy, defaultSettings DefaultRetrySettings)
EnsureRetryPolicyDefaults ensures the policy subfields, if not explicitly set, are set to the specified defaults
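A minimal sketch of filling unset policy fields from defaults; the concrete default values below are illustrative, not the server's actual defaults:

policy := &commonpb.RetryPolicy{
    // Interval, backoff coefficient, and maximum attempts are intentionally
    // left unset; EnsureRetryPolicyDefaults fills them in below.
}

defaults := common.DefaultRetrySettings{
    InitialInterval:            time.Second, // illustrative values only
    MaximumIntervalCoefficient: 100.0,
    BackoffCoefficient:         2.0,
    MaximumAttempts:            0,
}

common.EnsureRetryPolicyDefaults(policy, defaults)
// policy now carries the default values for every field the caller left unset.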
func GenerateRandomString ¶ added in v0.3.12
GenerateRandomString generates a random string of length n for use in tests
func GetDefaultAdvancedVisibilityWritingMode ¶ added in v0.27.0
GetDefaultAdvancedVisibilityWritingMode returns the default advancedVisibilityWritingMode based on whether the related config exists in the static config file.
func GetDefaultRetryPolicyConfigOptions ¶ added in v0.30.0
func GetDefaultRetryPolicyConfigOptions() map[string]interface{}
func GetPayloadsMapSize ¶ added in v0.27.0
func GetWorkflowExecutionTimeout ¶ added in v1.0.0
func GetWorkflowExecutionTimeout(
    namespace string,
    requestedTimeout time.Duration,
    getDefaultTimeoutFunc dynamicconfig.DurationPropertyFnWithNamespaceFilter,
    getMaxAllowedTimeoutFunc dynamicconfig.DurationPropertyFnWithNamespaceFilter) time.Duration
GetWorkflowExecutionTimeout gets the default execution timeout when none is requested, or truncates the requested value to the maximum allowed timeout
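The default-then-cap behavior can be pictured with a simplified sketch; this illustrates the documented semantics and is not the library implementation:

// timeoutOrDefault applies the behavior described above: fall back to the
// default when nothing was requested, then cap at the maximum allowed value.
func timeoutOrDefault(requested, defaultTimeout, maxTimeout time.Duration) time.Duration {
    if requested == 0 {
        requested = defaultTimeout
    }
    if maxTimeout > 0 && requested > maxTimeout {
        requested = maxTimeout
    }
    return requested
}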
func GetWorkflowRunTimeout ¶ added in v1.0.0
func GetWorkflowRunTimeout(
    namespace string,
    requestedTimeout time.Duration,
    executionTimeout time.Duration,
    getDefaultTimeoutFunc dynamicconfig.DurationPropertyFnWithNamespaceFilter,
    getMaxAllowedTimeoutFunc dynamicconfig.DurationPropertyFnWithNamespaceFilter) time.Duration
GetWorkflowRunTimeout gets the default run timeout when none is requested, or truncates the requested value to the maximum allowed timeout
func GetWorkflowTaskTimeout ¶ added in v1.0.0
func GetWorkflowTaskTimeout(
    namespace string,
    requestedTimeout time.Duration,
    getDefaultTimeoutFunc dynamicconfig.DurationPropertyFnWithNamespaceFilter) time.Duration
GetWorkflowTaskTimeout gets the default workflow task timeout when none is requested, or truncates the requested value to the maximum allowed timeout
func IsDeadlineExceeded ¶ added in v1.0.0
IsDeadlineExceeded checks if the error is a context deadline exceeded error
func IsJustOrderByClause ¶ added in v0.5.9
IsJustOrderByClause returns true if the query starts with an ORDER BY clause
func IsKafkaTransientError ¶ added in v0.5.2
IsKafkaTransientError checks if the error is a transient Kafka error
func IsPersistenceTransientError ¶
IsPersistenceTransientError checks if the error is a transient persistence error
func IsResourceExhausted ¶ added in v1.1.0
IsResourceExhausted checks if the error is a service busy error.
func IsServiceNonRetryableError ¶
IsServiceNonRetryableError checks if the error is a non-retryable error.
func IsServiceTransientError ¶ added in v0.3.11
IsServiceTransientError checks if the error is a retryable error.
func IsValidContext ¶
IsValidContext checks that the context is not expired or cancelled. It returns nil if the context is still valid; otherwise it returns the result of ctx.Err().
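A typical use is to check the context between units of work; the task loop below is a hypothetical sketch:

for _, task := range tasks { // hypothetical slice of pending work
    // Stop as soon as the caller's context expires or is cancelled.
    if err := common.IsValidContext(ctx); err != nil {
        return err
    }
    process(task) // hypothetical processing step
}
return nil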
func IsWhitelistServiceTransientError ¶ added in v0.3.14
IsWhitelistServiceTransientError checks if the error is a transient error.
func MaxDuration ¶ added in v0.27.0
MaxDuration returns the greater of two given time durations
func MinDuration ¶ added in v0.27.0
MinDuration returns the smaller of two given time durations
func PrettyPrintHistory ¶
PrettyPrintHistory prints the history in a human-readable format
func SortInt64Slice ¶ added in v0.27.0
func SortInt64Slice(slice []int64)
SortInt64Slice sorts the given int64 slice. Sort is not guaranteed to be stable.
func ValidateLongPollContextTimeout ¶ added in v0.5.7
func ValidateLongPollContextTimeout(
    ctx context.Context,
    handlerName string,
    logger log.Logger,
) error
ValidateLongPollContextTimeout checks whether the context timeout for a long poll handler is missing, too short, or below a normal value. If the timeout is not set or is too short, it logs an error and returns ErrContextTimeoutNotSet or ErrContextTimeoutTooShort accordingly. If the timeout is merely below a normal value, it logs at info level and returns nil.
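A sketch of guarding a long poll handler; the handler type and its logger field are hypothetical:

func (h *handler) PollWorkflowTaskQueue(ctx context.Context) error { // hypothetical handler
    // Reject requests whose context timeout is missing or too short for a long poll.
    if err := common.ValidateLongPollContextTimeout(ctx, "PollWorkflowTaskQueue", h.logger); err != nil {
        return err
    }
    // ... perform the long poll within the remaining deadline ...
    return nil
}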
func ValidateLongPollContextTimeoutIsSet ¶ added in v0.8.6
func ValidateLongPollContextTimeoutIsSet(
    ctx context.Context,
    handlerName string,
    logger log.Logger,
) (time.Time, error)
ValidateLongPollContextTimeoutIsSet checks if the context timeout is set for long poll requests.
func ValidateRetryPolicy ¶ added in v0.4.0
func ValidateRetryPolicy(policy *commonpb.RetryPolicy) error
ValidateRetryPolicy validates a retry policy
func WorkflowIDToHistoryShard ¶
WorkflowIDToHistoryShard maps a namespaceID-workflowID pair to a shardID
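A usage sketch; the shard count is illustrative and must match the cluster's configured number of history shards:

const numHistoryShards int32 = 512 // illustrative; use the cluster's configured value

shardID := common.WorkflowIDToHistoryShard(namespaceID, workflowID, numHistoryShards)
// History operations for this workflow are routed to the shard identified by shardID.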
Types ¶
type ClientCache ¶ added in v0.5.0
type ClientCache interface {
    GetClientForKey(key string) (interface{}, error)
    GetClientForClientKey(clientKey string) (interface{}, error)
    GetHostNameForKey(key string) (string, error)
}
ClientCache stores initialized clients
func NewClientCache ¶ added in v0.5.0
func NewClientCache(
    keyResolver keyResolver,
    clientProvider clientProvider,
) ClientCache
NewClientCache creates a new client cache based on membership
type Daemon ¶
type Daemon interface {
    Start()
    Stop()
}
Daemon is the base interface implemented by background tasks within Temporal
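A minimal background component satisfying the interface (a sketch; the reporter type is hypothetical):

// reporter is a hypothetical background component that satisfies Daemon.
type reporter struct {
    stopCh chan struct{}
}

func newReporter() *reporter {
    return &reporter{stopCh: make(chan struct{})}
}

func (r *reporter) Start() {
    go func() {
        ticker := time.NewTicker(time.Minute)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                // emit periodic metrics or logs here
            case <-r.stopCh:
                return
            }
        }
    }()
}

func (r *reporter) Stop() {
    close(r.stopCh)
}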
type DefaultRetrySettings ¶ added in v0.30.0
type DefaultRetrySettings struct {
    InitialInterval            time.Duration
    MaximumIntervalCoefficient float64
    BackoffCoefficient         float64
    MaximumAttempts            int32
}
DefaultRetrySettings indicates the default retry settings used when no retry policy is specified on an Activity, and supplies values for any unset fields when a policy is explicitly set on a Workflow
func FromConfigToDefaultRetrySettings ¶ added in v0.30.0
func FromConfigToDefaultRetrySettings(options map[string]interface{}) DefaultRetrySettings
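The two helpers pair naturally; a sketch continuing the EnsureRetryPolicyDefaults example above:

// Build DefaultRetrySettings from the package's default retry policy config options.
settings := common.FromConfigToDefaultRetrySettings(common.GetDefaultRetryPolicyConfigOptions())

// Apply them to a user-supplied policy (policy as in the earlier sketch).
common.EnsureRetryPolicyDefaults(policy, settings)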
type PProfInitializer ¶ added in v0.3.5
type PProfInitializer interface {
Start() error
}
PProfInitializer initializes pprof based on the config
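A sketch of an implementation that serves the standard net/http/pprof endpoints on a configured port; the struct and its config field are hypothetical:

import (
    "fmt"
    "net/http"
    _ "net/http/pprof" // registers /debug/pprof handlers on http.DefaultServeMux
)

// pprofInitializer is a hypothetical PProfInitializer backed by net/http/pprof.
type pprofInitializer struct {
    port int // taken from service configuration
}

func (p *pprofInitializer) Start() error {
    if p.port == 0 {
        return nil // pprof disabled when no port is configured
    }
    go func() {
        // DefaultServeMux already carries the /debug/pprof handlers via the blank import.
        _ = http.ListenAndServe(fmt.Sprintf("localhost:%d", p.port), nil)
    }()
    return nil
}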
type RPCFactory ¶ added in v0.3.2
type RPCFactory interface {
    GetFrontendGRPCServerOptions() ([]grpc.ServerOption, error)
    GetInternodeGRPCServerOptions() ([]grpc.ServerOption, error)
    GetGRPCListener() net.Listener
    GetRingpopChannel() *tchannel.Channel
    CreateFrontendGRPCConnection(hostName string) *grpc.ClientConn
    CreateInternodeGRPCConnection(hostName string) *grpc.ClientConn
}
RPCFactory creates gRPC listener and connection.
type TaskTokenSerializer ¶
type TaskTokenSerializer interface {
    Serialize(token *tokenspb.Task) ([]byte, error)
    Deserialize(data []byte) (*tokenspb.Task, error)
    SerializeQueryTaskToken(token *tokenspb.QueryTask) ([]byte, error)
    DeserializeQueryTaskToken(data []byte) (*tokenspb.QueryTask, error)
}
TaskTokenSerializer serializes task tokens
func NewProtoTaskTokenSerializer ¶ added in v0.27.0
func NewProtoTaskTokenSerializer() TaskTokenSerializer
NewProtoTaskTokenSerializer creates a new instance of TaskTokenSerializer
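A serialize/deserialize round-trip sketch; the tokenspb field names are assumptions inferred from the interface above:

serializer := common.NewProtoTaskTokenSerializer()

token := &tokenspb.Task{ // field names are assumptions
    NamespaceId: namespaceID,
    WorkflowId:  workflowID,
    RunId:       runID,
}

data, err := serializer.Serialize(token)
if err != nil {
    return err
}

decoded, err := serializer.Deserialize(data)
if err != nil {
    return err
}
_ = decoded // decoded carries the same task token fields as token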
Source Files ¶
Directories ¶
Path | Synopsis
---|---
archiver | Package archiver is a generated GoMock package.
filestore | Package filestore is a generated GoMock package.
gcloud | Package gcloud is a generated GoMock package.
s3store | Package s3store is a generated GoMock package.
authorization | Package authorization is a generated GoMock package.
cache | Package cache is a generated GoMock package.
cluster | Package cluster is a generated GoMock package.
mocks | Code generated by mockery v1.0.0.
membership | Package membership is a generated GoMock package.
mocks | Package mocks is a generated GoMock package.
namespace | Package namespace is a generated GoMock package.
persistence | Package persistence is a generated GoMock package.
client | Package client is a generated GoMock package.
service |
dynamicconfig | Package dynamicconfig is a generated GoMock package.
task | Package task is a generated GoMock package.
xdc | Package xdc is a generated GoMock package.