Documentation ¶
Overview ¶
Package neptunedata provides the API client, operations, and parameter types for Amazon NeptuneData.
Neptune Data API ¶
The Amazon Neptune data API provides SDK support for more than 40 of Neptune's data operations, including data loading, query execution, data inquiry, and machine learning. It supports the Gremlin and openCypher query languages, and is available in all SDK languages. It automatically signs API requests and greatly simplifies integrating Neptune into your applications.
Index ¶
- Constants
- func NewDefaultEndpointResolver() *internalendpoints.Resolver
- func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options)
- func WithEndpointResolver(v EndpointResolver) func(*Options) (deprecated)
- func WithEndpointResolverV2(v EndpointResolverV2) func(*Options)
- func WithSigV4SigningName(name string) func(*Options)
- func WithSigV4SigningRegion(region string) func(*Options)
- type AuthResolverParameters
- type AuthSchemeResolver
- type CancelGremlinQueryInput
- type CancelGremlinQueryOutput
- type CancelLoaderJobInput
- type CancelLoaderJobOutput
- type CancelMLDataProcessingJobInput
- type CancelMLDataProcessingJobOutput
- type CancelMLModelTrainingJobInput
- type CancelMLModelTrainingJobOutput
- type CancelMLModelTransformJobInput
- type CancelMLModelTransformJobOutput
- type CancelOpenCypherQueryInput
- type CancelOpenCypherQueryOutput
- type Client
- func (c *Client) CancelGremlinQuery(ctx context.Context, params *CancelGremlinQueryInput, optFns ...func(*Options)) (*CancelGremlinQueryOutput, error)
- func (c *Client) CancelLoaderJob(ctx context.Context, params *CancelLoaderJobInput, optFns ...func(*Options)) (*CancelLoaderJobOutput, error)
- func (c *Client) CancelMLDataProcessingJob(ctx context.Context, params *CancelMLDataProcessingJobInput, ...) (*CancelMLDataProcessingJobOutput, error)
- func (c *Client) CancelMLModelTrainingJob(ctx context.Context, params *CancelMLModelTrainingJobInput, ...) (*CancelMLModelTrainingJobOutput, error)
- func (c *Client) CancelMLModelTransformJob(ctx context.Context, params *CancelMLModelTransformJobInput, ...) (*CancelMLModelTransformJobOutput, error)
- func (c *Client) CancelOpenCypherQuery(ctx context.Context, params *CancelOpenCypherQueryInput, ...) (*CancelOpenCypherQueryOutput, error)
- func (c *Client) CreateMLEndpoint(ctx context.Context, params *CreateMLEndpointInput, optFns ...func(*Options)) (*CreateMLEndpointOutput, error)
- func (c *Client) DeleteMLEndpoint(ctx context.Context, params *DeleteMLEndpointInput, optFns ...func(*Options)) (*DeleteMLEndpointOutput, error)
- func (c *Client) DeletePropertygraphStatistics(ctx context.Context, params *DeletePropertygraphStatisticsInput, ...) (*DeletePropertygraphStatisticsOutput, error)
- func (c *Client) DeleteSparqlStatistics(ctx context.Context, params *DeleteSparqlStatisticsInput, ...) (*DeleteSparqlStatisticsOutput, error)
- func (c *Client) ExecuteFastReset(ctx context.Context, params *ExecuteFastResetInput, optFns ...func(*Options)) (*ExecuteFastResetOutput, error)
- func (c *Client) ExecuteGremlinExplainQuery(ctx context.Context, params *ExecuteGremlinExplainQueryInput, ...) (*ExecuteGremlinExplainQueryOutput, error)
- func (c *Client) ExecuteGremlinProfileQuery(ctx context.Context, params *ExecuteGremlinProfileQueryInput, ...) (*ExecuteGremlinProfileQueryOutput, error)
- func (c *Client) ExecuteGremlinQuery(ctx context.Context, params *ExecuteGremlinQueryInput, ...) (*ExecuteGremlinQueryOutput, error)
- func (c *Client) ExecuteOpenCypherExplainQuery(ctx context.Context, params *ExecuteOpenCypherExplainQueryInput, ...) (*ExecuteOpenCypherExplainQueryOutput, error)
- func (c *Client) ExecuteOpenCypherQuery(ctx context.Context, params *ExecuteOpenCypherQueryInput, ...) (*ExecuteOpenCypherQueryOutput, error)
- func (c *Client) GetEngineStatus(ctx context.Context, params *GetEngineStatusInput, optFns ...func(*Options)) (*GetEngineStatusOutput, error)
- func (c *Client) GetGremlinQueryStatus(ctx context.Context, params *GetGremlinQueryStatusInput, ...) (*GetGremlinQueryStatusOutput, error)
- func (c *Client) GetLoaderJobStatus(ctx context.Context, params *GetLoaderJobStatusInput, optFns ...func(*Options)) (*GetLoaderJobStatusOutput, error)
- func (c *Client) GetMLDataProcessingJob(ctx context.Context, params *GetMLDataProcessingJobInput, ...) (*GetMLDataProcessingJobOutput, error)
- func (c *Client) GetMLEndpoint(ctx context.Context, params *GetMLEndpointInput, optFns ...func(*Options)) (*GetMLEndpointOutput, error)
- func (c *Client) GetMLModelTrainingJob(ctx context.Context, params *GetMLModelTrainingJobInput, ...) (*GetMLModelTrainingJobOutput, error)
- func (c *Client) GetMLModelTransformJob(ctx context.Context, params *GetMLModelTransformJobInput, ...) (*GetMLModelTransformJobOutput, error)
- func (c *Client) GetOpenCypherQueryStatus(ctx context.Context, params *GetOpenCypherQueryStatusInput, ...) (*GetOpenCypherQueryStatusOutput, error)
- func (c *Client) GetPropertygraphStatistics(ctx context.Context, params *GetPropertygraphStatisticsInput, ...) (*GetPropertygraphStatisticsOutput, error)
- func (c *Client) GetPropertygraphStream(ctx context.Context, params *GetPropertygraphStreamInput, ...) (*GetPropertygraphStreamOutput, error)
- func (c *Client) GetPropertygraphSummary(ctx context.Context, params *GetPropertygraphSummaryInput, ...) (*GetPropertygraphSummaryOutput, error)
- func (c *Client) GetRDFGraphSummary(ctx context.Context, params *GetRDFGraphSummaryInput, optFns ...func(*Options)) (*GetRDFGraphSummaryOutput, error)
- func (c *Client) GetSparqlStatistics(ctx context.Context, params *GetSparqlStatisticsInput, ...) (*GetSparqlStatisticsOutput, error)
- func (c *Client) GetSparqlStream(ctx context.Context, params *GetSparqlStreamInput, optFns ...func(*Options)) (*GetSparqlStreamOutput, error)
- func (c *Client) ListGremlinQueries(ctx context.Context, params *ListGremlinQueriesInput, optFns ...func(*Options)) (*ListGremlinQueriesOutput, error)
- func (c *Client) ListLoaderJobs(ctx context.Context, params *ListLoaderJobsInput, optFns ...func(*Options)) (*ListLoaderJobsOutput, error)
- func (c *Client) ListMLDataProcessingJobs(ctx context.Context, params *ListMLDataProcessingJobsInput, ...) (*ListMLDataProcessingJobsOutput, error)
- func (c *Client) ListMLEndpoints(ctx context.Context, params *ListMLEndpointsInput, optFns ...func(*Options)) (*ListMLEndpointsOutput, error)
- func (c *Client) ListMLModelTrainingJobs(ctx context.Context, params *ListMLModelTrainingJobsInput, ...) (*ListMLModelTrainingJobsOutput, error)
- func (c *Client) ListMLModelTransformJobs(ctx context.Context, params *ListMLModelTransformJobsInput, ...) (*ListMLModelTransformJobsOutput, error)
- func (c *Client) ListOpenCypherQueries(ctx context.Context, params *ListOpenCypherQueriesInput, ...) (*ListOpenCypherQueriesOutput, error)
- func (c *Client) ManagePropertygraphStatistics(ctx context.Context, params *ManagePropertygraphStatisticsInput, ...) (*ManagePropertygraphStatisticsOutput, error)
- func (c *Client) ManageSparqlStatistics(ctx context.Context, params *ManageSparqlStatisticsInput, ...) (*ManageSparqlStatisticsOutput, error)
- func (c *Client) Options() Options
- func (c *Client) StartLoaderJob(ctx context.Context, params *StartLoaderJobInput, optFns ...func(*Options)) (*StartLoaderJobOutput, error)
- func (c *Client) StartMLDataProcessingJob(ctx context.Context, params *StartMLDataProcessingJobInput, ...) (*StartMLDataProcessingJobOutput, error)
- func (c *Client) StartMLModelTrainingJob(ctx context.Context, params *StartMLModelTrainingJobInput, ...) (*StartMLModelTrainingJobOutput, error)
- func (c *Client) StartMLModelTransformJob(ctx context.Context, params *StartMLModelTransformJobInput, ...) (*StartMLModelTransformJobOutput, error)
- type CreateMLEndpointInput
- type CreateMLEndpointOutput
- type DeleteMLEndpointInput
- type DeleteMLEndpointOutput
- type DeletePropertygraphStatisticsInput
- type DeletePropertygraphStatisticsOutput
- type DeleteSparqlStatisticsInput
- type DeleteSparqlStatisticsOutput
- type EndpointParameters
- type EndpointResolver
- type EndpointResolverFunc
- type EndpointResolverOptions
- type EndpointResolverV2
- type ExecuteFastResetInput
- type ExecuteFastResetOutput
- type ExecuteGremlinExplainQueryInput
- type ExecuteGremlinExplainQueryOutput
- type ExecuteGremlinProfileQueryInput
- type ExecuteGremlinProfileQueryOutput
- type ExecuteGremlinQueryInput
- type ExecuteGremlinQueryOutput
- type ExecuteOpenCypherExplainQueryInput
- type ExecuteOpenCypherExplainQueryOutput
- type ExecuteOpenCypherQueryInput
- type ExecuteOpenCypherQueryOutput
- type GetEngineStatusInput
- type GetEngineStatusOutput
- type GetGremlinQueryStatusInput
- type GetGremlinQueryStatusOutput
- type GetLoaderJobStatusInput
- type GetLoaderJobStatusOutput
- type GetMLDataProcessingJobInput
- type GetMLDataProcessingJobOutput
- type GetMLEndpointInput
- type GetMLEndpointOutput
- type GetMLModelTrainingJobInput
- type GetMLModelTrainingJobOutput
- type GetMLModelTransformJobInput
- type GetMLModelTransformJobOutput
- type GetOpenCypherQueryStatusInput
- type GetOpenCypherQueryStatusOutput
- type GetPropertygraphStatisticsInput
- type GetPropertygraphStatisticsOutput
- type GetPropertygraphStreamInput
- type GetPropertygraphStreamOutput
- type GetPropertygraphSummaryInput
- type GetPropertygraphSummaryOutput
- type GetRDFGraphSummaryInput
- type GetRDFGraphSummaryOutput
- type GetSparqlStatisticsInput
- type GetSparqlStatisticsOutput
- type GetSparqlStreamInput
- type GetSparqlStreamOutput
- type HTTPClient
- type HTTPSignerV4
- type ListGremlinQueriesInput
- type ListGremlinQueriesOutput
- type ListLoaderJobsInput
- type ListLoaderJobsOutput
- type ListMLDataProcessingJobsInput
- type ListMLDataProcessingJobsOutput
- type ListMLEndpointsInput
- type ListMLEndpointsOutput
- type ListMLModelTrainingJobsInput
- type ListMLModelTrainingJobsOutput
- type ListMLModelTransformJobsInput
- type ListMLModelTransformJobsOutput
- type ListOpenCypherQueriesInput
- type ListOpenCypherQueriesOutput
- type ManagePropertygraphStatisticsInput
- type ManagePropertygraphStatisticsOutput
- type ManageSparqlStatisticsInput
- type ManageSparqlStatisticsOutput
- type Options
- type ResolveEndpoint
- type StartLoaderJobInput
- type StartLoaderJobOutput
- type StartMLDataProcessingJobInput
- type StartMLDataProcessingJobOutput
- type StartMLModelTrainingJobInput
- type StartMLModelTrainingJobOutput
- type StartMLModelTransformJobInput
- type StartMLModelTransformJobOutput
Constants ¶
const ServiceAPIVersion = "2023-08-01"
const ServiceID = "neptunedata"
Variables ¶
This section is empty.
Functions ¶
func NewDefaultEndpointResolver ¶
func NewDefaultEndpointResolver() *internalendpoints.Resolver
NewDefaultEndpointResolver constructs a new service endpoint resolver
func WithAPIOptions ¶
func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options)
WithAPIOptions returns a functional option for setting the Client's APIOptions option.
func WithEndpointResolver ¶ (deprecated)
func WithEndpointResolver(v EndpointResolver) func(*Options)
Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for this field will likely prevent you from using any endpoint-related service features released after the introduction of EndpointResolverV2 and BaseEndpoint.
To migrate an EndpointResolver implementation that uses a custom endpoint, set the client option BaseEndpoint instead.
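As a minimal sketch, the migration might look like the following; the function name and the cluster endpoint URL are placeholders, not part of this package:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// newClientWithBaseEndpoint points the client at a specific cluster endpoint
// instead of relying on the deprecated EndpointResolver hook.
func newClientWithBaseEndpoint(ctx context.Context) (*neptunedata.Client, error) {
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        return nil, err
    }
    client := neptunedata.NewFromConfig(cfg, func(o *neptunedata.Options) {
        // Placeholder URL; use your own Neptune cluster endpoint.
        o.BaseEndpoint = aws.String("https://your-neptune-endpoint:8182")
    })
    return client, nil
}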
func WithEndpointResolverV2 ¶
func WithEndpointResolverV2(v EndpointResolverV2) func(*Options)
WithEndpointResolverV2 returns a functional option for setting the Client's EndpointResolverV2 option.
func WithSigV4SigningName ¶ added in v1.2.2
func WithSigV4SigningName(name string) func(*Options)
WithSigV4SigningName applies an override to the authentication workflow to use the given signing name for SigV4-authenticated operations.
This is an advanced setting. The value here is FINAL, taking precedence over the resolved signing name from both auth scheme resolution and endpoint resolution.
func WithSigV4SigningRegion ¶ added in v1.2.2
func WithSigV4SigningRegion(region string) func(*Options)
WithSigV4SigningRegion applies an override to the authentication workflow to use the given signing region for SigV4-authenticated operations.
This is an advanced setting. The value here is FINAL, taking precedence over the resolved signing region from both auth scheme resolution and endpoint resolution.
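As a rough sketch, both overrides can be passed as functional options when constructing the client. The signing name "neptune-db" and region "us-east-1" below are illustrative values, not values mandated by this package:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// newClientWithSigningOverrides pins the SigV4 signing name and region.
func newClientWithSigningOverrides(ctx context.Context) (*neptunedata.Client, error) {
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        return nil, err
    }
    client := neptunedata.NewFromConfig(cfg,
        neptunedata.WithSigV4SigningName("neptune-db"),   // illustrative signing name
        neptunedata.WithSigV4SigningRegion("us-east-1"),  // illustrative region
    )
    return client, nil
}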
Types ¶
type AuthResolverParameters ¶ added in v1.2.2
type AuthResolverParameters struct {
    // The name of the operation being invoked.
    Operation string

    // The region in which the operation is being invoked.
    Region string
}
AuthResolverParameters contains the set of inputs necessary for auth scheme resolution.
type AuthSchemeResolver ¶ added in v1.2.2
type AuthSchemeResolver interface {
ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
}
AuthSchemeResolver returns a set of possible authentication options for an operation.
type CancelGremlinQueryInput ¶
type CancelGremlinQueryInput struct {
    // The unique identifier that identifies the query to be canceled.
    //
    // This member is required.
    QueryId *string
    // contains filtered or unexported fields
}
type CancelGremlinQueryOutput ¶
type CancelGremlinQueryOutput struct {
    // The status of the cancelation.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type CancelLoaderJobInput ¶
type CancelLoaderJobInput struct {
    // The ID of the load job to be deleted.
    //
    // This member is required.
    LoadId *string
    // contains filtered or unexported fields
}
type CancelLoaderJobOutput ¶
type CancelLoaderJobOutput struct {
    // The cancellation status.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type CancelMLDataProcessingJobInput ¶
type CancelMLDataProcessingJobInput struct {
    // The unique identifier of the data-processing job.
    //
    // This member is required.
    Id *string

    // If set to TRUE, this flag specifies that all Neptune ML S3 artifacts should be
    // deleted when the job is stopped. The default is FALSE.
    Clean *bool

    // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3
    // resources. This must be listed in your DB cluster parameter group or an error
    // will occur.
    NeptuneIamRoleArn *string
    // contains filtered or unexported fields
}
type CancelMLDataProcessingJobOutput ¶
type CancelMLDataProcessingJobOutput struct {
    // The status of the cancellation request.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type CancelMLModelTrainingJobInput ¶
type CancelMLModelTrainingJobInput struct {
    // The unique identifier of the model-training job to be canceled.
    //
    // This member is required.
    Id *string

    // If set to TRUE, this flag specifies that all Amazon S3 artifacts should be
    // deleted when the job is stopped. The default is FALSE.
    Clean *bool

    // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3
    // resources. This must be listed in your DB cluster parameter group or an error
    // will occur.
    NeptuneIamRoleArn *string
    // contains filtered or unexported fields
}
type CancelMLModelTrainingJobOutput ¶
type CancelMLModelTrainingJobOutput struct {
    // The status of the cancellation.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type CancelMLModelTransformJobInput ¶
type CancelMLModelTransformJobInput struct {
    // The unique ID of the model transform job to be canceled.
    //
    // This member is required.
    Id *string

    // If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted when
    // the job is stopped. The default is FALSE.
    Clean *bool

    // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3
    // resources. This must be listed in your DB cluster parameter group or an error
    // will occur.
    NeptuneIamRoleArn *string
    // contains filtered or unexported fields
}
type CancelMLModelTransformJobOutput ¶
type CancelMLModelTransformJobOutput struct {
    // The status of the cancelation.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type CancelOpenCypherQueryOutput ¶
type CancelOpenCypherQueryOutput struct {
    // The cancelation payload for the openCypher query.
    Payload *bool

    // The cancellation status of the openCypher query.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
Client provides the API client to make operations call for Amazon NeptuneData.
func New ¶
New returns an initialized Client based on the functional options. Provide additional functional options to further configure the behavior of the client, such as changing the client's endpoint or adding custom middleware behavior.
func NewFromConfig ¶
NewFromConfig returns a new client from the provided config.
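A minimal sketch of constructing a client and making a simple call might look like the following. The data API is served by the cluster itself, so the example sets BaseEndpoint; the endpoint URL is a placeholder for your own cluster endpoint:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

func main() {
    ctx := context.Background()

    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // Point the client at your cluster endpoint (placeholder URL below).
    client := neptunedata.NewFromConfig(cfg, func(o *neptunedata.Options) {
        o.BaseEndpoint = aws.String("https://your-neptune-endpoint:8182")
    })

    // GetEngineStatusInput has no exported fields, so an empty input is enough.
    status, err := client.GetEngineStatus(ctx, &neptunedata.GetEngineStatusInput{})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("engine status:", aws.ToString(status.Status),
        "version:", aws.ToString(status.DbEngineVersion))
}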
func (*Client) CancelGremlinQuery ¶
func (c *Client) CancelGremlinQuery(ctx context.Context, params *CancelGremlinQueryInput, optFns ...func(*Options)) (*CancelGremlinQueryOutput, error)
Cancels a Gremlin query. See Gremlin query cancellation for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelQuery IAM action in that cluster.
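A brief sketch of canceling a running Gremlin query by its ID; the helper function name and the query ID value are illustrative:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// cancelGremlin cancels a running Gremlin query identified by queryID.
func cancelGremlin(ctx context.Context, client *neptunedata.Client, queryID string) error {
    out, err := client.CancelGremlinQuery(ctx, &neptunedata.CancelGremlinQueryInput{
        QueryId: aws.String(queryID),
    })
    if err != nil {
        return err
    }
    fmt.Println("cancellation status:", aws.ToString(out.Status))
    return nil
}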
func (*Client) CancelLoaderJob ¶
func (c *Client) CancelLoaderJob(ctx context.Context, params *CancelLoaderJobInput, optFns ...func(*Options)) (*CancelLoaderJobOutput, error)
Cancels a specified load job. This is an HTTP DELETE request. See Neptune Loader Get-Status API for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelLoaderJob IAM action in that cluster.
func (*Client) CancelMLDataProcessingJob ¶
func (c *Client) CancelMLDataProcessingJob(ctx context.Context, params *CancelMLDataProcessingJobInput, optFns ...func(*Options)) (*CancelMLDataProcessingJobOutput, error)
Cancels a Neptune ML data processing job. See The dataprocessing command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelMLDataProcessingJob IAM action in that cluster.
func (*Client) CancelMLModelTrainingJob ¶
func (c *Client) CancelMLModelTrainingJob(ctx context.Context, params *CancelMLModelTrainingJobInput, optFns ...func(*Options)) (*CancelMLModelTrainingJobOutput, error)
Cancels a Neptune ML model training job. See Model training using the modeltraining command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelMLModelTrainingJob IAM action in that cluster.
func (*Client) CancelMLModelTransformJob ¶
func (c *Client) CancelMLModelTransformJob(ctx context.Context, params *CancelMLModelTransformJobInput, optFns ...func(*Options)) (*CancelMLModelTransformJobOutput, error)
Cancels a specified model transform job. See Use a trained model to generate new model artifacts.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelMLModelTransformJob IAM action in that cluster.
func (*Client) CancelOpenCypherQuery ¶
func (c *Client) CancelOpenCypherQuery(ctx context.Context, params *CancelOpenCypherQueryInput, optFns ...func(*Options)) (*CancelOpenCypherQueryOutput, error)
Cancels a specified openCypher query. See Neptune openCypher status endpoint for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CancelQuery IAM action in that cluster.
func (*Client) CreateMLEndpoint ¶
func (c *Client) CreateMLEndpoint(ctx context.Context, params *CreateMLEndpointInput, optFns ...func(*Options)) (*CreateMLEndpointOutput, error)
Creates a new Neptune ML inference endpoint that lets you query one specific model that the model-training process constructed. See Managing inference endpoints using the endpoints command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:CreateMLEndpoint IAM action in that cluster.
func (*Client) DeleteMLEndpoint ¶
func (c *Client) DeleteMLEndpoint(ctx context.Context, params *DeleteMLEndpointInput, optFns ...func(*Options)) (*DeleteMLEndpointOutput, error)
Cancels the creation of a Neptune ML inference endpoint. See Managing inference endpoints using the endpoints command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:DeleteMLEndpoint IAM action in that cluster.
func (*Client) DeletePropertygraphStatistics ¶
func (c *Client) DeletePropertygraphStatistics(ctx context.Context, params *DeletePropertygraphStatisticsInput, optFns ...func(*Options)) (*DeletePropertygraphStatisticsOutput, error)
Deletes statistics for Gremlin and openCypher (property graph) data.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:DeleteStatistics IAM action in that cluster.
func (*Client) DeleteSparqlStatistics ¶
func (c *Client) DeleteSparqlStatistics(ctx context.Context, params *DeleteSparqlStatisticsInput, optFns ...func(*Options)) (*DeleteSparqlStatisticsOutput, error)
Deletes SPARQL statistics.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:DeleteStatistics IAM action in that cluster.
func (*Client) ExecuteFastReset ¶
func (c *Client) ExecuteFastReset(ctx context.Context, params *ExecuteFastResetInput, optFns ...func(*Options)) (*ExecuteFastResetOutput, error)
The fast reset REST API lets you reset a Neptune graph quickly and easily, removing all of its data.
Neptune fast reset is a two-step process. First you call ExecuteFastReset with action set to initiateDatabaseReset. This returns a UUID token which you then include when calling ExecuteFastReset again with action set to performDatabaseReset. See Empty an Amazon Neptune DB cluster using the fast reset API.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ResetDatabase IAM action in that cluster.
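A sketch of the two-step reset might look like the following. The enum constant names (types.ActionInitiateDatabaseReset, types.ActionPerformDatabaseReset) and the Token field on types.FastResetToken are assumed from the documented string values, not confirmed here:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// fastReset performs the two-step reset: request a token, then use it.
// WARNING: this removes all data in the cluster.
func fastReset(ctx context.Context, client *neptunedata.Client) error {
    // Step 1: ask for a reset token (constant name assumed from the
    // documented "initiateDatabaseReset" value).
    initOut, err := client.ExecuteFastReset(ctx, &neptunedata.ExecuteFastResetInput{
        Action: types.ActionInitiateDatabaseReset,
    })
    if err != nil {
        return err
    }
    if initOut.Payload == nil {
        return fmt.Errorf("no reset token returned")
    }
    token := initOut.Payload.Token // field name assumed on types.FastResetToken

    // Step 2: perform the reset using the token from step 1.
    resetOut, err := client.ExecuteFastReset(ctx, &neptunedata.ExecuteFastResetInput{
        Action: types.ActionPerformDatabaseReset,
        Token:  token,
    })
    if err != nil {
        return err
    }
    fmt.Println("reset status:", aws.ToString(resetOut.Status))
    return nil
}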
func (*Client) ExecuteGremlinExplainQuery ¶
func (c *Client) ExecuteGremlinExplainQuery(ctx context.Context, params *ExecuteGremlinExplainQueryInput, optFns ...func(*Options)) (*ExecuteGremlinExplainQueryOutput, error)
Executes a Gremlin Explain query.
Amazon Neptune has added a Gremlin feature named explain that provides a self-service tool for understanding the execution approach being taken by the Neptune engine for the query. You invoke it by adding an explain parameter to an HTTP call that submits a Gremlin query.
The explain feature provides information about the logical structure of query execution plans. You can use this information to identify potential evaluation and execution bottlenecks and to tune your query, as explained in Tuning Gremlin queries. You can also use query hints to improve query execution plans.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query:
Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to restrict the use of Gremlin queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ExecuteGremlinProfileQuery ¶
func (c *Client) ExecuteGremlinProfileQuery(ctx context.Context, params *ExecuteGremlinProfileQueryInput, optFns ...func(*Options)) (*ExecuteGremlinProfileQueryOutput, error)
Executes a Gremlin Profile query, which runs a specified traversal, collects various metrics about the run, and produces a profile report as output. See Gremlin profile API in Neptune for details.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster.
Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to restrict the use of Gremlin queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ExecuteGremlinQuery ¶
func (c *Client) ExecuteGremlinQuery(ctx context.Context, params *ExecuteGremlinQueryInput, optFns ...func(*Options)) (*ExecuteGremlinQueryOutput, error)
This command executes a Gremlin query. Amazon Neptune is compatible with Apache TinkerPop3 and Gremlin, so you can use the Gremlin traversal language to query the graph, as described under The Graph in the Apache TinkerPop3 documentation. More details can also be found in Accessing a Neptune graph with Gremlin.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions in that cluster, depending on the query:
Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to restrict the use of Gremlin queries (see Condition keys available in Neptune IAM data-access policy statements).
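As an illustrative sketch, the following runs a simple traversal and decodes the result, assuming the document.Interface result supports UnmarshalSmithyDocument as other SDK document values do:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// countVertices runs a trivial Gremlin traversal and decodes the result.
func countVertices(ctx context.Context, client *neptunedata.Client) error {
    out, err := client.ExecuteGremlinQuery(ctx, &neptunedata.ExecuteGremlinQueryInput{
        GremlinQuery: aws.String("g.V().count()"),
    })
    if err != nil {
        return err
    }

    // The result is a Smithy document; unmarshal it into a generic value.
    var result interface{}
    if out.Result != nil {
        if err := out.Result.UnmarshalSmithyDocument(&result); err != nil {
            return err
        }
    }
    fmt.Printf("gremlin result: %v\n", result)
    return nil
}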
func (*Client) ExecuteOpenCypherExplainQuery ¶
func (c *Client) ExecuteOpenCypherExplainQuery(ctx context.Context, params *ExecuteOpenCypherExplainQueryInput, optFns ...func(*Options)) (*ExecuteOpenCypherExplainQueryOutput, error)
Executes an openCypher explain request. See The openCypher explain feature for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster.
Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ExecuteOpenCypherQuery ¶
func (c *Client) ExecuteOpenCypherQuery(ctx context.Context, params *ExecuteOpenCypherQueryInput, optFns ...func(*Options)) (*ExecuteOpenCypherQueryOutput, error)
Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information.
Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to query property graphs.
The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license.
Note that when invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query:
Note also that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
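A small sketch of a parameterized openCypher call, using the OpenCypherQuery and Parameters fields documented later on this page; the query text, parameter JSON, and helper name are illustrative:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// findPerson runs a parameterized openCypher query. Parameters are passed
// as a JSON string.
func findPerson(ctx context.Context, client *neptunedata.Client, name string) error {
    out, err := client.ExecuteOpenCypherQuery(ctx, &neptunedata.ExecuteOpenCypherQueryInput{
        OpenCypherQuery: aws.String("MATCH (p:Person {name: $name}) RETURN p LIMIT 10"),
        Parameters:      aws.String(fmt.Sprintf(`{"name": %q}`, name)),
    })
    if err != nil {
        return err
    }

    // Decode the Smithy document result into a generic value.
    var results interface{}
    if out.Results != nil {
        if err := out.Results.UnmarshalSmithyDocument(&results); err != nil {
            return err
        }
    }
    fmt.Printf("openCypher results: %v\n", results)
    return nil
}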
func (*Client) GetEngineStatus ¶
func (c *Client) GetEngineStatus(ctx context.Context, params *GetEngineStatusInput, optFns ...func(*Options)) (*GetEngineStatusOutput, error)
Retrieves the status of the graph database on the host.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetEngineStatus IAM action in that cluster.
func (*Client) GetGremlinQueryStatus ¶
func (c *Client) GetGremlinQueryStatus(ctx context.Context, params *GetGremlinQueryStatusInput, optFns ...func(*Options)) (*GetGremlinQueryStatusOutput, error)
Gets the status of a specified Gremlin query.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster.
Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to restrict the use of Gremlin queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) GetLoaderJobStatus ¶
func (c *Client) GetLoaderJobStatus(ctx context.Context, params *GetLoaderJobStatusInput, optFns ...func(*Options)) (*GetLoaderJobStatusOutput, error)
Gets status information about a specified load job. Neptune keeps track of the most recent 1,024 bulk load jobs, and stores the last 10,000 error details per job.
See Neptune Loader Get-Status API for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetLoaderJobStatus IAM action in that cluster.
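A rough sketch of checking a load job's status; the LoadId field name on GetLoaderJobStatusInput is an assumption mirrored from CancelLoaderJobInput, so the output is printed whole rather than guessing at individual fields:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// loaderStatus fetches the status of a bulk load job by its load ID.
func loaderStatus(ctx context.Context, client *neptunedata.Client, loadID string) error {
    out, err := client.GetLoaderJobStatus(ctx, &neptunedata.GetLoaderJobStatusInput{
        LoadId: aws.String(loadID), // field name assumed
    })
    if err != nil {
        return err
    }
    fmt.Printf("load job status: %+v\n", out)
    return nil
}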
func (*Client) GetMLDataProcessingJob ¶
func (c *Client) GetMLDataProcessingJob(ctx context.Context, params *GetMLDataProcessingJobInput, optFns ...func(*Options)) (*GetMLDataProcessingJobOutput, error)
Retrieves information about a specified data processing job. See The dataprocessing command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetMLDataProcessingJobStatus IAM action in that cluster.
func (*Client) GetMLEndpoint ¶
func (c *Client) GetMLEndpoint(ctx context.Context, params *GetMLEndpointInput, optFns ...func(*Options)) (*GetMLEndpointOutput, error)
Retrieves details about an inference endpoint. See Managing inference endpoints using the endpoints command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetMLEndpointStatus IAM action in that cluster.
func (*Client) GetMLModelTrainingJob ¶
func (c *Client) GetMLModelTrainingJob(ctx context.Context, params *GetMLModelTrainingJobInput, optFns ...func(*Options)) (*GetMLModelTrainingJobOutput, error)
Retrieves information about a Neptune ML model training job. See Model training using the modeltraining command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetMLModelTrainingJobStatus IAM action in that cluster.
func (*Client) GetMLModelTransformJob ¶
func (c *Client) GetMLModelTransformJob(ctx context.Context, params *GetMLModelTransformJobInput, optFns ...func(*Options)) (*GetMLModelTransformJobOutput, error)
Gets information about a specified model transform job. See Use a trained model to generate new model artifacts.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetMLModelTransformJobStatus IAM action in that cluster.
func (*Client) GetOpenCypherQueryStatus ¶
func (c *Client) GetOpenCypherQueryStatus(ctx context.Context, params *GetOpenCypherQueryStatusInput, optFns ...func(*Options)) (*GetOpenCypherQueryStatusOutput, error)
Retrieves the status of a specified openCypher query.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster.
Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) GetPropertygraphStatistics ¶
func (c *Client) GetPropertygraphStatistics(ctx context.Context, params *GetPropertygraphStatisticsInput, optFns ...func(*Options)) (*GetPropertygraphStatisticsOutput, error)
Gets property graph statistics (Gremlin and openCypher).
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStatisticsStatus IAM action in that cluster.
func (*Client) GetPropertygraphStream ¶
func (c *Client) GetPropertygraphStream(ctx context.Context, params *GetPropertygraphStreamInput, optFns ...func(*Options)) (*GetPropertygraphStreamOutput, error)
Gets a stream for a property graph.
With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph.
The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1.
See Capturing graph changes in real time using Neptune streams.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions, depending on the query:
Note that you can restrict property-graph queries using the following IAM context keys:
- neptune-db:QueryLanguage:Gremlin
- neptune-db:QueryLanguage:OpenCypher
See Condition keys available in Neptune IAM data-access policy statements.
func (*Client) GetPropertygraphSummary ¶
func (c *Client) GetPropertygraphSummary(ctx context.Context, params *GetPropertygraphSummaryInput, optFns ...func(*Options)) (*GetPropertygraphSummaryOutput, error)
Gets a graph summary for a property graph.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetGraphSummary IAM action in that cluster.
func (*Client) GetRDFGraphSummary ¶
func (c *Client) GetRDFGraphSummary(ctx context.Context, params *GetRDFGraphSummaryInput, optFns ...func(*Options)) (*GetRDFGraphSummaryOutput, error)
Gets a graph summary for an RDF graph.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetGraphSummary IAM action in that cluster.
func (*Client) GetSparqlStatistics ¶
func (c *Client) GetSparqlStatistics(ctx context.Context, params *GetSparqlStatisticsInput, optFns ...func(*Options)) (*GetSparqlStatisticsOutput, error)
Gets RDF statistics (SPARQL).
func (*Client) GetSparqlStream ¶
func (c *Client) GetSparqlStream(ctx context.Context, params *GetSparqlStreamInput, optFns ...func(*Options)) (*GetSparqlStreamOutput, error)
Gets a stream for an RDF graph.
With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetSparqlStream lets you collect these change-log entries for an RDF graph.
The Neptune streams feature needs to be enabled on your Neptune DB cluster. To enable streams, set the neptune_streams DB cluster parameter to 1.
See Capturing graph changes in real time using Neptune streams.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster.
Note that the neptune-db:QueryLanguage:Sparql IAM condition key can be used in the policy document to restrict the use of SPARQL queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ListGremlinQueries ¶
func (c *Client) ListGremlinQueries(ctx context.Context, params *ListGremlinQueriesInput, optFns ...func(*Options)) (*ListGremlinQueriesOutput, error)
Lists active Gremlin queries. See Gremlin query status API for details about the output.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster.
Note that the neptune-db:QueryLanguage:Gremlin IAM condition key can be used in the policy document to restrict the use of Gremlin queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ListLoaderJobs ¶
func (c *Client) ListLoaderJobs(ctx context.Context, params *ListLoaderJobsInput, optFns ...func(*Options)) (*ListLoaderJobsOutput, error)
Retrieves a list of the loadIds for all active loader jobs.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ListLoaderJobs IAM action in that cluster.
func (*Client) ListMLDataProcessingJobs ¶
func (c *Client) ListMLDataProcessingJobs(ctx context.Context, params *ListMLDataProcessingJobsInput, optFns ...func(*Options)) (*ListMLDataProcessingJobsOutput, error)
Returns a list of Neptune ML data processing jobs. See Listing active data-processing jobs using the Neptune ML dataprocessing command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ListMLDataProcessingJobs IAM action in that cluster.
func (*Client) ListMLEndpoints ¶
func (c *Client) ListMLEndpoints(ctx context.Context, params *ListMLEndpointsInput, optFns ...func(*Options)) (*ListMLEndpointsOutput, error)
Lists existing inference endpoints. See Managing inference endpoints using the endpoints command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ListMLEndpoints IAM action in that cluster.
func (*Client) ListMLModelTrainingJobs ¶
func (c *Client) ListMLModelTrainingJobs(ctx context.Context, params *ListMLModelTrainingJobsInput, optFns ...func(*Options)) (*ListMLModelTrainingJobsOutput, error)
Lists Neptune ML model-training jobs. See Model training using the modeltraining command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ListMLModelTrainingJobs IAM action in that cluster.
func (*Client) ListMLModelTransformJobs ¶
func (c *Client) ListMLModelTransformJobs(ctx context.Context, params *ListMLModelTransformJobsInput, optFns ...func(*Options)) (*ListMLModelTransformJobsOutput, error)
Returns a list of model transform job IDs. See Use a trained model to generate new model artifacts.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ListMLModelTransformJobs IAM action in that cluster.
func (*Client) ListOpenCypherQueries ¶
func (c *Client) ListOpenCypherQueries(ctx context.Context, params *ListOpenCypherQueriesInput, optFns ...func(*Options)) (*ListOpenCypherQueriesOutput, error)
Lists active openCypher queries. See Neptune openCypher status endpoint for more information.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster.
Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
func (*Client) ManagePropertygraphStatistics ¶
func (c *Client) ManagePropertygraphStatistics(ctx context.Context, params *ManagePropertygraphStatisticsInput, optFns ...func(*Options)) (*ManagePropertygraphStatisticsOutput, error)
Manages the generation and use of property graph statistics.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ManageStatistics IAM action in that cluster.
func (*Client) ManageSparqlStatistics ¶
func (c *Client) ManageSparqlStatistics(ctx context.Context, params *ManageSparqlStatisticsInput, optFns ...func(*Options)) (*ManageSparqlStatisticsOutput, error)
Manages the generation and use of RDF graph statistics.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ManageStatistics IAM action in that cluster.
func (*Client) Options ¶ added in v1.3.0
func (c *Client) Options() Options
Options returns a copy of the client configuration.
Callers SHOULD NOT perform mutations on any inner structures within client config. Config overrides should instead be made on a per-operation basis through functional options.
func (*Client) StartLoaderJob ¶
func (c *Client) StartLoaderJob(ctx context.Context, params *StartLoaderJobInput, optFns ...func(*Options)) (*StartLoaderJobOutput, error)
Starts a Neptune bulk loader job to load data from an Amazon S3 bucket into a Neptune DB instance. See Using the Amazon Neptune Bulk Loader to Ingest Data.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:StartLoaderJob IAM action in that cluster.
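A hedged sketch of starting a CSV load from S3. The input field names (Source, Format, S3BucketRegion, IamRoleArn) and enum constants (types.FormatCsv, types.S3BucketRegionUsEast1) are assumptions based on the Neptune loader parameters, and the bucket and role ARN are placeholders:

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// startCSVLoad kicks off a bulk load from an S3 prefix into the cluster.
func startCSVLoad(ctx context.Context, client *neptunedata.Client) error {
    out, err := client.StartLoaderJob(ctx, &neptunedata.StartLoaderJobInput{
        Source:         aws.String("s3://your-bucket/neptune-data/"),            // placeholder
        Format:         types.FormatCsv,                                          // constant name assumed
        S3BucketRegion: types.S3BucketRegionUsEast1,                              // constant name assumed
        IamRoleArn:     aws.String("arn:aws:iam::123456789012:role/NeptuneLoad"), // placeholder
    })
    if err != nil {
        return err
    }
    // Print the whole output rather than guessing at individual fields.
    fmt.Printf("loader job started: %+v\n", out)
    return nil
}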
func (*Client) StartMLDataProcessingJob ¶
func (c *Client) StartMLDataProcessingJob(ctx context.Context, params *StartMLDataProcessingJobInput, optFns ...func(*Options)) (*StartMLDataProcessingJobOutput, error)
Creates a new Neptune ML data processing job for processing the graph data exported from Neptune for training. See The dataprocessing command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:StartMLModelDataProcessingJob IAM action in that cluster.
func (*Client) StartMLModelTrainingJob ¶
func (c *Client) StartMLModelTrainingJob(ctx context.Context, params *StartMLModelTrainingJobInput, optFns ...func(*Options)) (*StartMLModelTrainingJobOutput, error)
Creates a new Neptune ML model training job. See Model training using the modeltraining command.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:StartMLModelTrainingJob IAM action in that cluster.
func (*Client) StartMLModelTransformJob ¶
func (c *Client) StartMLModelTransformJob(ctx context.Context, params *StartMLModelTransformJobInput, optFns ...func(*Options)) (*StartMLModelTransformJobOutput, error)
Creates a new model transform job. See Use a trained model to generate new model artifacts.
When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:StartMLModelTransformJob IAM action in that cluster.
type CreateMLEndpointInput ¶
type CreateMLEndpointInput struct {
    // A unique identifier for the new inference endpoint. The default is an
    // autogenerated timestamped name.
    Id *string

    // The minimum number of Amazon EC2 instances to deploy to an endpoint for
    // prediction. The default is 1.
    InstanceCount *int32

    // The type of Neptune ML instance to use for online servicing. The default is
    // ml.m5.xlarge. Choosing the ML instance for an inference endpoint depends on the
    // task type, the graph size, and your budget.
    InstanceType *string

    // The job Id of the completed model-training job that has created the model that
    // the inference endpoint will point to. You must supply either the
    // mlModelTrainingJobId or the mlModelTransformJobId.
    MlModelTrainingJobId *string

    // The job Id of the completed model-transform job. You must supply either the
    // mlModelTrainingJobId or the mlModelTransformJobId.
    MlModelTransformJobId *string

    // Model type for training. By default the Neptune ML model is automatically based
    // on the modelType used in data processing, but you can specify a different model
    // type here. The default is rgcn for heterogeneous graphs and kge for knowledge
    // graphs. The only valid value for heterogeneous graphs is rgcn. Valid values for
    // knowledge graphs are: kge, transe, distmult, and rotate.
    ModelName *string

    // The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3
    // resources. This must be listed in your DB cluster parameter group or an error
    // will be thrown.
    NeptuneIamRoleArn *string

    // If set to true, update indicates that this is an update request. The default
    // is false. You must supply either the mlModelTrainingJobId or the
    // mlModelTransformJobId.
    Update *bool

    // The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to
    // encrypt data on the storage volume attached to the ML compute instances that run
    // the training job. The default is None.
    VolumeEncryptionKMSKey *string
    // contains filtered or unexported fields
}
type CreateMLEndpointOutput ¶
type CreateMLEndpointOutput struct {
    // The ARN for the new inference endpoint.
    Arn *string

    // The endpoint creation time, in milliseconds.
    CreationTimeInMillis *int64

    // The unique ID of the new inference endpoint.
    Id *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type DeleteMLEndpointInput ¶
type DeleteMLEndpointInput struct {
    // The unique identifier of the inference endpoint.
    //
    // This member is required.
    Id *string

    // If this flag is set to TRUE, all Neptune ML S3 artifacts should be deleted when
    // the job is stopped. The default is FALSE.
    Clean *bool

    // The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3
    // resources. This must be listed in your DB cluster parameter group or an error
    // will be thrown.
    NeptuneIamRoleArn *string
    // contains filtered or unexported fields
}
type DeleteMLEndpointOutput ¶
type DeleteMLEndpointOutput struct {
    // The status of the cancellation.
    Status *string

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type DeletePropertygraphStatisticsInput ¶
type DeletePropertygraphStatisticsInput struct {
// contains filtered or unexported fields
}
type DeletePropertygraphStatisticsOutput ¶
type DeletePropertygraphStatisticsOutput struct {
    // The deletion payload.
    Payload *types.DeleteStatisticsValueMap

    // The cancel status.
    Status *string

    // The HTTP response code: 200 if the delete was successful, or 204 if there were
    // no statistics to delete.
    StatusCode *int32

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type DeleteSparqlStatisticsInput ¶
type DeleteSparqlStatisticsInput struct {
// contains filtered or unexported fields
}
type DeleteSparqlStatisticsOutput ¶
type DeleteSparqlStatisticsOutput struct {
    // The deletion payload.
    Payload *types.DeleteStatisticsValueMap

    // The cancel status.
    Status *string

    // The HTTP response code: 200 if the delete was successful, or 204 if there were
    // no statistics to delete.
    StatusCode *int32

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type EndpointParameters ¶
type EndpointParameters struct {
    // The AWS region used to dispatch the request.
    //
    // Parameter is required.
    //
    // AWS::Region
    Region *string

    // When true, use the dual-stack endpoint. If the configured endpoint does not
    // support dual-stack, dispatching the request MAY return an error.
    //
    // Defaults to false if no value is provided.
    //
    // AWS::UseDualStack
    UseDualStack *bool

    // When true, send this request to the FIPS-compliant regional endpoint. If the
    // configured endpoint does not have a FIPS compliant endpoint, dispatching the
    // request will return an error.
    //
    // Defaults to false if no value is provided.
    //
    // AWS::UseFIPS
    UseFIPS *bool

    // Override the endpoint used to send this request.
    //
    // Parameter is required.
    //
    // SDK::Endpoint
    Endpoint *string
}
EndpointParameters provides the parameters that influence how endpoints are resolved.
func (EndpointParameters) ValidateRequired ¶
func (p EndpointParameters) ValidateRequired() error
ValidateRequired validates required parameters are set.
func (EndpointParameters) WithDefaults ¶
func (p EndpointParameters) WithDefaults() EndpointParameters
WithDefaults returns a shallow copy of EndpointParameters with default values applied to members where applicable.
type EndpointResolver ¶
type EndpointResolver interface {
ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
}
EndpointResolver interface for resolving service endpoints.
func EndpointResolverFromURL ¶
func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver
EndpointResolverFromURL returns an EndpointResolver configured using the provided endpoint URL. By default, the resolved endpoint resolver uses the client region as signing region, and the endpoint source is set to EndpointSourceCustom. You can provide functional options to configure endpoint values for the resolved endpoint.
type EndpointResolverFunc ¶
type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
EndpointResolverFunc is a helper utility that wraps a function so it satisfies the EndpointResolver interface. This is useful when you want to add additional endpoint resolving logic, or stub out specific endpoints with custom values.
func (EndpointResolverFunc) ResolveEndpoint ¶
func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error)
type EndpointResolverOptions ¶
type EndpointResolverOptions = internalendpoints.Options
EndpointResolverOptions is the service endpoint resolver options
type EndpointResolverV2 ¶
type EndpointResolverV2 interface {
    // ResolveEndpoint attempts to resolve the endpoint with the provided options,
    // returning the endpoint if found. Otherwise an error is returned.
    ResolveEndpoint(ctx context.Context, params EndpointParameters) (
        smithyendpoints.Endpoint, error,
    )
}
EndpointResolverV2 provides the interface for resolving service endpoints.
func NewDefaultEndpointResolverV2 ¶
func NewDefaultEndpointResolverV2() EndpointResolverV2
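As a sketch, a custom EndpointResolverV2 that always returns a fixed endpoint might look like the following, assuming the smithy-go endpoints package exposes an Endpoint type with a URI field; for a single fixed endpoint, setting BaseEndpoint (see above) is usually simpler:

package example

import (
    "context"
    "net/url"

    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/neptunedata"
    smithyendpoints "github.com/aws/smithy-go/endpoints"
)

// staticResolver ignores the endpoint parameters and always returns the
// same endpoint.
type staticResolver struct {
    endpoint url.URL
}

func (r *staticResolver) ResolveEndpoint(ctx context.Context, params neptunedata.EndpointParameters) (smithyendpoints.Endpoint, error) {
    return smithyendpoints.Endpoint{URI: r.endpoint}, nil
}

// newClientWithResolverV2 wires the resolver into a client; the endpoint
// URL is a placeholder.
func newClientWithResolverV2(ctx context.Context) (*neptunedata.Client, error) {
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        return nil, err
    }
    u, err := url.Parse("https://your-neptune-endpoint:8182")
    if err != nil {
        return nil, err
    }
    client := neptunedata.NewFromConfig(cfg,
        neptunedata.WithEndpointResolverV2(&staticResolver{endpoint: *u}))
    return client, nil
}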
type ExecuteFastResetInput ¶
type ExecuteFastResetInput struct {
    // The fast reset action. One of the following values:
    //
    //   - initiateDatabaseReset – This action generates a unique token needed to
    //     actually perform the fast reset.
    //
    //   - performDatabaseReset – This action uses the token generated by the
    //     initiateDatabaseReset action to actually perform the fast reset.
    //
    // This member is required.
    Action types.Action

    // The fast-reset token to initiate the reset.
    Token *string
    // contains filtered or unexported fields
}
type ExecuteFastResetOutput ¶
type ExecuteFastResetOutput struct {
    // The status is only returned for the performDatabaseReset action, and indicates
    // whether or not the fast reset request is accepted.
    //
    // This member is required.
    Status *string

    // The payload is only returned by the initiateDatabaseReset action, and contains
    // the unique token to use with the performDatabaseReset action to make the reset
    // occur.
    Payload *types.FastResetToken

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type ExecuteGremlinExplainQueryInput ¶
type ExecuteGremlinExplainQueryInput struct {
    // The Gremlin explain query string.
    //
    // This member is required.
    GremlinQuery *string
    // contains filtered or unexported fields
}
type ExecuteGremlinExplainQueryOutput ¶
type ExecuteGremlinExplainQueryOutput struct {
    // A text blob containing the Gremlin explain result, as described in [Tuning Gremlin queries].
    //
    // [Tuning Gremlin queries]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-traversal-tuning.html
    //
    // This value conforms to the media type: text/plain
    Output []byte

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type ExecuteGremlinProfileQueryInput ¶
type ExecuteGremlinProfileQueryInput struct {
    // The Gremlin query string to profile.
    //
    // This member is required.
    GremlinQuery *string

    // If non-zero, causes the results string to be truncated at that number of
    // characters. If set to zero, the string contains all the results.
    Chop *int32

    // If this flag is set to TRUE, the results include a detailed report of all index
    // operations that took place during query execution and serialization.
    IndexOps *bool

    // If this flag is set to TRUE, the query results are gathered and displayed as
    // part of the profile report. If FALSE, only the result count is displayed.
    Results *bool

    // If non-null, the gathered results are returned in a serialized response message
    // in the format specified by this parameter. See [Gremlin profile API in Neptune] for more information.
    //
    // [Gremlin profile API in Neptune]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-profile-api.html
    Serializer *string
    // contains filtered or unexported fields
}
type ExecuteGremlinProfileQueryOutput ¶
type ExecuteGremlinProfileQueryOutput struct {
    // A text blob containing the Gremlin Profile result. See [Gremlin profile API in Neptune] for details.
    //
    // [Gremlin profile API in Neptune]: https://docs.aws.amazon.com/neptune/latest/userguide/gremlin-profile-api.html
    //
    // This value conforms to the media type: text/plain
    Output []byte

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type ExecuteGremlinQueryInput ¶
type ExecuteGremlinQueryInput struct {
    // Using this API, you can run Gremlin queries in string format much as you can
    // using the HTTP endpoint. The interface is compatible with whatever Gremlin
    // version your DB cluster is using (see the [Tinkerpop client section] to determine which Gremlin releases
    // your engine version supports).
    //
    // [Tinkerpop client section]: https://docs.aws.amazon.com/neptune/latest/userguide/access-graph-gremlin-client.html#best-practices-gremlin-java-latest
    //
    // This member is required.
    GremlinQuery *string

    // If non-null, the query results are returned in a serialized response message in
    // the format specified by this parameter. See the [GraphSON] section in the TinkerPop
    // documentation for a list of the formats that are currently supported.
    //
    // [GraphSON]: https://tinkerpop.apache.org/docs/current/reference/#_graphson
    Serializer *string
    // contains filtered or unexported fields
}
type ExecuteGremlinQueryOutput ¶
type ExecuteGremlinQueryOutput struct {
    // Metadata about the Gremlin query.
    Meta document.Interface

    // The unique identifier of the Gremlin query.
    RequestId *string

    // The Gremlin query output from the server.
    Result document.Interface

    // The status of the Gremlin query.
    Status *types.GremlinQueryStatusAttributes

    // Metadata pertaining to the operation's result.
    ResultMetadata middleware.Metadata
    // contains filtered or unexported fields
}
type ExecuteOpenCypherExplainQueryInput ¶
type ExecuteOpenCypherExplainQueryInput struct { // The openCypher explain mode. Can be one of: static , dynamic , or details . // // This member is required. ExplainMode types.OpenCypherExplainMode // The openCypher query string. // // This member is required. OpenCypherQuery *string // The openCypher query parameters. Parameters *string // contains filtered or unexported fields }
type ExecuteOpenCypherExplainQueryOutput ¶
type ExecuteOpenCypherExplainQueryOutput struct { // A text blob containing the openCypher explain results. // // This member is required. Results []byte // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
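A minimal sketch of requesting an openCypher explain plan; the query is a placeholder, and the explain mode is passed as a plain string conversion of the documented details value rather than a named enum constant.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// explainOpenCypher prints the explain output for a query. The client is
// assumed to be configured against a Neptune cluster endpoint.
func explainOpenCypher(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.ExecuteOpenCypherExplainQuery(ctx, &neptunedata.ExecuteOpenCypherExplainQueryInput{
		OpenCypherQuery: aws.String("MATCH (n) RETURN n LIMIT 10"),
		// "details" is one of the documented modes (static, dynamic, details).
		ExplainMode: types.OpenCypherExplainMode("details"),
	})
	if err != nil {
		return err
	}
	// Results is a text blob containing the explain output.
	fmt.Println(string(out.Results))
	return nil
}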
type ExecuteOpenCypherQueryInput ¶
type ExecuteOpenCypherQueryInput struct { // The openCypher query string to be executed. // // This member is required. OpenCypherQuery *string // The openCypher query parameters for query execution. See [Examples of openCypher parameterized queries] for more information. // // [Examples of openCypher parameterized queries]: https://docs.aws.amazon.com/neptune/latest/userguide/opencypher-parameterized-queries.html Parameters *string // contains filtered or unexported fields }
type ExecuteOpenCypherQueryOutput ¶
type ExecuteOpenCypherQueryOutput struct { // The openCypher query results. // // This member is required. Results document.Interface // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
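For parameterized queries, the Parameters field carries the parameter map as a JSON-encoded string. A hedged sketch (the query, the parameter name, and the generic document decoding are illustrative):

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// runParameterizedOpenCypher runs a query whose $name parameter is supplied
// as a JSON-encoded parameter object.
func runParameterizedOpenCypher(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.ExecuteOpenCypherQuery(ctx, &neptunedata.ExecuteOpenCypherQueryInput{
		OpenCypherQuery: aws.String("MATCH (n {name: $name}) RETURN n"),
		Parameters:      aws.String(`{"name": "alice"}`),
	})
	if err != nil {
		return err
	}

	// Results is an untyped document; decode it generically to inspect it.
	var results interface{}
	if err := out.Results.UnmarshalSmithyDocument(&results); err != nil {
		return err
	}
	fmt.Printf("openCypher results: %v\n", results)
	return nil
}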
type GetEngineStatusInput ¶
type GetEngineStatusInput struct {
// contains filtered or unexported fields
}
type GetEngineStatusOutput ¶
type GetEngineStatusOutput struct { // Set to the Neptune engine version running on your DB cluster. If this engine // version has been manually patched since it was released, the version number is // prefixed by Patch- . DbEngineVersion *string // Set to enabled if the DFE engine is fully enabled, or to viaQueryHint (the // default) if the DFE engine is only used with queries that have the useDFE query // hint set to true . DfeQueryEngine *string // Contains status information about the features enabled on your DB cluster. Features map[string]document.Interface // Contains information about the Gremlin query language available on your // cluster. Specifically, it contains a version field that specifies the current // TinkerPop version being used by the engine. Gremlin *types.QueryLanguageVersion // Contains Lab Mode settings being used by the engine. LabMode map[string]string // Contains information about the openCypher query language available on your // cluster. Specifically, it contains a version field that specifies the current // openCypher version being used by the engine. Opencypher *types.QueryLanguageVersion // Set to reader if the instance is a read-replica, or to writer if the instance // is the primary instance. Role *string // If there are transactions being rolled back, this field is set to the number of // such transactions. If there are none, the field doesn't appear at all. RollingBackTrxCount *int32 // Set to the start time of the earliest transaction being rolled back. If no // transactions are being rolled back, the field doesn't appear at all. RollingBackTrxEarliestStartTime *string // Contains information about the current settings on your DB cluster. For // example, contains the current cluster query timeout setting ( // clusterQueryTimeoutInMs ). Settings map[string]string // Contains information about the SPARQL query language available on your cluster. // Specifically, it contains a version field that specifies the current SPARQL // version being used by the engine. Sparql *types.QueryLanguageVersion // Set to the UTC time at which the current server process started. StartTime *string // Set to healthy if the instance is not experiencing problems. If the instance is // recovering from a crash or from being rebooted and there are active transactions // running from the latest server shutdown, status is set to recovery . Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
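Since GetEngineStatusInput has no members, a health check reduces to a single call. A minimal sketch; the Version field assumed on types.QueryLanguageVersion follows from the description above but is not spelled out here.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// printEngineStatus reports basic health and version information.
func printEngineStatus(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.GetEngineStatus(ctx, &neptunedata.GetEngineStatusInput{})
	if err != nil {
		return err
	}
	fmt.Println("status:", aws.ToString(out.Status))
	fmt.Println("role:", aws.ToString(out.Role))
	fmt.Println("engine version:", aws.ToString(out.DbEngineVersion))
	fmt.Println("DFE engine:", aws.ToString(out.DfeQueryEngine))
	if out.Gremlin != nil {
		// Version is assumed to be the field carrying the TinkerPop version.
		fmt.Println("TinkerPop version:", aws.ToString(out.Gremlin.Version))
	}
	return nil
}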
type GetGremlinQueryStatusInput ¶
type GetGremlinQueryStatusInput struct { // The unique identifier of the Gremlin query. // // This member is required. QueryId *string // contains filtered or unexported fields }
type GetGremlinQueryStatusOutput ¶
type GetGremlinQueryStatusOutput struct { // The evaluation status of the Gremlin query. QueryEvalStats *types.QueryEvalStats // The ID of the query for which status is being returned. QueryId *string // The Gremlin query string. QueryString *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetLoaderJobStatusInput ¶
type GetLoaderJobStatusInput struct { // The load ID of the load job to get the status of. // // This member is required. LoadId *string // Flag indicating whether or not to include details beyond the overall status ( // TRUE or FALSE ; the default is FALSE ). Details *bool // Flag indicating whether or not to include a list of errors encountered ( TRUE // or FALSE ; the default is FALSE ). // // The list of errors is paged. The page and errorsPerPage parameters allow you to // page through all the errors. Errors *bool // The number of errors returned in each page (a positive integer; the default is // 10 ). Only valid when the errors parameter is set to TRUE . ErrorsPerPage *int32 // The error page number (a positive integer; the default is 1 ). Only valid when // the errors parameter is set to TRUE . Page *int32 // contains filtered or unexported fields }
type GetLoaderJobStatusOutput ¶
type GetLoaderJobStatusOutput struct { // Status information about the load job, in a layout that could look like this: // // This member is required. Payload document.Interface // The HTTP response code for the request. // // This member is required. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
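A sketch of polling one bulk-load job with details and the first page of errors enabled; the load ID would come from StartLoaderJob, and the payload is decoded generically because its exact layout is documented only in the Neptune loader guide.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// printLoaderJobStatus fetches the status of one bulk-load job, including
// the first page of any per-record errors.
func printLoaderJobStatus(ctx context.Context, client *neptunedata.Client, loadID string) error {
	out, err := client.GetLoaderJobStatus(ctx, &neptunedata.GetLoaderJobStatusInput{
		LoadId:        aws.String(loadID),
		Details:       aws.Bool(true),
		Errors:        aws.Bool(true),
		ErrorsPerPage: aws.Int32(10),
		Page:          aws.Int32(1),
	})
	if err != nil {
		return err
	}
	fmt.Println("HTTP status:", aws.ToString(out.Status))

	// Payload is an untyped document; decode it into a generic value.
	var payload interface{}
	if err := out.Payload.UnmarshalSmithyDocument(&payload); err != nil {
		return err
	}
	fmt.Printf("payload: %v\n", payload)
	return nil
}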
type GetMLDataProcessingJobInput ¶
type GetMLDataProcessingJobInput struct { // The unique identifier of the data-processing job to be retrieved. // // This member is required. Id *string // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type GetMLDataProcessingJobOutput ¶
type GetMLDataProcessingJobOutput struct { // The unique identifier of this data-processing job. Id *string // Definition of the data processing job. ProcessingJob *types.MlResourceDefinition // Status of the data processing job. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetMLEndpointInput ¶
type GetMLEndpointInput struct { // The unique identifier of the inference endpoint. // // This member is required. Id *string // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type GetMLEndpointOutput ¶
type GetMLEndpointOutput struct { // The endpoint definition. Endpoint *types.MlResourceDefinition // The endpoint configuration. EndpointConfig *types.MlConfigDefinition // The unique identifier of the inference endpoint. Id *string // The status of the inference endpoint. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetMLModelTrainingJobInput ¶
type GetMLModelTrainingJobInput struct { // The unique identifier of the model-training job to retrieve. // // This member is required. Id *string // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type GetMLModelTrainingJobOutput ¶
type GetMLModelTrainingJobOutput struct { // The HPO job. HpoJob *types.MlResourceDefinition // The unique identifier of this model-training job. Id *string // A list of the configurations of the ML models being used. MlModels []types.MlConfigDefinition // The model transform job. ModelTransformJob *types.MlResourceDefinition // The data processing job. ProcessingJob *types.MlResourceDefinition // The status of the model training job. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetMLModelTransformJobInput ¶
type GetMLModelTransformJobInput struct { // The unique identifier of the model-transform job to be retrieved. // // This member is required. Id *string // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type GetMLModelTransformJobOutput ¶
type GetMLModelTransformJobOutput struct { // The base data processing job. BaseProcessingJob *types.MlResourceDefinition // The unique identifier of the model-transform job to be retrieved. Id *string // A list of the configuration information for the models being used. Models []types.MlConfigDefinition // The remote model transform job. RemoteModelTransformJob *types.MlResourceDefinition // The status of the model-transform job. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetOpenCypherQueryStatusInput ¶
type GetOpenCypherQueryStatusInput struct { // The unique ID of the openCypher query for which to retrieve the query status. // // This member is required. QueryId *string // contains filtered or unexported fields }
type GetOpenCypherQueryStatusOutput ¶
type GetOpenCypherQueryStatusOutput struct { // The openCypher query evaluation status. QueryEvalStats *types.QueryEvalStats // The unique ID of the query for which status is being returned. QueryId *string // The openCypher query string. QueryString *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetPropertygraphStatisticsInput ¶
type GetPropertygraphStatisticsInput struct {
// contains filtered or unexported fields
}
type GetPropertygraphStatisticsOutput ¶
type GetPropertygraphStatisticsOutput struct { // Statistics for property-graph data. // // This member is required. Payload *types.Statistics // The HTTP return code of the request. If the request succeeded, the code is 200. // See [Common error codes for DFE statistics request]for a list of common errors. // // [Common error codes for DFE statistics request]: https://docs.aws.amazon.com/neptune/latest/userguide/neptune-dfe-statistics.html#neptune-dfe-statistics-errors // // This member is required. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetPropertygraphStreamInput ¶
type GetPropertygraphStreamInput struct { // The commit number of the starting record to read from the change-log stream. // This parameter is required when iteratorType is AT_SEQUENCE_NUMBER or // AFTER_SEQUENCE_NUMBER , and ignored when iteratorType is TRIM_HORIZON or LATEST . CommitNum *int64 // If set to TRUE, Neptune compresses the response using gzip encoding. Encoding types.Encoding // Can be one of: // // - AT_SEQUENCE_NUMBER – Indicates that reading should start from the event // sequence number specified jointly by the commitNum and opNum parameters. // // - AFTER_SEQUENCE_NUMBER – Indicates that reading should start right after the // event sequence number specified jointly by the commitNum and opNum parameters. // // - TRIM_HORIZON – Indicates that reading should start at the last untrimmed // record in the system, which is the oldest unexpired (not yet deleted) record in // the change-log stream. // // - LATEST – Indicates that reading should start at the most recent record in // the system, which is the latest unexpired (not yet deleted) record in the // change-log stream. IteratorType types.IteratorType // Specifies the maximum number of records to return. There is also a size limit // of 10 MB on the response that can't be modified and that takes precedence over // the number of records specified in the limit parameter. The response does // include a threshold-breaching record if the 10 MB limit was reached. // // The range for limit is 1 to 100,000, with a default of 10. Limit *int64 // The operation sequence number within the specified commit to start reading from // in the change-log stream data. The default is 1 . OpNum *int64 // contains filtered or unexported fields }
type GetPropertygraphStreamOutput ¶
type GetPropertygraphStreamOutput struct { // Serialization format for the change records being returned. Currently, the only // supported value is PG_JSON . // // This member is required. Format *string // Sequence identifier of the last change in the stream response. // // An event ID is composed of two fields: a commitNum , which identifies a // transaction that changed the graph, and an opNum , which identifies a specific // operation within that transaction: // // This member is required. LastEventId map[string]string // The time at which the commit for the transaction was requested, in milliseconds // from the Unix epoch. // // This member is required. LastTrxTimestampInMillis *int64 // An array of serialized change-log stream records included in the response. // // This member is required. Records []types.PropertygraphRecord // The total number of records in the response. // // This member is required. TotalRecords *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
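The commitNum/opNum pair in LastEventId is what lets a caller resume reading where the previous response stopped. A sketch of one read plus the follow-up request it would issue next; the commitNum and opNum map keys and the uppercase iterator-type strings are assumptions based on the descriptions above.

package examples

import (
	"context"
	"fmt"
	"strconv"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// readPropertygraphStream reads the oldest available change records and
// builds the request that would continue after the last record returned.
func readPropertygraphStream(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.GetPropertygraphStream(ctx, &neptunedata.GetPropertygraphStreamInput{
		IteratorType: types.IteratorType("TRIM_HORIZON"),
		Limit:        aws.Int64(100),
	})
	if err != nil {
		return err
	}
	fmt.Printf("read %d records (format %s)\n",
		aws.ToInt32(out.TotalRecords), aws.ToString(out.Format))

	// Resume after the last event using its commitNum/opNum pair.
	commitNum, err := strconv.ParseInt(out.LastEventId["commitNum"], 10, 64)
	if err != nil {
		return err
	}
	opNum, err := strconv.ParseInt(out.LastEventId["opNum"], 10, 64)
	if err != nil {
		return err
	}
	next := &neptunedata.GetPropertygraphStreamInput{
		IteratorType: types.IteratorType("AFTER_SEQUENCE_NUMBER"),
		CommitNum:    aws.Int64(commitNum),
		OpNum:        aws.Int64(opNum),
		Limit:        aws.Int64(100),
	}
	_ = next // issue the next call with this input to keep reading
	return nil
}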
type GetPropertygraphSummaryInput ¶
type GetPropertygraphSummaryInput struct { // Mode can take one of two values: BASIC (the default), and DETAILED . Mode types.GraphSummaryType // contains filtered or unexported fields }
type GetPropertygraphSummaryOutput ¶
type GetPropertygraphSummaryOutput struct { // Payload containing the property graph summary response. Payload *types.PropertygraphSummaryValueMap // The HTTP return code of the request. If the request succeeded, the code is 200. StatusCode *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetRDFGraphSummaryInput ¶
type GetRDFGraphSummaryInput struct { // Mode can take one of two values: BASIC (the default), and DETAILED . Mode types.GraphSummaryType // contains filtered or unexported fields }
type GetRDFGraphSummaryOutput ¶
type GetRDFGraphSummaryOutput struct { // Payload for an RDF graph summary response Payload *types.RDFGraphSummaryValueMap // The HTTP return code of the request. If the request succeeded, the code is 200. StatusCode *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetSparqlStatisticsInput ¶
type GetSparqlStatisticsInput struct {
// contains filtered or unexported fields
}
type GetSparqlStatisticsOutput ¶
type GetSparqlStatisticsOutput struct { // Statistics for RDF data. // // This member is required. Payload *types.Statistics // The HTTP return code of the request. If the request succeeded, the code is 200. // See [Common error codes for DFE statistics request]for a list of common errors. // // When invoking this operation in a Neptune cluster that has IAM authentication // enabled, the IAM user or role making the request must have a policy attached // that allows the [neptune-db:GetStatisticsStatus]IAM action in that cluster. // // [Common error codes for DFE statistics request]: https://docs.aws.amazon.com/neptune/latest/userguide/neptune-dfe-statistics.html#neptune-dfe-statistics-errors // [neptune-db:GetStatisticsStatus]: https://docs.aws.amazon.com/neptune/latest/userguide/iam-dp-actions.html#getstatisticsstatus // // This member is required. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type GetSparqlStreamInput ¶
type GetSparqlStreamInput struct { // The commit number of the starting record to read from the change-log stream. // This parameter is required when iteratorType is AT_SEQUENCE_NUMBER or // AFTER_SEQUENCE_NUMBER , and ignored when iteratorType is TRIM_HORIZON or LATEST . CommitNum *int64 // If set to TRUE, Neptune compresses the response using gzip encoding. Encoding types.Encoding // Can be one of: // // - AT_SEQUENCE_NUMBER – Indicates that reading should start from the event // sequence number specified jointly by the commitNum and opNum parameters. // // - AFTER_SEQUENCE_NUMBER – Indicates that reading should start right after the // event sequence number specified jointly by the commitNum and opNum parameters. // // - TRIM_HORIZON – Indicates that reading should start at the last untrimmed // record in the system, which is the oldest unexpired (not yet deleted) record in // the change-log stream. // // - LATEST – Indicates that reading should start at the most recent record in // the system, which is the latest unexpired (not yet deleted) record in the // change-log stream. IteratorType types.IteratorType // Specifies the maximum number of records to return. There is also a size limit // of 10 MB on the response that can't be modified and that takes precedence over // the number of records specified in the limit parameter. The response does // include a threshold-breaching record if the 10 MB limit was reached. // // The range for limit is 1 to 100,000, with a default of 10. Limit *int64 // The operation sequence number within the specified commit to start reading from // in the change-log stream data. The default is 1 . OpNum *int64 // contains filtered or unexported fields }
type GetSparqlStreamOutput ¶
type GetSparqlStreamOutput struct { // Serialization format for the change records being returned. Currently, the only // supported value is NQUADS . // // This member is required. Format *string // Sequence identifier of the last change in the stream response. // // An event ID is composed of two fields: a commitNum , which identifies a // transaction that changed the graph, and an opNum , which identifies a specific // operation within that transaction: // // This member is required. LastEventId map[string]string // The time at which the commit for the transaction was requested, in milliseconds // from the Unix epoch. // // This member is required. LastTrxTimestampInMillis *int64 // An array of serialized change-log stream records included in the response. // // This member is required. Records []types.SparqlRecord // The total number of records in the response. // // This member is required. TotalRecords *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type HTTPSignerV4 ¶
type ListGremlinQueriesInput ¶
type ListGremlinQueriesInput struct { // If set to TRUE , the list returned includes waiting queries. The default is // FALSE . IncludeWaiting *bool // contains filtered or unexported fields }
type ListGremlinQueriesOutput ¶
type ListGremlinQueriesOutput struct { // The number of queries that have been accepted but not yet completed, including // queries in the queue. AcceptedQueryCount *int32 // A list of the current queries. Queries []types.GremlinQueryStatus // The number of Gremlin queries currently running. RunningQueryCount *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
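A sketch of listing running and waiting Gremlin queries; the QueryId and QueryString fields on each entry are assumed to mirror the GetGremlinQueryStatus output, and any entry could be passed on to CancelGremlinQuery by its ID.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// listGremlinQueries prints the queries currently known to the server,
// including those still waiting in the queue.
func listGremlinQueries(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.ListGremlinQueries(ctx, &neptunedata.ListGremlinQueriesInput{
		IncludeWaiting: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	fmt.Printf("accepted: %d, running: %d\n",
		aws.ToInt32(out.AcceptedQueryCount), aws.ToInt32(out.RunningQueryCount))
	for _, q := range out.Queries {
		fmt.Printf("  %s  %s\n", aws.ToString(q.QueryId), aws.ToString(q.QueryString))
	}
	return nil
}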
type ListLoaderJobsInput ¶
type ListLoaderJobsInput struct { // An optional parameter that can be set to FALSE to exclude the load IDs of // queued load requests from the returned list. The default value is TRUE . IncludeQueuedLoads *bool // The number of load IDs to list. Must be a positive integer greater than zero // and not more than 100 (which is the default). Limit *int32 // contains filtered or unexported fields }
type ListLoaderJobsOutput ¶
type ListLoaderJobsOutput struct { // The requested list of job IDs. // // This member is required. Payload *types.LoaderIdResult // Returns the status of the job list request. // // This member is required. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ListMLDataProcessingJobsInput ¶
type ListMLDataProcessingJobsInput struct { // The maximum number of items to return (from 1 to 1024; the default is 10). MaxItems *int32 // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type ListMLDataProcessingJobsOutput ¶
type ListMLDataProcessingJobsOutput struct { // A page listing data processing job IDs. Ids []string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ListMLEndpointsInput ¶
type ListMLEndpointsInput struct { // The maximum number of items to return (from 1 to 1024; the default is 10). MaxItems *int32 // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type ListMLEndpointsOutput ¶
type ListMLEndpointsOutput struct { // A page from the list of inference endpoint IDs. Ids []string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ListMLModelTrainingJobsInput ¶
type ListMLModelTrainingJobsInput struct { // The maximum number of items to return (from 1 to 1024; the default is 10). MaxItems *int32 // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type ListMLModelTrainingJobsOutput ¶
type ListMLModelTrainingJobsOutput struct { // A page of the list of model training job IDs. Ids []string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ListMLModelTransformJobsInput ¶
type ListMLModelTransformJobsInput struct { // The maximum number of items to return (from 1 to 1024; the default is 10). MaxItems *int32 // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // contains filtered or unexported fields }
type ListMLModelTransformJobsOutput ¶
type ListMLModelTransformJobsOutput struct { // A page from the list of model transform IDs. Ids []string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ListOpenCypherQueriesInput ¶
type ListOpenCypherQueriesInput struct { // When set to TRUE and other parameters are not present, status information is // returned for waiting queries as well as for running queries. IncludeWaiting *bool // contains filtered or unexported fields }
type ListOpenCypherQueriesOutput ¶
type ListOpenCypherQueriesOutput struct { // The number of queries that have been accepted but not yet completed, including // queries in the queue. AcceptedQueryCount *int32 // A list of current openCypher queries. Queries []types.GremlinQueryStatus // The number of currently running openCypher queries. RunningQueryCount *int32 // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type ManagePropertygraphStatisticsInput ¶
type ManagePropertygraphStatisticsInput struct { // The statistics generation mode. One of: DISABLE_AUTOCOMPUTE , ENABLE_AUTOCOMPUTE // , or REFRESH , the last of which manually triggers DFE statistics generation. Mode types.StatisticsAutoGenerationMode // contains filtered or unexported fields }
type ManagePropertygraphStatisticsOutput ¶
type ManagePropertygraphStatisticsOutput struct { // The HTTP return code of the request. If the request succeeded, the code is 200. // // This member is required. Status *string // This is only returned for refresh mode. Payload *types.RefreshStatisticsIdMap // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
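A sketch of manually triggering DFE statistics generation; the enum constant name is assumed from the documented REFRESH mode, and only the HTTP status is inspected because the refresh payload's fields are not listed here.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// refreshPropertygraphStatistics manually triggers statistics generation.
func refreshPropertygraphStatistics(ctx context.Context, client *neptunedata.Client) error {
	out, err := client.ManagePropertygraphStatistics(ctx, &neptunedata.ManagePropertygraphStatisticsInput{
		// Constant name assumed from the documented REFRESH mode.
		Mode: types.StatisticsAutoGenerationModeRefresh,
	})
	if err != nil {
		return err
	}
	// Payload is only present for refresh mode; Status carries the HTTP code.
	fmt.Println("status:", aws.ToString(out.Status))
	return nil
}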
type ManageSparqlStatisticsInput ¶
type ManageSparqlStatisticsInput struct { // The statistics generation mode. One of: DISABLE_AUTOCOMPUTE , ENABLE_AUTOCOMPUTE // , or REFRESH , the last of which manually triggers DFE statistics generation. Mode types.StatisticsAutoGenerationMode // contains filtered or unexported fields }
type ManageSparqlStatisticsOutput ¶
type ManageSparqlStatisticsOutput struct { // The HTTP return code of the request. If the request succeeded, the code is 200. // // This member is required. Status *string // This is only returned for refresh mode. Payload *types.RefreshStatisticsIdMap // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type Options ¶
type Options struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error // Indicates how the AWS account ID is applied in endpoint 2.0 routing. AccountIDEndpointMode aws.AccountIDEndpointMode // The optional application specific identifier appended to the User-Agent header. AppID string // This endpoint will be given as input to an EndpointResolverV2. It is used for // providing a custom base endpoint that is subject to modifications by the // processing EndpointResolverV2. BaseEndpoint *string // Configures the events that will be sent to the configured logger. ClientLogMode aws.ClientLogMode // The credentials object to use when signing requests. Credentials aws.CredentialsProvider // The configuration DefaultsMode that the SDK should use when constructing the // client's initial default settings. DefaultsMode aws.DefaultsMode // The endpoint options to be used when attempting to resolve an endpoint. EndpointOptions EndpointResolverOptions // The service endpoint resolver. // // Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and // BaseEndpoint. // // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be // used over the deprecated EndpointResolver. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer HTTPSignerV4 HTTPSignerV4 // The logger writer interface to write logging messages to. Logger logging.Logger // The region to send requests to. (Required) Region string // RetryMaxAttempts specifies the maximum number of attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify // per operation call's retry max attempts. // // If specified in an operation call's functional options with a value that is // different than the constructed client's Options, the Client's Retryer will be // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if // Retryer option is not also specified. // // When creating a new API client, this member will only be used if the Retryer // Options member is nil. This value will be ignored if Retryer is not nil. // // Currently does not support per operation call overrides; it may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable // failures. When nil the API client will use a default retryer. The kind of // default retry created by the API client can be changed with the RetryMode // option. Retryer aws.Retryer // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You // should not populate this structure programmatically, or rely on the values here // within your applications. RuntimeEnvironment aws.RuntimeEnvironment // The HTTP client to invoke API calls with. Defaults to the client's default HTTP // implementation if nil. HTTPClient HTTPClient // The auth scheme resolver which determines how to authenticate for each // operation. AuthSchemeResolver AuthSchemeResolver // The list of auth schemes supported by the client. AuthSchemes []smithyhttp.AuthScheme // contains filtered or unexported fields }
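Most of these options are fixed when the client is constructed, but every operation also accepts functional options that adjust a copy of Options for that single call. A small sketch of raising the retry budget for one request:

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// engineStatusWithRetries issues one GetEngineStatus call with a larger
// per-call retry budget, leaving the client's defaults untouched.
func engineStatusWithRetries(ctx context.Context, client *neptunedata.Client) (*neptunedata.GetEngineStatusOutput, error) {
	return client.GetEngineStatus(ctx, &neptunedata.GetEngineStatusInput{},
		func(o *neptunedata.Options) {
			o.RetryMaxAttempts = 5
		})
}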
func (Options) GetIdentityResolver ¶ added in v1.2.2
func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver
type ResolveEndpoint ¶
type ResolveEndpoint struct { Resolver EndpointResolver Options EndpointResolverOptions }
func (*ResolveEndpoint) HandleSerialize ¶
func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, )
func (*ResolveEndpoint) ID ¶
func (*ResolveEndpoint) ID() string
type StartLoaderJobInput ¶
type StartLoaderJobInput struct { // The format of the data. For more information about data formats for the Neptune // Loader command, see [Load Data Formats]. // // Allowed values // // - csv for the [Gremlin CSV data format]. // // - opencypher for the [openCypher CSV data format]. // // - ntriples for the [N-Triples RDF data format]. // // - nquads for the [N-Quads RDF data format]. // // - rdfxml for the [RDF/XML RDF data format]. // // - turtle for the [Turtle RDF data format]. // // [RDF/XML RDF data format]: https://www.w3.org/TR/rdf-syntax-grammar/ // [Gremlin CSV data format]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-format-gremlin.html // [N-Triples RDF data format]: https://www.w3.org/TR/n-triples/ // [openCypher CSV data format]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-format-opencypher.html // [Turtle RDF data format]: https://www.w3.org/TR/turtle/ // [Load Data Formats]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-format.html // [N-Quads RDF data format]: https://www.w3.org/TR/n-quads/ // // This member is required. Format types.Format // The Amazon Resource Name (ARN) for an IAM role to be assumed by the Neptune DB // instance for access to the S3 bucket. The IAM role ARN provided here should be // attached to the DB cluster (see [Adding the IAM Role to an Amazon Neptune Cluster]). // // [Adding the IAM Role to an Amazon Neptune Cluster]: https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-IAM-add-role-cluster.html // // This member is required. IamRoleArn *string // The Amazon Region of the S3 bucket. This must match the Amazon Region of the DB // cluster. // // This member is required. S3BucketRegion types.S3BucketRegion // The source parameter accepts an S3 URI that identifies a single file, multiple // files, a folder, or multiple folders. Neptune loads every data file in any // folder that is specified. // // The URI can be in any of the following formats. // // - s3://(bucket_name)/(object-key-name) // // - https://s3.amazonaws.com/(bucket_name)/(object-key-name) // // - https://s3.us-east-1.amazonaws.com/(bucket_name)/(object-key-name) // // The object-key-name element of the URI is equivalent to the [prefix] parameter in an S3 [ListObjects] // API call. It identifies all the objects in the specified S3 bucket whose names // begin with that prefix. That can be a single file or folder, or multiple files // and/or folders. // // The specified folder or folders can contain multiple vertex files and multiple // edge files. // // [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html // [prefix]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html#API_ListObjects_RequestParameters // // This member is required. Source *string // This is an optional parameter that can make a queued load request contingent on // the successful completion of one or more previous jobs in the queue. // // Neptune can queue up as many as 64 load requests at a time, if their // queueRequest parameters are set to "TRUE" . The dependencies parameter lets you // make execution of such a queued request dependent on the successful completion // of one or more specified previous requests in the queue.
// // For example, if load Job-A and Job-B are independent of each other, but load // Job-C needs Job-A and Job-B to be finished before it begins, proceed as follows: // // - Submit load-job-A and load-job-B one after another in any order, and save // their load-ids. // // - Submit load-job-C with the load-ids of the two jobs in its dependencies // field: // // Because of the dependencies parameter, the bulk loader will not start Job-C // until Job-A and Job-B have completed successfully. If either one of them fails, // Job-C will not be executed, and its status will be set to // LOAD_FAILED_BECAUSE_DEPENDENCY_NOT_SATISFIED . // // You can set up multiple levels of dependency in this way, so that the failure // of one job will cause all requests that are directly or indirectly dependent on // it to be cancelled. Dependencies []string // failOnError – A flag to toggle a complete stop on an error. // // Allowed values: "TRUE" , "FALSE" . // // Default value: "TRUE" . // // When this parameter is set to "FALSE" , the loader tries to load all the data in // the location specified, skipping any entries with errors. // // When this parameter is set to "TRUE" , the loader stops as soon as it encounters // an error. Data loaded up to that point persists. FailOnError *bool // The load job mode. // // Allowed values: RESUME , NEW , AUTO . // // Default value: AUTO . // // - RESUME – In RESUME mode, the loader looks for a previous load from this // source, and if it finds one, resumes that load job. If no previous load job is // found, the loader stops. // // The loader avoids reloading files that were successfully loaded in a previous // job. It only tries to process failed files. If you dropped previously loaded // data from your Neptune cluster, that data is not reloaded in this mode. If a // previous load job loaded all files from the same source successfully, nothing is // reloaded, and the loader returns success. // // - NEW – In NEW mode, the loader creates a new load request regardless of any previous // loads. You can use this mode to reload all the data from a source after dropping // previously loaded data from your Neptune cluster, or to load new data available // at the same source. // // - AUTO – In AUTO mode, the loader looks for a previous load job from the same // source, and if it finds one, resumes that job, just as in RESUME mode. // // If the loader doesn't find a previous load job from the same source, it loads // all data from the source, just as in NEW mode. Mode types.Mode // The optional parallelism parameter can be set to reduce the number of threads // used by the bulk load process. // // Allowed values: // // - LOW – The number of threads used is the number of available vCPUs divided by // 8. // // - MEDIUM – The number of threads used is the number of available vCPUs divided // by 2. // // - HIGH – The number of threads used is the same as the number of available // vCPUs. // // - OVERSUBSCRIBE – The number of threads used is the number of available vCPUs // multiplied by 2. If this value is used, the bulk loader takes up all available // resources. // // This does not mean, however, that the OVERSUBSCRIBE setting results in 100% CPU // utilization. Because the load operation is I/O bound, the highest CPU // utilization to expect is in the 60% to 70% range. // // Default value: HIGH // // The parallelism setting can sometimes result in a deadlock between threads when // loading openCypher data. When this happens, Neptune returns the // LOAD_DATA_DEADLOCK error.
You can generally fix the issue by setting parallelism // to a lower setting and retrying the load command. Parallelism types.Parallelism // parserConfiguration – An optional object with additional parser configuration // values. Each of the child parameters is also optional: // // - namedGraphUri – The default graph for all RDF formats when no graph is // specified (for non-quads formats and NQUAD entries with no graph). // // The default is https://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph . // // - baseUri – The base URI for RDF/XML and Turtle formats. // // The default is https://aws.amazon.com/neptune/default . // // - allowEmptyStrings – Gremlin users need to be able to pass empty string // values ("") as node and edge properties when loading CSV data. If // allowEmptyStrings is set to false (the default), such empty strings are // treated as nulls and are not loaded. // // If allowEmptyStrings is set to true , the loader treats empty strings as valid // property values and loads them accordingly. ParserConfiguration map[string]string // This is an optional flag parameter that indicates whether the load request can // be queued up or not. // // You don't have to wait for one load job to complete before issuing the next // one, because Neptune can queue up as many as 64 jobs at a time, provided that // their queueRequest parameters are all set to "TRUE" . The queue order of the // jobs will be first-in-first-out (FIFO). // // If the queueRequest parameter is omitted or set to "FALSE" , the load request // will fail if another load job is already running. // // Allowed values: "TRUE" , "FALSE" . // // Default value: "FALSE" . QueueRequest *bool // updateSingleCardinalityProperties is an optional parameter that controls how // the bulk loader treats a new value for single-cardinality vertex or edge // properties. This is not supported for loading openCypher data. // // Allowed values: "TRUE" , "FALSE" . // // Default value: "FALSE" . // // By default, or when updateSingleCardinalityProperties is explicitly set to // "FALSE" , the loader treats a new value as an error, because it violates single // cardinality. // // When updateSingleCardinalityProperties is set to "TRUE" , on the other hand, the // bulk loader replaces the existing value with the new one. If multiple edge or // single-cardinality vertex property values are provided in the source file(s) // being loaded, the final value at the end of the bulk load could be any one of // those new values. The loader only guarantees that the existing value has been // replaced by one of the new ones. UpdateSingleCardinalityProperties *bool // This parameter is required only when loading openCypher data that contains // relationship IDs. It must be included and set to True when openCypher // relationship IDs are explicitly provided in the load data (recommended). // // When userProvidedEdgeIds is absent or set to True , an :ID column must be // present in every relationship file in the load. // // When userProvidedEdgeIds is present and set to False , relationship files in the // load must not contain an :ID column. Instead, the Neptune loader automatically // generates an ID for each relationship. // // It's useful to provide relationship IDs explicitly so that the loader can // resume loading after errors in the CSV data have been fixed, without having to // reload any relationships that have already been loaded.
If relationship IDs have // not been explicitly assigned, the loader cannot resume a failed load if any // relationship file has had to be corrected, and must instead reload all the // relationships. UserProvidedEdgeIds *bool // contains filtered or unexported fields }
type StartLoaderJobOutput ¶
type StartLoaderJobOutput struct { // Contains a loadId name-value pair that provides an identifier for the load // operation. // // This member is required. Payload map[string]string // The HTTP return code indicating the status of the load job. // // This member is required. Status *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
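A hedged sketch of queuing a CSV bulk load; the bucket, prefix, IAM role ARN, and Region are placeholders, the enum values are passed as string conversions of the allowed values listed above, and the loadId payload key is assumed from the StartLoaderJobOutput description above.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata/types"
)

// startCSVLoad queues a bulk load of Gremlin CSV files and returns the
// identifier of the new load job.
func startCSVLoad(ctx context.Context, client *neptunedata.Client) (string, error) {
	out, err := client.StartLoaderJob(ctx, &neptunedata.StartLoaderJobInput{
		Source:         aws.String("s3://my-bucket/neptune-load/"),
		Format:         types.Format("csv"),
		IamRoleArn:     aws.String("arn:aws:iam::123456789012:role/NeptuneLoadFromS3"),
		S3BucketRegion: types.S3BucketRegion("us-east-1"),
		FailOnError:    aws.Bool(true),
		Parallelism:    types.Parallelism("HIGH"),
		QueueRequest:   aws.Bool(true),
	})
	if err != nil {
		return "", err
	}
	// The payload carries a loadId entry identifying the load operation.
	return out.Payload["loadId"], nil
}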
type StartMLDataProcessingJobInput ¶
type StartMLDataProcessingJobInput struct { // The URI of the Amazon S3 location where you want SageMaker to download the data // needed to run the data processing job. // // This member is required. InputDataS3Location *string // The URI of the Amazon S3 location where you want SageMaker to save the results // of a data processing job. // // This member is required. ProcessedDataS3Location *string // A data specification file that describes how to load the exported graph data // for training. The file is automatically generated by the Neptune export toolkit. // The default is training-data-configuration.json . ConfigFileName *string // A unique identifier for the new job. The default is an autogenerated UUID. Id *string // One of the two model types that Neptune ML currently supports: heterogeneous // graph models ( heterogeneous ), and knowledge graph ( kge ). The default is // none. If not specified, Neptune ML chooses the model type automatically based on // the data. ModelType *string // The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to // perform tasks on your behalf. This must be listed in your DB cluster parameter // group or an error will occur. NeptuneIamRoleArn *string // The job ID of a completed data processing job run on an earlier version of the // data. PreviousDataProcessingJobId *string // The type of ML instance used during data processing. Its memory should be large // enough to hold the processed dataset. The default is the smallest ml.r5 type // whose memory is ten times larger than the size of the exported graph data on // disk. ProcessingInstanceType *string // The disk volume size of the processing instance. Both input data and processed // data are stored on disk, so the volume size must be large enough to hold both // data sets. The default is 0. If not specified or 0, Neptune ML chooses the // volume size automatically based on the data size. ProcessingInstanceVolumeSizeInGB *int32 // Timeout in seconds for the data processing job. The default is 86,400 (1 day). ProcessingTimeOutInSeconds *int32 // The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to // encrypt the output of the processing job. The default is none. S3OutputEncryptionKMSKey *string // The ARN of an IAM role for SageMaker execution. This must be listed in your DB // cluster parameter group or an error will occur. SagemakerIamRoleArn *string // The VPC security group IDs. The default is None. SecurityGroupIds []string // The IDs of the subnets in the Neptune VPC. The default is None. Subnets []string // The Amazon Key Management Service (Amazon KMS) key that SageMaker uses to // encrypt data on the storage volume attached to the ML compute instances that run // the training job. The default is None. VolumeEncryptionKMSKey *string // contains filtered or unexported fields }
type StartMLDataProcessingJobOutput ¶
type StartMLDataProcessingJobOutput struct { // The ARN of the data processing job. Arn *string // The time it took to create the new processing job, in milliseconds. CreationTimeInMillis *int64 // The unique ID of the new data processing job. Id *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
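A sketch of starting the data-processing step of a Neptune ML pipeline with only the required members; the S3 locations are placeholders and everything else is left to its documented default.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// startDataProcessing launches a Neptune ML data-processing job over data
// exported by the Neptune export toolkit.
func startDataProcessing(ctx context.Context, client *neptunedata.Client) (string, error) {
	out, err := client.StartMLDataProcessingJob(ctx, &neptunedata.StartMLDataProcessingJobInput{
		InputDataS3Location:     aws.String("s3://my-bucket/neptune-export/"),
		ProcessedDataS3Location: aws.String("s3://my-bucket/neptune-processed/"),
	})
	if err != nil {
		return "", err
	}
	// The returned ID is what later training and status calls refer to.
	return aws.ToString(out.Id), nil
}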
type StartMLModelTrainingJobInput ¶
type StartMLModelTrainingJobInput struct { // The job ID of the completed data-processing job that has created the data that // the training will work with. // // This member is required. DataProcessingJobId *string // The location in Amazon S3 where the model artifacts are to be stored. // // This member is required. TrainModelS3Location *string // The type of ML instance used in preparing and managing training of ML models. // This is a CPU instance chosen based on memory requirements for processing the // training data and model. BaseProcessingInstanceType *string // The configuration for custom model training. This is a JSON object. CustomModelTrainingParameters *types.CustomModelTrainingParameters // Optimizes the cost of training machine-learning models by using Amazon Elastic // Compute Cloud spot instances. The default is False . EnableManagedSpotTraining *bool // A unique identifier for the new job. The default is an autogenerated UUID. Id *string // Maximum total number of training jobs to start for the hyperparameter tuning // job. The default is 2. Neptune ML automatically tunes the hyperparameters of the // machine learning model. To obtain a model that performs well, use at least 10 // jobs (in other words, set maxHPONumberOfTrainingJobs to 10). In general, the // more tuning runs, the better the results. MaxHPONumberOfTrainingJobs *int32 // Maximum number of parallel training jobs to start for the hyperparameter tuning // job. The default is 2. The number of parallel jobs you can run is limited by the // available resources on your training instance. MaxHPOParallelTrainingJobs *int32 // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // The job ID of a completed model-training job that you want to update // incrementally based on updated data. PreviousModelTrainingJobId *string // The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the // output of the processing job. The default is none. S3OutputEncryptionKMSKey *string // The ARN of an IAM role for SageMaker execution. This must be listed in your DB // cluster parameter group or an error will occur. SagemakerIamRoleArn *string // The VPC security group IDs. The default is None. SecurityGroupIds []string // The IDs of the subnets in the Neptune VPC. The default is None. Subnets []string // The type of ML instance used for model training. All Neptune ML models support // CPU, GPU, and multiGPU training. The default is ml.p3.2xlarge . Choosing the // right instance type for training depends on the task type, graph size, and your // budget. TrainingInstanceType *string // The disk volume size of the training instance. Both input data and the output // model are stored on disk, so the volume size must be large enough to hold both // data sets. The default is 0. If not specified or 0, Neptune ML selects a disk // volume size based on the recommendation generated in the data processing step. TrainingInstanceVolumeSizeInGB *int32 // Timeout in seconds for the training job. The default is 86,400 (1 day). TrainingTimeOutInSeconds *int32 // The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data // on the storage volume attached to the ML compute instances that run the training // job. The default is None. VolumeEncryptionKMSKey *string // contains filtered or unexported fields }
type StartMLModelTrainingJobOutput ¶
type StartMLModelTrainingJobOutput struct { // The ARN of the new model training job. Arn *string // The model training job creation time, in milliseconds. CreationTimeInMillis *int64 // The unique ID of the new model training job. Id *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
type StartMLModelTransformJobInput ¶
type StartMLModelTransformJobInput struct { // The location in Amazon S3 where the model artifacts are to be stored. // // This member is required. ModelTransformOutputS3Location *string // The type of ML instance used in preparing and managing training of ML models. // This is an ML compute instance chosen based on memory requirements for // processing the training data and model. BaseProcessingInstanceType *string // The disk volume size of the training instance in gigabytes. The default is 0. // Both input data and the output model are stored on disk, so the volume size must // be large enough to hold both data sets. If not specified or 0, Neptune ML // selects a disk volume size based on the recommendation generated in the data // processing step. BaseProcessingInstanceVolumeSizeInGB *int32 // Configuration information for a model transform using a custom model. The // customModelTransformParameters object contains the following fields, which must // have values compatible with the saved model parameters from the training job: CustomModelTransformParameters *types.CustomModelTransformParameters // The job ID of a completed data-processing job. You must include either // dataProcessingJobId and a mlModelTrainingJobId , or a trainingJobName . DataProcessingJobId *string // A unique identifier for the new job. The default is an autogenerated UUID. Id *string // The job ID of a completed model-training job. You must include either // dataProcessingJobId and a mlModelTrainingJobId , or a trainingJobName . MlModelTrainingJobId *string // The ARN of an IAM role that provides Neptune access to SageMaker and Amazon S3 // resources. This must be listed in your DB cluster parameter group or an error // will occur. NeptuneIamRoleArn *string // The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt the // output of the processing job. The default is none. S3OutputEncryptionKMSKey *string // The ARN of an IAM role for SageMaker execution. This must be listed in your DB // cluster parameter group or an error will occur. SagemakerIamRoleArn *string // The VPC security group IDs. The default is None. SecurityGroupIds []string // The IDs of the subnets in the Neptune VPC. The default is None. Subnets []string // The name of a completed SageMaker training job. You must include either // dataProcessingJobId and a mlModelTrainingJobId , or a trainingJobName . TrainingJobName *string // The Amazon Key Management Service (KMS) key that SageMaker uses to encrypt data // on the storage volume attached to the ML compute instances that run the training // job. The default is None. VolumeEncryptionKMSKey *string // contains filtered or unexported fields }
type StartMLModelTransformJobOutput ¶
type StartMLModelTransformJobOutput struct { // The ARN of the model transform job. Arn *string // The creation time of the model transform job, in milliseconds. CreationTimeInMillis *int64 // The unique ID of the new model transform job. Id *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata // contains filtered or unexported fields }
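The transform job must identify its source model either by the dataProcessingJobId and mlModelTrainingJobId pair or by a SageMaker trainingJobName. A sketch using the first form, with a placeholder output location:

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptunedata"
)

// startModelTransform generates model artifacts from a completed
// data-processing job and model-training job.
func startModelTransform(ctx context.Context, client *neptunedata.Client, processingJobID, trainingJobID string) (string, error) {
	out, err := client.StartMLModelTransformJob(ctx, &neptunedata.StartMLModelTransformJobInput{
		ModelTransformOutputS3Location: aws.String("s3://my-bucket/neptune-transform/"),
		DataProcessingJobId:            aws.String(processingJobID),
		MlModelTrainingJobId:           aws.String(trainingJobID),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Id), nil
}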
Source Files ¶
- api_client.go
- api_op_CancelGremlinQuery.go
- api_op_CancelLoaderJob.go
- api_op_CancelMLDataProcessingJob.go
- api_op_CancelMLModelTrainingJob.go
- api_op_CancelMLModelTransformJob.go
- api_op_CancelOpenCypherQuery.go
- api_op_CreateMLEndpoint.go
- api_op_DeleteMLEndpoint.go
- api_op_DeletePropertygraphStatistics.go
- api_op_DeleteSparqlStatistics.go
- api_op_ExecuteFastReset.go
- api_op_ExecuteGremlinExplainQuery.go
- api_op_ExecuteGremlinProfileQuery.go
- api_op_ExecuteGremlinQuery.go
- api_op_ExecuteOpenCypherExplainQuery.go
- api_op_ExecuteOpenCypherQuery.go
- api_op_GetEngineStatus.go
- api_op_GetGremlinQueryStatus.go
- api_op_GetLoaderJobStatus.go
- api_op_GetMLDataProcessingJob.go
- api_op_GetMLEndpoint.go
- api_op_GetMLModelTrainingJob.go
- api_op_GetMLModelTransformJob.go
- api_op_GetOpenCypherQueryStatus.go
- api_op_GetPropertygraphStatistics.go
- api_op_GetPropertygraphStream.go
- api_op_GetPropertygraphSummary.go
- api_op_GetRDFGraphSummary.go
- api_op_GetSparqlStatistics.go
- api_op_GetSparqlStream.go
- api_op_ListGremlinQueries.go
- api_op_ListLoaderJobs.go
- api_op_ListMLDataProcessingJobs.go
- api_op_ListMLEndpoints.go
- api_op_ListMLModelTrainingJobs.go
- api_op_ListMLModelTransformJobs.go
- api_op_ListOpenCypherQueries.go
- api_op_ManagePropertygraphStatistics.go
- api_op_ManageSparqlStatistics.go
- api_op_StartLoaderJob.go
- api_op_StartMLDataProcessingJob.go
- api_op_StartMLModelTrainingJob.go
- api_op_StartMLModelTransformJob.go
- auth.go
- deserializers.go
- doc.go
- endpoints.go
- go_module_metadata.go
- options.go
- serializers.go
- validators.go