Documentation
¶
Index ¶
- Constants
- func String(obj interface{}) string
- type AggSubResult
- type Aggregation
- func (agg *Aggregation) Add(bucket *Bucket) *Aggregation
- func (agg *Aggregation) AddPostFilter(q Query) *Aggregation
- func (agg *Aggregation) AddQuery(q Query) *Aggregation
- func (agg *Aggregation) Get()
- func (agg *Aggregation) SetMetric(name string) *Aggregation
- func (agg *Aggregation) String() string
- type AggregationResult
- type AggregationResultParser
- type Alias
- type Analyze
- type AnalyzeResult
- type AnalyzeResultParser
- type AnalyzeToken
- type Analyzer
- type Bool
- type BoostingQuery
- func (boosting *BoostingQuery) AddNegative(name string, value interface{}) *BoostingQuery
- func (boosting *BoostingQuery) AddPositive(name string, value interface{}) *BoostingQuery
- func (boosting *BoostingQuery) KV() Dict
- func (boosting *BoostingQuery) Name() string
- func (boosting *BoostingQuery) SetNegativeBoost(value float32) *BoostingQuery
- type Bucket
- func (bucket *Bucket) AddBucket(b *Bucket) *Bucket
- func (bucket *Bucket) AddDict(name string, value Dict) *Bucket
- func (bucket *Bucket) AddMetric(metric, name string, value interface{}) *Bucket
- func (bucket *Bucket) AddTerm(name string, value interface{}) *Bucket
- func (bucket *Bucket) SetOrder(metric, name, value string) *Bucket
- type BucketResult
- type Bulk
- type BulkResult
- type BulkResultParser
- type Dict
- type DocType
- type Elasticsearch
- func (client *Elasticsearch) Aggs(index, doc string) *Aggregation
- func (client *Elasticsearch) Alias() *Alias
- func (client *Elasticsearch) Analyze(index string) *Analyze
- func (client *Elasticsearch) Bulk(index, docType string) *Bulk
- func (client *Elasticsearch) Execute(method, url, query string, parser Parser) (interface{}, error)
- func (client *Elasticsearch) Explain(index, class string, id int64) *Search
- func (client *Elasticsearch) Flush(index string) *ShardMgmtOp
- func (client *Elasticsearch) Index(index string) *Index
- func (client *Elasticsearch) Insert(index, doctype string) *Insert
- func (client *Elasticsearch) Mapping(index, doctype string) *Mapping
- func (client *Elasticsearch) Optimize(index string) *ShardMgmtOp
- func (client *Elasticsearch) Refresh(index string) *ShardMgmtOp
- func (client *Elasticsearch) Search(index, class string) *Search
- func (client *Elasticsearch) Validate(index, class string, explain bool) *Search
- type Error
- type ExplainResult
- type Explanation
- type Failure
- type FailureParser
- type Hits
- type Index
- func (idx *Index) AddAnalyzer(analyzer *Analyzer) *Index
- func (idx *Index) AddSetting(name string, value interface{}) *Index
- func (idx *Index) Delete()
- func (idx *Index) Mappings(doctype string, mapping *Mapping) *Index
- func (idx *Index) Pretty() *Index
- func (idx *Index) Put()
- func (idx *Index) SetAlias(alias string) *Index
- func (idx *Index) SetRefreshInterval(interval string) *Index
- func (idx *Index) SetReplicasNb(number int) *Index
- func (idx *Index) SetShardsNb(number int) *Index
- func (idx *Index) Settings(settings Dict) *Index
- func (idx *Index) String() string
- type IndexResultParser
- type Insert
- type InsertResult
- type InsertResultParser
- type Mapping
- func (mapping *Mapping) AddDocumentType(class *DocType) *Mapping
- func (mapping *Mapping) AddField(name string, body Dict) *Mapping
- func (mapping *Mapping) AddProperty(fieldname, propertyname string, propertyvalue interface{}) *Mapping
- func (mapping *Mapping) Get()
- func (mapping *Mapping) Put()
- func (mapping *Mapping) String() string
- type MappingResultParser
- type Metric
- type Object
- func NewConstantScore() *Object
- func NewExists() *Object
- func NewFilter() *Object
- func NewFunctionScore() *Object
- func NewFuzzyQuery() *Object
- func NewMatch() *Object
- func NewMatchPhrase() *Object
- func NewMissing() *Object
- func NewMultiMatch() *Object
- func NewQuery(name string) *Object
- func NewRescore() *Object
- func NewRescoreQuery() *Object
- func NewTerm() *Object
- func NewTerms() *Object
- func (obj *Object) Add(argument string, value interface{}) *Object
- func (obj *Object) AddMultiple(argument string, values ...interface{}) *Object
- func (obj *Object) AddQueries(name string, queries ...Query) *Object
- func (obj *Object) AddQuery(query Query) *Object
- func (obj *Object) Dict() Dict
- func (obj *Object) KV() Dict
- func (obj *Object) Name() string
- func (obj *Object) String() string
- type Operation
- type Parser
- type Query
- type Search
- func (search *Search) Add(argument string, value interface{}) *Search
- func (search *Search) AddParam(name, value string) *Search
- func (search *Search) AddQuery(query Query) *Search
- func (search *Search) AddSource(source string) *Search
- func (search *Search) Get()
- func (search *Search) Pretty() *Search
- func (search *Search) String() string
- type SearchHits
- type SearchResult
- type SearchResultParser
- type Shard
- type ShardMgmtOp
- type Success
- type SuccessParser
- type Template
- type Unvalid
Constants ¶
const ( // Aggs abbreviated constant name for the Aggregation query. Aggs = "aggs" // Aggregations constant name for the Aggregation query. Aggregations = "aggregations" // Terms constant name of terms Bucket Terms = "terms" // Histogram constant name of the Histogram bucket Histogram = "histogram" // DateHistogram constant name of the Date Histogram bucket. DateHistogram = "date_histogram" // Global constant name of the global bucket which is used to bypass aggregation scope. Global = "global" // FilterBucket constant name of filter bucket which is used to filter aggregation results. FilterBucket = "filter" )
const ( // Count constant name of 'count' metric. Count = "count" // Sum constant name of 'sum' metric. Sum = "sum" // Avg constant name of 'avg' metric. Avg = "avg" // Min constant name of 'min' metric. Min = "min" // Max constant name of 'max' metric. Max = "max" // ExtendedStats constant name of a metric that will return a variety of statistics (e.g. stats.avg, stats.count, stats.std_deviation). ExtendedStats = "extended_stats" // Cardiality constant name of the 'cardinality' approximation metric. Cardiality = "cardinality" // Percentiles constant name of the 'percentiles' approximation metric. Percentiles = "percentiles" // PercentileRanks constant name of an approximation metric that tells to which percentile the given value belongs. PercentileRanks = "percentile_ranks" // SignificantTerms constant name of the statistical anomaly aggregation. By default, it will use the entire index as the background group while the foreground will be the aggregation query scope. SignificantTerms = "significant_terms" )
Constant name of Elasticsearch metrics
const ( // Field name of parameter that defines the document's field that will be used to create buckets using its unique values. Field = "field" // Interval name of parameter that defines a histogram interval, i.e. the value that Elasticsearch will use to create new buckets. Interval = "interval" // Size name of parameter that defines how many terms we want to generate. Example of values, for histograms: 10, for date histograms: "month", "quarter". Size = "size" // Format name of parameter in date histogram, used to define the dates format for bucket keys. Format = "format" // MinDocCount name of parameter in date histogram, used to force empty buckets to be returned. MinDocCount = "min_doc_count" // ExtendedBound name of parameter in date histogram. It is used to extend the boundaries of buckets from the boundaries of actual data. Thus, it forces all buckets between the min and max bound to be returned. ExtendedBound = "extended_bound" // Order name of an object that defines how the created buckets should be generated as well as the ordering mode (e.g. asc). Example of values: _count (sort by document count), _term (sort alphabetically by string value), _key (sort by bucket key, works only for histogram & date_histogram). Order = "order" // PrecisionThreshold configures the precision of the HyperLogLog algorithm used by the 'cardinality' metric. PrecisionThreshold = "precision_threshold" // Percents a parameter of the 'percentiles' metric. It's used to define an array of the percentiles that should be calculated instead of the default one (i.e. 5, 25, 50, 75, 95, 99). Percents = "percents" // Values a parameter of the 'percentile_ranks' metric. It is used to define the values that Elasticsearch should find their percentile. Values = "values" // Compression a parameter of the 'percentiles' metric (default value is 100). It is used to control the memory footprint (and thus the accuracy) by limiting the number of nodes involved in the calculation. 
Compression = "compression" )
const ( ALIASES = "_aliases" ACTIONS = "actions" )
const ( // ANALYZE a constant for Analyze query name ANALYZE = "analyze" // Tokenizer a parameter in an Analyze API used to send the text tokenizer. Example of possible values: standard, whitespace, letter. Tokenizer = "tokenizer" // Filters a parameter in an Analyze API used to send the tokens filter. Example of possible values: lowercase Filters = "filters" // CharFilters a parameter in an Analyze API used to set the text preprocessor. Example of possible values: html_strip CharFilters = "char_filters" )
const ( // ANALYSIS constant name of analysis part of Index API query ANALYSIS = "analysis" // SETTINGS constant name of settings attribute in query of Index API SETTINGS = "settings" // ALIAS constant name of field that defines alias name of this index ALIAS = "_alias" // ShardsNumber settings param of field defining number of shards of index ShardsNumber = "number_of_shards" // ReplicasNumber settings param of field defining replicas number ReplicasNumber = "number_of_replicas" // RefreshInterval settings param of field defining the refresh interval RefreshInterval = "refresh_interval" // TOKENIZER name of the analyzer responsible for tokenisation TOKENIZER = "tokenizer" // analyzer params // FILTER a parameter name of mapping in an Index API query FILTER = "filter" // CharFilter name of the analyzer responsible for filtering characters. CharFilter = "char_filter" // MinShingleSize name of field that defines the minimum size of shingle MinShingleSize = "min_shingle_size" // MaxShingleSize name of field that defines the maximum size of shingle MaxShingleSize = "max_shingle_size" // OutputUnigrams constant name of field defining output unigrams OutputUnigrams = "output_unigrams" )
const ( // StemExclusion a property in Analyzer settings used to define words that the analyzer should not stem StemExclusion = "stem_exclusion" // Stopwords a property in Analyzer settings used to define custom stopwords other than the ones used by default by the analyzer Stopwords = "stopwords" // StopwordsPath a property in Analyzer settings used to define the path to a file containing custom stopwords. StopwordsPath = "stopwords_path" // Stemmer a value of 'type' property in Analyzer settings used to define the stemmer Stemmer = "stemmer" // CommonGrams a value of 'type' property in Filter settings. CommonGrams = "common_grams" // Type a property in Analyzer setting used to define the type of the property. Example of values: string (), stop (for stopwords), stemmer, common_grams, etc. Type = "type" // Language a property in Analyzer setting used to define the type of stemmer to use in order to reduce words to their root form. Possible values: english, english_light, english_possessive_stemmer (removes 's from words), synonym, mapping (e.g. for char filter). Language = "language" // CommonWords a property in Filter setting, similar to 'shingles' token filter, it makes phrase queries with stopwords more efficient. It accepts values similar to the 'stopwords' property, example of values: _english_. CommonWords = "common_words" // CommonWordsPath a property in Analyzer setting used to define the path to a file containing common words. CommonWordsPath = "common_words_path" // QueryMode a boolean property in Filter settings. Used in conjunction with common_words. It is set (by default) to false for indexing and to true for searching. QueryMode = "query_mode" // Synonyms an array of formatted synonyms in Filter settings. Used when type is set to 'synonym'. Synonyms = "synonyms" // SynonymsPath a string property in field parameter. It is used to specify a path (absolute or relative to Elasticsearch 'config' directory) to a file containing formatted synonyms. 
SynonymsPath = "synonyms_path" // Encoder a property in Filter settings. Used when filter 'type' is set to 'phonetic' to set the name of Phonetic algorithm to use. Possible values: double_metaphone. Encoder = "encoder" )
Analyzer related constants
const ( // MAPPING part of Mapping API path url MAPPING = "mapping" // MAPPINGS body of Mapping API query MAPPINGS = "mappings" // TYPE constant name of data type property of field TYPE = "type" // ANALYZER constant name of language analyzer for a field ANALYZER = "analyzer" // INDEX constant name of index name INDEX = "index" // PROPERTIES constant name of Mapping query body that defines properties PROPERTIES = "properties" // MATCH a query name MATCH = "match" // MatchMappingType type of match mapping (e.g. string) MatchMappingType = "match_mapping_type" // DynamicTemplates dynamic mapping templates DynamicTemplates = "dynamic_templates" // DEFAULT default mappings DEFAULT = "_default_" // PositionOffsetGap constant name for defining acceptable offset gap PositionOffsetGap = "position_offset_gap" // IndexAnalyzer index-time analyzer IndexAnalyzer = "index_analyzer" // SearchAnalyzer search-time analyzer SearchAnalyzer = "search_analyzer" // IndexOptions defines indexing options in Mapping query. Possible values are: docs (default for 'not_analyzed' string fields), freqs, positions (default for 'analyzed' string fields), offsets. IndexOptions = "index_options" // Norms constant name for configuring field length normalization Norms = "norms" // Similarity in an Index mapping query. It defines the similarity algorithm to use. Possible values: default, BM25. Similarity = "similarity" )
const ( // EXPLAIN constant name of Explain API request EXPLAIN = "explain" // VALIDATE constant name of Validate API request VALIDATE = "validate" // SEARCH constant name of Search API request SEARCH = "search" // ALL a query element ALL = "_all" // INCLUDE a query element INCLUDE = "include_in_all" // SOURCE a query element SOURCE = "_source" // SearchType a url param SearchType = "search_type" // SCROLL a url param SCROLL = "scroll" // PostFilter constant name of post_filter, a top level search parameter that is executed after the search query. PostFilter = "post_filter" // Filter a query name. Filter = "filter" // DisMax query name. DisMax = "dis_max" // MultiMatch a match query on multiple terms MultiMatch = "multi_match" // Common a query name. Common = "common" // Boosting a query param that includes additional results but downgrades them Boosting = "boosting" // ConstantScore a query param that assigns 1 as score to any matching document ConstantScore = "constant_score" // FunctionScore a query for customizing the scoring with predefined functions: weight, field_value_factor, random_score FunctionScore = "function_score" // Fuzzy 'fuzzy' search query. It's a term-level query that doesn't do analysis. Fuzzy = "fuzzy" // MatchPhrase 'phrase' search query MatchPhrase = "match_phrase" // MatchPhrasePrefix 'phrase' search query MatchPhrasePrefix = "match_phrase_prefix" // Prefix search terms with given prefix Prefix = "prefix" // Wildcard search terms with wildcard Wildcard = "wildcard" // RegExp filter terms matching a regular expression RegExp = "regexp" // RESCORE rescores result of previous query RESCORE = "rescore" // RescoreQuery RescoreQuery = "rescore_query" // CutOffFrequency query params. It is used to split query terms into 2 categories: low frequency terms for matching, and high frequency terms for sorting only. CutOffFrequency = "cutoff_frequency" // MinimumShouldMatch query params. It is used to reduce the number of low quality matches. 
MinimumShouldMatch = "minimum_should_match" // SLOP in 'phrase' queries to describe proximity/word ordering SLOP = "slop" // MaxExpansions controls how many terms the prefix is allowed to match MaxExpansions = "max_expansions" // WindowSize number of documents from each shard WindowSize = "window_size" // DisableCoord a boolean value to enable/disable the use of Query Coordination in 'bool' queries DisableCoord = "disable_coord" // Boost an Int value in query clauses to give it more importance Boost = "boost" // IndicesBoost in multi-index search, a dictionary mapping each index name to its boost value. For instance, it can be used to specify a language preference if there is an index defined per language (e.g. blogs-en, blogs-fr) IndicesBoost = "indices_boost" // NegativeBoost in boosting query, a float representing negative boost value NegativeBoost = "negative_boost" // Fuzziness a query parameter in 'fuzzy' (and also 'match', 'multi_match') query. It's used to set the maximum edit distance between a potentially misspelled word and the index words. Fuzziness = "fuzziness" // PrefixLength an integer query parameter in the 'fuzzy' query. It is used to fix the initial characters, of a word, which will not be fuzzified. PrefixLength = "prefix_length" // Operator a query parameter in the 'match' query. Possible values: and. Operator = "operator" // Weight a predefined scoring function that can be used in any query. It assigns a non normalized boost to each document (i.e. it is used as is and not altered like 'boost') Weight = "weight" // FieldValueFactor a predefined scoring function that uses a value of a field from the given document to alter _score FieldValueFactor = "field_value_factor" // RandomScore a predefined scoring function to randomly sort documents for different users RandomScore = "random_score" // Seed is a parameter used in combination with 'random_score'. It is used to ensure same document ordering when same seed is used (e.g. session identifier). 
Seed = "seed" // ScriptScore a predefined scoring function that uses a custom script ScriptScore = "script_score" // Modifer a parameter of 'field_value_factor' in a FunctionScore query. It is used to alter the calculation of the new document score, possible values log1p, etc. Modifer = "modifier" // Factor a parameter of 'field_value_factor' in a FunctionScore query. It is used to multiply the value of the concerned field (e.g. votes) to alter the final score calculation. Factor = "factor" // BoostMode is a parameter in a FunctionScore query. It is used to specify how the calculated score will affect final document score. // Possible values: multiply (multiply _score by calculated result), sum (sum _score with calculated), min (lower of _score and calculated), max (higher of _score and calculated), replace (replace _score with calculated) BoostMode = "boost_mode" // MaxBoost is a parameter in a FunctionScore query. It is used to cap the maximum effect of the scoring function. MaxBoost = "max_boost" // ScoreMode is a parameter in a FunctionScore query. It defines, when there are many 'functions', how to reduce multiple results into a single value. // Possible values are multiply, sum, avg, max, min, first. ScoreMode = "score_mode" )
fields of a Search API call
const ( // REFRESH refresh REFRESH = "refresh" // FLUSH flush FLUSH = "flush" // OPTIMIZE optimize OPTIMIZE = "optimize" )
const (
// BULK constant name of Elasticsearch bulk operations
BULK = "bulk"
)
Variables ¶
This section is empty.
Functions ¶
Types ¶
type AggSubResult ¶
type AggSubResult struct { DocCountErrorUpperBound int `json:"doc_count_error_upper_bound"` SumOtherDocCount int `json:"sum_other_doc_count"` Buckets []Dict `json:"buckets"` }
AggSubResult is a structure representing a sub result of the aggregation query result
type Aggregation ¶
type Aggregation struct {
// contains filtered or unexported fields
}
Aggregation a structure representing an aggregation request
func (*Aggregation) Add ¶
func (agg *Aggregation) Add(bucket *Bucket) *Aggregation
Add adds a bucket definition to this aggregation request
func (*Aggregation) AddPostFilter ¶
func (agg *Aggregation) AddPostFilter(q Query) *Aggregation
func (*Aggregation) AddQuery ¶
func (agg *Aggregation) AddQuery(q Query) *Aggregation
AddQuery defines a scope query for this aggregation request
func (*Aggregation) Get ¶
func (agg *Aggregation) Get()
Get submits the aggregation request GET /:index/:type/_search
func (*Aggregation) SetMetric ¶
func (agg *Aggregation) SetMetric(name string) *Aggregation
SetMetric sets the search type with the given value (e.g. count)
func (*Aggregation) String ¶
func (agg *Aggregation) String() string
String returns a string representation of this Search API call
type AggregationResult ¶
type AggregationResult struct { SearchResult Aggregations map[string]AggSubResult `json:"aggregations"` }
///////////////////////////////// Aggregation Query AggregationResult is a structure representing the Elasticsearch aggregation query result e.g. {"took":4,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":7,"max_score":0.0,"hits":[]},"aggregations":{"colors":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"blue","doc_count":1,"avg_price":{"value":15000.0}},{"key":"green","doc_count":2,"avg_price":{"value":21000.0}},{"key":"red","doc_count":4,"avg_price":{"value":32500.0}}]}}} e.g. {"took":3,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":7,"max_score":0.0,"hits":[]},"aggregations":{"distinct_colors":{"value":3}}}
type AggregationResultParser ¶
type AggregationResultParser struct{}
AggregationResultParser a parser for aggregation result
func (*AggregationResultParser) Parse ¶
func (parser *AggregationResultParser) Parse(data []byte) (interface{}, error)
Parse returns an index result structure from the given data
type Alias ¶
type Alias struct {
// contains filtered or unexported fields
}
type Analyze ¶
type Analyze struct {
// contains filtered or unexported fields
}
Analyze a structure representing an Elasticsearch query for the Analyze API
func (*Analyze) Analyzer ¶
Analyzer adds a named standard Elasticsearch analyzer to the Analyze query
type AnalyzeResult ¶
type AnalyzeResult struct {
Tokens []AnalyzeToken `json:"tokens"`
}
AnalyzeResult is a structure representing the Elasticsearch analyze query result e.g. {"tokens":[{"token":"quick","start_offset":0,"end_offset":5,"type":"<ALPHANUM>","position":0},{"token":"brown","start_offset":6,"end_offset":11,"type":"<ALPHANUM>","position":1},{"token":"fox","start_offset":12,"end_offset":15,"type":"<ALPHANUM>","position":2}]}
type AnalyzeResultParser ¶
type AnalyzeResultParser struct{}
AnalyzeResultParser a parser for analyze result
func (*AnalyzeResultParser) Parse ¶
func (parser *AnalyzeResultParser) Parse(data []byte) (interface{}, error)
Parse returns an analyze result structure from the given data
type AnalyzeToken ¶
type AnalyzeToken struct { Token string `json:"token"` StartOffset int `json:"start_offset"` EndOffset int `json:"end_offset"` TokenType string `json:"type"` Position int `json:"position"` }
AnalyzeToken is a structure representing part of the Elasticsearch analyze query response
type Analyzer ¶
type Analyzer struct {
// contains filtered or unexported fields
}
Analyzer a structure for representing Analyzers and Filters
type Bool ¶
type Bool struct {
// contains filtered or unexported fields
}
Bool represents a boolean clause, it is a complex clause that allows to combine other clauses as 'must' match, 'must_not' match, 'should' match.
func (*Bool) AddMustNot ¶
AddMustNot adds a 'must_not' clause to this 'bool' clause
type BoostingQuery ¶
type BoostingQuery struct {
// contains filtered or unexported fields
}
BoostingQuery a structure representing the 'boosting' query
func (*BoostingQuery) AddNegative ¶
func (boosting *BoostingQuery) AddNegative(name string, value interface{}) *BoostingQuery
AddNegative adds a negative clause to boosting query
func (*BoostingQuery) AddPositive ¶
func (boosting *BoostingQuery) AddPositive(name string, value interface{}) *BoostingQuery
AddPositive adds a positive clause to boosting query
func (*BoostingQuery) KV ¶
func (boosting *BoostingQuery) KV() Dict
KV returns the body of this boosting query as a dictionary
func (*BoostingQuery) Name ¶
func (boosting *BoostingQuery) Name() string
Name returns the name of boosting query
func (*BoostingQuery) SetNegativeBoost ¶
func (boosting *BoostingQuery) SetNegativeBoost(value float32) *BoostingQuery
SetNegativeBoost sets the negative boost
type Bucket ¶
type Bucket struct {
// contains filtered or unexported fields
}
Bucket a structure that defines how Elasticsearch should create Bucket for aggregations.
type BucketResult ¶
type Bulk ¶
type Bulk struct {
// contains filtered or unexported fields
}
Bulk a structure representing bulk operations
func (*Bulk) AddOperation ¶
AddOperation adds an operation to this bulk
type BulkResult ¶
type BulkResult struct { Took int `josn:"took"` Errors bool `json:"errors"` Items []InsertResult `json:"items"` }
BulkResult is a structure representing the Elasticsearch bulk query result e.g. {"took":118,"errors":false,"items":[{"index":{"_index":"my_index","_type":"my_type","_id":"1","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"status":201}},{"index":{"_index":"my_index","_type":"my_type","_id":"2","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"status":201}}]}
type BulkResultParser ¶
type BulkResultParser struct{}
BulkResultParser a parser for bulk result
func (*BulkResultParser) Parse ¶
func (parser *BulkResultParser) Parse(data []byte) (interface{}, error)
Parse returns a bulk result structure from the given data
type Dict ¶
type Dict map[string]interface{}
Dict a dictionary with string keys and values of any type
type DocType ¶
type DocType struct {
// contains filtered or unexported fields
}
DocType a structure for document type
func NewDefaultType ¶
func NewDefaultType() *DocType
NewDefaultType returns a '_default_' type that encapsulates shared/default settings e.g. specify index wide dynamic templates
func (*DocType) AddDynamicTemplate ¶
AddDynamicTemplate adds a dynamic template to this mapping
func (*DocType) AddProperty ¶
AddProperty adds a property to this document type
func (*DocType) AddTemplate ¶
AddTemplate adds a template to this document type
type Elasticsearch ¶
type Elasticsearch struct {
Addr string
}
Elasticsearch client
func (*Elasticsearch) Aggs ¶
func (client *Elasticsearch) Aggs(index, doc string) *Aggregation
Aggs creates an aggregation request
func (*Elasticsearch) Alias ¶
func (client *Elasticsearch) Alias() *Alias
func (*Elasticsearch) Analyze ¶
func (client *Elasticsearch) Analyze(index string) *Analyze
Analyze returns a new Analyze request on the given index
func (*Elasticsearch) Bulk ¶
func (client *Elasticsearch) Bulk(index, docType string) *Bulk
Bulk creates a new Bulk operation
func (*Elasticsearch) Execute ¶
func (client *Elasticsearch) Execute(method, url, query string, parser Parser) (interface{}, error)
Execute an HTTP request and parse the response
func (*Elasticsearch) Explain ¶
func (client *Elasticsearch) Explain(index, class string, id int64) *Search
Explain creates an Explanation request, that will return an explanation for why a document is returned by the query
func (*Elasticsearch) Flush ¶
func (client *Elasticsearch) Flush(index string) *ShardMgmtOp
Flush creates a flush API call in order to force a commit and truncate the 'translog'. See chapter 11, Inside a Shard (Elasticsearch Definitive Guide)
func (*Elasticsearch) Index ¶
func (client *Elasticsearch) Index(index string) *Index
Index returns a query for managing indexes
func (*Elasticsearch) Insert ¶
func (client *Elasticsearch) Insert(index, doctype string) *Insert
Insert creates an Insert request that will submit a new document to Elasticsearch
func (*Elasticsearch) Mapping ¶
func (client *Elasticsearch) Mapping(index, doctype string) *Mapping
Mapping creates request mappings between the json fields and how Elasticsearch store them GET /:index/:type/_mapping
func (*Elasticsearch) Optimize ¶
func (client *Elasticsearch) Optimize(index string) *ShardMgmtOp
Optimize creates an Optimize API call in order to force merging shards into a number of segments
func (*Elasticsearch) Refresh ¶
func (client *Elasticsearch) Refresh(index string) *ShardMgmtOp
Refresh creates a refresh API call in order to force recently added documents to be visible to search calls
func (*Elasticsearch) Search ¶
func (client *Elasticsearch) Search(index, class string) *Search
Search creates a Search request
type Error ¶
type Error struct { RootCause []Dict `json:"root_cause"` Type string `json:"type"` Reason string `json:"reason"` CausedBy Dict `json:"caused_by"` ResourceType string `json:"resource.type"` ResourceId string `json:"resource.id"` // Index the name of index involved in this error incase of an Index API response Index string `json:"index"` }
Error is a structure representing the Elasticsearch error response
type ExplainResult ¶
type ExplainResult struct { Valid bool `json:"valid"` Shards Shard `json:"_shards"` Explanations []Explanation `json:"explanations"` }
ExplainResult Elasticsearch explain result e.g. {"valid":true,"_shards":{"total":1,"successful":1,"failed":0},"explanations":[{"index":"my_index","valid":true,"explanation":"+((name:b name:br name:bro name:brow name:brown) (name:f name:fo)) #ConstantScore(+ConstantScore(_type:my_type))"}]}
type Explanation ¶
type Explanation struct { Index string `json:"index"` Valid bool `json:"valid"` Explanation string `json:"explanation"` }
Explanation the details of explanation
type Failure ¶
Failure is a structure representing the Elasticsearch failure response e.g.:{"error":{"root_cause":[{"type":"no_shard_available_action_exception","reason":"No shard available for [org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest@74508901]"}],"type":"no_shard_available_action_exception","reason":"No shard available for [org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest@74508901]"},"status":503} e.g.:{"error":{"root_cause":[{"type":"index_already_exists_exception","reason":"already exists","index":"my_index"}],"type":"index_already_exists_exception","reason":"already exists","index":"my_index"},"status":400}
type FailureParser ¶
type FailureParser struct{}
FailureParser a parser for search result
func (*FailureParser) Parse ¶
func (parser *FailureParser) Parse(data []byte) (interface{}, error)
Parse returns a parsed Failure result structure from the given data
type Hits ¶
type Hits struct { Total int `json:"total"` MaxScore interface{} `json:"max_score"` Hits []SearchHits `json:"hits"` }
Hits is a structure representing the Elasticsearch hits part of Search query response
type Index ¶
type Index struct {
// contains filtered or unexported fields
}
Index a structure to hold a query for building/deleting indexes
func (*Index) AddAnalyzer ¶
AddAnalyzer adds an analyzer to the index settings
func (*Index) AddSetting ¶
AddSetting adds a key-value settings
func (*Index) Delete ¶
func (idx *Index) Delete()
Delete submits to elasticsearch a query to delete an index DELETE /:index
func (*Index) Put ¶
func (idx *Index) Put()
Put submits to elasticsearch the query to create an index PUT /:index
func (*Index) SetRefreshInterval ¶
SetRefreshInterval sets the refresh interval
func (*Index) SetReplicasNb ¶
SetReplicasNb sets the number of replicas
func (*Index) SetShardsNb ¶
SetShardsNb sets the number of shards
type IndexResultParser ¶
type IndexResultParser struct{}
IndexResultParser a parser for index result
func (*IndexResultParser) Parse ¶
func (parser *IndexResultParser) Parse(data []byte) (interface{}, error)
Parse returns an index result structure from the given data
type Insert ¶
type Insert struct {
// contains filtered or unexported fields
}
Insert a request representing a document insert query
type InsertResult ¶
type InsertResult struct { Index string `json:"_index"` Doctype string `json:"_type"` ID string `json:"_id"` Version int `json:"_version"` Shards Shard `json:"_shards"` Created bool `json:"created"` }
InsertResult is a structure representing the Elasticsearch insert query result e.g. {"_index":"my_index","_type":"groups","_id":"1","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"created":true}
type InsertResultParser ¶
type InsertResultParser struct{}
InsertResultParser a parser for insert result
func (*InsertResultParser) Parse ¶
func (parser *InsertResultParser) Parse(data []byte) (interface{}, error)
Parse returns an insert result structure from the given data
type Mapping ¶
type Mapping struct {
// contains filtered or unexported fields
}
Mapping maps between the json fields and how Elasticsearch store them
func (*Mapping) AddDocumentType ¶
AddDocumentType adds a mapping for a type of objects
func (*Mapping) AddProperty ¶
func (mapping *Mapping) AddProperty(fieldname, propertyname string, propertyvalue interface{}) *Mapping
AddProperty adds a mapping for a type's property (e.g. type, index, analyzer, etc.)
func (*Mapping) Get ¶
func (mapping *Mapping) Get()
Get submits a get request for the mappings between the json fields and how Elasticsearch stores them GET /:index/_mapping/:type
type MappingResultParser ¶
type MappingResultParser struct{}
MappingResultParser a parser for mapping result
func (*MappingResultParser) Parse ¶
func (parser *MappingResultParser) Parse(data []byte) (interface{}, error)
Parse returns a mapping result structure from the given data
type Metric ¶
type Metric struct {
// contains filtered or unexported fields
}
Metric a structure that defines a bucket metric.
type Object ¶
type Object struct {
// contains filtered or unexported fields
}
Object a general purpose query
func NewConstantScore ¶
func NewConstantScore() *Object
NewConstantScore creates a new 'constant_score' query
func NewFunctionScore ¶
func NewFunctionScore() *Object
NewFunctionScore creates a new 'function_score' query
func NewMatchPhrase ¶
func NewMatchPhrase() *Object
NewMatchPhrase Create a `match_phrase` query to find words that are near each other
func NewMissing ¶
func NewMissing() *Object
NewMissing creates a new `missing` filter (the inverse of `exists`)
func NewRescoreQuery ¶
func NewRescoreQuery() *Object
NewRescoreQuery Create a `rescore` query algorithm
func NewTerms ¶
func NewTerms() *Object
NewTerms creates a new 'terms' filter, it is like 'term' but can match multiple values
func (*Object) AddMultiple ¶
AddMultiple specify multiple values to match
func (*Object) AddQueries ¶
AddQueries adds multiple queries, under given `name`
type Operation ¶
type Operation struct {
// contains filtered or unexported fields
}
Operation a structure representing a bulk operation
func NewOperation ¶
NewOperation creates a new operation with the given id
func (*Operation) AddMultiple ¶
AddMultiple adds a field with multiple values to this document
type Search ¶
type Search struct {
// contains filtered or unexported fields
}
Search a request representing a search
func (*Search) AddParam ¶
AddParam adds a url parameter/value, e.g. search_type (count, query_and_fetch, dfs_query_then_fetch/dfs_query_and_fetch, scan)
func (*Search) AddSource ¶
AddSource adds to _source (i.e. specify another field that should be extracted)
func (*Search) Get ¶
func (search *Search) Get()
Get submits a search request GET /:index/:type/_search
type SearchHits ¶
type SearchHits struct { Index string `json:"_index"` Type string `json:"_type"` ID string `json:"_id"` Score float32 `json:"_score"` Source Dict `json:"_source"` }
SearchHits is a structure representing a hit document
type SearchResult ¶
type SearchResult struct { Took int `json:"took"` TimedOut bool `json:"timed_out"` Shards Shard `json:"_shards"` Hits Hits `json:"hits"` }
SearchResult is a structure representing the Elasticsearch search result e.g. {"took":1,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":0,"max_score":null,"hits":[]}} e.g. {"took":3,"timed_out":false,"_shards":{"total":1,"successful":1,"failed":0},"hits":{"total":1,"max_score":0.50741017,"hits":[{"_index":"my_index","_type":"my_type","_id":"1","_score":0.50741017,"_source":{"name":"Brown foxes"}}]}}
type SearchResultParser ¶
type SearchResultParser struct{}
SearchResultParser a parser for search result
func (*SearchResultParser) Parse ¶
func (parser *SearchResultParser) Parse(data []byte) (interface{}, error)
Parse returns a parsed search result structure from the given data
type Shard ¶
type Shard struct { Total int `json:"total"` Successful int `json:"successful"` Failed int `json:"failed"` }
Shard is a structure representing the Elasticsearch shard part of Search query response
type ShardMgmtOp ¶
type ShardMgmtOp struct {
// contains filtered or unexported fields
}
ShardMgmtOp a structure for creating shard management operations
func (*ShardMgmtOp) AddParam ¶
func (op *ShardMgmtOp) AddParam(name, value string) *ShardMgmtOp
AddParam adds a query parameter to this Flush API url (e.g. wait_for_ongoing), or Optimize API (e.g. max_num_segment to 1)
func (*ShardMgmtOp) Post ¶
func (op *ShardMgmtOp) Post()
Post submits a shard management request POST /:index/_refresh
type Success ¶
type Success struct {
Acknowledged bool `json:"acknowledged"`
}
Success is a structure representing an Elasticsearch success response e.g.: {"acknowledged":true}
type SuccessParser ¶
type SuccessParser struct{}
SuccessParser parses Success responses
func (*SuccessParser) Parse ¶
func (parser *SuccessParser) Parse(data []byte) (interface{}, error)
Parse returns a parsed Success result structure from the given data
type Template ¶
type Template struct {
// contains filtered or unexported fields
}
Template a structure for mapping template
func NewTemplate ¶
NewTemplate creates a new named mapping template
func (*Template) AddMappingProperty ¶
AddMappingProperty adds a property to the `mapping` object
func (*Template) AddProperty ¶
AddProperty adds a property to this template
type Unvalid ¶
type Unvalid struct { Valid bool `json:"valid"` Shards Dict `json:"_shards"` Explanation []Dict `json:"explanations"` }
Unvalid is a structure representing an Elasticsearch invalid-query response e.g.: {"valid":false,"_shards":{"total":1,"successful":1,"failed":0},"explanations":[{"index":"gb","valid":false,"error":"org.elasticsearch.index.query.QueryParsingException: No query registered for [tweet]"}]}