elastic

package module
v0.0.0-...-355f7c9 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: May 22, 2016 License: MIT Imports: 12 Imported by: 0

README

elastic-go

Build Status Coverage Status Code Climate GoDoc

elastic-go is a Go client library that wraps the Elasticsearch REST API. It currently has support for:

  • Search
  • Index
  • Mapping
  • Analyze
  • ... more to come
Installation

go get github.com/dzlab/elastic-go

Tests and coverage
go test
gocov test | gocov report
Documentation

http://godoc.org/github.com/dzlab/elastic-go

Usage
import e "github.com/dzlab/elastic-go"
...
client := &e.Elasticsearch{Addr: "localhost:9200"}
client.Search("", "").Add("from", 30).Add("size", 10).Get()
// create an index example
client.Index("my_index").Delete()
cf := e.NewAnalyzer("char_filter").
  Add1("&_to_and", "type", "mapping").
  Add2("&_to_and", map[string]interface{}{
      "mappings": []string{"&=> and "},
    })
f := e.NewAnalyzer("filter").
  Add2("my_stopwords", map[string]interface{}{
      "type": "stop",
      "stopwords": []string{"the", "a"},
    })
a := e.NewAnalyzer("analyzer").
  Add2("my_analyzer", e.Dict{
      "type": "custom",
      "char_filter": []string{"html_strip", "&_to_and"},
      "tokenizer": "standard",
      "filter": []string{"lowercase", "my_stopwords"},
    })
client.Index("my_index").AddAnalyzer(cf).AddAnalyzer(f).AddAnalyzer(a).Put()

// try the analyzer with some data
client.Analyze("my_index").Analyzer("my_analyzer").Get("<p>a paragraph</p>")

// create mapping for a document
client.Mapping("my_index", "my_type").
  AddField("title", e.Dict{"type": "string", "analyzer": "standard"}).
  AddField("body", e.Dict{"type": "string", "analyzer": "my_analyzer"}).
  Put()

// insert some data
client.Insert("my_index", "my_type").Document(1, e.Dict{"title": "some title", "body": "<p> a paragraph</p>"}).Put()
Contribute

This library is still under very active development. Any contribution is welcome.

Some planned features:

  • A REPL to interact easily with Elasticsearch
  • ...

Documentation

Index

Constants

View Source
const (
	// Aggs abbreviated constant name for the Aggregation query.
	Aggs = "aggs"
	// Aggregations constant name for the Aggregation query.
	Aggregations = "aggregations"
	// Terms constant name of the terms bucket.
	Terms = "terms"
	// Histogram constant name of the histogram bucket.
	Histogram = "histogram"
	// DateHistogram constant name of the date histogram bucket.
	DateHistogram = "date_histogram"
	// Global constant name of the global bucket, which is used to bypass the aggregation scope.
	Global = "global"
	// FilterBucket constant name of the filter bucket, which is used to filter aggregation results.
	FilterBucket = "filter"
)
View Source
const (
	// Count constant name of the 'count' metric.
	Count = "count"
	// Sum constant name of the 'sum' metric.
	Sum = "sum"
	// Avg constant name of the 'avg' metric.
	Avg = "avg"
	// Min constant name of the 'min' metric.
	Min = "min"
	// Max constant name of the 'max' metric.
	Max = "max"
	// ExtendedStats constant name of a metric that will return a variety of statistics (e.g. stats.avg, stats.count, stats.std_deviation).
	ExtendedStats = "extended_stats"
	// Cardiality constant name of the 'cardinality' approximation metric.
	// NOTE(review): the identifier is misspelled ("Cardinality" intended), but it is
	// part of the exported API and cannot be renamed without breaking callers.
	Cardiality = "cardinality"
	// Percentiles constant name of the 'percentiles' approximation metric.
	Percentiles = "percentiles"
	// PercentileRanks constant name of an approximation metric that tells to which percentile the given value belongs.
	PercentileRanks = "percentile_ranks"
	// SignificantTerms constant name of the statistical anomaly aggregation. By default, it will use the entire index as the background group while the foreground will be the aggregation query scope.
	SignificantTerms = "significant_terms"
)

Constant name of Elasticsearch metrics

View Source
const (
	// Field name of parameter that defines the document's field that will be used to create buckets using its unique values.
	Field = "field"
	// Interval name of parameter that defines a histogram interval, i.e. the value that Elasticsearch will use to create new buckets.
	Interval = "interval"
	// Size name of parameter that defines how many terms we want to generate. Example of values, for histograms: 10, for date histograms: "month", "quarter".
	Size = "size"
	// Format name of parameter in date histogram, used to define the date format for bucket keys.
	Format = "format"
	// MinDocCount name of parameter in date histogram, used to force empty buckets to be returned.
	MinDocCount = "min_doc_count"
	// ExtendedBound name of parameter in date histogram. It is used to extend the boundaries of buckets beyond the boundaries of actual data. Thus, it forces all buckets between the min and max bound to be returned.
	ExtendedBound = "extended_bound"
	// Order name of an object that defines how the created buckets should be generated as well as the ordering mode (e.g. asc). Example of values: _count (sort by document count), _term (sort alphabetically by string value), _key (sort by bucket key, works only for histogram & date_histogram).
	Order = "order"
	// PrecisionThreshold configures the precision of the HyperLogLog algorithm used by the 'cardinality' metric.
	PrecisionThreshold = "precision_threshold"
	// Percents a parameter of the 'percentiles' metric. It's used to define an array of the percentiles that should be calculated instead of the default ones (i.e. 5, 25, 50, 75, 95, 99).
	Percents = "percents"
	// Values a parameter of the 'percentile_ranks' metric. It is used to define the values for which Elasticsearch should find the percentile.
	Values = "values"
	// Compression a parameter of the 'percentiles' metric (default value is 100). It is used to control the memory footprint (and thus the accuracy) by limiting the number of nodes involved in the calculation.
	Compression = "compression"
)
View Source
const (
	// ALIASES constant name of the '_aliases' API endpoint.
	ALIASES = "_aliases"
	// ACTIONS constant name of the 'actions' element in an aliases request body.
	ACTIONS = "actions"
)
View Source
const (
	// ANALYZE constant name of the Analyze API query.
	ANALYZE = "analyze"
	// Tokenizer a parameter in an Analyze API request used to set the text tokenizer. Example of possible values: standard, whitespace, letter.
	Tokenizer = "tokenizer"
	// Filters a parameter in an Analyze API request used to set the token filters. Example of possible values: lowercase.
	Filters = "filters"
	// CharFilters a parameter in an Analyze API request used to set the text preprocessor. Example of possible values: html_strip.
	CharFilters = "char_filters"
)
View Source
const (
	// ANALYSIS constant name of the analysis part of an Index API query.
	ANALYSIS = "analysis"
	// SETTINGS constant name of the settings attribute in a query of the Index API.
	SETTINGS = "settings"
	// ALIAS constant name of the field that defines the alias name of this index.
	ALIAS = "_alias"
	// ShardsNumber settings param of the field defining the number of shards of an index.
	ShardsNumber = "number_of_shards"
	// ReplicasNumber settings param of the field defining the replicas number.
	ReplicasNumber = "number_of_replicas"
	// RefreshInterval settings param of the field defining the refresh interval.
	RefreshInterval = "refresh_interval"
	// TOKENIZER name of the analyzer component responsible for tokenization.
	TOKENIZER = "tokenizer" // analyzer params
	// FILTER a parameter name of mapping in an Index API query.
	FILTER = "filter"
	// CharFilter name of the analyzer component responsible for filtering characters.
	CharFilter = "char_filter"
	// MinShingleSize name of the field that defines the minimum size of a shingle.
	MinShingleSize = "min_shingle_size"
	// MaxShingleSize name of the field that defines the maximum size of a shingle.
	MaxShingleSize = "max_shingle_size"
	// OutputUnigrams constant name of the field defining output unigrams.
	OutputUnigrams = "output_unigrams"
)
View Source
const (
	// StemExclusion a property in Analyzer settings used to define words that the analyzer should not stem.
	StemExclusion = "stem_exclusion"
	// Stopwords a property in Analyzer settings used to define custom stopwords other than the ones used by default by the analyzer.
	Stopwords = "stopwords"
	// StopwordsPath a property in Analyzer settings used to define the path to a file containing custom stopwords.
	StopwordsPath = "stopwords_path"
	// Stemmer a value of the 'type' property in Analyzer settings used to define the stemmer.
	Stemmer = "stemmer"
	// CommonGrams a value of the 'type' property in Filter settings.
	CommonGrams = "common_grams"
	// Type a property in Analyzer settings used to define the type of the property. Example of values: string, stop (for stopwords), stemmer, common_grams, etc.
	Type = "type"
	// Language a property in Analyzer settings used to define the type of stemmer to use in order to reduce words to their root form. Possible values: english, english_light, english_possessive_stemmer (removes 's from words), synonym, mapping (e.g. for char filter).
	Language = "language"
	// CommonWords a property in Filter settings; similar to the 'shingles' token filter, it makes phrase queries with stopwords more efficient. It accepts values similar to the 'stopwords' property, example of values: _english_.
	CommonWords = "common_words"
	// CommonWordsPath a property in Analyzer settings used to define the path to a file containing common words.
	CommonWordsPath = "common_words_path"
	// QueryMode a boolean property in Filter settings. Used in conjunction with common_words. It is set (by default) to false for indexing and to true for searching.
	QueryMode = "query_mode"
	// Synonyms an array of formatted synonyms in Filter settings. Used when type is set to 'synonym'.
	Synonyms = "synonyms"
	// SynonymsPath a string property in field parameters. It is used to specify a path (absolute or relative to the Elasticsearch 'config' directory) to a file containing formatted synonyms.
	SynonymsPath = "synonyms_path"
	// Encoder a property in Filter settings. Used when filter 'type' is set to 'phonetic' to set the name of the phonetic algorithm to use. Possible values: double_metaphone.
	Encoder = "encoder"
)

Analyzer-related constants

View Source
const (
	// MAPPING part of the Mapping API path url.
	MAPPING = "mapping"
	// MAPPINGS body of a Mapping API query.
	MAPPINGS = "mappings"
	// TYPE constant name of the data type property of a field.
	TYPE = "type"
	// ANALYZER constant name of the language analyzer for a field.
	ANALYZER = "analyzer"
	// INDEX constant name of the index name.
	INDEX = "index"
	// PROPERTIES constant name of the part of a Mapping query body that defines properties.
	PROPERTIES = "properties"
	// MATCH a query name.
	MATCH = "match"
	// MatchMappingType type of match mapping (e.g. string).
	MatchMappingType = "match_mapping_type"
	// DynamicTemplates dynamic mapping templates.
	DynamicTemplates = "dynamic_templates"
	// DEFAULT default mappings.
	DEFAULT = "_default_"
	// PositionOffsetGap constant name for defining the acceptable offset gap.
	PositionOffsetGap = "position_offset_gap"
	// IndexAnalyzer index-time analyzer.
	IndexAnalyzer = "index_analyzer"
	// SearchAnalyzer search-time analyzer.
	SearchAnalyzer = "search_analyzer"
	// IndexOptions defines indexing options in a Mapping query. Possible values are: docs (default for 'not_analyzed' string fields), freqs, positions (default for 'analyzed' string fields), offsets.
	IndexOptions = "index_options"
	// Norms constant name for configuring field length normalization.
	Norms = "norms"
	// Similarity in an Index mapping query. It defines the similarity algorithm to use. Possible values: default, BM25.
	Similarity = "similarity"
)
View Source
const (
	// EXPLAIN constant name of an Explain API request.
	EXPLAIN = "explain"
	// VALIDATE constant name of a Validate API request.
	VALIDATE = "validate"
	// SEARCH constant name of a Search API request.
	SEARCH = "search"
	// ALL a query element.
	ALL = "_all"
	// INCLUDE a query element.
	INCLUDE = "include_in_all"
	// SOURCE a query element.
	SOURCE = "_source"
	// SearchType a url param.
	SearchType = "search_type"
	// SCROLL a url param.
	SCROLL = "scroll"
	// PostFilter constant name of post_filter, a top level search parameter that is executed after the search query.
	PostFilter = "post_filter"
	// Filter a query name.
	Filter = "filter"
	// DisMax query name.
	DisMax = "dis_max"
	// MultiMatch a match query on multiple terms.
	MultiMatch = "multi_match"
	// Common a query name.
	Common = "common"
	// Boosting a query param that includes additional results but downgrades them.
	Boosting = "boosting"
	// ConstantScore a query param that assigns 1 as score to any matching document.
	ConstantScore = "constant_score"
	// FunctionScore a query for customizing the scoring with predefined functions: weight, field_value_factor, random_score.
	FunctionScore = "function_score"
	// Fuzzy 'fuzzy' search query. It's a term-level query that doesn't do analysis.
	Fuzzy = "fuzzy"
	// MatchPhrase 'phrase' search query.
	MatchPhrase = "match_phrase"
	// MatchPhrasePrefix 'phrase' search query.
	MatchPhrasePrefix = "match_phrase_prefix"
	// Prefix search terms with a given prefix.
	Prefix = "prefix"
	// Wildcard search terms with a wildcard.
	Wildcard = "wildcard"
	// RegExp filter terms matching a regular expression.
	RegExp = "regexp"
	// RESCORE rescores results of the previous query.
	RESCORE = "rescore"
	// RescoreQuery the query used to rescore the results of the previous query.
	RescoreQuery = "rescore_query"

	// CutOffFrequency query params. It is used to split query terms into 2 categories: low frequency terms for matching, and high frequency terms for sorting only.
	CutOffFrequency = "cutoff_frequency"
	// MinimumShouldMatch query params. It is used to reduce the number of low quality matches.
	MinimumShouldMatch = "minimum_should_match"
	// SLOP in 'phrase' queries to describe proximity/word ordering.
	SLOP = "slop"
	// MaxExpansions controls how many terms the prefix is allowed to match.
	MaxExpansions = "max_expansions"
	// WindowSize number of documents from each shard.
	WindowSize = "window_size"
	// DisableCoord a boolean value to enable/disable the use of Query Coordination in 'bool' queries.
	DisableCoord = "disable_coord"
	// Boost an Int value in query clauses to give it more importance.
	Boost = "boost"
	// IndicesBoost in multi-index search, a dictionary mapping each index name to its boost value. For instance, it can be used to specify a language preference if there is an index defined per language (e.g. blogs-en, blogs-fr).
	IndicesBoost = "indices_boost"
	// NegativeBoost in a boosting query, a float representing the negative boost value.
	NegativeBoost = "negative_boost"
	// Fuzziness a query parameter in 'fuzzy' (and also 'match', 'multi_match') queries. It's used to set the maximum edit distance between a potentially misspelled word and the index words.
	Fuzziness = "fuzziness"
	// PrefixLength an integer query parameter in the 'fuzzy' query. It is used to fix the initial characters, of a word, which will not be fuzzified.
	PrefixLength = "prefix_length"
	// Operator a query parameter in the 'match' query. Possible values: and.
	Operator = "operator"

	// Weight a predefined scoring function that can be used in any query. It assigns a non normalized boost to each document (i.e. it is used as-is and not altered like 'boost').
	Weight = "weight"
	// FieldValueFactor a predefined scoring function that uses a value of a field from the given document to alter _score.
	FieldValueFactor = "field_value_factor"
	// RandomScore a predefined scoring function to randomly sort documents for different users.
	RandomScore = "random_score"
	// Seed is a parameter used in combination with 'random_score'. It is used to ensure the same document ordering when the same seed is used (e.g. session identifier).
	Seed = "seed"
	// ScriptScore a predefined scoring function that uses a custom script.
	ScriptScore = "script_score"
	// Modifer a parameter of 'field_value_factor' in a FunctionScore query. It is used to alter the calculation of the new document score, possible values log1p, etc.
	// NOTE(review): the identifier is misspelled ("Modifier" intended) but is exported API and cannot be renamed without breaking callers.
	Modifer = "modifier"
	// Factor a parameter of 'field_value_factor' in a FunctionScore query. It is used to multiply the value of the concerned field (e.g. votes) to alter the final score calculation.
	Factor = "factor"
	// BoostMode is a parameter in a FunctionScore query. It is used to specify how the calculated score will affect the final document score.
	// Possible values: multiply (multiply _score by calculated result), sum (sum _score with calculated), min (lower of _score and calculated), max (higher of _score and calculated), replace (replace _score with calculated).
	BoostMode = "boost_mode"
	// MaxBoost is a parameter in a FunctionScore query. It is used to cap the maximum effect of the scoring function.
	MaxBoost = "max_boost"
	// ScoreMode is a parameter in a FunctionScore query. It defines, when there are many 'functions', how to reduce multiple results into a single value.
	// Possible values are multiply, sum, avg, max, min, first.
	ScoreMode = "score_mode"
)

fields of a Search API call

View Source
const (
	// REFRESH constant name of the refresh API call.
	REFRESH = "refresh"
	// FLUSH constant name of the flush API call.
	FLUSH = "flush"
	// OPTIMIZE constant name of the optimize API call.
	OPTIMIZE = "optimize"
)
View Source
const (
	// BULK constant name of the Elasticsearch bulk operations endpoint.
	BULK = "bulk"
)

Variables

This section is empty.

Functions

func String

func String(obj interface{}) string

String returns a string representation of the dictionary

Types

type AggSubResult

// AggSubResult is a structure representing a sub result of the aggregation
// query result: one named aggregation's buckets plus its error bounds.
type AggSubResult struct {
	// DocCountErrorUpperBound upper bound of the error on the document counts.
	DocCountErrorUpperBound int    `json:"doc_count_error_upper_bound"`
	// SumOtherDocCount number of documents not included in the returned buckets.
	SumOtherDocCount        int    `json:"sum_other_doc_count"`
	// Buckets the list of buckets returned by Elasticsearch.
	Buckets                 []Dict `json:"buckets"`
}

AggSubResult is a structure representing a sub result of the aggregation query result

type Aggregation

type Aggregation struct {
	// contains filtered or unexported fields
}

Aggregation a structure representing an aggregation request

func (*Aggregation) Add

func (agg *Aggregation) Add(bucket *Bucket) *Aggregation

Add adds a bucket definition to this aggregation request

func (*Aggregation) AddPostFilter

func (agg *Aggregation) AddPostFilter(q Query) *Aggregation

func (*Aggregation) AddQuery

func (agg *Aggregation) AddQuery(q Query) *Aggregation

AddQuery defines a scope query for this aggregation request

func (*Aggregation) Get

func (agg *Aggregation) Get()

Get submits request mappings between the json fields and how Elasticsearch store them GET /:index/:type/_search

func (*Aggregation) SetMetric

func (agg *Aggregation) SetMetric(name string) *Aggregation

SetMetric sets the search type with the given value (e.g. count)

func (*Aggregation) String

func (agg *Aggregation) String() string

String returns a string representation of this Search API call

type AggregationResult

// AggregationResult is a structure representing the Elasticsearch aggregation
// query result: the embedded search result fields plus an "aggregations"
// object keyed by aggregation name.
type AggregationResult struct {
	SearchResult
	// Aggregations maps each aggregation name to its sub result.
	Aggregations map[string]AggSubResult `json:"aggregations"`
}

///////////////////////////////// Aggregation Query AggregationResult is a structure representing the Elasticsearch aggregation query result e.g. {"took":4,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":7,"max_score":0.0,"hits":[]},"aggregations":{"colors":{"doc_count_error_upper_bound":0,"sum_other_doc_count":0,"buckets":[{"key":"blue","doc_count":1,"avg_price":{"value":15000.0}},{"key":"green","doc_count":2,"avg_price":{"value":21000.0}},{"key":"red","doc_count":4,"avg_price":{"value":32500.0}}]}}} e.g. {"took":3,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":7,"max_score":0.0,"hits":[]},"aggregations":{"distinct_colors":{"value":3}}}

type AggregationResultParser

type AggregationResultParser struct{}

AggregationResultParser a parser for aggregation result

func (*AggregationResultParser) Parse

func (parser *AggregationResultParser) Parse(data []byte) (interface{}, error)

Parse returns an index result structure from the given data

type Alias

type Alias struct {
	// contains filtered or unexported fields
}

func (*Alias) AddAction

func (alias *Alias) AddAction(operation, index, name string) *Alias

* Add an Alias operation (e.g. remove index's alias)

func (*Alias) Post

func (alias *Alias) Post()

* Submit an Aliases POST operation * POST /:index

func (*Alias) String

func (alias *Alias) String() string

* Return a JSON representation of the body of this Alias

type Analyze

type Analyze struct {
	// contains filtered or unexported fields
}

Analyze a structure representing an Elasticsearch query for the Analyze API

func (*Analyze) AddParam

func (analyzer *Analyze) AddParam(name, value string) *Analyze

AddParam adds a key/value pair to Analyze API request.

func (*Analyze) Analyzer

func (analyzer *Analyze) Analyzer(name string) *Analyze

Analyzer adds a named standard Elasticsearch analyzer to the Analyze query

func (*Analyze) Field

func (analyzer *Analyze) Field(field string) *Analyze

Field adds a field to an analyze request

func (*Analyze) Get

func (analyzer *Analyze) Get(body string)

Get submits an Analyze query to Elasticsearch GET /:index/_analyze?field=field_name

func (*Analyze) Pretty

func (analyzer *Analyze) Pretty() *Analyze

Pretty prettifies the response result

type AnalyzeResult

// AnalyzeResult is a structure representing the Elasticsearch analyze query
// result: the list of tokens produced by the analyzer.
type AnalyzeResult struct {
	// Tokens the tokens emitted for the analyzed text.
	Tokens []AnalyzeToken `json:"tokens"`
}

AnalyzeResult is a structure representing the Elasticsearch analyze query result e.g. {"tokens":[{"token":"quick","start_offset":0,"end_offset":5,"type":"<ALPHANUM>","position":0},{"token":"brown","start_offset":6,"end_offset":11,"type":"<ALPHANUM>","position":1},{"token":"fox","start_offset":12,"end_offset":15,"type":"<ALPHANUM>","position":2}]}

type AnalyzeResultParser

type AnalyzeResultParser struct{}

AnalyzeResultParser a parser for analyze result

func (*AnalyzeResultParser) Parse

func (parser *AnalyzeResultParser) Parse(data []byte) (interface{}, error)

Parse returns an analyze result structure from the given data

type AnalyzeToken

// AnalyzeToken is a structure representing a single token of the Elasticsearch
// analyze query response.
type AnalyzeToken struct {
	// Token the token text.
	Token       string `json:"token"`
	// StartOffset offset of the token start in the original text.
	StartOffset int    `json:"start_offset"`
	// EndOffset offset of the token end in the original text.
	EndOffset   int    `json:"end_offset"`
	// TokenType the token type reported by the analyzer (e.g. "<ALPHANUM>").
	TokenType   string `json:"type"`
	// Position position of the token in the token stream.
	Position    int    `json:"position"`
}

AnalyzeToken is a structure representing part of the Elasticsearch analyze query response

type Analyzer

type Analyzer struct {
	// contains filtered or unexported fields
}

Analyzer a structure for representing Analyzers and Filters

func NewAnalyzer

func NewAnalyzer(name string) *Analyzer

NewAnalyzer creates a new analyzer

func (*Analyzer) Add1

func (analyzer *Analyzer) Add1(key1, key2 string, value interface{}) *Analyzer

Add1 adds an attribute to analyzer definition

func (*Analyzer) Add2

func (analyzer *Analyzer) Add2(name string, value Dict) *Analyzer

Add2 adds a dictionary of attributes to analyzer definition

func (*Analyzer) String

func (analyzer *Analyzer) String() string

String returns a JSON string representation of this analyzer

type Bool

type Bool struct {
	// contains filtered or unexported fields
}

Bool represents a boolean clause, it is a complex clause that allows to combine other clauses as 'must' match, 'must_not' match, 'should' match.

func NewBool

func NewBool() *Bool

NewBool creates a new 'bool' clause

func (*Bool) Add

func (b *Bool) Add(name string, value interface{}) *Bool

Add adds a parameter to this `bool` query

func (*Bool) AddMust

func (b *Bool) AddMust(query Query) *Bool

AddMust adds a 'must' clause to this 'bool' clause

func (*Bool) AddMustNot

func (b *Bool) AddMustNot(query Query) *Bool

AddMustNot adds a 'must_not' clause to this 'bool' clause

func (*Bool) AddShould

func (b *Bool) AddShould(query Query) *Bool

AddShould adds a 'should' clause to this 'bool' clause

func (*Bool) KV

func (b *Bool) KV() Dict

KV returns the key-value store representing the body of this 'bool' query

func (*Bool) Name

func (b *Bool) Name() string

Name returns the name of this 'bool' query

type BoostingQuery

type BoostingQuery struct {
	// contains filtered or unexported fields
}

BoostingQuery a structure representing the 'boosting' query

func NewBoosting

func NewBoosting() *BoostingQuery

NewBoosting returns a new Boosting query

func (*BoostingQuery) AddNegative

func (boosting *BoostingQuery) AddNegative(name string, value interface{}) *BoostingQuery

AddNegative adds a negative clause to boosting query

func (*BoostingQuery) AddPositive

func (boosting *BoostingQuery) AddPositive(name string, value interface{}) *BoostingQuery

AddPositive adds a positive clause to boosting query

func (*BoostingQuery) KV

func (boosting *BoostingQuery) KV() Dict

KV returns the body of this boosting query as a dictionary

func (*BoostingQuery) Name

func (boosting *BoostingQuery) Name() string

Name returns the name of boosting query

func (*BoostingQuery) SetNegativeBoost

func (boosting *BoostingQuery) SetNegativeBoost(value float32) *BoostingQuery

SetNegativeBoost sets the negative boost

type Bucket

type Bucket struct {
	// contains filtered or unexported fields
}

Bucket a structure that defines how Elasticsearch should create Bucket for aggregations.

func NewBucket

func NewBucket(name string) *Bucket

NewBucket creates a new Bucket definition

func (*Bucket) AddBucket

func (bucket *Bucket) AddBucket(b *Bucket) *Bucket

AddBucket adds a nested bucket to this bucket

func (*Bucket) AddDict

func (bucket *Bucket) AddDict(name string, value Dict) *Bucket

func (*Bucket) AddMetric

func (bucket *Bucket) AddMetric(metric, name string, value interface{}) *Bucket

func (*Bucket) AddTerm

func (bucket *Bucket) AddTerm(name string, value interface{}) *Bucket

func (*Bucket) SetOrder

func (bucket *Bucket) SetOrder(metric, name, value string) *Bucket

SetOrder set the ordering for this bucket. name is the name of ordering, e.g. _count, _term, _key, name of metric value defines the sens of ordering, e.g. asc

type BucketResult

// BucketResult is a structure representing a single bucket entry of an
// aggregation query result.
type BucketResult struct {
	// Key the bucket key (e.g. the term value this bucket was created for).
	Key      string `json:"key"`
	// DocCount the number of documents that fell into this bucket.
	DocCount int    `json:"doc_count"`
	// Dict holds any additional (sub-aggregation) values of the bucket.
	// NOTE(review): the original tag `json` was malformed — struct tags must be
	// key:"value" pairs — so encoding/json ignored it; dropping it is
	// behavior-preserving and silences `go vet`.
	Dict
}

type Bulk

type Bulk struct {
	// contains filtered or unexported fields
}

Bulk a structure representing bulk operations

func (*Bulk) AddOperation

func (bulk *Bulk) AddOperation(op *Operation) *Bulk

AddOperation adds an operation to this bulk

func (*Bulk) Post

func (bulk *Bulk) Post()

Post submits a bulk that consists of a list of operations POST /:index/:type/_bulk

func (*Bulk) String

func (bulk *Bulk) String() string

String gets a string representation of the list of operations in this bulk

type BulkResult

// BulkResult is a structure representing the Elasticsearch bulk query result
// e.g. {"took":118,"errors":false,"items":[{"index":{...,"status":201}}]}
type BulkResult struct {
	// Took the time, in milliseconds, the bulk request took.
	// Bug fix: the tag was misspelled `josn:"took"`, so this field was never
	// populated when decoding a bulk response.
	Took   int            `json:"took"`
	// Errors true when at least one of the bulk operations failed.
	Errors bool           `json:"errors"`
	// Items the per-operation results.
	Items  []InsertResult `json:"items"`
}

BulkResult is a structure representing the Elasticsearch bulk query result e.g. {"took":118,"errors":false,"items":[{"index":{"_index":"my_index","_type":"my_type","_id":"1","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"status":201}},{"index":{"_index":"my_index","_type":"my_type","_id":"2","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"status":201}}]}

type BulkResultParser

type BulkResultParser struct{}

BulkResultParser a parser for bulk result

func (*BulkResultParser) Parse

func (parser *BulkResultParser) Parse(data []byte) (interface{}, error)

Parse returns a bulk result structure from the given data

type Dict

// Dict a dictionary with string keys and values of any type.
type Dict map[string]interface{}

Dict a dictionary with string keys and values of any type

type DocType

type DocType struct {
	// contains filtered or unexported fields
}

DocType a structure for document type

func NewDefaultType

func NewDefaultType() *DocType

NewDefaultType returns a '_default_' type that encapsulates shared/default settings e.g. specify index wide dynamic templates

func NewDocType

func NewDocType(name string) *DocType

NewDocType a new mapping template

func (*DocType) AddDynamicTemplate

func (doctype *DocType) AddDynamicTemplate(tmpl *Template) *DocType

AddDynamicTemplate adds a dynamic template to this mapping

func (*DocType) AddProperty

func (doctype *DocType) AddProperty(name string, value interface{}) *DocType

AddProperty adds a property to this document type

func (*DocType) AddTemplate

func (doctype *DocType) AddTemplate(tmpl *Template) *DocType

AddTemplate adds a template to this document type

func (*DocType) String

func (doctype *DocType) String() string

String returns a string representation of this document type

type Elasticsearch

// Elasticsearch client.
type Elasticsearch struct {
	// Addr address (e.g. "localhost:9200") of the Elasticsearch server to talk to.
	Addr string
}

Elasticsearch client

func (*Elasticsearch) Aggs

func (client *Elasticsearch) Aggs(index, doc string) *Aggregation

Aggs creates an aggregation request

func (*Elasticsearch) Alias

func (client *Elasticsearch) Alias() *Alias

func (*Elasticsearch) Analyze

func (client *Elasticsearch) Analyze(index string) *Analyze

Analyze returns an new Analyze request on the given index

func (*Elasticsearch) Bulk

func (client *Elasticsearch) Bulk(index, docType string) *Bulk

Bulk creates a new Bulk operations

func (*Elasticsearch) Execute

func (client *Elasticsearch) Execute(method, url, query string, parser Parser) (interface{}, error)

Execute an HTTP request and parse the response

func (*Elasticsearch) Explain

func (client *Elasticsearch) Explain(index, class string, id int64) *Search

Explain creates an Explanation request, that will return an explanation for why a document is returned by the query

func (*Elasticsearch) Flush

func (client *Elasticsearch) Flush(index string) *ShardMgmtOp

Flush creates a flush API call in order to force commit and truncate the 'translog'. See chapter 11, Inside a shard (Elasticsearch Definitive Guide)

func (*Elasticsearch) Index

func (client *Elasticsearch) Index(index string) *Index

Index returns a query for managing indexes

func (*Elasticsearch) Insert

func (client *Elasticsearch) Insert(index, doctype string) *Insert

Insert Create an Insert request, that will submit a new document to elastic search

func (*Elasticsearch) Mapping

func (client *Elasticsearch) Mapping(index, doctype string) *Mapping

Mapping creates request mappings between the json fields and how Elasticsearch store them GET /:index/:type/_mapping

func (*Elasticsearch) Optimize

func (client *Elasticsearch) Optimize(index string) *ShardMgmtOp

Optimize creates an Optimize API call in order to force merging shards into a number of segments

func (*Elasticsearch) Refresh

func (client *Elasticsearch) Refresh(index string) *ShardMgmtOp

Refresh create a refresh API call in order to force recently added document to be visible to search calls

func (*Elasticsearch) Search

func (client *Elasticsearch) Search(index, class string) *Search

Search creates a Search request

func (*Elasticsearch) Validate

func (client *Elasticsearch) Validate(index, class string, explain bool) *Search

Validate creates a Validation request

type Error

// Error is a structure representing the Elasticsearch error response body.
type Error struct {
	// RootCause the list of root causes reported by Elasticsearch.
	RootCause    []Dict `json:"root_cause"`
	// Type the error type (e.g. "index_already_exists_exception").
	Type         string `json:"type"`
	// Reason human-readable description of the error.
	Reason       string `json:"reason"`
	// CausedBy the nested cause of this error, if any.
	CausedBy     Dict   `json:"caused_by"`
	// ResourceType the type of the resource involved in the error.
	ResourceType string `json:"resource.type"`
	// ResourceId the identifier of the resource involved in the error.
	ResourceId   string `json:"resource.id"`
	// Index the name of the index involved in this error in case of an Index API response.
	Index string `json:"index"`
}

Error is a structure representing the Elasticsearch error response

type ExplainResult

// ExplainResult Elasticsearch explain/validate result.
type ExplainResult struct {
	// Valid whether the query is valid.
	Valid        bool          `json:"valid"`
	// Shards the shard statistics of the request.
	Shards       Shard         `json:"_shards"`
	// Explanations the per-index explanation details.
	Explanations []Explanation `json:"explanations"`
}

ExplainResult Elasticsearch explain result e.g. {"valid":true,"_shards":{"total":1,"successful":1,"failed":0},"explanations":[{"index":"my_index","valid":true,"explanation":"+((name:b name:br name:bro name:brow name:brown) (name:f name:fo)) #ConstantScore(+ConstantScore(_type:my_type))"}]}

type Explanation

// Explanation the details of an explanation for one index.
type Explanation struct {
	// Index the name of the index this explanation applies to.
	Index       string `json:"index"`
	// Valid whether the query is valid for this index.
	Valid       bool   `json:"valid"`
	// Explanation the textual explanation returned by Elasticsearch.
	Explanation string `json:"explanation"`
}

Explanation the details of explanation

type Failure

// Failure is a structure representing the Elasticsearch failure response,
// i.e. an error object plus an HTTP status code.
type Failure struct {
	// Err the error details returned by Elasticsearch.
	Err    Error `json:"error"`
	// Status the HTTP status code of the failed request (e.g. 400, 503).
	Status int   `json:"status"`
}

Failure is a structure representing the Elasticsearch failure response e.g.:{"error":{"root_cause":[{"type":"no_shard_available_action_exception","reason":"No shard available for [org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest@74508901]"}],"type":"no_shard_available_action_exception","reason":"No shard available for [org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest@74508901]"},"status":503} e.g.:{"error":{"root_cause":[{"type":"index_already_exists_exception","reason":"already exists","index":"my_index"}],"type":"index_already_exists_exception","reason":"already exists","index":"my_index"},"status":400}

type FailureParser

type FailureParser struct{}

FailureParser a parser for search result

func (*FailureParser) Parse

func (parser *FailureParser) Parse(data []byte) (interface{}, error)

Parse returns a parsed Failure result structure from the given data

type Hits

type Hits struct {
	Total    int          `json:"total"`
	MaxScore interface{}  `json:"max_score"`
	Hits     []SearchHits `json:"hits"`
}

Hits is a structure representing the Elasticsearch hits part of Search query response

type Index

type Index struct {
	// contains filtered or unexported fields
}

Index a structure to hold a query for building/deleting indexes

func (*Index) AddAnalyzer

func (idx *Index) AddAnalyzer(analyzer *Analyzer) *Index

AddAnalyzer adds an analyzer to the index settings

func (*Index) AddSetting

func (idx *Index) AddSetting(name string, value interface{}) *Index

AddSetting adds a key-value setting

func (*Index) Delete

func (idx *Index) Delete()

Delete submits to elasticsearch a query to delete an index DELETE /:index

func (*Index) Mappings

func (idx *Index) Mappings(doctype string, mapping *Mapping) *Index

Mappings set the mapping parameter

func (*Index) Pretty

func (idx *Index) Pretty() *Index

Pretty adds a parameter to the query url to prettify the Elasticsearch result

func (*Index) Put

func (idx *Index) Put()

Put submits to elasticsearch the query to create an index PUT /:index

func (*Index) SetAlias

func (idx *Index) SetAlias(alias string) *Index

SetAlias defines an alias for this index

func (*Index) SetRefreshInterval

func (idx *Index) SetRefreshInterval(interval string) *Index

SetRefreshInterval sets the refresh interval

func (*Index) SetReplicasNb

func (idx *Index) SetReplicasNb(number int) *Index

SetReplicasNb sets the number of replicas

func (*Index) SetShardsNb

func (idx *Index) SetShardsNb(number int) *Index

SetShardsNb sets the number of shards

func (*Index) Settings

func (idx *Index) Settings(settings Dict) *Index

Settings add a setting parameter to the Index query body

func (*Index) String

func (idx *Index) String() string

String returns a JSON representation of the body of this Index

type IndexResultParser

type IndexResultParser struct{}

IndexResultParser a parser for index result

func (*IndexResultParser) Parse

func (parser *IndexResultParser) Parse(data []byte) (interface{}, error)

Parse returns an index result structure from the given data

type Insert

type Insert struct {
	// contains filtered or unexported fields
}

Insert a request representing a document insert query

func (*Insert) Document

func (insert *Insert) Document(id int64, doc interface{}) *Insert

Document set the document to insert

func (*Insert) Put

func (insert *Insert) Put()

Put submits a request mappings between the json fields and how Elasticsearch store them PUT /:index/:type/:id

func (*Insert) String

func (insert *Insert) String() string

String returns a string representation of the document

type InsertResult

type InsertResult struct {
	Index   string `json:"_index"`
	Doctype string `json:"_type"`
	ID      string `json:"_id"`
	Version int    `json:"_version"`
	Shards  Shard  `json:"_shards"`
	Created bool   `json:"created"`
}

InsertResult is a structure representing the Elasticsearch insert query result e.g. {"_index":"my_index","_type":"groups","_id":"1","_version":1,"_shards":{"total":2,"successful":1,"failed":0},"created":true}

type InsertResultParser

type InsertResultParser struct{}

InsertResultParser a parser for mapping result

func (*InsertResultParser) Parse

func (parser *InsertResultParser) Parse(data []byte) (interface{}, error)

Parse returns an index result structure from the given data

type Mapping

type Mapping struct {
	// contains filtered or unexported fields
}

Mapping maps between the json fields and how Elasticsearch store them

func NewMapping

func NewMapping() *Mapping

NewMapping creates a new mapping query

func (*Mapping) AddDocumentType

func (mapping *Mapping) AddDocumentType(class *DocType) *Mapping

AddDocumentType adds a mapping for a type of objects

func (*Mapping) AddField

func (mapping *Mapping) AddField(name string, body Dict) *Mapping

AddField adds a mapping for a field

func (*Mapping) AddProperty

func (mapping *Mapping) AddProperty(fieldname, propertyname string, propertyvalue interface{}) *Mapping

AddProperty adds a mapping for a type's property (e.g. type, index, analyzer, etc.)

func (*Mapping) Get

func (mapping *Mapping) Get()

Get submits a get request mappings between the json fields and how Elasticsearch store them GET /:index/_mapping/:type

func (*Mapping) Put

func (mapping *Mapping) Put()

Put submits a request for updating the mappings between the json fields and how Elasticsearch store them PUT /:index/_mapping/:type

func (*Mapping) String

func (mapping *Mapping) String() string

String returns a string representation of this mapping API

type MappingResultParser

type MappingResultParser struct{}

MappingResultParser a parser for mapping result

func (*MappingResultParser) Parse

func (parser *MappingResultParser) Parse(data []byte) (interface{}, error)

Parse returns an index result structure from the given data

type Metric

type Metric struct {
	// contains filtered or unexported fields
}

Metric a structure that defines a bucket metric.

type Object

type Object struct {
	// contains filtered or unexported fields
}

Object a general purpose query

func NewConstantScore

func NewConstantScore() *Object

NewConstantScore creates a new 'constant_score' query

func NewExists

func NewExists() *Object

NewExists creates a new `exists` filter.

func NewFilter

func NewFilter() *Object

NewFilter returns a new filter query

func NewFunctionScore

func NewFunctionScore() *Object

NewFunctionScore creates a new 'function_score' query

func NewFuzzyQuery

func NewFuzzyQuery() *Object

NewFuzzyQuery create a new 'fuzzy' query

func NewMatch

func NewMatch() *Object

NewMatch Create a new match query

func NewMatchPhrase

func NewMatchPhrase() *Object

NewMatchPhrase Create a `match_phrase` query to find words that are near each other

func NewMissing

func NewMissing() *Object

NewMissing creates a new `missing` filter (the inverse of `exists`)

func NewMultiMatch

func NewMultiMatch() *Object

NewMultiMatch Create a new multi_match query

func NewQuery

func NewQuery(name string) *Object

NewQuery Create a new query object

func NewRescore

func NewRescore() *Object

NewRescore Create a `rescore` query

func NewRescoreQuery

func NewRescoreQuery() *Object

NewRescoreQuery Create a `rescore` query algorithm

func NewTerm

func NewTerm() *Object

NewTerm creates a new 'term' filter

func NewTerms

func NewTerms() *Object

NewTerms creates a new 'terms' filter, it is like 'term' but can match multiple values

func (*Object) Add

func (obj *Object) Add(argument string, value interface{}) *Object

Add adds a query argument/value

func (*Object) AddMultiple

func (obj *Object) AddMultiple(argument string, values ...interface{}) *Object

AddMultiple specify multiple values to match

func (*Object) AddQueries

func (obj *Object) AddQueries(name string, queries ...Query) *Object

AddQueries adds multiple queries, under given `name`

func (*Object) AddQuery

func (obj *Object) AddQuery(query Query) *Object

AddQuery adds a sub query (e.g. a field query)

func (*Object) Dict

func (obj *Object) Dict() Dict

Dict returns a dictionary representation of this object

func (*Object) KV

func (obj *Object) KV() Dict

KV returns the key-value store representing the body of this query

func (*Object) Name

func (obj *Object) Name() string

Name returns the name of this query object

func (*Object) String

func (obj *Object) String() string

String returns a string representation of this object

type Operation

type Operation struct {
	// contains filtered or unexported fields
}

Operation a structure representing a bulk operation

func NewOperation

func NewOperation(id int) *Operation

NewOperation creates a new operation with the given id

func (*Operation) Add

func (op *Operation) Add(name string, value interface{}) *Operation

Add adds a field to this document

func (*Operation) AddMultiple

func (op *Operation) AddMultiple(name string, values ...interface{}) *Operation

AddMultiple adds a field with multiple values to this document

func (*Operation) String

func (op *Operation) String() string

String get a string representation of this operation

type Parser

type Parser interface {
	Parse(data []byte) (interface{}, error)
}

Parser an interface for parsing responses

type Query

type Query interface {
	Name() string
	KV() Dict
}

Query defines an interface of an object from an Elasticsearch query

type Search struct {
	// contains filtered or unexported fields
}

Search a request representing a search

func (*Search) Add

func (search *Search) Add(argument string, value interface{}) *Search

Add adds a query argument/value, e.g. size, from, etc.

func (*Search) AddParam

func (search *Search) AddParam(name, value string) *Search

AddParam adds a url parameter/value, e.g. search_type (count, query_and_fetch, dfs_query_then_fetch/dfs_query_and_fetch, scan)

func (*Search) AddQuery

func (search *Search) AddQuery(query Query) *Search

AddQuery adds a query to this search request

func (*Search) AddSource

func (search *Search) AddSource(source string) *Search

AddSource adds to _source (i.e. specify another field that should be extracted)

func (*Search) Get

func (search *Search) Get()

Get submits request mappings between the json fields and how Elasticsearch store them GET /:index/:type/_search

func (*Search) Pretty

func (search *Search) Pretty() *Search

Pretty prettifies the response result

func (*Search) String

func (search *Search) String() string

String returns a string representation of this Search API call

type SearchHits

type SearchHits struct {
	Index  string  `json:"_index"`
	Type   string  `json:"_type"`
	ID     string  `json:"_id"`
	Score  float32 `json:"_score"`
	Source Dict    `json:"_source"`
}

SearchHits is a structure representing a hit document

type SearchResult

type SearchResult struct {
	Took     int   `json:"took"`
	TimedOut bool  `json:"timed_out"`
	Shards   Shard `json:"_shards"`
	Hits     Hits  `json:"hits"`
}

SearchResult is a structure representing the Elasticsearch search result e.g. {"took":1,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":0,"max_score":null,"hits":[]}} e.g. {"took":3,"timed_out":false,"_shards":{"total":1,"successful":1,"failed":0},"hits":{"total":1,"max_score":0.50741017,"hits":[{"_index":"my_index","_type":"my_type","_id":"1","_score":0.50741017,"_source":{"name":"Brown foxes"}}]}}

type SearchResultParser

type SearchResultParser struct{}

SearchResultParser a parser for search result

func (*SearchResultParser) Parse

func (parser *SearchResultParser) Parse(data []byte) (interface{}, error)

Parse returns a parsed search result structure from the given data

type Shard

type Shard struct {
	Total      int `json:"total"`
	Successful int `json:"successful"`
	Failed     int `json:"failed"`
}

Shard is a structure representing the Elasticsearch shard part of Search query response

type ShardMgmtOp

type ShardMgmtOp struct {
	// contains filtered or unexported fields
}

ShardMgmtOp a structure for creating shard management operations

func (*ShardMgmtOp) AddParam

func (op *ShardMgmtOp) AddParam(name, value string) *ShardMgmtOp

AddParam adds a query parameter to the Flush API url (e.g. wait_for_ongoing), or Optimize API (e.g. max_num_segment to 1)

func (*ShardMgmtOp) Post

func (op *ShardMgmtOp) Post()

Post submits a shard management request POST /:index/_refresh

type Success

type Success struct {
	Acknowledged bool `json:"acknowledged"`
}

Success is a structure representing an Elasticsearch success response e.g.: {"acknowledged":true}

type SuccessParser

type SuccessParser struct{}

SuccessParser parses Success responses

func (*SuccessParser) Parse

func (parser *SuccessParser) Parse(data []byte) (interface{}, error)

Parse returns a parsed Success result structure from the given data

type Template

type Template struct {
	// contains filtered or unexported fields
}

Template a structure for mapping template

func NewAllTemplate

func NewAllTemplate() *Template

NewAllTemplate returns a new '_all' template

func NewTemplate

func NewTemplate(name string) *Template

NewTemplate creates a new named mapping template

func (*Template) AddMappingProperty

func (template *Template) AddMappingProperty(name string, value interface{}) *Template

AddMappingProperty adds a property to the `mapping` object

func (*Template) AddMatch

func (template *Template) AddMatch(match string) *Template

AddMatch adds a match string (e.g. '*', '_es')

func (*Template) AddProperty

func (template *Template) AddProperty(name string, value interface{}) *Template

AddProperty adds a property to this template

func (*Template) String

func (template *Template) String() string

String returns a string representation of this template

type Unvalid

type Unvalid struct {
	Valid       bool   `json:"valid"`
	Shards      Dict   `json:"_shards"`
	Explanation []Dict `json:"explanations"`
}

Unvalid is a structure representing an Elasticsearch invalid (validation-failed) response e.g.: {"valid":false,"_shards":{"total":1,"successful":1,"failed":0},"explanations":[{"index":"gb","valid":false,"error":"org.elasticsearch.index.query.QueryParsingException: No query registered for [tweet]"}]}

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL