language


Documentation

Overview

Package language is a generated protocol buffer package.

It is generated from these files:

google/cloud/language/v1beta1/language_service.proto

It has these top-level messages:

Document
Sentence
Entity
Token
Sentiment
PartOfSpeech
DependencyEdge
EntityMention
TextSpan
AnalyzeSentimentRequest
AnalyzeSentimentResponse
AnalyzeEntitiesRequest
AnalyzeEntitiesResponse
AnalyzeSyntaxRequest
AnalyzeSyntaxResponse
AnnotateTextRequest
AnnotateTextResponse

Index

Constants

This section is empty.

Variables

View Source
var DependencyEdge_Label_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "ABBREV",
	2:  "ACOMP",
	3:  "ADVCL",
	4:  "ADVMOD",
	5:  "AMOD",
	6:  "APPOS",
	7:  "ATTR",
	8:  "AUX",
	9:  "AUXPASS",
	10: "CC",
	11: "CCOMP",
	12: "CONJ",
	13: "CSUBJ",
	14: "CSUBJPASS",
	15: "DEP",
	16: "DET",
	17: "DISCOURSE",
	18: "DOBJ",
	19: "EXPL",
	20: "GOESWITH",
	21: "IOBJ",
	22: "MARK",
	23: "MWE",
	24: "MWV",
	25: "NEG",
	26: "NN",
	27: "NPADVMOD",
	28: "NSUBJ",
	29: "NSUBJPASS",
	30: "NUM",
	31: "NUMBER",
	32: "P",
	33: "PARATAXIS",
	34: "PARTMOD",
	35: "PCOMP",
	36: "POBJ",
	37: "POSS",
	38: "POSTNEG",
	39: "PRECOMP",
	40: "PRECONJ",
	41: "PREDET",
	42: "PREF",
	43: "PREP",
	44: "PRONL",
	45: "PRT",
	46: "PS",
	47: "QUANTMOD",
	48: "RCMOD",
	49: "RCMODREL",
	50: "RDROP",
	51: "REF",
	52: "REMNANT",
	53: "REPARANDUM",
	54: "ROOT",
	55: "SNUM",
	56: "SUFF",
	57: "TMOD",
	58: "TOPIC",
	59: "VMOD",
	60: "VOCATIVE",
	61: "XCOMP",
	62: "SUFFIX",
	63: "TITLE",
	64: "ADVPHMOD",
	65: "AUXCAUS",
	66: "AUXVV",
	67: "DTMOD",
	68: "FOREIGN",
	69: "KW",
	70: "LIST",
	71: "NOMC",
	72: "NOMCSUBJ",
	73: "NOMCSUBJPASS",
	74: "NUMC",
	75: "COP",
	76: "DISLOCATED",
}
View Source
var DependencyEdge_Label_value = map[string]int32{
	"UNKNOWN":      0,
	"ABBREV":       1,
	"ACOMP":        2,
	"ADVCL":        3,
	"ADVMOD":       4,
	"AMOD":         5,
	"APPOS":        6,
	"ATTR":         7,
	"AUX":          8,
	"AUXPASS":      9,
	"CC":           10,
	"CCOMP":        11,
	"CONJ":         12,
	"CSUBJ":        13,
	"CSUBJPASS":    14,
	"DEP":          15,
	"DET":          16,
	"DISCOURSE":    17,
	"DOBJ":         18,
	"EXPL":         19,
	"GOESWITH":     20,
	"IOBJ":         21,
	"MARK":         22,
	"MWE":          23,
	"MWV":          24,
	"NEG":          25,
	"NN":           26,
	"NPADVMOD":     27,
	"NSUBJ":        28,
	"NSUBJPASS":    29,
	"NUM":          30,
	"NUMBER":       31,
	"P":            32,
	"PARATAXIS":    33,
	"PARTMOD":      34,
	"PCOMP":        35,
	"POBJ":         36,
	"POSS":         37,
	"POSTNEG":      38,
	"PRECOMP":      39,
	"PRECONJ":      40,
	"PREDET":       41,
	"PREF":         42,
	"PREP":         43,
	"PRONL":        44,
	"PRT":          45,
	"PS":           46,
	"QUANTMOD":     47,
	"RCMOD":        48,
	"RCMODREL":     49,
	"RDROP":        50,
	"REF":          51,
	"REMNANT":      52,
	"REPARANDUM":   53,
	"ROOT":         54,
	"SNUM":         55,
	"SUFF":         56,
	"TMOD":         57,
	"TOPIC":        58,
	"VMOD":         59,
	"VOCATIVE":     60,
	"XCOMP":        61,
	"SUFFIX":       62,
	"TITLE":        63,
	"ADVPHMOD":     64,
	"AUXCAUS":      65,
	"AUXVV":        66,
	"DTMOD":        67,
	"FOREIGN":      68,
	"KW":           69,
	"LIST":         70,
	"NOMC":         71,
	"NOMCSUBJ":     72,
	"NOMCSUBJPASS": 73,
	"NUMC":         74,
	"COP":          75,
	"DISLOCATED":   76,
}
View Source
var Document_Type_name = map[int32]string{
	0: "TYPE_UNSPECIFIED",
	1: "PLAIN_TEXT",
	2: "HTML",
}
View Source
var Document_Type_value = map[string]int32{
	"TYPE_UNSPECIFIED": 0,
	"PLAIN_TEXT":       1,
	"HTML":             2,
}
View Source
var EncodingType_name = map[int32]string{
	0: "NONE",
	1: "UTF8",
	2: "UTF16",
	3: "UTF32",
}
View Source
var EncodingType_value = map[string]int32{
	"NONE":  0,
	"UTF8":  1,
	"UTF16": 2,
	"UTF32": 3,
}
View Source
var EntityMention_Type_name = map[int32]string{
	0: "TYPE_UNKNOWN",
	1: "PROPER",
	2: "COMMON",
}
View Source
var EntityMention_Type_value = map[string]int32{
	"TYPE_UNKNOWN": 0,
	"PROPER":       1,
	"COMMON":       2,
}
View Source
var Entity_Type_name = map[int32]string{
	0: "UNKNOWN",
	1: "PERSON",
	2: "LOCATION",
	3: "ORGANIZATION",
	4: "EVENT",
	5: "WORK_OF_ART",
	6: "CONSUMER_GOOD",
	7: "OTHER",
}
View Source
var Entity_Type_value = map[string]int32{
	"UNKNOWN":       0,
	"PERSON":        1,
	"LOCATION":      2,
	"ORGANIZATION":  3,
	"EVENT":         4,
	"WORK_OF_ART":   5,
	"CONSUMER_GOOD": 6,
	"OTHER":         7,
}
View Source
var PartOfSpeech_Aspect_name = map[int32]string{
	0: "ASPECT_UNKNOWN",
	1: "PERFECTIVE",
	2: "IMPERFECTIVE",
	3: "PROGRESSIVE",
}
View Source
var PartOfSpeech_Aspect_value = map[string]int32{
	"ASPECT_UNKNOWN": 0,
	"PERFECTIVE":     1,
	"IMPERFECTIVE":   2,
	"PROGRESSIVE":    3,
}
View Source
var PartOfSpeech_Case_name = map[int32]string{
	0:  "CASE_UNKNOWN",
	1:  "ACCUSATIVE",
	2:  "ADVERBIAL",
	3:  "COMPLEMENTIVE",
	4:  "DATIVE",
	5:  "GENITIVE",
	6:  "INSTRUMENTAL",
	7:  "LOCATIVE",
	8:  "NOMINATIVE",
	9:  "OBLIQUE",
	10: "PARTITIVE",
	11: "PREPOSITIONAL",
	12: "REFLEXIVE_CASE",
	13: "RELATIVE_CASE",
	14: "VOCATIVE",
}
View Source
var PartOfSpeech_Case_value = map[string]int32{
	"CASE_UNKNOWN":   0,
	"ACCUSATIVE":     1,
	"ADVERBIAL":      2,
	"COMPLEMENTIVE":  3,
	"DATIVE":         4,
	"GENITIVE":       5,
	"INSTRUMENTAL":   6,
	"LOCATIVE":       7,
	"NOMINATIVE":     8,
	"OBLIQUE":        9,
	"PARTITIVE":      10,
	"PREPOSITIONAL":  11,
	"REFLEXIVE_CASE": 12,
	"RELATIVE_CASE":  13,
	"VOCATIVE":       14,
}
View Source
var PartOfSpeech_Form_name = map[int32]string{
	0:  "FORM_UNKNOWN",
	1:  "ADNOMIAL",
	2:  "AUXILIARY",
	3:  "COMPLEMENTIZER",
	4:  "FINAL_ENDING",
	5:  "GERUND",
	6:  "REALIS",
	7:  "IRREALIS",
	8:  "SHORT",
	9:  "LONG",
	10: "ORDER",
	11: "SPECIFIC",
}
View Source
var PartOfSpeech_Form_value = map[string]int32{
	"FORM_UNKNOWN":   0,
	"ADNOMIAL":       1,
	"AUXILIARY":      2,
	"COMPLEMENTIZER": 3,
	"FINAL_ENDING":   4,
	"GERUND":         5,
	"REALIS":         6,
	"IRREALIS":       7,
	"SHORT":          8,
	"LONG":           9,
	"ORDER":          10,
	"SPECIFIC":       11,
}
View Source
var PartOfSpeech_Gender_name = map[int32]string{
	0: "GENDER_UNKNOWN",
	1: "FEMININE",
	2: "MASCULINE",
	3: "NEUTER",
}
View Source
var PartOfSpeech_Gender_value = map[string]int32{
	"GENDER_UNKNOWN": 0,
	"FEMININE":       1,
	"MASCULINE":      2,
	"NEUTER":         3,
}
View Source
var PartOfSpeech_Mood_name = map[int32]string{
	0: "MOOD_UNKNOWN",
	1: "CONDITIONAL_MOOD",
	2: "IMPERATIVE",
	3: "INDICATIVE",
	4: "INTERROGATIVE",
	5: "JUSSIVE",
	6: "SUBJUNCTIVE",
}
View Source
var PartOfSpeech_Mood_value = map[string]int32{
	"MOOD_UNKNOWN":     0,
	"CONDITIONAL_MOOD": 1,
	"IMPERATIVE":       2,
	"INDICATIVE":       3,
	"INTERROGATIVE":    4,
	"JUSSIVE":          5,
	"SUBJUNCTIVE":      6,
}
View Source
var PartOfSpeech_Number_name = map[int32]string{
	0: "NUMBER_UNKNOWN",
	1: "SINGULAR",
	2: "PLURAL",
	3: "DUAL",
}
View Source
var PartOfSpeech_Number_value = map[string]int32{
	"NUMBER_UNKNOWN": 0,
	"SINGULAR":       1,
	"PLURAL":         2,
	"DUAL":           3,
}
View Source
var PartOfSpeech_Person_name = map[int32]string{
	0: "PERSON_UNKNOWN",
	1: "FIRST",
	2: "SECOND",
	3: "THIRD",
	4: "REFLEXIVE_PERSON",
}
View Source
var PartOfSpeech_Person_value = map[string]int32{
	"PERSON_UNKNOWN":   0,
	"FIRST":            1,
	"SECOND":           2,
	"THIRD":            3,
	"REFLEXIVE_PERSON": 4,
}
View Source
var PartOfSpeech_Proper_name = map[int32]string{
	0: "PROPER_UNKNOWN",
	1: "PROPER",
	2: "NOT_PROPER",
}
View Source
var PartOfSpeech_Proper_value = map[string]int32{
	"PROPER_UNKNOWN": 0,
	"PROPER":         1,
	"NOT_PROPER":     2,
}
View Source
var PartOfSpeech_Reciprocity_name = map[int32]string{
	0: "RECIPROCITY_UNKNOWN",
	1: "RECIPROCAL",
	2: "NON_RECIPROCAL",
}
View Source
var PartOfSpeech_Reciprocity_value = map[string]int32{
	"RECIPROCITY_UNKNOWN": 0,
	"RECIPROCAL":          1,
	"NON_RECIPROCAL":      2,
}
View Source
var PartOfSpeech_Tag_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "ADJ",
	2:  "ADP",
	3:  "ADV",
	4:  "CONJ",
	5:  "DET",
	6:  "NOUN",
	7:  "NUM",
	8:  "PRON",
	9:  "PRT",
	10: "PUNCT",
	11: "VERB",
	12: "X",
	13: "AFFIX",
}
View Source
var PartOfSpeech_Tag_value = map[string]int32{
	"UNKNOWN": 0,
	"ADJ":     1,
	"ADP":     2,
	"ADV":     3,
	"CONJ":    4,
	"DET":     5,
	"NOUN":    6,
	"NUM":     7,
	"PRON":    8,
	"PRT":     9,
	"PUNCT":   10,
	"VERB":    11,
	"X":       12,
	"AFFIX":   13,
}
View Source
var PartOfSpeech_Tense_name = map[int32]string{
	0: "TENSE_UNKNOWN",
	1: "CONDITIONAL_TENSE",
	2: "FUTURE",
	3: "PAST",
	4: "PRESENT",
	5: "IMPERFECT",
	6: "PLUPERFECT",
}
View Source
var PartOfSpeech_Tense_value = map[string]int32{
	"TENSE_UNKNOWN":     0,
	"CONDITIONAL_TENSE": 1,
	"FUTURE":            2,
	"PAST":              3,
	"PRESENT":           4,
	"IMPERFECT":         5,
	"PLUPERFECT":        6,
}
View Source
var PartOfSpeech_Voice_name = map[int32]string{
	0: "VOICE_UNKNOWN",
	1: "ACTIVE",
	2: "CAUSATIVE",
	3: "PASSIVE",
}
View Source
var PartOfSpeech_Voice_value = map[string]int32{
	"VOICE_UNKNOWN": 0,
	"ACTIVE":        1,
	"CAUSATIVE":     2,
	"PASSIVE":       3,
}

Functions

func RegisterLanguageServiceServer

func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer)

Types

type AnalyzeEntitiesRequest

type AnalyzeEntitiesRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `` /* 139-byte string literal not displayed */
}

The entity analysis request message.
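
A minimal sketch of assembling this request for inline plain text follows. The languagepb import alias and path are assumptions for illustration; adjust them to however this package is resolved in your module.

package main

import (
	"fmt"

	// Assumed import path and alias for this generated package.
	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1"
)

func main() {
	// Build an entity analysis request over inline plain text.
	req := &languagepb.AnalyzeEntitiesRequest{
		Document: &languagepb.Document{
			Type:   languagepb.Document_PLAIN_TEXT,
			Source: &languagepb.Document_Content{Content: "Ada Lovelace wrote the first published algorithm."},
		},
		// Go strings are UTF-8, so UTF8 keeps the returned offsets byte-accurate.
		EncodingType: languagepb.EncodingType_UTF8,
	}
	fmt.Println(req.GetDocument().GetContent())
}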

func (*AnalyzeEntitiesRequest) Descriptor

func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int)

func (*AnalyzeEntitiesRequest) GetDocument

func (m *AnalyzeEntitiesRequest) GetDocument() *Document

func (*AnalyzeEntitiesRequest) GetEncodingType

func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType

func (*AnalyzeEntitiesRequest) ProtoMessage

func (*AnalyzeEntitiesRequest) ProtoMessage()

func (*AnalyzeEntitiesRequest) Reset

func (m *AnalyzeEntitiesRequest) Reset()

func (*AnalyzeEntitiesRequest) String

func (m *AnalyzeEntitiesRequest) String() string

type AnalyzeEntitiesResponse

type AnalyzeEntitiesResponse struct {
	// The recognized entities in the input document.
	Entities []*Entity `protobuf:"bytes,1,rep,name=entities" json:"entities,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
	Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"`
}

The entity analysis response message.

func (*AnalyzeEntitiesResponse) Descriptor

func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int)

func (*AnalyzeEntitiesResponse) GetEntities

func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity

func (*AnalyzeEntitiesResponse) GetLanguage

func (m *AnalyzeEntitiesResponse) GetLanguage() string

func (*AnalyzeEntitiesResponse) ProtoMessage

func (*AnalyzeEntitiesResponse) ProtoMessage()

func (*AnalyzeEntitiesResponse) Reset

func (m *AnalyzeEntitiesResponse) Reset()

func (*AnalyzeEntitiesResponse) String

func (m *AnalyzeEntitiesResponse) String() string

type AnalyzeSentimentRequest

type AnalyzeSentimentRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"`
	// The encoding type used by the API to calculate sentence offsets for the
	// sentence sentiment.
	EncodingType EncodingType `` /* 139-byte string literal not displayed */
}

The sentiment analysis request message.

func (*AnalyzeSentimentRequest) Descriptor

func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int)

func (*AnalyzeSentimentRequest) GetDocument

func (m *AnalyzeSentimentRequest) GetDocument() *Document

func (*AnalyzeSentimentRequest) GetEncodingType

func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType

func (*AnalyzeSentimentRequest) ProtoMessage

func (*AnalyzeSentimentRequest) ProtoMessage()

func (*AnalyzeSentimentRequest) Reset

func (m *AnalyzeSentimentRequest) Reset()

func (*AnalyzeSentimentRequest) String

func (m *AnalyzeSentimentRequest) String() string

type AnalyzeSentimentResponse

type AnalyzeSentimentResponse struct {
	// The overall sentiment of the input document.
	DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
	Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"`
	// The sentiment for all the sentences in the document.
	Sentences []*Sentence `protobuf:"bytes,3,rep,name=sentences" json:"sentences,omitempty"`
}

The sentiment analysis response message.

func (*AnalyzeSentimentResponse) Descriptor

func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int)

func (*AnalyzeSentimentResponse) GetDocumentSentiment

func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment

func (*AnalyzeSentimentResponse) GetLanguage

func (m *AnalyzeSentimentResponse) GetLanguage() string

func (*AnalyzeSentimentResponse) GetSentences

func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence

func (*AnalyzeSentimentResponse) ProtoMessage

func (*AnalyzeSentimentResponse) ProtoMessage()

func (*AnalyzeSentimentResponse) Reset

func (m *AnalyzeSentimentResponse) Reset()

func (*AnalyzeSentimentResponse) String

func (m *AnalyzeSentimentResponse) String() string

type AnalyzeSyntaxRequest

type AnalyzeSyntaxRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `` /* 139-byte string literal not displayed */
}

The syntax analysis request message.

func (*AnalyzeSyntaxRequest) Descriptor

func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int)

func (*AnalyzeSyntaxRequest) GetDocument

func (m *AnalyzeSyntaxRequest) GetDocument() *Document

func (*AnalyzeSyntaxRequest) GetEncodingType

func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType

func (*AnalyzeSyntaxRequest) ProtoMessage

func (*AnalyzeSyntaxRequest) ProtoMessage()

func (*AnalyzeSyntaxRequest) Reset

func (m *AnalyzeSyntaxRequest) Reset()

func (*AnalyzeSyntaxRequest) String

func (m *AnalyzeSyntaxRequest) String() string

type AnalyzeSyntaxResponse

type AnalyzeSyntaxResponse struct {
	// Sentences in the input document.
	Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"`
	// Tokens, along with their syntactic information, in the input document.
	Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
	Language string `protobuf:"bytes,3,opt,name=language" json:"language,omitempty"`
}

The syntax analysis response message.

func (*AnalyzeSyntaxResponse) Descriptor

func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int)

func (*AnalyzeSyntaxResponse) GetLanguage

func (m *AnalyzeSyntaxResponse) GetLanguage() string

func (*AnalyzeSyntaxResponse) GetSentences

func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence

func (*AnalyzeSyntaxResponse) GetTokens

func (m *AnalyzeSyntaxResponse) GetTokens() []*Token

func (*AnalyzeSyntaxResponse) ProtoMessage

func (*AnalyzeSyntaxResponse) ProtoMessage()

func (*AnalyzeSyntaxResponse) Reset

func (m *AnalyzeSyntaxResponse) Reset()

func (*AnalyzeSyntaxResponse) String

func (m *AnalyzeSyntaxResponse) String() string

type AnnotateTextRequest

type AnnotateTextRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document" json:"document,omitempty"`
	// The enabled features.
	Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `` /* 139-byte string literal not displayed */
}

The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call.

func (*AnnotateTextRequest) Descriptor

func (*AnnotateTextRequest) Descriptor() ([]byte, []int)

func (*AnnotateTextRequest) GetDocument

func (m *AnnotateTextRequest) GetDocument() *Document

func (*AnnotateTextRequest) GetEncodingType

func (m *AnnotateTextRequest) GetEncodingType() EncodingType

func (*AnnotateTextRequest) GetFeatures

func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features
func (*AnnotateTextRequest) ProtoMessage

func (*AnnotateTextRequest) ProtoMessage()

func (*AnnotateTextRequest) Reset

func (m *AnnotateTextRequest) Reset()

func (*AnnotateTextRequest) String

func (m *AnnotateTextRequest) String() string

type AnnotateTextRequest_Features

type AnnotateTextRequest_Features struct {
	// Extract syntax information.
	ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax" json:"extract_syntax,omitempty"`
	// Extract entities.
	ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities" json:"extract_entities,omitempty"`
	// Extract document-level sentiment.
	ExtractDocumentSentiment bool `` /* 129-byte string literal not displayed */
}

All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input.
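
The sketch below, under the same assumed languagepb import, builds an AnnotateTextRequest that enables all three analyses in one call.

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

func main() {
	// Request syntax, entities, and document sentiment in a single AnnotateText call.
	req := &languagepb.AnnotateTextRequest{
		Document: &languagepb.Document{
			Type:   languagepb.Document_PLAIN_TEXT,
			Source: &languagepb.Document_Content{Content: "The quick brown fox jumps over the lazy dog."},
		},
		Features: &languagepb.AnnotateTextRequest_Features{
			ExtractSyntax:            true,
			ExtractEntities:          true,
			ExtractDocumentSentiment: true,
		},
		EncodingType: languagepb.EncodingType_UTF8,
	}
	fmt.Println(req.GetFeatures().GetExtractSyntax())
}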

func (*AnnotateTextRequest_Features) Descriptor

func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int)

func (*AnnotateTextRequest_Features) GetExtractDocumentSentiment

func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool

func (*AnnotateTextRequest_Features) GetExtractEntities

func (m *AnnotateTextRequest_Features) GetExtractEntities() bool

func (*AnnotateTextRequest_Features) GetExtractSyntax

func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool

func (*AnnotateTextRequest_Features) ProtoMessage

func (*AnnotateTextRequest_Features) ProtoMessage()

func (*AnnotateTextRequest_Features) Reset

func (m *AnnotateTextRequest_Features) Reset()

func (*AnnotateTextRequest_Features) String

func (m *AnnotateTextRequest_Features) String() string
type AnnotateTextResponse

type AnnotateTextResponse struct {
	// Sentences in the input document. Populated if the user enables
	// [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax].
	Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences" json:"sentences,omitempty"`
	// Tokens, along with their syntactic information, in the input document.
	// Populated if the user enables
	// [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_syntax].
	Tokens []*Token `protobuf:"bytes,2,rep,name=tokens" json:"tokens,omitempty"`
	// Entities, along with their semantic information, in the input document.
	// Populated if the user enables
	// [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_entities].
	Entities []*Entity `protobuf:"bytes,3,rep,name=entities" json:"entities,omitempty"`
	// The overall sentiment for the document. Populated if the user enables
	// [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment].
	DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment" json:"document_sentiment,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1beta1.Document.language] field for more details.
	Language string `protobuf:"bytes,5,opt,name=language" json:"language,omitempty"`
}

The text annotations response message.

func (*AnnotateTextResponse) Descriptor

func (*AnnotateTextResponse) Descriptor() ([]byte, []int)

func (*AnnotateTextResponse) GetDocumentSentiment

func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment

func (*AnnotateTextResponse) GetEntities

func (m *AnnotateTextResponse) GetEntities() []*Entity

func (*AnnotateTextResponse) GetLanguage

func (m *AnnotateTextResponse) GetLanguage() string

func (*AnnotateTextResponse) GetSentences

func (m *AnnotateTextResponse) GetSentences() []*Sentence

func (*AnnotateTextResponse) GetTokens

func (m *AnnotateTextResponse) GetTokens() []*Token

func (*AnnotateTextResponse) ProtoMessage

func (*AnnotateTextResponse) ProtoMessage()

func (*AnnotateTextResponse) Reset

func (m *AnnotateTextResponse) Reset()

func (*AnnotateTextResponse) String

func (m *AnnotateTextResponse) String() string

type DependencyEdge

type DependencyEdge struct {
	// Represents the head of this token in the dependency tree.
	// This is the index of the token which has an arc going to this token.
	// The index is the position of the token in the array of tokens returned
	// by the API method. If this token is a root token, then the
	// `head_token_index` is its own index.
	HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex" json:"head_token_index,omitempty"`
	// The parse label for the token.
	Label DependencyEdge_Label `protobuf:"varint,2,opt,name=label,enum=google.cloud.language.v1beta1.DependencyEdge_Label" json:"label,omitempty"`
}

Represents dependency parse tree information for a token.
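
As an illustration of how head_token_index and the parse label fit together, the following sketch prints the head of each token; the tiny hand-built token slice stands in for an AnalyzeSyntaxResponse, and the languagepb import alias is an assumption.

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

// printDependencies walks the flat token slice returned by the API and prints
// each token's head and parse label. A root token points at its own index.
func printDependencies(tokens []*languagepb.Token) {
	for i, tok := range tokens {
		edge := tok.GetDependencyEdge()
		head := edge.GetHeadTokenIndex()
		label := edge.GetLabel()
		if int32(i) == head || label == languagepb.DependencyEdge_ROOT {
			fmt.Printf("%d %q is a root (%s)\n", i, tok.GetText().GetContent(), label)
			continue
		}
		fmt.Printf("%d %q --%s--> %d %q\n",
			i, tok.GetText().GetContent(), label, head, tokens[head].GetText().GetContent())
	}
}

func main() {
	// Hand-built tokens standing in for an AnalyzeSyntaxResponse.
	tokens := []*languagepb.Token{
		{Text: &languagepb.TextSpan{Content: "Go"}, DependencyEdge: &languagepb.DependencyEdge{HeadTokenIndex: 1, Label: languagepb.DependencyEdge_NSUBJ}},
		{Text: &languagepb.TextSpan{Content: "rocks"}, DependencyEdge: &languagepb.DependencyEdge{HeadTokenIndex: 1, Label: languagepb.DependencyEdge_ROOT}},
	}
	printDependencies(tokens)
}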

func (*DependencyEdge) Descriptor

func (*DependencyEdge) Descriptor() ([]byte, []int)

func (*DependencyEdge) GetHeadTokenIndex

func (m *DependencyEdge) GetHeadTokenIndex() int32

func (*DependencyEdge) GetLabel

func (m *DependencyEdge) GetLabel() DependencyEdge_Label

func (*DependencyEdge) ProtoMessage

func (*DependencyEdge) ProtoMessage()

func (*DependencyEdge) Reset

func (m *DependencyEdge) Reset()

func (*DependencyEdge) String

func (m *DependencyEdge) String() string

type DependencyEdge_Label

type DependencyEdge_Label int32

The parse label enum for the token.

const (
	// Unknown
	DependencyEdge_UNKNOWN DependencyEdge_Label = 0
	// Abbreviation modifier
	DependencyEdge_ABBREV DependencyEdge_Label = 1
	// Adjectival complement
	DependencyEdge_ACOMP DependencyEdge_Label = 2
	// Adverbial clause modifier
	DependencyEdge_ADVCL DependencyEdge_Label = 3
	// Adverbial modifier
	DependencyEdge_ADVMOD DependencyEdge_Label = 4
	// Adjectival modifier of an NP
	DependencyEdge_AMOD DependencyEdge_Label = 5
	// Appositional modifier of an NP
	DependencyEdge_APPOS DependencyEdge_Label = 6
	// Attribute dependent of a copular verb
	DependencyEdge_ATTR DependencyEdge_Label = 7
	// Auxiliary (non-main) verb
	DependencyEdge_AUX DependencyEdge_Label = 8
	// Passive auxiliary
	DependencyEdge_AUXPASS DependencyEdge_Label = 9
	// Coordinating conjunction
	DependencyEdge_CC DependencyEdge_Label = 10
	// Clausal complement of a verb or adjective
	DependencyEdge_CCOMP DependencyEdge_Label = 11
	// Conjunct
	DependencyEdge_CONJ DependencyEdge_Label = 12
	// Clausal subject
	DependencyEdge_CSUBJ DependencyEdge_Label = 13
	// Clausal passive subject
	DependencyEdge_CSUBJPASS DependencyEdge_Label = 14
	// Dependency (unable to determine)
	DependencyEdge_DEP DependencyEdge_Label = 15
	// Determiner
	DependencyEdge_DET DependencyEdge_Label = 16
	// Discourse
	DependencyEdge_DISCOURSE DependencyEdge_Label = 17
	// Direct object
	DependencyEdge_DOBJ DependencyEdge_Label = 18
	// Expletive
	DependencyEdge_EXPL DependencyEdge_Label = 19
	// Goes with (part of a word in a text not well edited)
	DependencyEdge_GOESWITH DependencyEdge_Label = 20
	// Indirect object
	DependencyEdge_IOBJ DependencyEdge_Label = 21
	// Marker (word introducing a subordinate clause)
	DependencyEdge_MARK DependencyEdge_Label = 22
	// Multi-word expression
	DependencyEdge_MWE DependencyEdge_Label = 23
	// Multi-word verbal expression
	DependencyEdge_MWV DependencyEdge_Label = 24
	// Negation modifier
	DependencyEdge_NEG DependencyEdge_Label = 25
	// Noun compound modifier
	DependencyEdge_NN DependencyEdge_Label = 26
	// Noun phrase used as an adverbial modifier
	DependencyEdge_NPADVMOD DependencyEdge_Label = 27
	// Nominal subject
	DependencyEdge_NSUBJ DependencyEdge_Label = 28
	// Passive nominal subject
	DependencyEdge_NSUBJPASS DependencyEdge_Label = 29
	// Numeric modifier of a noun
	DependencyEdge_NUM DependencyEdge_Label = 30
	// Element of compound number
	DependencyEdge_NUMBER DependencyEdge_Label = 31
	// Punctuation mark
	DependencyEdge_P DependencyEdge_Label = 32
	// Parataxis relation
	DependencyEdge_PARATAXIS DependencyEdge_Label = 33
	// Participial modifier
	DependencyEdge_PARTMOD DependencyEdge_Label = 34
	// The complement of a preposition is a clause
	DependencyEdge_PCOMP DependencyEdge_Label = 35
	// Object of a preposition
	DependencyEdge_POBJ DependencyEdge_Label = 36
	// Possession modifier
	DependencyEdge_POSS DependencyEdge_Label = 37
	// Postverbal negative particle
	DependencyEdge_POSTNEG DependencyEdge_Label = 38
	// Predicate complement
	DependencyEdge_PRECOMP DependencyEdge_Label = 39
	// Preconjunct
	DependencyEdge_PRECONJ DependencyEdge_Label = 40
	// Predeterminer
	DependencyEdge_PREDET DependencyEdge_Label = 41
	// Prefix
	DependencyEdge_PREF DependencyEdge_Label = 42
	// Prepositional modifier
	DependencyEdge_PREP DependencyEdge_Label = 43
	// The relationship between a verb and verbal morpheme
	DependencyEdge_PRONL DependencyEdge_Label = 44
	// Particle
	DependencyEdge_PRT DependencyEdge_Label = 45
	// Associative or possessive marker
	DependencyEdge_PS DependencyEdge_Label = 46
	// Quantifier phrase modifier
	DependencyEdge_QUANTMOD DependencyEdge_Label = 47
	// Relative clause modifier
	DependencyEdge_RCMOD DependencyEdge_Label = 48
	// Complementizer in relative clause
	DependencyEdge_RCMODREL DependencyEdge_Label = 49
	// Ellipsis without a preceding predicate
	DependencyEdge_RDROP DependencyEdge_Label = 50
	// Referent
	DependencyEdge_REF DependencyEdge_Label = 51
	// Remnant
	DependencyEdge_REMNANT DependencyEdge_Label = 52
	// Reparandum
	DependencyEdge_REPARANDUM DependencyEdge_Label = 53
	// Root
	DependencyEdge_ROOT DependencyEdge_Label = 54
	// Suffix specifying a unit of number
	DependencyEdge_SNUM DependencyEdge_Label = 55
	// Suffix
	DependencyEdge_SUFF DependencyEdge_Label = 56
	// Temporal modifier
	DependencyEdge_TMOD DependencyEdge_Label = 57
	// Topic marker
	DependencyEdge_TOPIC DependencyEdge_Label = 58
	// Clause headed by a non-finite form of the verb that modifies a noun
	DependencyEdge_VMOD DependencyEdge_Label = 59
	// Vocative
	DependencyEdge_VOCATIVE DependencyEdge_Label = 60
	// Open clausal complement
	DependencyEdge_XCOMP DependencyEdge_Label = 61
	// Name suffix
	DependencyEdge_SUFFIX DependencyEdge_Label = 62
	// Name title
	DependencyEdge_TITLE DependencyEdge_Label = 63
	// Adverbial phrase modifier
	DependencyEdge_ADVPHMOD DependencyEdge_Label = 64
	// Causative auxiliary
	DependencyEdge_AUXCAUS DependencyEdge_Label = 65
	// Helper auxiliary
	DependencyEdge_AUXVV DependencyEdge_Label = 66
	// Rentaishi (Prenominal modifier)
	DependencyEdge_DTMOD DependencyEdge_Label = 67
	// Foreign words
	DependencyEdge_FOREIGN DependencyEdge_Label = 68
	// Keyword
	DependencyEdge_KW DependencyEdge_Label = 69
	// List for chains of comparable items
	DependencyEdge_LIST DependencyEdge_Label = 70
	// Nominalized clause
	DependencyEdge_NOMC DependencyEdge_Label = 71
	// Nominalized clausal subject
	DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72
	// Nominalized clausal passive
	DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73
	// Compound of numeric modifier
	DependencyEdge_NUMC DependencyEdge_Label = 74
	// Copula
	DependencyEdge_COP DependencyEdge_Label = 75
	// Dislocated relation (for fronted/topicalized elements)
	DependencyEdge_DISLOCATED DependencyEdge_Label = 76
)

func (DependencyEdge_Label) EnumDescriptor

func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int)

func (DependencyEdge_Label) String

func (x DependencyEdge_Label) String() string

type Document

type Document struct {
	// Required. If the type is not set or is `TYPE_UNSPECIFIED`,
	// returns an `INVALID_ARGUMENT` error.
	Type Document_Type `protobuf:"varint,1,opt,name=type,enum=google.cloud.language.v1beta1.Document_Type" json:"type,omitempty"`
	// The source of the document: a string containing the content or a
	// Google Cloud Storage URI.
	//
	// Types that are valid to be assigned to Source:
	//	*Document_Content
	//	*Document_GcsContentUri
	Source isDocument_Source `protobuf_oneof:"source"`
	// The language of the document (if not specified, the language is
	// automatically detected). Both ISO and BCP-47 language codes are
	// accepted.<br>
	// [Language Support](https://cloud.google.com/natural-language/docs/languages)
	// lists currently supported languages for each API method.
	// If the language (either specified by the caller or automatically detected)
	// is not supported by the called API method, an `INVALID_ARGUMENT` error
	// is returned.
	Language string `protobuf:"bytes,4,opt,name=language" json:"language,omitempty"`
}


Represents the input to API methods.
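
A short sketch of the two Source variants, again under an assumed languagepb import; the bucket URI is a made-up placeholder.

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

func main() {
	// Inline content: the Source oneof holds a Document_Content wrapper.
	inline := &languagepb.Document{
		Type:   languagepb.Document_PLAIN_TEXT,
		Source: &languagepb.Document_Content{Content: "Bonjour le monde."},
		// Optional; omit it to let the API detect the language.
		Language: "fr",
	}

	// Cloud Storage content: the same oneof holds a Document_GcsContentUri wrapper.
	stored := &languagepb.Document{
		Type:   languagepb.Document_HTML,
		Source: &languagepb.Document_GcsContentUri{GcsContentUri: "gs://my-bucket/page.html"},
	}

	// The generated getters return the zero value when the other variant is set.
	fmt.Println(inline.GetContent(), stored.GetGcsContentUri())
}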

func (*Document) Descriptor

func (*Document) Descriptor() ([]byte, []int)

func (*Document) GetContent

func (m *Document) GetContent() string

func (*Document) GetGcsContentUri

func (m *Document) GetGcsContentUri() string

func (*Document) GetLanguage

func (m *Document) GetLanguage() string

func (*Document) GetSource

func (m *Document) GetSource() isDocument_Source

func (*Document) GetType

func (m *Document) GetType() Document_Type

func (*Document) ProtoMessage

func (*Document) ProtoMessage()

func (*Document) Reset

func (m *Document) Reset()

func (*Document) String

func (m *Document) String() string

func (*Document) XXX_OneofFuncs

func (*Document) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type Document_Content

type Document_Content struct {
	Content string `protobuf:"bytes,2,opt,name=content,oneof"`
}

type Document_GcsContentUri

type Document_GcsContentUri struct {
	GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,oneof"`
}

type Document_Type

type Document_Type int32

The document types enum.

const (
	// The content type is not specified.
	Document_TYPE_UNSPECIFIED Document_Type = 0
	// Plain text
	Document_PLAIN_TEXT Document_Type = 1
	// HTML
	Document_HTML Document_Type = 2
)

func (Document_Type) EnumDescriptor

func (Document_Type) EnumDescriptor() ([]byte, []int)

func (Document_Type) String

func (x Document_Type) String() string

type EncodingType

type EncodingType int32

Represents the text encoding that the caller uses to process the output. Providing an `EncodingType` is recommended because the API provides the beginning offsets for various outputs, such as tokens and mentions, and languages that natively use different text encodings may access offsets differently.

const (
	// If `EncodingType` is not specified, encoding-dependent information (such as
	// `begin_offset`) will be set at `-1`.
	EncodingType_NONE EncodingType = 0
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-8 encoding of the input. C++ and Go are examples of languages
	// that use this encoding natively.
	EncodingType_UTF8 EncodingType = 1
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-16 encoding of the input. Java and JavaScript are examples of
	// languages that use this encoding natively.
	EncodingType_UTF16 EncodingType = 2
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-32 encoding of the input. Python is an example of a language
	// that uses this encoding natively.
	EncodingType_UTF32 EncodingType = 3
)
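
Since Go strings are UTF-8, requesting EncodingType_UTF8 makes begin_offset a plain byte offset into the original text. A small sketch of that correspondence, with an assumed languagepb import and a hand-picked span:

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

// mentionTail returns the original text from a span's begin offset onward.
// With EncodingType_UTF8 the offset counts bytes, which matches Go string
// indexing directly; with UTF16 or UTF32 it would need conversion first.
func mentionTail(original string, span *languagepb.TextSpan) string {
	off := int(span.GetBeginOffset())
	if off < 0 || off > len(original) { // -1 means no encoding type was set
		return ""
	}
	return original[off:]
}

func main() {
	text := "Grace Hopper popularized the term debugging."
	span := &languagepb.TextSpan{Content: "debugging", BeginOffset: 34}
	fmt.Println(mentionTail(text, span))
}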

func (EncodingType) EnumDescriptor

func (EncodingType) EnumDescriptor() ([]byte, []int)

func (EncodingType) String

func (x EncodingType) String() string

type Entity

type Entity struct {
	// The representative name for the entity.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// The entity type.
	Type Entity_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta1.Entity_Type" json:"type,omitempty"`
	// Metadata associated with the entity.
	//
	// Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
	// available. The associated keys are "wikipedia_url" and "mid", respectively.
	Metadata map[string]string `` /* 136-byte string literal not displayed */
	// The salience score associated with the entity in the [0, 1.0] range.
	//
	// The salience score for an entity provides information about the
	// importance or centrality of that entity to the entire document text.
	// Scores closer to 0 are less salient, while scores closer to 1.0 are highly
	// salient.
	Salience float32 `protobuf:"fixed32,4,opt,name=salience" json:"salience,omitempty"`
	// The mentions of this entity in the input document. The API currently
	// supports proper noun mentions.
	Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions" json:"mentions,omitempty"`
}

Represents a phrase in the text that is a known entity, such as a person, an organization, or a location. The API associates information, such as salience and mentions, with entities.
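
A sketch of reading these fields from a response, using a hand-built AnalyzeEntitiesResponse and an assumed languagepb import:

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

// summarizeEntities prints each entity with its type, salience, and the
// Wikipedia URL from Metadata when the API supplied one.
func summarizeEntities(resp *languagepb.AnalyzeEntitiesResponse) {
	for _, e := range resp.GetEntities() {
		fmt.Printf("%s (%s) salience=%.2f", e.GetName(), e.GetType(), e.GetSalience())
		if url, ok := e.GetMetadata()["wikipedia_url"]; ok {
			fmt.Printf(" %s", url)
		}
		fmt.Println()
	}
}

func main() {
	// Hand-built response standing in for an actual API result.
	resp := &languagepb.AnalyzeEntitiesResponse{
		Entities: []*languagepb.Entity{{
			Name:     "Google",
			Type:     languagepb.Entity_ORGANIZATION,
			Salience: 0.9,
			Metadata: map[string]string{"wikipedia_url": "https://en.wikipedia.org/wiki/Google"},
		}},
		Language: "en",
	}
	summarizeEntities(resp)
}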

func (*Entity) Descriptor

func (*Entity) Descriptor() ([]byte, []int)

func (*Entity) GetMentions

func (m *Entity) GetMentions() []*EntityMention

func (*Entity) GetMetadata

func (m *Entity) GetMetadata() map[string]string

func (*Entity) GetName

func (m *Entity) GetName() string

func (*Entity) GetSalience

func (m *Entity) GetSalience() float32

func (*Entity) GetType

func (m *Entity) GetType() Entity_Type

func (*Entity) ProtoMessage

func (*Entity) ProtoMessage()

func (*Entity) Reset

func (m *Entity) Reset()

func (*Entity) String

func (m *Entity) String() string

type EntityMention

type EntityMention struct {
	// The mention text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"`
	// The type of the entity mention.
	Type EntityMention_Type `protobuf:"varint,2,opt,name=type,enum=google.cloud.language.v1beta1.EntityMention_Type" json:"type,omitempty"`
}

Represents a mention for an entity in the text. Currently, proper noun mentions are supported.

func (*EntityMention) Descriptor

func (*EntityMention) Descriptor() ([]byte, []int)

func (*EntityMention) GetText

func (m *EntityMention) GetText() *TextSpan

func (*EntityMention) GetType

func (m *EntityMention) GetType() EntityMention_Type

func (*EntityMention) ProtoMessage

func (*EntityMention) ProtoMessage()

func (*EntityMention) Reset

func (m *EntityMention) Reset()

func (*EntityMention) String

func (m *EntityMention) String() string

type EntityMention_Type

type EntityMention_Type int32

The supported types of mentions.

const (
	// Unknown
	EntityMention_TYPE_UNKNOWN EntityMention_Type = 0
	// Proper name
	EntityMention_PROPER EntityMention_Type = 1
	// Common noun (or noun compound)
	EntityMention_COMMON EntityMention_Type = 2
)

func (EntityMention_Type) EnumDescriptor

func (EntityMention_Type) EnumDescriptor() ([]byte, []int)

func (EntityMention_Type) String

func (x EntityMention_Type) String() string

type Entity_Type

type Entity_Type int32

The type of the entity.

const (
	// Unknown
	Entity_UNKNOWN Entity_Type = 0
	// Person
	Entity_PERSON Entity_Type = 1
	// Location
	Entity_LOCATION Entity_Type = 2
	// Organization
	Entity_ORGANIZATION Entity_Type = 3
	// Event
	Entity_EVENT Entity_Type = 4
	// Work of art
	Entity_WORK_OF_ART Entity_Type = 5
	// Consumer goods
	Entity_CONSUMER_GOOD Entity_Type = 6
	// Other types
	Entity_OTHER Entity_Type = 7
)

func (Entity_Type) EnumDescriptor

func (Entity_Type) EnumDescriptor() ([]byte, []int)

func (Entity_Type) String

func (x Entity_Type) String() string

type LanguageServiceClient

type LanguageServiceClient interface {
	// Analyzes the sentiment of the provided text.
	AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error)
	// Finds named entities (currently proper names and common nouns) in the text
	// along with entity types, salience, mentions for each entity, and
	// other properties.
	AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error)
	// Analyzes the syntax of the text and provides sentence boundaries and
	// tokenization along with part of speech tags, dependency trees, and other
	// properties.
	AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error)
	// A convenience method that provides all the features that analyzeSentiment,
	// analyzeEntities, and analyzeSyntax provide in one call.
	AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error)
}

func NewLanguageServiceClient

func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient
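
A hedged usage sketch: dial the Cloud Natural Language endpoint, build a client, and run one sentiment call. The endpoint name and languagepb import alias are assumptions, and the OAuth per-RPC credentials a real call needs are omitted for brevity.

package main

import (
	"context"
	"fmt"
	"log"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// NOTE: authentication (OAuth per-RPC credentials) is omitted here;
	// real calls to the Cloud Natural Language endpoint require it.
	conn, err := grpc.Dial("language.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := languagepb.NewLanguageServiceClient(conn)
	resp, err := client.AnalyzeSentiment(context.Background(), &languagepb.AnalyzeSentimentRequest{
		Document: &languagepb.Document{
			Type:   languagepb.Document_PLAIN_TEXT,
			Source: &languagepb.Document_Content{Content: "I love generated clients."},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("score=%.2f magnitude=%.2f\n",
		resp.GetDocumentSentiment().GetScore(), resp.GetDocumentSentiment().GetMagnitude())
}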

type LanguageServiceServer

type LanguageServiceServer interface {
	// Analyzes the sentiment of the provided text.
	AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error)
	// Finds named entities (currently proper names and common nouns) in the text
	// along with entity types, salience, mentions for each entity, and
	// other properties.
	AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error)
	// Analyzes the syntax of the text and provides sentence boundaries and
	// tokenization along with part of speech tags, dependency trees, and other
	// properties.
	AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error)
	// A convenience method that provides all the features that analyzeSentiment,
	// analyzeEntities, and analyzeSyntax provide in one call.
	AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error)
}
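
A sketch of satisfying this interface with a local stub and registering it via RegisterLanguageServiceServer, for example in tests of code that talks to the client interface; the stub behaviour and languagepb import alias are assumptions.

package main

import (
	"context"
	"log"
	"net"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// stubServer satisfies LanguageServiceServer with placeholder behaviour.
type stubServer struct{}

func (stubServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) {
	// Fixed, made-up sentiment so callers have something to assert on.
	return &languagepb.AnalyzeSentimentResponse{
		DocumentSentiment: &languagepb.Sentiment{Score: 0.5, Magnitude: 0.5},
		Language:          "en",
	}, nil
}

func (stubServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) {
	return nil, status.Error(codes.Unimplemented, "AnalyzeEntities not implemented")
}

func (stubServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) {
	return nil, status.Error(codes.Unimplemented, "AnalyzeSyntax not implemented")
}

func (stubServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) {
	return nil, status.Error(codes.Unimplemented, "AnnotateText not implemented")
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	languagepb.RegisterLanguageServiceServer(s, stubServer{})
	log.Printf("serving on %s", lis.Addr())
	log.Fatal(s.Serve(lis))
}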

type PartOfSpeech

type PartOfSpeech struct {
	// The part of speech tag.
	Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,enum=google.cloud.language.v1beta1.PartOfSpeech_Tag" json:"tag,omitempty"`
	// The grammatical aspect.
	Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,enum=google.cloud.language.v1beta1.PartOfSpeech_Aspect" json:"aspect,omitempty"`
	// The grammatical case.
	Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,enum=google.cloud.language.v1beta1.PartOfSpeech_Case" json:"case,omitempty"`
	// The grammatical form.
	Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,enum=google.cloud.language.v1beta1.PartOfSpeech_Form" json:"form,omitempty"`
	// The grammatical gender.
	Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,enum=google.cloud.language.v1beta1.PartOfSpeech_Gender" json:"gender,omitempty"`
	// The grammatical mood.
	Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,enum=google.cloud.language.v1beta1.PartOfSpeech_Mood" json:"mood,omitempty"`
	// The grammatical number.
	Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,enum=google.cloud.language.v1beta1.PartOfSpeech_Number" json:"number,omitempty"`
	// The grammatical person.
	Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,enum=google.cloud.language.v1beta1.PartOfSpeech_Person" json:"person,omitempty"`
	// The grammatical properness.
	Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,enum=google.cloud.language.v1beta1.PartOfSpeech_Proper" json:"proper,omitempty"`
	// The grammatical reciprocity.
	Reciprocity PartOfSpeech_Reciprocity `` /* 130-byte string literal not displayed */
	// The grammatical tense.
	Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,enum=google.cloud.language.v1beta1.PartOfSpeech_Tense" json:"tense,omitempty"`
	// The grammatical voice.
	Voice PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,enum=google.cloud.language.v1beta1.PartOfSpeech_Voice" json:"voice,omitempty"`
}

Represents part of speech information for a token.

func (*PartOfSpeech) Descriptor

func (*PartOfSpeech) Descriptor() ([]byte, []int)

func (*PartOfSpeech) GetAspect

func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect

func (*PartOfSpeech) GetCase

func (m *PartOfSpeech) GetCase() PartOfSpeech_Case

func (*PartOfSpeech) GetForm

func (m *PartOfSpeech) GetForm() PartOfSpeech_Form

func (*PartOfSpeech) GetGender

func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender

func (*PartOfSpeech) GetMood

func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood

func (*PartOfSpeech) GetNumber

func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number

func (*PartOfSpeech) GetPerson

func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person

func (*PartOfSpeech) GetProper

func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper

func (*PartOfSpeech) GetReciprocity

func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity

func (*PartOfSpeech) GetTag

func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag

func (*PartOfSpeech) GetTense

func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense

func (*PartOfSpeech) GetVoice

func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice

func (*PartOfSpeech) ProtoMessage

func (*PartOfSpeech) ProtoMessage()

func (*PartOfSpeech) Reset

func (m *PartOfSpeech) Reset()

func (*PartOfSpeech) String

func (m *PartOfSpeech) String() string

type PartOfSpeech_Aspect

type PartOfSpeech_Aspect int32

The characteristic of a verb that expresses time flow during an event.

const (
	// Aspect is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0
	// Perfective
	PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1
	// Imperfective
	PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2
	// Progressive
	PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3
)

func (PartOfSpeech_Aspect) EnumDescriptor

func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Aspect) String

func (x PartOfSpeech_Aspect) String() string

type PartOfSpeech_Case

type PartOfSpeech_Case int32

The grammatical function performed by a noun or pronoun in a phrase, clause, or sentence. In some languages, other parts of speech, such as adjective and determiner, take case inflection in agreement with the noun.

const (
	// Case is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0
	// Accusative
	PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1
	// Adverbial
	PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2
	// Complementive
	PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3
	// Dative
	PartOfSpeech_DATIVE PartOfSpeech_Case = 4
	// Genitive
	PartOfSpeech_GENITIVE PartOfSpeech_Case = 5
	// Instrumental
	PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6
	// Locative
	PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7
	// Nominative
	PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8
	// Oblique
	PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9
	// Partitive
	PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10
	// Prepositional
	PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11
	// Reflexive
	PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12
	// Relative
	PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13
	// Vocative
	PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14
)

func (PartOfSpeech_Case) EnumDescriptor

func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Case) String

func (x PartOfSpeech_Case) String() string

type PartOfSpeech_Form

type PartOfSpeech_Form int32

Depending on the language, Form can categorize different forms of verbs, adjectives, adverbs, etc. For example, it can categorize inflected endings of verbs and adjectives, or distinguish between short and long forms of adjectives and participles.

const (
	// Form is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0
	// Adnomial
	PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1
	// Auxiliary
	PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2
	// Complementizer
	PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3
	// Final ending
	PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4
	// Gerund
	PartOfSpeech_GERUND PartOfSpeech_Form = 5
	// Realis
	PartOfSpeech_REALIS PartOfSpeech_Form = 6
	// Irrealis
	PartOfSpeech_IRREALIS PartOfSpeech_Form = 7
	// Short form
	PartOfSpeech_SHORT PartOfSpeech_Form = 8
	// Long form
	PartOfSpeech_LONG PartOfSpeech_Form = 9
	// Order form
	PartOfSpeech_ORDER PartOfSpeech_Form = 10
	// Specific form
	PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11
)

func (PartOfSpeech_Form) EnumDescriptor

func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Form) String

func (x PartOfSpeech_Form) String() string

type PartOfSpeech_Gender

type PartOfSpeech_Gender int32

Gender classes of nouns reflected in the behaviour of associated words.

const (
	// Gender is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0
	// Feminine
	PartOfSpeech_FEMININE PartOfSpeech_Gender = 1
	// Masculine
	PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2
	// Neuter
	PartOfSpeech_NEUTER PartOfSpeech_Gender = 3
)

func (PartOfSpeech_Gender) EnumDescriptor

func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Gender) String

func (x PartOfSpeech_Gender) String() string

type PartOfSpeech_Mood

type PartOfSpeech_Mood int32

The grammatical feature of verbs, used for showing modality and attitude.

const (
	// Mood is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0
	// Conditional
	PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1
	// Imperative
	PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2
	// Indicative
	PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3
	// Interrogative
	PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4
	// Jussive
	PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5
	// Subjunctive
	PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6
)

func (PartOfSpeech_Mood) EnumDescriptor

func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Mood) String

func (x PartOfSpeech_Mood) String() string

type PartOfSpeech_Number

type PartOfSpeech_Number int32

Count distinctions.

const (
	// Number is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0
	// Singular
	PartOfSpeech_SINGULAR PartOfSpeech_Number = 1
	// Plural
	PartOfSpeech_PLURAL PartOfSpeech_Number = 2
	// Dual
	PartOfSpeech_DUAL PartOfSpeech_Number = 3
)

func (PartOfSpeech_Number) EnumDescriptor

func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Number) String

func (x PartOfSpeech_Number) String() string

type PartOfSpeech_Person

type PartOfSpeech_Person int32

The distinction between the speaker, second person, third person, etc.

const (
	// Person is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0
	// First
	PartOfSpeech_FIRST PartOfSpeech_Person = 1
	// Second
	PartOfSpeech_SECOND PartOfSpeech_Person = 2
	// Third
	PartOfSpeech_THIRD PartOfSpeech_Person = 3
	// Reflexive
	PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4
)

func (PartOfSpeech_Person) EnumDescriptor

func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Person) String

func (x PartOfSpeech_Person) String() string

type PartOfSpeech_Proper

type PartOfSpeech_Proper int32

This category shows if the token is part of a proper name.

const (
	// Proper is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0
	// Proper
	PartOfSpeech_PROPER PartOfSpeech_Proper = 1
	// Not proper
	PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2
)

func (PartOfSpeech_Proper) EnumDescriptor

func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Proper) String

func (x PartOfSpeech_Proper) String() string

type PartOfSpeech_Reciprocity

type PartOfSpeech_Reciprocity int32

Reciprocal features of a pronoun.

const (
	// Reciprocity is not applicable in the analyzed language or is not
	// predicted.
	PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0
	// Reciprocal
	PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1
	// Non-reciprocal
	PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2
)

func (PartOfSpeech_Reciprocity) EnumDescriptor

func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Reciprocity) String

func (x PartOfSpeech_Reciprocity) String() string

type PartOfSpeech_Tag

type PartOfSpeech_Tag int32

The part of speech tags enum.

const (
	// Unknown
	PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0
	// Adjective
	PartOfSpeech_ADJ PartOfSpeech_Tag = 1
	// Adposition (preposition and postposition)
	PartOfSpeech_ADP PartOfSpeech_Tag = 2
	// Adverb
	PartOfSpeech_ADV PartOfSpeech_Tag = 3
	// Conjunction
	PartOfSpeech_CONJ PartOfSpeech_Tag = 4
	// Determiner
	PartOfSpeech_DET PartOfSpeech_Tag = 5
	// Noun (common and proper)
	PartOfSpeech_NOUN PartOfSpeech_Tag = 6
	// Cardinal number
	PartOfSpeech_NUM PartOfSpeech_Tag = 7
	// Pronoun
	PartOfSpeech_PRON PartOfSpeech_Tag = 8
	// Particle or other function word
	PartOfSpeech_PRT PartOfSpeech_Tag = 9
	// Punctuation
	PartOfSpeech_PUNCT PartOfSpeech_Tag = 10
	// Verb (all tenses and modes)
	PartOfSpeech_VERB PartOfSpeech_Tag = 11
	// Other: foreign words, typos, abbreviations
	PartOfSpeech_X PartOfSpeech_Tag = 12
	// Affix
	PartOfSpeech_AFFIX PartOfSpeech_Tag = 13
)

func (PartOfSpeech_Tag) EnumDescriptor

func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Tag) String

func (x PartOfSpeech_Tag) String() string

type PartOfSpeech_Tense

type PartOfSpeech_Tense int32

Time reference.

const (
	// Tense is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0
	// Conditional
	PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1
	// Future
	PartOfSpeech_FUTURE PartOfSpeech_Tense = 2
	// Past
	PartOfSpeech_PAST PartOfSpeech_Tense = 3
	// Present
	PartOfSpeech_PRESENT PartOfSpeech_Tense = 4
	// Imperfect
	PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5
	// Pluperfect
	PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6
)

func (PartOfSpeech_Tense) EnumDescriptor

func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Tense) String

func (x PartOfSpeech_Tense) String() string

type PartOfSpeech_Voice

type PartOfSpeech_Voice int32

The relationship between the action that a verb expresses and the participants identified by its arguments.

const (
	// Voice is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0
	// Active
	PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1
	// Causative
	PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2
	// Passive
	PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3
)

func (PartOfSpeech_Voice) EnumDescriptor

func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Voice) String

func (x PartOfSpeech_Voice) String() string

type Sentence

type Sentence struct {
	// The sentence text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"`
	// For calls to [AnalyzeSentiment][] or if
	// [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
	// true, this field will contain the sentiment for the sentence.
	Sentiment *Sentiment `protobuf:"bytes,2,opt,name=sentiment" json:"sentiment,omitempty"`
}

Represents a sentence in the input document.

func (*Sentence) Descriptor

func (*Sentence) Descriptor() ([]byte, []int)

func (*Sentence) GetSentiment

func (m *Sentence) GetSentiment() *Sentiment

func (*Sentence) GetText

func (m *Sentence) GetText() *TextSpan

func (*Sentence) ProtoMessage

func (*Sentence) ProtoMessage()

func (*Sentence) Reset

func (m *Sentence) Reset()

func (*Sentence) String

func (m *Sentence) String() string

type Sentiment

type Sentiment struct {
	// DEPRECATED FIELD - This field is being deprecated in
	// favor of score. Please refer to our documentation at
	// https://cloud.google.com/natural-language/docs for more information.
	Polarity float32 `protobuf:"fixed32,1,opt,name=polarity" json:"polarity,omitempty"`
	// A non-negative number in the [0, +inf) range, which represents
	// the absolute magnitude of sentiment regardless of score (positive or
	// negative).
	Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude" json:"magnitude,omitempty"`
	// Sentiment score between -1.0 (negative sentiment) and 1.0
	// (positive sentiment).
	Score float32 `protobuf:"fixed32,3,opt,name=score" json:"score,omitempty"`
}

Represents the feeling associated with the entire text or entities in the text.
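
A rough sketch of interpreting score and magnitude together; the cutoff values are illustrative assumptions, not part of the API.

package main

import (
	"fmt"

	languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta1" // assumed import path
)

// describe turns score and magnitude into a rough label. The thresholds are
// illustrative only; choose values that suit your own data.
func describe(s *languagepb.Sentiment) string {
	switch {
	case s.GetScore() > 0.25:
		return "positive"
	case s.GetScore() < -0.25:
		return "negative"
	case s.GetMagnitude() > 1.0:
		return "mixed" // near-zero score but strong signals in both directions
	default:
		return "neutral"
	}
}

func main() {
	fmt.Println(describe(&languagepb.Sentiment{Score: 0.8, Magnitude: 1.6}))
	fmt.Println(describe(&languagepb.Sentiment{Score: -0.05, Magnitude: 2.1}))
}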

func (*Sentiment) Descriptor

func (*Sentiment) Descriptor() ([]byte, []int)

func (*Sentiment) GetMagnitude

func (m *Sentiment) GetMagnitude() float32

func (*Sentiment) GetPolarity

func (m *Sentiment) GetPolarity() float32

func (*Sentiment) GetScore

func (m *Sentiment) GetScore() float32

func (*Sentiment) ProtoMessage

func (*Sentiment) ProtoMessage()

func (*Sentiment) Reset

func (m *Sentiment) Reset()

func (*Sentiment) String

func (m *Sentiment) String() string

type TextSpan

type TextSpan struct {
	// The content of the output text.
	Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"`
	// The API calculates the beginning offset of the content in the original
	// document according to the [EncodingType][google.cloud.language.v1beta1.EncodingType] specified in the API request.
	BeginOffset int32 `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset" json:"begin_offset,omitempty"`
}

Represents an output piece of text.

func (*TextSpan) Descriptor

func (*TextSpan) Descriptor() ([]byte, []int)

func (*TextSpan) GetBeginOffset

func (m *TextSpan) GetBeginOffset() int32

func (*TextSpan) GetContent

func (m *TextSpan) GetContent() string

func (*TextSpan) ProtoMessage

func (*TextSpan) ProtoMessage()

func (*TextSpan) Reset

func (m *TextSpan) Reset()

func (*TextSpan) String

func (m *TextSpan) String() string

type Token

type Token struct {
	// The token text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text" json:"text,omitempty"`
	// Parts of speech tag for this token.
	PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech" json:"part_of_speech,omitempty"`
	// Dependency tree parse for this token.
	DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge" json:"dependency_edge,omitempty"`
	// [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
	Lemma string `protobuf:"bytes,4,opt,name=lemma" json:"lemma,omitempty"`
}

Represents the smallest syntactic building block of the text.

func (*Token) Descriptor

func (*Token) Descriptor() ([]byte, []int)

func (*Token) GetDependencyEdge

func (m *Token) GetDependencyEdge() *DependencyEdge

func (*Token) GetLemma

func (m *Token) GetLemma() string

func (*Token) GetPartOfSpeech

func (m *Token) GetPartOfSpeech() *PartOfSpeech

func (*Token) GetText

func (m *Token) GetText() *TextSpan

func (*Token) ProtoMessage

func (*Token) ProtoMessage()

func (*Token) Reset

func (m *Token) Reset()

func (*Token) String

func (m *Token) String() string
