| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/language/v1/language_service.proto |
| |
| package language // import "google.golang.org/genproto/googleapis/cloud/language/v1" |
| |
| import proto "github.com/golang/protobuf/proto" |
| import fmt "fmt" |
| import math "math" |
| import _ "google.golang.org/genproto/googleapis/api/annotations" |
| |
| import ( |
| context "golang.org/x/net/context" |
| grpc "google.golang.org/grpc" |
| ) |
| |
// Reference imports to suppress errors if they are not otherwise used.
// (Generated guard: keeps the imports above valid even if no message in
// this file happens to use them directly.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
| |
| // Represents the text encoding that the caller uses to process the output. |
| // Providing an `EncodingType` is recommended because the API provides the |
| // beginning offsets for various outputs, such as tokens and mentions, and |
| // languages that natively use different text encodings may access offsets |
| // differently. |
type EncodingType int32

const (
	// If `EncodingType` is not specified, encoding-dependent information (such as
	// `begin_offset`) will be set at `-1`.
	EncodingType_NONE EncodingType = 0
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-8 encoding of the input. C++ and Go are examples of languages
	// that use this encoding natively.
	EncodingType_UTF8 EncodingType = 1
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-16 encoding of the input. Java and Javascript are examples of
	// languages that use this encoding natively.
	EncodingType_UTF16 EncodingType = 2
	// Encoding-dependent information (such as `begin_offset`) is calculated based
	// on the UTF-32 encoding of the input. Python is an example of a language
	// that uses this encoding natively.
	EncodingType_UTF32 EncodingType = 3
)

// EncodingType_name maps each EncodingType value to its proto enum name.
var EncodingType_name = map[int32]string{
	0: "NONE",
	1: "UTF8",
	2: "UTF16",
	3: "UTF32",
}

// EncodingType_value maps each proto enum name back to its numeric value.
var EncodingType_value = map[string]int32{
	"NONE":  0,
	"UTF8":  1,
	"UTF16": 2,
	"UTF32": 3,
}

// String returns the proto name of the enum value.
func (x EncodingType) String() string {
	return proto.EnumName(EncodingType_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this enum within it.
func (EncodingType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{0}
}
| |
| // The document types enum. |
type Document_Type int32

const (
	// The content type is not specified.
	Document_TYPE_UNSPECIFIED Document_Type = 0
	// Plain text
	Document_PLAIN_TEXT Document_Type = 1
	// HTML
	Document_HTML Document_Type = 2
)

// Document_Type_name maps each Document_Type value to its proto enum name.
var Document_Type_name = map[int32]string{
	0: "TYPE_UNSPECIFIED",
	1: "PLAIN_TEXT",
	2: "HTML",
}

// Document_Type_value maps each proto enum name back to its numeric value.
var Document_Type_value = map[string]int32{
	"TYPE_UNSPECIFIED": 0,
	"PLAIN_TEXT":       1,
	"HTML":             2,
}

// String returns the proto name of the enum value.
func (x Document_Type) String() string {
	return proto.EnumName(Document_Type_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 0, enum 0) within it.
func (Document_Type) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{0, 0}
}
| |
| // The type of the entity. |
type Entity_Type int32

const (
	// Unknown
	Entity_UNKNOWN Entity_Type = 0
	// Person
	Entity_PERSON Entity_Type = 1
	// Location
	Entity_LOCATION Entity_Type = 2
	// Organization
	Entity_ORGANIZATION Entity_Type = 3
	// Event
	Entity_EVENT Entity_Type = 4
	// Work of art
	Entity_WORK_OF_ART Entity_Type = 5
	// Consumer goods
	Entity_CONSUMER_GOOD Entity_Type = 6
	// Other types
	Entity_OTHER Entity_Type = 7
)

// Entity_Type_name maps each Entity_Type value to its proto enum name.
var Entity_Type_name = map[int32]string{
	0: "UNKNOWN",
	1: "PERSON",
	2: "LOCATION",
	3: "ORGANIZATION",
	4: "EVENT",
	5: "WORK_OF_ART",
	6: "CONSUMER_GOOD",
	7: "OTHER",
}

// Entity_Type_value maps each proto enum name back to its numeric value.
var Entity_Type_value = map[string]int32{
	"UNKNOWN":       0,
	"PERSON":        1,
	"LOCATION":      2,
	"ORGANIZATION":  3,
	"EVENT":         4,
	"WORK_OF_ART":   5,
	"CONSUMER_GOOD": 6,
	"OTHER":         7,
}

// String returns the proto name of the enum value.
func (x Entity_Type) String() string {
	return proto.EnumName(Entity_Type_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 2, enum 0) within it.
func (Entity_Type) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{2, 0}
}
| |
| // The part of speech tags enum. |
type PartOfSpeech_Tag int32

const (
	// Unknown
	PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0
	// Adjective
	PartOfSpeech_ADJ PartOfSpeech_Tag = 1
	// Adposition (preposition and postposition)
	PartOfSpeech_ADP PartOfSpeech_Tag = 2
	// Adverb
	PartOfSpeech_ADV PartOfSpeech_Tag = 3
	// Conjunction
	PartOfSpeech_CONJ PartOfSpeech_Tag = 4
	// Determiner
	PartOfSpeech_DET PartOfSpeech_Tag = 5
	// Noun (common and proper)
	PartOfSpeech_NOUN PartOfSpeech_Tag = 6
	// Cardinal number
	PartOfSpeech_NUM PartOfSpeech_Tag = 7
	// Pronoun
	PartOfSpeech_PRON PartOfSpeech_Tag = 8
	// Particle or other function word
	PartOfSpeech_PRT PartOfSpeech_Tag = 9
	// Punctuation
	PartOfSpeech_PUNCT PartOfSpeech_Tag = 10
	// Verb (all tenses and modes)
	PartOfSpeech_VERB PartOfSpeech_Tag = 11
	// Other: foreign words, typos, abbreviations
	PartOfSpeech_X PartOfSpeech_Tag = 12
	// Affix
	PartOfSpeech_AFFIX PartOfSpeech_Tag = 13
)

// PartOfSpeech_Tag_name maps each PartOfSpeech_Tag value to its proto enum name.
var PartOfSpeech_Tag_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "ADJ",
	2:  "ADP",
	3:  "ADV",
	4:  "CONJ",
	5:  "DET",
	6:  "NOUN",
	7:  "NUM",
	8:  "PRON",
	9:  "PRT",
	10: "PUNCT",
	11: "VERB",
	12: "X",
	13: "AFFIX",
}

// PartOfSpeech_Tag_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Tag_value = map[string]int32{
	"UNKNOWN": 0,
	"ADJ":     1,
	"ADP":     2,
	"ADV":     3,
	"CONJ":    4,
	"DET":     5,
	"NOUN":    6,
	"NUM":     7,
	"PRON":    8,
	"PRT":     9,
	"PUNCT":   10,
	"VERB":    11,
	"X":       12,
	"AFFIX":   13,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Tag) String() string {
	return proto.EnumName(PartOfSpeech_Tag_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 0) within it.
func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 0}
}
| |
| // The characteristic of a verb that expresses time flow during an event. |
type PartOfSpeech_Aspect int32

const (
	// Aspect is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0
	// Perfective
	PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1
	// Imperfective
	PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2
	// Progressive
	PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3
)

// PartOfSpeech_Aspect_name maps each PartOfSpeech_Aspect value to its proto enum name.
var PartOfSpeech_Aspect_name = map[int32]string{
	0: "ASPECT_UNKNOWN",
	1: "PERFECTIVE",
	2: "IMPERFECTIVE",
	3: "PROGRESSIVE",
}

// PartOfSpeech_Aspect_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Aspect_value = map[string]int32{
	"ASPECT_UNKNOWN": 0,
	"PERFECTIVE":     1,
	"IMPERFECTIVE":   2,
	"PROGRESSIVE":    3,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Aspect) String() string {
	return proto.EnumName(PartOfSpeech_Aspect_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 1) within it.
func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 1}
}
| |
| // The grammatical function performed by a noun or pronoun in a phrase, |
| // clause, or sentence. In some languages, other parts of speech, such as |
| // adjective and determiner, take case inflection in agreement with the noun. |
type PartOfSpeech_Case int32

const (
	// Case is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0
	// Accusative
	PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1
	// Adverbial
	PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2
	// Complementive
	PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3
	// Dative
	PartOfSpeech_DATIVE PartOfSpeech_Case = 4
	// Genitive
	PartOfSpeech_GENITIVE PartOfSpeech_Case = 5
	// Instrumental
	PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6
	// Locative
	PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7
	// Nominative
	PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8
	// Oblique
	PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9
	// Partitive
	PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10
	// Prepositional
	PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11
	// Reflexive
	PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12
	// Relative
	PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13
	// Vocative
	PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14
)

// PartOfSpeech_Case_name maps each PartOfSpeech_Case value to its proto enum name.
var PartOfSpeech_Case_name = map[int32]string{
	0:  "CASE_UNKNOWN",
	1:  "ACCUSATIVE",
	2:  "ADVERBIAL",
	3:  "COMPLEMENTIVE",
	4:  "DATIVE",
	5:  "GENITIVE",
	6:  "INSTRUMENTAL",
	7:  "LOCATIVE",
	8:  "NOMINATIVE",
	9:  "OBLIQUE",
	10: "PARTITIVE",
	11: "PREPOSITIONAL",
	12: "REFLEXIVE_CASE",
	13: "RELATIVE_CASE",
	14: "VOCATIVE",
}

// PartOfSpeech_Case_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Case_value = map[string]int32{
	"CASE_UNKNOWN":   0,
	"ACCUSATIVE":     1,
	"ADVERBIAL":      2,
	"COMPLEMENTIVE":  3,
	"DATIVE":         4,
	"GENITIVE":       5,
	"INSTRUMENTAL":   6,
	"LOCATIVE":       7,
	"NOMINATIVE":     8,
	"OBLIQUE":        9,
	"PARTITIVE":      10,
	"PREPOSITIONAL":  11,
	"REFLEXIVE_CASE": 12,
	"RELATIVE_CASE":  13,
	"VOCATIVE":       14,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Case) String() string {
	return proto.EnumName(PartOfSpeech_Case_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 2) within it.
func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 2}
}
| |
| // Depending on the language, Form can be categorizing different forms of |
| // verbs, adjectives, adverbs, etc. For example, categorizing inflected |
| // endings of verbs and adjectives or distinguishing between short and long |
| // forms of adjectives and participles |
type PartOfSpeech_Form int32

const (
	// Form is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0
	// Adnomial
	PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1
	// Auxiliary
	PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2
	// Complementizer
	PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3
	// Final ending
	PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4
	// Gerund
	PartOfSpeech_GERUND PartOfSpeech_Form = 5
	// Realis
	PartOfSpeech_REALIS PartOfSpeech_Form = 6
	// Irrealis
	PartOfSpeech_IRREALIS PartOfSpeech_Form = 7
	// Short form
	PartOfSpeech_SHORT PartOfSpeech_Form = 8
	// Long form
	PartOfSpeech_LONG PartOfSpeech_Form = 9
	// Order form
	PartOfSpeech_ORDER PartOfSpeech_Form = 10
	// Specific form
	PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11
)

// PartOfSpeech_Form_name maps each PartOfSpeech_Form value to its proto enum name.
var PartOfSpeech_Form_name = map[int32]string{
	0:  "FORM_UNKNOWN",
	1:  "ADNOMIAL",
	2:  "AUXILIARY",
	3:  "COMPLEMENTIZER",
	4:  "FINAL_ENDING",
	5:  "GERUND",
	6:  "REALIS",
	7:  "IRREALIS",
	8:  "SHORT",
	9:  "LONG",
	10: "ORDER",
	11: "SPECIFIC",
}

// PartOfSpeech_Form_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Form_value = map[string]int32{
	"FORM_UNKNOWN":   0,
	"ADNOMIAL":       1,
	"AUXILIARY":      2,
	"COMPLEMENTIZER": 3,
	"FINAL_ENDING":   4,
	"GERUND":         5,
	"REALIS":         6,
	"IRREALIS":       7,
	"SHORT":          8,
	"LONG":           9,
	"ORDER":          10,
	"SPECIFIC":       11,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Form) String() string {
	return proto.EnumName(PartOfSpeech_Form_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 3) within it.
func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 3}
}
| |
| // Gender classes of nouns reflected in the behaviour of associated words. |
type PartOfSpeech_Gender int32

const (
	// Gender is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0
	// Feminine
	PartOfSpeech_FEMININE PartOfSpeech_Gender = 1
	// Masculine
	PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2
	// Neuter
	PartOfSpeech_NEUTER PartOfSpeech_Gender = 3
)

// PartOfSpeech_Gender_name maps each PartOfSpeech_Gender value to its proto enum name.
var PartOfSpeech_Gender_name = map[int32]string{
	0: "GENDER_UNKNOWN",
	1: "FEMININE",
	2: "MASCULINE",
	3: "NEUTER",
}

// PartOfSpeech_Gender_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Gender_value = map[string]int32{
	"GENDER_UNKNOWN": 0,
	"FEMININE":       1,
	"MASCULINE":      2,
	"NEUTER":         3,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Gender) String() string {
	return proto.EnumName(PartOfSpeech_Gender_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 4) within it.
func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 4}
}
| |
| // The grammatical feature of verbs, used for showing modality and attitude. |
type PartOfSpeech_Mood int32

const (
	// Mood is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0
	// Conditional
	PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1
	// Imperative
	PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2
	// Indicative
	PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3
	// Interrogative
	PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4
	// Jussive
	PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5
	// Subjunctive
	PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6
)

// PartOfSpeech_Mood_name maps each PartOfSpeech_Mood value to its proto enum name.
var PartOfSpeech_Mood_name = map[int32]string{
	0: "MOOD_UNKNOWN",
	1: "CONDITIONAL_MOOD",
	2: "IMPERATIVE",
	3: "INDICATIVE",
	4: "INTERROGATIVE",
	5: "JUSSIVE",
	6: "SUBJUNCTIVE",
}

// PartOfSpeech_Mood_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Mood_value = map[string]int32{
	"MOOD_UNKNOWN":     0,
	"CONDITIONAL_MOOD": 1,
	"IMPERATIVE":       2,
	"INDICATIVE":       3,
	"INTERROGATIVE":    4,
	"JUSSIVE":          5,
	"SUBJUNCTIVE":      6,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Mood) String() string {
	return proto.EnumName(PartOfSpeech_Mood_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 5) within it.
func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 5}
}
| |
| // Count distinctions. |
type PartOfSpeech_Number int32

const (
	// Number is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0
	// Singular
	PartOfSpeech_SINGULAR PartOfSpeech_Number = 1
	// Plural
	PartOfSpeech_PLURAL PartOfSpeech_Number = 2
	// Dual
	PartOfSpeech_DUAL PartOfSpeech_Number = 3
)

// PartOfSpeech_Number_name maps each PartOfSpeech_Number value to its proto enum name.
var PartOfSpeech_Number_name = map[int32]string{
	0: "NUMBER_UNKNOWN",
	1: "SINGULAR",
	2: "PLURAL",
	3: "DUAL",
}

// PartOfSpeech_Number_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Number_value = map[string]int32{
	"NUMBER_UNKNOWN": 0,
	"SINGULAR":       1,
	"PLURAL":         2,
	"DUAL":           3,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Number) String() string {
	return proto.EnumName(PartOfSpeech_Number_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 6) within it.
func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 6}
}
| |
| // The distinction between the speaker, second person, third person, etc. |
type PartOfSpeech_Person int32

const (
	// Person is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0
	// First
	PartOfSpeech_FIRST PartOfSpeech_Person = 1
	// Second
	PartOfSpeech_SECOND PartOfSpeech_Person = 2
	// Third
	PartOfSpeech_THIRD PartOfSpeech_Person = 3
	// Reflexive
	PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4
)

// PartOfSpeech_Person_name maps each PartOfSpeech_Person value to its proto enum name.
var PartOfSpeech_Person_name = map[int32]string{
	0: "PERSON_UNKNOWN",
	1: "FIRST",
	2: "SECOND",
	3: "THIRD",
	4: "REFLEXIVE_PERSON",
}

// PartOfSpeech_Person_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Person_value = map[string]int32{
	"PERSON_UNKNOWN":   0,
	"FIRST":            1,
	"SECOND":           2,
	"THIRD":            3,
	"REFLEXIVE_PERSON": 4,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Person) String() string {
	return proto.EnumName(PartOfSpeech_Person_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 7) within it.
func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 7}
}
| |
| // This category shows if the token is part of a proper name. |
type PartOfSpeech_Proper int32

const (
	// Proper is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0
	// Proper
	PartOfSpeech_PROPER PartOfSpeech_Proper = 1
	// Not proper
	PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2
)

// PartOfSpeech_Proper_name maps each PartOfSpeech_Proper value to its proto enum name.
var PartOfSpeech_Proper_name = map[int32]string{
	0: "PROPER_UNKNOWN",
	1: "PROPER",
	2: "NOT_PROPER",
}

// PartOfSpeech_Proper_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Proper_value = map[string]int32{
	"PROPER_UNKNOWN": 0,
	"PROPER":         1,
	"NOT_PROPER":     2,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Proper) String() string {
	return proto.EnumName(PartOfSpeech_Proper_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 8) within it.
func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 8}
}
| |
| // Reciprocal features of a pronoun. |
type PartOfSpeech_Reciprocity int32

const (
	// Reciprocity is not applicable in the analyzed language or is not
	// predicted.
	PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0
	// Reciprocal
	PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1
	// Non-reciprocal
	PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2
)

// PartOfSpeech_Reciprocity_name maps each PartOfSpeech_Reciprocity value to its proto enum name.
var PartOfSpeech_Reciprocity_name = map[int32]string{
	0: "RECIPROCITY_UNKNOWN",
	1: "RECIPROCAL",
	2: "NON_RECIPROCAL",
}

// PartOfSpeech_Reciprocity_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Reciprocity_value = map[string]int32{
	"RECIPROCITY_UNKNOWN": 0,
	"RECIPROCAL":          1,
	"NON_RECIPROCAL":      2,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Reciprocity) String() string {
	return proto.EnumName(PartOfSpeech_Reciprocity_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 9) within it.
func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 9}
}
| |
| // Time reference. |
type PartOfSpeech_Tense int32

const (
	// Tense is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0
	// Conditional
	PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1
	// Future
	PartOfSpeech_FUTURE PartOfSpeech_Tense = 2
	// Past
	PartOfSpeech_PAST PartOfSpeech_Tense = 3
	// Present
	PartOfSpeech_PRESENT PartOfSpeech_Tense = 4
	// Imperfect
	PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5
	// Pluperfect
	PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6
)

// PartOfSpeech_Tense_name maps each PartOfSpeech_Tense value to its proto enum name.
var PartOfSpeech_Tense_name = map[int32]string{
	0: "TENSE_UNKNOWN",
	1: "CONDITIONAL_TENSE",
	2: "FUTURE",
	3: "PAST",
	4: "PRESENT",
	5: "IMPERFECT",
	6: "PLUPERFECT",
}

// PartOfSpeech_Tense_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Tense_value = map[string]int32{
	"TENSE_UNKNOWN":     0,
	"CONDITIONAL_TENSE": 1,
	"FUTURE":            2,
	"PAST":              3,
	"PRESENT":           4,
	"IMPERFECT":         5,
	"PLUPERFECT":        6,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Tense) String() string {
	return proto.EnumName(PartOfSpeech_Tense_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 10) within it.
func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 10}
}
| |
| // The relationship between the action that a verb expresses and the |
| // participants identified by its arguments. |
type PartOfSpeech_Voice int32

const (
	// Voice is not applicable in the analyzed language or is not predicted.
	PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0
	// Active
	PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1
	// Causative
	PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2
	// Passive
	PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3
)

// PartOfSpeech_Voice_name maps each PartOfSpeech_Voice value to its proto enum name.
var PartOfSpeech_Voice_name = map[int32]string{
	0: "VOICE_UNKNOWN",
	1: "ACTIVE",
	2: "CAUSATIVE",
	3: "PASSIVE",
}

// PartOfSpeech_Voice_value maps each proto enum name back to its numeric value.
var PartOfSpeech_Voice_value = map[string]int32{
	"VOICE_UNKNOWN": 0,
	"ACTIVE":        1,
	"CAUSATIVE":     2,
	"PASSIVE":       3,
}

// String returns the proto name of the enum value.
func (x PartOfSpeech_Voice) String() string {
	return proto.EnumName(PartOfSpeech_Voice_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 5, enum 11) within it.
func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5, 11}
}
| |
| // The parse label enum for the token. |
type DependencyEdge_Label int32

const (
	// Unknown
	DependencyEdge_UNKNOWN DependencyEdge_Label = 0
	// Abbreviation modifier
	DependencyEdge_ABBREV DependencyEdge_Label = 1
	// Adjectival complement
	DependencyEdge_ACOMP DependencyEdge_Label = 2
	// Adverbial clause modifier
	DependencyEdge_ADVCL DependencyEdge_Label = 3
	// Adverbial modifier
	DependencyEdge_ADVMOD DependencyEdge_Label = 4
	// Adjectival modifier of an NP
	DependencyEdge_AMOD DependencyEdge_Label = 5
	// Appositional modifier of an NP
	DependencyEdge_APPOS DependencyEdge_Label = 6
	// Attribute dependent of a copular verb
	DependencyEdge_ATTR DependencyEdge_Label = 7
	// Auxiliary (non-main) verb
	DependencyEdge_AUX DependencyEdge_Label = 8
	// Passive auxiliary
	DependencyEdge_AUXPASS DependencyEdge_Label = 9
	// Coordinating conjunction
	DependencyEdge_CC DependencyEdge_Label = 10
	// Clausal complement of a verb or adjective
	DependencyEdge_CCOMP DependencyEdge_Label = 11
	// Conjunct
	DependencyEdge_CONJ DependencyEdge_Label = 12
	// Clausal subject
	DependencyEdge_CSUBJ DependencyEdge_Label = 13
	// Clausal passive subject
	DependencyEdge_CSUBJPASS DependencyEdge_Label = 14
	// Dependency (unable to determine)
	DependencyEdge_DEP DependencyEdge_Label = 15
	// Determiner
	DependencyEdge_DET DependencyEdge_Label = 16
	// Discourse
	DependencyEdge_DISCOURSE DependencyEdge_Label = 17
	// Direct object
	DependencyEdge_DOBJ DependencyEdge_Label = 18
	// Expletive
	DependencyEdge_EXPL DependencyEdge_Label = 19
	// Goes with (part of a word in a text not well edited)
	DependencyEdge_GOESWITH DependencyEdge_Label = 20
	// Indirect object
	DependencyEdge_IOBJ DependencyEdge_Label = 21
	// Marker (word introducing a subordinate clause)
	DependencyEdge_MARK DependencyEdge_Label = 22
	// Multi-word expression
	DependencyEdge_MWE DependencyEdge_Label = 23
	// Multi-word verbal expression
	DependencyEdge_MWV DependencyEdge_Label = 24
	// Negation modifier
	DependencyEdge_NEG DependencyEdge_Label = 25
	// Noun compound modifier
	DependencyEdge_NN DependencyEdge_Label = 26
	// Noun phrase used as an adverbial modifier
	DependencyEdge_NPADVMOD DependencyEdge_Label = 27
	// Nominal subject
	DependencyEdge_NSUBJ DependencyEdge_Label = 28
	// Passive nominal subject
	DependencyEdge_NSUBJPASS DependencyEdge_Label = 29
	// Numeric modifier of a noun
	DependencyEdge_NUM DependencyEdge_Label = 30
	// Element of compound number
	DependencyEdge_NUMBER DependencyEdge_Label = 31
	// Punctuation mark
	DependencyEdge_P DependencyEdge_Label = 32
	// Parataxis relation
	DependencyEdge_PARATAXIS DependencyEdge_Label = 33
	// Participial modifier
	DependencyEdge_PARTMOD DependencyEdge_Label = 34
	// The complement of a preposition is a clause
	DependencyEdge_PCOMP DependencyEdge_Label = 35
	// Object of a preposition
	DependencyEdge_POBJ DependencyEdge_Label = 36
	// Possession modifier
	DependencyEdge_POSS DependencyEdge_Label = 37
	// Postverbal negative particle
	DependencyEdge_POSTNEG DependencyEdge_Label = 38
	// Predicate complement
	DependencyEdge_PRECOMP DependencyEdge_Label = 39
	// Preconjunt
	DependencyEdge_PRECONJ DependencyEdge_Label = 40
	// Predeterminer
	DependencyEdge_PREDET DependencyEdge_Label = 41
	// Prefix
	DependencyEdge_PREF DependencyEdge_Label = 42
	// Prepositional modifier
	DependencyEdge_PREP DependencyEdge_Label = 43
	// The relationship between a verb and verbal morpheme
	DependencyEdge_PRONL DependencyEdge_Label = 44
	// Particle
	DependencyEdge_PRT DependencyEdge_Label = 45
	// Associative or possessive marker
	DependencyEdge_PS DependencyEdge_Label = 46
	// Quantifier phrase modifier
	DependencyEdge_QUANTMOD DependencyEdge_Label = 47
	// Relative clause modifier
	DependencyEdge_RCMOD DependencyEdge_Label = 48
	// Complementizer in relative clause
	DependencyEdge_RCMODREL DependencyEdge_Label = 49
	// Ellipsis without a preceding predicate
	DependencyEdge_RDROP DependencyEdge_Label = 50
	// Referent
	DependencyEdge_REF DependencyEdge_Label = 51
	// Remnant
	DependencyEdge_REMNANT DependencyEdge_Label = 52
	// Reparandum
	DependencyEdge_REPARANDUM DependencyEdge_Label = 53
	// Root
	DependencyEdge_ROOT DependencyEdge_Label = 54
	// Suffix specifying a unit of number
	DependencyEdge_SNUM DependencyEdge_Label = 55
	// Suffix
	DependencyEdge_SUFF DependencyEdge_Label = 56
	// Temporal modifier
	DependencyEdge_TMOD DependencyEdge_Label = 57
	// Topic marker
	DependencyEdge_TOPIC DependencyEdge_Label = 58
	// Clause headed by an infinite form of the verb that modifies a noun
	DependencyEdge_VMOD DependencyEdge_Label = 59
	// Vocative
	DependencyEdge_VOCATIVE DependencyEdge_Label = 60
	// Open clausal complement
	DependencyEdge_XCOMP DependencyEdge_Label = 61
	// Name suffix
	DependencyEdge_SUFFIX DependencyEdge_Label = 62
	// Name title
	DependencyEdge_TITLE DependencyEdge_Label = 63
	// Adverbial phrase modifier
	DependencyEdge_ADVPHMOD DependencyEdge_Label = 64
	// Causative auxiliary
	DependencyEdge_AUXCAUS DependencyEdge_Label = 65
	// Helper auxiliary
	DependencyEdge_AUXVV DependencyEdge_Label = 66
	// Rentaishi (Prenominal modifier)
	DependencyEdge_DTMOD DependencyEdge_Label = 67
	// Foreign words
	DependencyEdge_FOREIGN DependencyEdge_Label = 68
	// Keyword
	DependencyEdge_KW DependencyEdge_Label = 69
	// List for chains of comparable items
	DependencyEdge_LIST DependencyEdge_Label = 70
	// Nominalized clause
	DependencyEdge_NOMC DependencyEdge_Label = 71
	// Nominalized clausal subject
	DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72
	// Nominalized clausal passive
	DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73
	// Compound of numeric modifier
	DependencyEdge_NUMC DependencyEdge_Label = 74
	// Copula
	DependencyEdge_COP DependencyEdge_Label = 75
	// Dislocated relation (for fronted/topicalized elements)
	DependencyEdge_DISLOCATED DependencyEdge_Label = 76
	// Aspect marker
	DependencyEdge_ASP DependencyEdge_Label = 77
	// Genitive modifier
	DependencyEdge_GMOD DependencyEdge_Label = 78
	// Genitive object
	DependencyEdge_GOBJ DependencyEdge_Label = 79
	// Infinitival modifier
	DependencyEdge_INFMOD DependencyEdge_Label = 80
	// Measure
	DependencyEdge_MES DependencyEdge_Label = 81
	// Nominal complement of a noun
	DependencyEdge_NCOMP DependencyEdge_Label = 82
)

// DependencyEdge_Label_name maps each DependencyEdge_Label value to its proto enum name.
var DependencyEdge_Label_name = map[int32]string{
	0:  "UNKNOWN",
	1:  "ABBREV",
	2:  "ACOMP",
	3:  "ADVCL",
	4:  "ADVMOD",
	5:  "AMOD",
	6:  "APPOS",
	7:  "ATTR",
	8:  "AUX",
	9:  "AUXPASS",
	10: "CC",
	11: "CCOMP",
	12: "CONJ",
	13: "CSUBJ",
	14: "CSUBJPASS",
	15: "DEP",
	16: "DET",
	17: "DISCOURSE",
	18: "DOBJ",
	19: "EXPL",
	20: "GOESWITH",
	21: "IOBJ",
	22: "MARK",
	23: "MWE",
	24: "MWV",
	25: "NEG",
	26: "NN",
	27: "NPADVMOD",
	28: "NSUBJ",
	29: "NSUBJPASS",
	30: "NUM",
	31: "NUMBER",
	32: "P",
	33: "PARATAXIS",
	34: "PARTMOD",
	35: "PCOMP",
	36: "POBJ",
	37: "POSS",
	38: "POSTNEG",
	39: "PRECOMP",
	40: "PRECONJ",
	41: "PREDET",
	42: "PREF",
	43: "PREP",
	44: "PRONL",
	45: "PRT",
	46: "PS",
	47: "QUANTMOD",
	48: "RCMOD",
	49: "RCMODREL",
	50: "RDROP",
	51: "REF",
	52: "REMNANT",
	53: "REPARANDUM",
	54: "ROOT",
	55: "SNUM",
	56: "SUFF",
	57: "TMOD",
	58: "TOPIC",
	59: "VMOD",
	60: "VOCATIVE",
	61: "XCOMP",
	62: "SUFFIX",
	63: "TITLE",
	64: "ADVPHMOD",
	65: "AUXCAUS",
	66: "AUXVV",
	67: "DTMOD",
	68: "FOREIGN",
	69: "KW",
	70: "LIST",
	71: "NOMC",
	72: "NOMCSUBJ",
	73: "NOMCSUBJPASS",
	74: "NUMC",
	75: "COP",
	76: "DISLOCATED",
	77: "ASP",
	78: "GMOD",
	79: "GOBJ",
	80: "INFMOD",
	81: "MES",
	82: "NCOMP",
}

// DependencyEdge_Label_value maps each proto enum name back to its numeric value.
var DependencyEdge_Label_value = map[string]int32{
	"UNKNOWN":      0,
	"ABBREV":       1,
	"ACOMP":        2,
	"ADVCL":        3,
	"ADVMOD":       4,
	"AMOD":         5,
	"APPOS":        6,
	"ATTR":         7,
	"AUX":          8,
	"AUXPASS":      9,
	"CC":           10,
	"CCOMP":        11,
	"CONJ":         12,
	"CSUBJ":        13,
	"CSUBJPASS":    14,
	"DEP":          15,
	"DET":          16,
	"DISCOURSE":    17,
	"DOBJ":         18,
	"EXPL":         19,
	"GOESWITH":     20,
	"IOBJ":         21,
	"MARK":         22,
	"MWE":          23,
	"MWV":          24,
	"NEG":          25,
	"NN":           26,
	"NPADVMOD":     27,
	"NSUBJ":        28,
	"NSUBJPASS":    29,
	"NUM":          30,
	"NUMBER":       31,
	"P":            32,
	"PARATAXIS":    33,
	"PARTMOD":      34,
	"PCOMP":        35,
	"POBJ":         36,
	"POSS":         37,
	"POSTNEG":      38,
	"PRECOMP":      39,
	"PRECONJ":      40,
	"PREDET":       41,
	"PREF":         42,
	"PREP":         43,
	"PRONL":        44,
	"PRT":          45,
	"PS":           46,
	"QUANTMOD":     47,
	"RCMOD":        48,
	"RCMODREL":     49,
	"RDROP":        50,
	"REF":          51,
	"REMNANT":      52,
	"REPARANDUM":   53,
	"ROOT":         54,
	"SNUM":         55,
	"SUFF":         56,
	"TMOD":         57,
	"TOPIC":        58,
	"VMOD":         59,
	"VOCATIVE":     60,
	"XCOMP":        61,
	"SUFFIX":       62,
	"TITLE":        63,
	"ADVPHMOD":     64,
	"AUXCAUS":      65,
	"AUXVV":        66,
	"DTMOD":        67,
	"FOREIGN":      68,
	"KW":           69,
	"LIST":         70,
	"NOMC":         71,
	"NOMCSUBJ":     72,
	"NOMCSUBJPASS": 73,
	"NUMC":         74,
	"COP":          75,
	"DISLOCATED":   76,
	"ASP":          77,
	"GMOD":         78,
	"GOBJ":         79,
	"INFMOD":       80,
	"MES":          81,
	"NCOMP":        82,
}

// String returns the proto name of the enum value.
func (x DependencyEdge_Label) String() string {
	return proto.EnumName(DependencyEdge_Label_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 6, enum 0) within it.
func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{6, 0}
}
| |
| // The supported types of mentions. |
type EntityMention_Type int32

const (
	// Unknown
	EntityMention_TYPE_UNKNOWN EntityMention_Type = 0
	// Proper name
	EntityMention_PROPER EntityMention_Type = 1
	// Common noun (or noun compound)
	EntityMention_COMMON EntityMention_Type = 2
)

// EntityMention_Type_name maps each EntityMention_Type value to its proto enum name.
var EntityMention_Type_name = map[int32]string{
	0: "TYPE_UNKNOWN",
	1: "PROPER",
	2: "COMMON",
}

// EntityMention_Type_value maps each proto enum name back to its numeric value.
var EntityMention_Type_value = map[string]int32{
	"TYPE_UNKNOWN": 0,
	"PROPER":       1,
	"COMMON":       2,
}

// String returns the proto name of the enum value.
func (x EntityMention_Type) String() string {
	return proto.EnumName(EntityMention_Type_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor and the index path
// of this nested enum (message 7, enum 0) within it.
func (EntityMention_Type) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{7, 0}
}
| |
| // ################################################################ # |
| // |
| // Represents the input to API methods. |
| type Document struct { |
| // Required. If the type is not set or is `TYPE_UNSPECIFIED`, |
| // returns an `INVALID_ARGUMENT` error. |
| Type Document_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.language.v1.Document_Type" json:"type,omitempty"` |
| // The source of the document: a string containing the content or a |
| // Google Cloud Storage URI. |
| // |
| // Types that are valid to be assigned to Source: |
| // *Document_Content |
| // *Document_GcsContentUri |
| Source isDocument_Source `protobuf_oneof:"source"` |
| // The language of the document (if not specified, the language is |
| // automatically detected). Both ISO and BCP-47 language codes are |
| // accepted.<br> |
| // [Language Support](/natural-language/docs/languages) |
| // lists currently supported languages for each API method. |
| // If the language (either specified by the caller or automatically detected) |
| // is not supported by the called API method, an `INVALID_ARGUMENT` error |
| // is returned. |
| Language string `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
// Standard proto.Message plumbing for Document, generated by protoc-gen-go.
// All (un)marshaling is delegated to the shared xxx_messageInfo_Document.
func (m *Document) Reset() { *m = Document{} }
func (m *Document) String() string { return proto.CompactTextString(m) }
func (*Document) ProtoMessage() {}
func (*Document) Descriptor() ([]byte, []int) {
	// []int{0}: Document is message index 0 in the file descriptor.
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{0}
}
func (m *Document) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Document.Unmarshal(m, b)
}
func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Document.Marshal(b, m, deterministic)
}
func (dst *Document) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Document.Merge(dst, src)
}
func (m *Document) XXX_Size() int {
	return xxx_messageInfo_Document.Size(m)
}
func (m *Document) XXX_DiscardUnknown() {
	xxx_messageInfo_Document.DiscardUnknown(m)
}

var xxx_messageInfo_Document proto.InternalMessageInfo
| |
| func (m *Document) GetType() Document_Type { |
| if m != nil { |
| return m.Type |
| } |
| return Document_TYPE_UNSPECIFIED |
| } |
| |
// isDocument_Source is the sealed interface implemented by the wrapper types
// that may populate Document's `source` oneof.
type isDocument_Source interface {
	isDocument_Source()
}

// Document_Content holds inline document content (oneof field 2).
type Document_Content struct {
	Content string `protobuf:"bytes,2,opt,name=content,proto3,oneof"`
}

// Document_GcsContentUri holds a Google Cloud Storage URI (oneof field 3).
type Document_GcsContentUri struct {
	GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,proto3,oneof"`
}

func (*Document_Content) isDocument_Source() {}

func (*Document_GcsContentUri) isDocument_Source() {}

// GetSource returns whichever oneof wrapper is currently set, or nil.
func (m *Document) GetSource() isDocument_Source {
	if m != nil {
		return m.Source
	}
	return nil
}
| |
| func (m *Document) GetContent() string { |
| if x, ok := m.GetSource().(*Document_Content); ok { |
| return x.Content |
| } |
| return "" |
| } |
| |
| func (m *Document) GetGcsContentUri() string { |
| if x, ok := m.GetSource().(*Document_GcsContentUri); ok { |
| return x.GcsContentUri |
| } |
| return "" |
| } |
| |
| func (m *Document) GetLanguage() string { |
| if m != nil { |
| return m.Language |
| } |
| return "" |
| } |
| |
// XXX_OneofFuncs is for the internal use of the proto package.
// It hands the runtime the marshal/unmarshal/size callbacks for the `source`
// oneof together with the set of valid wrapper types.
func (*Document) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _Document_OneofMarshaler, _Document_OneofUnmarshaler, _Document_OneofSizer, []interface{}{
		(*Document_Content)(nil),
		(*Document_GcsContentUri)(nil),
	}
}
| |
// _Document_OneofMarshaler encodes whichever `source` variant is set into b.
// Each case writes the field key (field number << 3 | wire type) followed by
// the length-delimited string payload; an unset oneof (nil) emits nothing.
func _Document_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*Document)
	// source
	switch x := m.Source.(type) {
	case *Document_Content:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Content)
	case *Document_GcsContentUri:
		b.EncodeVarint(3<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.GcsContentUri)
	case nil:
	default:
		return fmt.Errorf("Document.Source has unexpected type %T", x)
	}
	return nil
}
| |
// _Document_OneofUnmarshaler decodes a `source` oneof field from b.
// It returns (true, err) when the tag belongs to this oneof (the wrapper is
// assigned even on decode error, matching generator behavior), and
// (false, nil) when the tag is not part of the oneof.
func _Document_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*Document)
	switch tag {
	case 2: // source.content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Source = &Document_Content{x}
		return true, err
	case 3: // source.gcs_content_uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.Source = &Document_GcsContentUri{x}
		return true, err
	default:
		return false, nil
	}
}
| |
// _Document_OneofSizer returns the encoded size in bytes of the currently-set
// `source` variant: one key byte (field numbers 2 and 3 fit in a single
// varint byte), the varint-encoded length, and the payload itself.
func _Document_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*Document)
	// source
	switch x := m.Source.(type) {
	case *Document_Content:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Content)))
		n += len(x.Content)
	case *Document_GcsContentUri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.GcsContentUri)))
		n += len(x.GcsContentUri)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// Represents a sentence in the input document.
type Sentence struct {
	// The sentence text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// For calls to [AnalyzeSentiment][] or if
	// [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
	// true, this field will contain the sentiment for the sentence.
	Sentiment *Sentiment `protobuf:"bytes,2,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for Sentence, generated by protoc-gen-go.
func (m *Sentence) Reset() { *m = Sentence{} }
func (m *Sentence) String() string { return proto.CompactTextString(m) }
func (*Sentence) ProtoMessage() {}
func (*Sentence) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{1}
}
func (m *Sentence) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Sentence.Unmarshal(m, b)
}
func (m *Sentence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Sentence.Marshal(b, m, deterministic)
}
func (dst *Sentence) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Sentence.Merge(dst, src)
}
func (m *Sentence) XXX_Size() int {
	return xxx_messageInfo_Sentence.Size(m)
}
func (m *Sentence) XXX_DiscardUnknown() {
	xxx_messageInfo_Sentence.DiscardUnknown(m)
}

var xxx_messageInfo_Sentence proto.InternalMessageInfo
| |
| func (m *Sentence) GetText() *TextSpan { |
| if m != nil { |
| return m.Text |
| } |
| return nil |
| } |
| |
| func (m *Sentence) GetSentiment() *Sentiment { |
| if m != nil { |
| return m.Sentiment |
| } |
| return nil |
| } |
| |
// Represents a phrase in the text that is a known entity, such as
// a person, an organization, or location. The API associates information, such
// as salience and mentions, with entities.
type Entity struct {
	// The representative name for the entity.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The entity type.
	Type Entity_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.language.v1.Entity_Type" json:"type,omitempty"`
	// Metadata associated with the entity.
	//
	// Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
	// available. The associated keys are "wikipedia_url" and "mid", respectively.
	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// The salience score associated with the entity in the [0, 1.0] range.
	//
	// The salience score for an entity provides information about the
	// importance or centrality of that entity to the entire document text.
	// Scores closer to 0 are less salient, while scores closer to 1.0 are highly
	// salient.
	Salience float32 `protobuf:"fixed32,4,opt,name=salience,proto3" json:"salience,omitempty"`
	// The mentions of this entity in the input document. The API currently
	// supports proper noun mentions.
	Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions,proto3" json:"mentions,omitempty"`
	// For calls to [AnalyzeEntitySentiment][] or if
	// [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
	// true, this field will contain the aggregate sentiment expressed for this
	// entity in the provided document.
	Sentiment *Sentiment `protobuf:"bytes,6,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for Entity, generated by protoc-gen-go.
func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{2}
}
func (m *Entity) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Entity.Unmarshal(m, b)
}
func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Entity.Marshal(b, m, deterministic)
}
func (dst *Entity) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Entity.Merge(dst, src)
}
func (m *Entity) XXX_Size() int {
	return xxx_messageInfo_Entity.Size(m)
}
func (m *Entity) XXX_DiscardUnknown() {
	xxx_messageInfo_Entity.DiscardUnknown(m)
}

var xxx_messageInfo_Entity proto.InternalMessageInfo
| |
| func (m *Entity) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *Entity) GetType() Entity_Type { |
| if m != nil { |
| return m.Type |
| } |
| return Entity_UNKNOWN |
| } |
| |
| func (m *Entity) GetMetadata() map[string]string { |
| if m != nil { |
| return m.Metadata |
| } |
| return nil |
| } |
| |
| func (m *Entity) GetSalience() float32 { |
| if m != nil { |
| return m.Salience |
| } |
| return 0 |
| } |
| |
| func (m *Entity) GetMentions() []*EntityMention { |
| if m != nil { |
| return m.Mentions |
| } |
| return nil |
| } |
| |
| func (m *Entity) GetSentiment() *Sentiment { |
| if m != nil { |
| return m.Sentiment |
| } |
| return nil |
| } |
| |
// Represents the smallest syntactic building block of the text.
type Token struct {
	// The token text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// Parts of speech tag for this token.
	PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech,proto3" json:"part_of_speech,omitempty"`
	// Dependency tree parse for this token.
	DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge,proto3" json:"dependency_edge,omitempty"`
	// [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
	Lemma string `protobuf:"bytes,4,opt,name=lemma,proto3" json:"lemma,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for Token, generated by protoc-gen-go.
func (m *Token) Reset() { *m = Token{} }
func (m *Token) String() string { return proto.CompactTextString(m) }
func (*Token) ProtoMessage() {}
func (*Token) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{3}
}
func (m *Token) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Token.Unmarshal(m, b)
}
func (m *Token) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Token.Marshal(b, m, deterministic)
}
func (dst *Token) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Token.Merge(dst, src)
}
func (m *Token) XXX_Size() int {
	return xxx_messageInfo_Token.Size(m)
}
func (m *Token) XXX_DiscardUnknown() {
	xxx_messageInfo_Token.DiscardUnknown(m)
}

var xxx_messageInfo_Token proto.InternalMessageInfo
| |
| func (m *Token) GetText() *TextSpan { |
| if m != nil { |
| return m.Text |
| } |
| return nil |
| } |
| |
| func (m *Token) GetPartOfSpeech() *PartOfSpeech { |
| if m != nil { |
| return m.PartOfSpeech |
| } |
| return nil |
| } |
| |
| func (m *Token) GetDependencyEdge() *DependencyEdge { |
| if m != nil { |
| return m.DependencyEdge |
| } |
| return nil |
| } |
| |
| func (m *Token) GetLemma() string { |
| if m != nil { |
| return m.Lemma |
| } |
| return "" |
| } |
| |
// Represents the feeling associated with the entire text or entities in
// the text.
type Sentiment struct {
	// A non-negative number in the [0, +inf) range, which represents
	// the absolute magnitude of sentiment regardless of score (positive or
	// negative).
	Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude,proto3" json:"magnitude,omitempty"`
	// Sentiment score between -1.0 (negative sentiment) and 1.0
	// (positive sentiment).
	Score float32 `protobuf:"fixed32,3,opt,name=score,proto3" json:"score,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for Sentiment, generated by protoc-gen-go.
func (m *Sentiment) Reset() { *m = Sentiment{} }
func (m *Sentiment) String() string { return proto.CompactTextString(m) }
func (*Sentiment) ProtoMessage() {}
func (*Sentiment) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{4}
}
func (m *Sentiment) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Sentiment.Unmarshal(m, b)
}
func (m *Sentiment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Sentiment.Marshal(b, m, deterministic)
}
func (dst *Sentiment) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Sentiment.Merge(dst, src)
}
func (m *Sentiment) XXX_Size() int {
	return xxx_messageInfo_Sentiment.Size(m)
}
func (m *Sentiment) XXX_DiscardUnknown() {
	xxx_messageInfo_Sentiment.DiscardUnknown(m)
}

var xxx_messageInfo_Sentiment proto.InternalMessageInfo
| |
| func (m *Sentiment) GetMagnitude() float32 { |
| if m != nil { |
| return m.Magnitude |
| } |
| return 0 |
| } |
| |
| func (m *Sentiment) GetScore() float32 { |
| if m != nil { |
| return m.Score |
| } |
| return 0 |
| } |
| |
// Represents part of speech information for a token. Parts of speech
// are as defined in
// http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
type PartOfSpeech struct {
	// The part of speech tag.
	Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,proto3,enum=google.cloud.language.v1.PartOfSpeech_Tag" json:"tag,omitempty"`
	// The grammatical aspect.
	Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,proto3,enum=google.cloud.language.v1.PartOfSpeech_Aspect" json:"aspect,omitempty"`
	// The grammatical case.
	Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,proto3,enum=google.cloud.language.v1.PartOfSpeech_Case" json:"case,omitempty"`
	// The grammatical form.
	Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,proto3,enum=google.cloud.language.v1.PartOfSpeech_Form" json:"form,omitempty"`
	// The grammatical gender.
	Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,proto3,enum=google.cloud.language.v1.PartOfSpeech_Gender" json:"gender,omitempty"`
	// The grammatical mood.
	Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,proto3,enum=google.cloud.language.v1.PartOfSpeech_Mood" json:"mood,omitempty"`
	// The grammatical number.
	Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,proto3,enum=google.cloud.language.v1.PartOfSpeech_Number" json:"number,omitempty"`
	// The grammatical person.
	Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,proto3,enum=google.cloud.language.v1.PartOfSpeech_Person" json:"person,omitempty"`
	// The grammatical properness.
	Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,proto3,enum=google.cloud.language.v1.PartOfSpeech_Proper" json:"proper,omitempty"`
	// The grammatical reciprocity.
	Reciprocity PartOfSpeech_Reciprocity `protobuf:"varint,10,opt,name=reciprocity,proto3,enum=google.cloud.language.v1.PartOfSpeech_Reciprocity" json:"reciprocity,omitempty"`
	// The grammatical tense.
	Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,proto3,enum=google.cloud.language.v1.PartOfSpeech_Tense" json:"tense,omitempty"`
	// The grammatical voice.
	Voice PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,proto3,enum=google.cloud.language.v1.PartOfSpeech_Voice" json:"voice,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for PartOfSpeech, generated by protoc-gen-go.
func (m *PartOfSpeech) Reset() { *m = PartOfSpeech{} }
func (m *PartOfSpeech) String() string { return proto.CompactTextString(m) }
func (*PartOfSpeech) ProtoMessage() {}
func (*PartOfSpeech) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{5}
}
func (m *PartOfSpeech) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PartOfSpeech.Unmarshal(m, b)
}
func (m *PartOfSpeech) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PartOfSpeech.Marshal(b, m, deterministic)
}
func (dst *PartOfSpeech) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PartOfSpeech.Merge(dst, src)
}
func (m *PartOfSpeech) XXX_Size() int {
	return xxx_messageInfo_PartOfSpeech.Size(m)
}
func (m *PartOfSpeech) XXX_DiscardUnknown() {
	xxx_messageInfo_PartOfSpeech.DiscardUnknown(m)
}

var xxx_messageInfo_PartOfSpeech proto.InternalMessageInfo
| |
| func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag { |
| if m != nil { |
| return m.Tag |
| } |
| return PartOfSpeech_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect { |
| if m != nil { |
| return m.Aspect |
| } |
| return PartOfSpeech_ASPECT_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetCase() PartOfSpeech_Case { |
| if m != nil { |
| return m.Case |
| } |
| return PartOfSpeech_CASE_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetForm() PartOfSpeech_Form { |
| if m != nil { |
| return m.Form |
| } |
| return PartOfSpeech_FORM_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender { |
| if m != nil { |
| return m.Gender |
| } |
| return PartOfSpeech_GENDER_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood { |
| if m != nil { |
| return m.Mood |
| } |
| return PartOfSpeech_MOOD_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number { |
| if m != nil { |
| return m.Number |
| } |
| return PartOfSpeech_NUMBER_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person { |
| if m != nil { |
| return m.Person |
| } |
| return PartOfSpeech_PERSON_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper { |
| if m != nil { |
| return m.Proper |
| } |
| return PartOfSpeech_PROPER_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity { |
| if m != nil { |
| return m.Reciprocity |
| } |
| return PartOfSpeech_RECIPROCITY_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense { |
| if m != nil { |
| return m.Tense |
| } |
| return PartOfSpeech_TENSE_UNKNOWN |
| } |
| |
| func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice { |
| if m != nil { |
| return m.Voice |
| } |
| return PartOfSpeech_VOICE_UNKNOWN |
| } |
| |
// Represents dependency parse tree information for a token. (For more
// information on dependency labels, see
// http://www.aclweb.org/anthology/P13-2017.)
type DependencyEdge struct {
	// Represents the head of this token in the dependency tree.
	// This is the index of the token which has an arc going to this token.
	// The index is the position of the token in the array of tokens returned
	// by the API method. If this token is a root token, then the
	// `head_token_index` is its own index.
	HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex,proto3" json:"head_token_index,omitempty"`
	// The parse label for the token.
	Label DependencyEdge_Label `protobuf:"varint,2,opt,name=label,proto3,enum=google.cloud.language.v1.DependencyEdge_Label" json:"label,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for DependencyEdge, generated by protoc-gen-go.
func (m *DependencyEdge) Reset() { *m = DependencyEdge{} }
func (m *DependencyEdge) String() string { return proto.CompactTextString(m) }
func (*DependencyEdge) ProtoMessage() {}
func (*DependencyEdge) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{6}
}
func (m *DependencyEdge) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DependencyEdge.Unmarshal(m, b)
}
func (m *DependencyEdge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DependencyEdge.Marshal(b, m, deterministic)
}
func (dst *DependencyEdge) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DependencyEdge.Merge(dst, src)
}
func (m *DependencyEdge) XXX_Size() int {
	return xxx_messageInfo_DependencyEdge.Size(m)
}
func (m *DependencyEdge) XXX_DiscardUnknown() {
	xxx_messageInfo_DependencyEdge.DiscardUnknown(m)
}

var xxx_messageInfo_DependencyEdge proto.InternalMessageInfo
| |
| func (m *DependencyEdge) GetHeadTokenIndex() int32 { |
| if m != nil { |
| return m.HeadTokenIndex |
| } |
| return 0 |
| } |
| |
| func (m *DependencyEdge) GetLabel() DependencyEdge_Label { |
| if m != nil { |
| return m.Label |
| } |
| return DependencyEdge_UNKNOWN |
| } |
| |
// Represents a mention for an entity in the text. Currently, proper noun
// mentions are supported.
type EntityMention struct {
	// The mention text.
	Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// The type of the entity mention.
	Type EntityMention_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.language.v1.EntityMention_Type" json:"type,omitempty"`
	// For calls to [AnalyzeEntitySentiment][] or if
	// [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
	// true, this field will contain the sentiment expressed for this mention of
	// the entity in the provided document.
	Sentiment *Sentiment `protobuf:"bytes,3,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for EntityMention, generated by protoc-gen-go.
func (m *EntityMention) Reset() { *m = EntityMention{} }
func (m *EntityMention) String() string { return proto.CompactTextString(m) }
func (*EntityMention) ProtoMessage() {}
func (*EntityMention) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{7}
}
func (m *EntityMention) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_EntityMention.Unmarshal(m, b)
}
func (m *EntityMention) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_EntityMention.Marshal(b, m, deterministic)
}
func (dst *EntityMention) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EntityMention.Merge(dst, src)
}
func (m *EntityMention) XXX_Size() int {
	return xxx_messageInfo_EntityMention.Size(m)
}
func (m *EntityMention) XXX_DiscardUnknown() {
	xxx_messageInfo_EntityMention.DiscardUnknown(m)
}

var xxx_messageInfo_EntityMention proto.InternalMessageInfo
| |
| func (m *EntityMention) GetText() *TextSpan { |
| if m != nil { |
| return m.Text |
| } |
| return nil |
| } |
| |
| func (m *EntityMention) GetType() EntityMention_Type { |
| if m != nil { |
| return m.Type |
| } |
| return EntityMention_TYPE_UNKNOWN |
| } |
| |
| func (m *EntityMention) GetSentiment() *Sentiment { |
| if m != nil { |
| return m.Sentiment |
| } |
| return nil |
| } |
| |
// Represents an output piece of text.
type TextSpan struct {
	// The content of the output text.
	Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
	// The API calculates the beginning offset of the content in the original
	// document according to the [EncodingType][google.cloud.language.v1.EncodingType] specified in the API request.
	BeginOffset int32 `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset,proto3" json:"begin_offset,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for TextSpan, generated by protoc-gen-go.
func (m *TextSpan) Reset() { *m = TextSpan{} }
func (m *TextSpan) String() string { return proto.CompactTextString(m) }
func (*TextSpan) ProtoMessage() {}
func (*TextSpan) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{8}
}
func (m *TextSpan) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_TextSpan.Unmarshal(m, b)
}
func (m *TextSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_TextSpan.Marshal(b, m, deterministic)
}
func (dst *TextSpan) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TextSpan.Merge(dst, src)
}
func (m *TextSpan) XXX_Size() int {
	return xxx_messageInfo_TextSpan.Size(m)
}
func (m *TextSpan) XXX_DiscardUnknown() {
	xxx_messageInfo_TextSpan.DiscardUnknown(m)
}

var xxx_messageInfo_TextSpan proto.InternalMessageInfo
| |
| func (m *TextSpan) GetContent() string { |
| if m != nil { |
| return m.Content |
| } |
| return "" |
| } |
| |
| func (m *TextSpan) GetBeginOffset() int32 { |
| if m != nil { |
| return m.BeginOffset |
| } |
| return 0 |
| } |
| |
// Represents a category returned from the text classifier.
type ClassificationCategory struct {
	// The name of the category representing the document.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The classifier's confidence of the category. Number represents how certain
	// the classifier is that this category represents the given text.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for ClassificationCategory, generated by protoc-gen-go.
func (m *ClassificationCategory) Reset() { *m = ClassificationCategory{} }
func (m *ClassificationCategory) String() string { return proto.CompactTextString(m) }
func (*ClassificationCategory) ProtoMessage() {}
func (*ClassificationCategory) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{9}
}
func (m *ClassificationCategory) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClassificationCategory.Unmarshal(m, b)
}
func (m *ClassificationCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClassificationCategory.Marshal(b, m, deterministic)
}
func (dst *ClassificationCategory) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClassificationCategory.Merge(dst, src)
}
func (m *ClassificationCategory) XXX_Size() int {
	return xxx_messageInfo_ClassificationCategory.Size(m)
}
func (m *ClassificationCategory) XXX_DiscardUnknown() {
	xxx_messageInfo_ClassificationCategory.DiscardUnknown(m)
}

var xxx_messageInfo_ClassificationCategory proto.InternalMessageInfo
| |
| func (m *ClassificationCategory) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *ClassificationCategory) GetConfidence() float32 { |
| if m != nil { |
| return m.Confidence |
| } |
| return 0 |
| } |
| |
// The sentiment analysis request message.
type AnalyzeSentimentRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	// The encoding type used by the API to calculate sentence offsets.
	EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for AnalyzeSentimentRequest, generated by protoc-gen-go.
func (m *AnalyzeSentimentRequest) Reset() { *m = AnalyzeSentimentRequest{} }
func (m *AnalyzeSentimentRequest) String() string { return proto.CompactTextString(m) }
func (*AnalyzeSentimentRequest) ProtoMessage() {}
func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{10}
}
func (m *AnalyzeSentimentRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeSentimentRequest.Unmarshal(m, b)
}
func (m *AnalyzeSentimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeSentimentRequest.Marshal(b, m, deterministic)
}
func (dst *AnalyzeSentimentRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeSentimentRequest.Merge(dst, src)
}
func (m *AnalyzeSentimentRequest) XXX_Size() int {
	return xxx_messageInfo_AnalyzeSentimentRequest.Size(m)
}
func (m *AnalyzeSentimentRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeSentimentRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeSentimentRequest proto.InternalMessageInfo
| |
| func (m *AnalyzeSentimentRequest) GetDocument() *Document { |
| if m != nil { |
| return m.Document |
| } |
| return nil |
| } |
| |
| func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType { |
| if m != nil { |
| return m.EncodingType |
| } |
| return EncodingType_NONE |
| } |
| |
// The sentiment analysis response message.
type AnalyzeSentimentResponse struct {
	// The overall sentiment of the input document.
	DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment,proto3" json:"document_sentiment,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1.Document.language] field for more details.
	Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
	// The sentiment for all the sentences in the document.
	Sentences []*Sentence `protobuf:"bytes,3,rep,name=sentences,proto3" json:"sentences,omitempty"`
	// Internal proto runtime bookkeeping; do not use directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard proto.Message plumbing for AnalyzeSentimentResponse, generated by protoc-gen-go.
func (m *AnalyzeSentimentResponse) Reset() { *m = AnalyzeSentimentResponse{} }
func (m *AnalyzeSentimentResponse) String() string { return proto.CompactTextString(m) }
func (*AnalyzeSentimentResponse) ProtoMessage() {}
func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{11}
}
func (m *AnalyzeSentimentResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeSentimentResponse.Unmarshal(m, b)
}
func (m *AnalyzeSentimentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeSentimentResponse.Marshal(b, m, deterministic)
}
func (dst *AnalyzeSentimentResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeSentimentResponse.Merge(dst, src)
}
func (m *AnalyzeSentimentResponse) XXX_Size() int {
	return xxx_messageInfo_AnalyzeSentimentResponse.Size(m)
}
func (m *AnalyzeSentimentResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeSentimentResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeSentimentResponse proto.InternalMessageInfo
| |
| func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment { |
| if m != nil { |
| return m.DocumentSentiment |
| } |
| return nil |
| } |
| |
| func (m *AnalyzeSentimentResponse) GetLanguage() string { |
| if m != nil { |
| return m.Language |
| } |
| return "" |
| } |
| |
| func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence { |
| if m != nil { |
| return m.Sentences |
| } |
| return nil |
| } |
| |
// The entity-level sentiment analysis request message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeEntitySentimentRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeEntitySentimentRequest) Reset()         { *m = AnalyzeEntitySentimentRequest{} }
func (m *AnalyzeEntitySentimentRequest) String() string { return proto.CompactTextString(m) }
func (*AnalyzeEntitySentimentRequest) ProtoMessage()    {}
func (*AnalyzeEntitySentimentRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{12}
}
func (m *AnalyzeEntitySentimentRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeEntitySentimentRequest.Unmarshal(m, b)
}
func (m *AnalyzeEntitySentimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeEntitySentimentRequest.Marshal(b, m, deterministic)
}
func (dst *AnalyzeEntitySentimentRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeEntitySentimentRequest.Merge(dst, src)
}
func (m *AnalyzeEntitySentimentRequest) XXX_Size() int {
	return xxx_messageInfo_AnalyzeEntitySentimentRequest.Size(m)
}
func (m *AnalyzeEntitySentimentRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeEntitySentimentRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeEntitySentimentRequest proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeEntitySentimentRequest) GetDocument() *Document {
	if m != nil {
		return m.Document
	}
	return nil
}

func (m *AnalyzeEntitySentimentRequest) GetEncodingType() EncodingType {
	if m != nil {
		return m.EncodingType
	}
	return EncodingType_NONE
}
| |
// The entity-level sentiment analysis response message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeEntitySentimentResponse struct {
	// The recognized entities in the input document with associated sentiments.
	Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1.Document.language] field for more details.
	Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeEntitySentimentResponse) Reset()         { *m = AnalyzeEntitySentimentResponse{} }
func (m *AnalyzeEntitySentimentResponse) String() string { return proto.CompactTextString(m) }
func (*AnalyzeEntitySentimentResponse) ProtoMessage()    {}
func (*AnalyzeEntitySentimentResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{13}
}
func (m *AnalyzeEntitySentimentResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeEntitySentimentResponse.Unmarshal(m, b)
}
func (m *AnalyzeEntitySentimentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeEntitySentimentResponse.Marshal(b, m, deterministic)
}
func (dst *AnalyzeEntitySentimentResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeEntitySentimentResponse.Merge(dst, src)
}
func (m *AnalyzeEntitySentimentResponse) XXX_Size() int {
	return xxx_messageInfo_AnalyzeEntitySentimentResponse.Size(m)
}
func (m *AnalyzeEntitySentimentResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeEntitySentimentResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeEntitySentimentResponse proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeEntitySentimentResponse) GetEntities() []*Entity {
	if m != nil {
		return m.Entities
	}
	return nil
}

func (m *AnalyzeEntitySentimentResponse) GetLanguage() string {
	if m != nil {
		return m.Language
	}
	return ""
}
| |
// The entity analysis request message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeEntitiesRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeEntitiesRequest) Reset()         { *m = AnalyzeEntitiesRequest{} }
func (m *AnalyzeEntitiesRequest) String() string { return proto.CompactTextString(m) }
func (*AnalyzeEntitiesRequest) ProtoMessage()    {}
func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{14}
}
func (m *AnalyzeEntitiesRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeEntitiesRequest.Unmarshal(m, b)
}
func (m *AnalyzeEntitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeEntitiesRequest.Marshal(b, m, deterministic)
}
func (dst *AnalyzeEntitiesRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeEntitiesRequest.Merge(dst, src)
}
func (m *AnalyzeEntitiesRequest) XXX_Size() int {
	return xxx_messageInfo_AnalyzeEntitiesRequest.Size(m)
}
func (m *AnalyzeEntitiesRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeEntitiesRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeEntitiesRequest proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeEntitiesRequest) GetDocument() *Document {
	if m != nil {
		return m.Document
	}
	return nil
}

func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType {
	if m != nil {
		return m.EncodingType
	}
	return EncodingType_NONE
}
| |
// The entity analysis response message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeEntitiesResponse struct {
	// The recognized entities in the input document.
	Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1.Document.language] field for more details.
	Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeEntitiesResponse) Reset()         { *m = AnalyzeEntitiesResponse{} }
func (m *AnalyzeEntitiesResponse) String() string { return proto.CompactTextString(m) }
func (*AnalyzeEntitiesResponse) ProtoMessage()    {}
func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{15}
}
func (m *AnalyzeEntitiesResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeEntitiesResponse.Unmarshal(m, b)
}
func (m *AnalyzeEntitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeEntitiesResponse.Marshal(b, m, deterministic)
}
func (dst *AnalyzeEntitiesResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeEntitiesResponse.Merge(dst, src)
}
func (m *AnalyzeEntitiesResponse) XXX_Size() int {
	return xxx_messageInfo_AnalyzeEntitiesResponse.Size(m)
}
func (m *AnalyzeEntitiesResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeEntitiesResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeEntitiesResponse proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity {
	if m != nil {
		return m.Entities
	}
	return nil
}

func (m *AnalyzeEntitiesResponse) GetLanguage() string {
	if m != nil {
		return m.Language
	}
	return ""
}
| |
// The syntax analysis request message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeSyntaxRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeSyntaxRequest) Reset()         { *m = AnalyzeSyntaxRequest{} }
func (m *AnalyzeSyntaxRequest) String() string { return proto.CompactTextString(m) }
func (*AnalyzeSyntaxRequest) ProtoMessage()    {}
func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{16}
}
func (m *AnalyzeSyntaxRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeSyntaxRequest.Unmarshal(m, b)
}
func (m *AnalyzeSyntaxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeSyntaxRequest.Marshal(b, m, deterministic)
}
func (dst *AnalyzeSyntaxRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeSyntaxRequest.Merge(dst, src)
}
func (m *AnalyzeSyntaxRequest) XXX_Size() int {
	return xxx_messageInfo_AnalyzeSyntaxRequest.Size(m)
}
func (m *AnalyzeSyntaxRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeSyntaxRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeSyntaxRequest proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeSyntaxRequest) GetDocument() *Document {
	if m != nil {
		return m.Document
	}
	return nil
}

func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType {
	if m != nil {
		return m.EncodingType
	}
	return EncodingType_NONE
}
| |
// The syntax analysis response message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnalyzeSyntaxResponse struct {
	// Sentences in the input document.
	Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences,proto3" json:"sentences,omitempty"`
	// Tokens, along with their syntactic information, in the input document.
	Tokens []*Token `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1.Document.language] field for more details.
	Language string `protobuf:"bytes,3,opt,name=language,proto3" json:"language,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnalyzeSyntaxResponse) Reset()         { *m = AnalyzeSyntaxResponse{} }
func (m *AnalyzeSyntaxResponse) String() string { return proto.CompactTextString(m) }
func (*AnalyzeSyntaxResponse) ProtoMessage()    {}
func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{17}
}
func (m *AnalyzeSyntaxResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnalyzeSyntaxResponse.Unmarshal(m, b)
}
func (m *AnalyzeSyntaxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnalyzeSyntaxResponse.Marshal(b, m, deterministic)
}
func (dst *AnalyzeSyntaxResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnalyzeSyntaxResponse.Merge(dst, src)
}
func (m *AnalyzeSyntaxResponse) XXX_Size() int {
	return xxx_messageInfo_AnalyzeSyntaxResponse.Size(m)
}
func (m *AnalyzeSyntaxResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AnalyzeSyntaxResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AnalyzeSyntaxResponse proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence {
	if m != nil {
		return m.Sentences
	}
	return nil
}

func (m *AnalyzeSyntaxResponse) GetTokens() []*Token {
	if m != nil {
		return m.Tokens
	}
	return nil
}

func (m *AnalyzeSyntaxResponse) GetLanguage() string {
	if m != nil {
		return m.Language
	}
	return ""
}
| |
// The document classification request message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added. Unlike the analyze requests, this message
// carries no EncodingType field (classification returns no offsets).
type ClassifyTextRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *ClassifyTextRequest) Reset()         { *m = ClassifyTextRequest{} }
func (m *ClassifyTextRequest) String() string { return proto.CompactTextString(m) }
func (*ClassifyTextRequest) ProtoMessage()    {}
func (*ClassifyTextRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{18}
}
func (m *ClassifyTextRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClassifyTextRequest.Unmarshal(m, b)
}
func (m *ClassifyTextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClassifyTextRequest.Marshal(b, m, deterministic)
}
func (dst *ClassifyTextRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClassifyTextRequest.Merge(dst, src)
}
func (m *ClassifyTextRequest) XXX_Size() int {
	return xxx_messageInfo_ClassifyTextRequest.Size(m)
}
func (m *ClassifyTextRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ClassifyTextRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ClassifyTextRequest proto.InternalMessageInfo

// GetDocument is nil-receiver safe (returns nil when m is nil).
func (m *ClassifyTextRequest) GetDocument() *Document {
	if m != nil {
		return m.Document
	}
	return nil
}
| |
// The document classification response message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type ClassifyTextResponse struct {
	// Categories representing the input document.
	Categories []*ClassificationCategory `protobuf:"bytes,1,rep,name=categories,proto3" json:"categories,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *ClassifyTextResponse) Reset()         { *m = ClassifyTextResponse{} }
func (m *ClassifyTextResponse) String() string { return proto.CompactTextString(m) }
func (*ClassifyTextResponse) ProtoMessage()    {}
func (*ClassifyTextResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{19}
}
func (m *ClassifyTextResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClassifyTextResponse.Unmarshal(m, b)
}
func (m *ClassifyTextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClassifyTextResponse.Marshal(b, m, deterministic)
}
func (dst *ClassifyTextResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClassifyTextResponse.Merge(dst, src)
}
func (m *ClassifyTextResponse) XXX_Size() int {
	return xxx_messageInfo_ClassifyTextResponse.Size(m)
}
func (m *ClassifyTextResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ClassifyTextResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ClassifyTextResponse proto.InternalMessageInfo

// GetCategories is nil-receiver safe (returns nil when m is nil).
func (m *ClassifyTextResponse) GetCategories() []*ClassificationCategory {
	if m != nil {
		return m.Categories
	}
	return nil
}
| |
// The request message for the text annotation API, which can perform multiple
// analysis types (sentiment, entities, and syntax) in one call.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnnotateTextRequest struct {
	// Input document.
	Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
	// The enabled features.
	Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features,proto3" json:"features,omitempty"`
	// The encoding type used by the API to calculate offsets.
	EncodingType EncodingType `protobuf:"varint,3,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnnotateTextRequest) Reset()         { *m = AnnotateTextRequest{} }
func (m *AnnotateTextRequest) String() string { return proto.CompactTextString(m) }
func (*AnnotateTextRequest) ProtoMessage()    {}
func (*AnnotateTextRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{20}
}
func (m *AnnotateTextRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnnotateTextRequest.Unmarshal(m, b)
}
func (m *AnnotateTextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnnotateTextRequest.Marshal(b, m, deterministic)
}
func (dst *AnnotateTextRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnnotateTextRequest.Merge(dst, src)
}
func (m *AnnotateTextRequest) XXX_Size() int {
	return xxx_messageInfo_AnnotateTextRequest.Size(m)
}
func (m *AnnotateTextRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AnnotateTextRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AnnotateTextRequest proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnnotateTextRequest) GetDocument() *Document {
	if m != nil {
		return m.Document
	}
	return nil
}

func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features {
	if m != nil {
		return m.Features
	}
	return nil
}

func (m *AnnotateTextRequest) GetEncodingType() EncodingType {
	if m != nil {
		return m.EncodingType
	}
	return EncodingType_NONE
}
| |
// All available features for sentiment, syntax, and semantic analysis.
// Setting each one to true will enable that specific analysis for the input.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added. Field number 5 is skipped in the tags
// (1,2,3,4,6) — presumably reserved in the .proto; confirm against the
// source proto before reusing it.
type AnnotateTextRequest_Features struct {
	// Extract syntax information.
	ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax,proto3" json:"extract_syntax,omitempty"`
	// Extract entities.
	ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities,proto3" json:"extract_entities,omitempty"`
	// Extract document-level sentiment.
	ExtractDocumentSentiment bool `protobuf:"varint,3,opt,name=extract_document_sentiment,json=extractDocumentSentiment,proto3" json:"extract_document_sentiment,omitempty"`
	// Extract entities and their associated sentiment.
	ExtractEntitySentiment bool `protobuf:"varint,4,opt,name=extract_entity_sentiment,json=extractEntitySentiment,proto3" json:"extract_entity_sentiment,omitempty"`
	// Classify the full document into categories.
	ClassifyText bool `protobuf:"varint,6,opt,name=classify_text,json=classifyText,proto3" json:"classify_text,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing. The descriptor index {20, 0}
// identifies this as the first nested message of message 20 (AnnotateTextRequest).
func (m *AnnotateTextRequest_Features) Reset()         { *m = AnnotateTextRequest_Features{} }
func (m *AnnotateTextRequest_Features) String() string { return proto.CompactTextString(m) }
func (*AnnotateTextRequest_Features) ProtoMessage()    {}
func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{20, 0}
}
func (m *AnnotateTextRequest_Features) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnnotateTextRequest_Features.Unmarshal(m, b)
}
func (m *AnnotateTextRequest_Features) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnnotateTextRequest_Features.Marshal(b, m, deterministic)
}
func (dst *AnnotateTextRequest_Features) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnnotateTextRequest_Features.Merge(dst, src)
}
func (m *AnnotateTextRequest_Features) XXX_Size() int {
	return xxx_messageInfo_AnnotateTextRequest_Features.Size(m)
}
func (m *AnnotateTextRequest_Features) XXX_DiscardUnknown() {
	xxx_messageInfo_AnnotateTextRequest_Features.DiscardUnknown(m)
}

var xxx_messageInfo_AnnotateTextRequest_Features proto.InternalMessageInfo

// Nil-receiver-safe getters (return false when m is nil).

func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool {
	if m != nil {
		return m.ExtractSyntax
	}
	return false
}

func (m *AnnotateTextRequest_Features) GetExtractEntities() bool {
	if m != nil {
		return m.ExtractEntities
	}
	return false
}

func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool {
	if m != nil {
		return m.ExtractDocumentSentiment
	}
	return false
}

func (m *AnnotateTextRequest_Features) GetExtractEntitySentiment() bool {
	if m != nil {
		return m.ExtractEntitySentiment
	}
	return false
}

func (m *AnnotateTextRequest_Features) GetClassifyText() bool {
	if m != nil {
		return m.ClassifyText
	}
	return false
}
| |
// The text annotations response message.
//
// NOTE(review): generated by protoc-gen-go — code is kept byte-identical;
// only review comments are added.
type AnnotateTextResponse struct {
	// Sentences in the input document. Populated if the user enables
	// [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
	Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences,proto3" json:"sentences,omitempty"`
	// Tokens, along with their syntactic information, in the input document.
	// Populated if the user enables
	// [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
	Tokens []*Token `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens,omitempty"`
	// Entities, along with their semantic information, in the input document.
	// Populated if the user enables
	// [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities].
	Entities []*Entity `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities,omitempty"`
	// The overall sentiment for the document. Populated if the user enables
	// [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment].
	DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment,proto3" json:"document_sentiment,omitempty"`
	// The language of the text, which will be the same as the language specified
	// in the request or, if not specified, the automatically-detected language.
	// See [Document.language][google.cloud.language.v1.Document.language] field for more details.
	Language string `protobuf:"bytes,5,opt,name=language,proto3" json:"language,omitempty"`
	// Categories identified in the input document.
	Categories []*ClassificationCategory `protobuf:"bytes,6,rep,name=categories,proto3" json:"categories,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message plumbing.
func (m *AnnotateTextResponse) Reset()         { *m = AnnotateTextResponse{} }
func (m *AnnotateTextResponse) String() string { return proto.CompactTextString(m) }
func (*AnnotateTextResponse) ProtoMessage()    {}
func (*AnnotateTextResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_language_service_7142fb5eff0cd389, []int{21}
}
func (m *AnnotateTextResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AnnotateTextResponse.Unmarshal(m, b)
}
func (m *AnnotateTextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AnnotateTextResponse.Marshal(b, m, deterministic)
}
func (dst *AnnotateTextResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AnnotateTextResponse.Merge(dst, src)
}
func (m *AnnotateTextResponse) XXX_Size() int {
	return xxx_messageInfo_AnnotateTextResponse.Size(m)
}
func (m *AnnotateTextResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AnnotateTextResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AnnotateTextResponse proto.InternalMessageInfo

// Nil-receiver-safe getters (return the zero value when m is nil).

func (m *AnnotateTextResponse) GetSentences() []*Sentence {
	if m != nil {
		return m.Sentences
	}
	return nil
}

func (m *AnnotateTextResponse) GetTokens() []*Token {
	if m != nil {
		return m.Tokens
	}
	return nil
}

func (m *AnnotateTextResponse) GetEntities() []*Entity {
	if m != nil {
		return m.Entities
	}
	return nil
}

func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment {
	if m != nil {
		return m.DocumentSentiment
	}
	return nil
}

func (m *AnnotateTextResponse) GetLanguage() string {
	if m != nil {
		return m.Language
	}
	return ""
}

func (m *AnnotateTextResponse) GetCategories() []*ClassificationCategory {
	if m != nil {
		return m.Categories
	}
	return nil
}
| |
// init registers every generated message type, map type, and enum with the
// global proto registry under its fully-qualified proto name, so that
// reflection-based (un)marshaling and Any resolution can find them.
// Generated by protoc-gen-go — do not hand-edit.
func init() {
	proto.RegisterType((*Document)(nil), "google.cloud.language.v1.Document")
	proto.RegisterType((*Sentence)(nil), "google.cloud.language.v1.Sentence")
	proto.RegisterType((*Entity)(nil), "google.cloud.language.v1.Entity")
	proto.RegisterMapType((map[string]string)(nil), "google.cloud.language.v1.Entity.MetadataEntry")
	proto.RegisterType((*Token)(nil), "google.cloud.language.v1.Token")
	proto.RegisterType((*Sentiment)(nil), "google.cloud.language.v1.Sentiment")
	proto.RegisterType((*PartOfSpeech)(nil), "google.cloud.language.v1.PartOfSpeech")
	proto.RegisterType((*DependencyEdge)(nil), "google.cloud.language.v1.DependencyEdge")
	proto.RegisterType((*EntityMention)(nil), "google.cloud.language.v1.EntityMention")
	proto.RegisterType((*TextSpan)(nil), "google.cloud.language.v1.TextSpan")
	proto.RegisterType((*ClassificationCategory)(nil), "google.cloud.language.v1.ClassificationCategory")
	proto.RegisterType((*AnalyzeSentimentRequest)(nil), "google.cloud.language.v1.AnalyzeSentimentRequest")
	proto.RegisterType((*AnalyzeSentimentResponse)(nil), "google.cloud.language.v1.AnalyzeSentimentResponse")
	proto.RegisterType((*AnalyzeEntitySentimentRequest)(nil), "google.cloud.language.v1.AnalyzeEntitySentimentRequest")
	proto.RegisterType((*AnalyzeEntitySentimentResponse)(nil), "google.cloud.language.v1.AnalyzeEntitySentimentResponse")
	proto.RegisterType((*AnalyzeEntitiesRequest)(nil), "google.cloud.language.v1.AnalyzeEntitiesRequest")
	proto.RegisterType((*AnalyzeEntitiesResponse)(nil), "google.cloud.language.v1.AnalyzeEntitiesResponse")
	proto.RegisterType((*AnalyzeSyntaxRequest)(nil), "google.cloud.language.v1.AnalyzeSyntaxRequest")
	proto.RegisterType((*AnalyzeSyntaxResponse)(nil), "google.cloud.language.v1.AnalyzeSyntaxResponse")
	proto.RegisterType((*ClassifyTextRequest)(nil), "google.cloud.language.v1.ClassifyTextRequest")
	proto.RegisterType((*ClassifyTextResponse)(nil), "google.cloud.language.v1.ClassifyTextResponse")
	proto.RegisterType((*AnnotateTextRequest)(nil), "google.cloud.language.v1.AnnotateTextRequest")
	proto.RegisterType((*AnnotateTextRequest_Features)(nil), "google.cloud.language.v1.AnnotateTextRequest.Features")
	proto.RegisterType((*AnnotateTextResponse)(nil), "google.cloud.language.v1.AnnotateTextResponse")
	proto.RegisterEnum("google.cloud.language.v1.EncodingType", EncodingType_name, EncodingType_value)
	proto.RegisterEnum("google.cloud.language.v1.Document_Type", Document_Type_name, Document_Type_value)
	proto.RegisterEnum("google.cloud.language.v1.Entity_Type", Entity_Type_name, Entity_Type_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Tag", PartOfSpeech_Tag_name, PartOfSpeech_Tag_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Aspect", PartOfSpeech_Aspect_name, PartOfSpeech_Aspect_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Case", PartOfSpeech_Case_name, PartOfSpeech_Case_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Form", PartOfSpeech_Form_name, PartOfSpeech_Form_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Gender", PartOfSpeech_Gender_name, PartOfSpeech_Gender_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Mood", PartOfSpeech_Mood_name, PartOfSpeech_Mood_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Number", PartOfSpeech_Number_name, PartOfSpeech_Number_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Person", PartOfSpeech_Person_name, PartOfSpeech_Person_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Proper", PartOfSpeech_Proper_name, PartOfSpeech_Proper_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Reciprocity", PartOfSpeech_Reciprocity_name, PartOfSpeech_Reciprocity_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Tense", PartOfSpeech_Tense_name, PartOfSpeech_Tense_value)
	proto.RegisterEnum("google.cloud.language.v1.PartOfSpeech_Voice", PartOfSpeech_Voice_name, PartOfSpeech_Voice_value)
	proto.RegisterEnum("google.cloud.language.v1.DependencyEdge_Label", DependencyEdge_Label_name, DependencyEdge_Label_value)
	proto.RegisterEnum("google.cloud.language.v1.EntityMention_Type", EntityMention_Type_name, EntityMention_Type_value)
}
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ context.Context |
| var _ grpc.ClientConn |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the grpc package it is being compiled against. |
| const _ = grpc.SupportPackageIsVersion4 |
| |
// LanguageServiceClient is the client API for LanguageService service.
//
// Each method corresponds to one unary RPC on the remote service; the
// concrete implementation is created with NewLanguageServiceClient.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type LanguageServiceClient interface {
	// Analyzes the sentiment of the provided text.
	AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error)
	// Finds named entities (currently proper names and common nouns) in the text
	// along with entity types, salience, mentions for each entity, and
	// other properties.
	AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error)
	// Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
	// sentiment associated with each entity and its mentions.
	AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error)
	// Analyzes the syntax of the text and provides sentence boundaries and
	// tokenization along with part of speech tags, dependency trees, and other
	// properties.
	AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error)
	// Classifies a document into categories.
	ClassifyText(ctx context.Context, in *ClassifyTextRequest, opts ...grpc.CallOption) (*ClassifyTextResponse, error)
	// A convenience method that provides all the features that analyzeSentiment,
	// analyzeEntities, and analyzeSyntax provide in one call.
	AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error)
}
| |
// languageServiceClient implements LanguageServiceClient by issuing unary
// RPCs over a single *grpc.ClientConn.
type languageServiceClient struct {
	cc *grpc.ClientConn
}
| |
| func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient { |
| return &languageServiceClient{cc} |
| } |
| |
| func (c *languageServiceClient) AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error) { |
| out := new(AnalyzeSentimentResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *languageServiceClient) AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error) { |
| out := new(AnalyzeEntitiesResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeEntities", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *languageServiceClient) AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error) { |
| out := new(AnalyzeEntitySentimentResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *languageServiceClient) AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error) { |
| out := new(AnalyzeSyntaxResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *languageServiceClient) ClassifyText(ctx context.Context, in *ClassifyTextRequest, opts ...grpc.CallOption) (*ClassifyTextResponse, error) { |
| out := new(ClassifyTextResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/ClassifyText", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *languageServiceClient) AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error) { |
| out := new(AnnotateTextResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.language.v1.LanguageService/AnnotateText", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
// LanguageServiceServer is the server API for LanguageService service.
// Implementations are registered with RegisterLanguageServiceServer; every
// method must be implemented.
type LanguageServiceServer interface {
	// Analyzes the sentiment of the provided text.
	AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error)
	// Finds named entities (currently proper names and common nouns) in the text
	// along with entity types, salience, mentions for each entity, and
	// other properties.
	AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error)
	// Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
	// sentiment associated with each entity and its mentions.
	AnalyzeEntitySentiment(context.Context, *AnalyzeEntitySentimentRequest) (*AnalyzeEntitySentimentResponse, error)
	// Analyzes the syntax of the text and provides sentence boundaries and
	// tokenization along with part of speech tags, dependency trees, and other
	// properties.
	AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error)
	// Classifies a document into categories.
	ClassifyText(context.Context, *ClassifyTextRequest) (*ClassifyTextResponse, error)
	// A convenience method that provides all the features that analyzeSentiment,
	// analyzeEntities, and analyzeSyntax provide in one call.
	AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error)
}
| |
// RegisterLanguageServiceServer registers the LanguageServiceServer
// implementation srv with the gRPC server s using the generated service
// descriptor.
func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer) {
	s.RegisterService(&_LanguageService_serviceDesc, srv)
}
| |
| func _LanguageService_AnalyzeSentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(AnalyzeSentimentRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeSentiment", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).AnalyzeSentiment(ctx, req.(*AnalyzeSentimentRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _LanguageService_AnalyzeEntities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(AnalyzeEntitiesRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).AnalyzeEntities(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeEntities", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).AnalyzeEntities(ctx, req.(*AnalyzeEntitiesRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _LanguageService_AnalyzeEntitySentiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(AnalyzeEntitySentimentRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).AnalyzeEntitySentiment(ctx, req.(*AnalyzeEntitySentimentRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _LanguageService_AnalyzeSyntax_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(AnalyzeSyntaxRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/AnalyzeSyntax", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).AnalyzeSyntax(ctx, req.(*AnalyzeSyntaxRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _LanguageService_ClassifyText_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ClassifyTextRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).ClassifyText(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/ClassifyText", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).ClassifyText(ctx, req.(*ClassifyTextRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _LanguageService_AnnotateText_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(AnnotateTextRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(LanguageServiceServer).AnnotateText(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.language.v1.LanguageService/AnnotateText", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(LanguageServiceServer).AnnotateText(ctx, req.(*AnnotateTextRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _LanguageService_serviceDesc describes the LanguageService service: its
// fully-qualified name, the server interface type, and the unary method
// table mapping each method name to its generated handler. It is passed to
// s.RegisterService by RegisterLanguageServiceServer. The service defines
// no streaming RPCs.
var _LanguageService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.language.v1.LanguageService",
	HandlerType: (*LanguageServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "AnalyzeSentiment",
			Handler:    _LanguageService_AnalyzeSentiment_Handler,
		},
		{
			MethodName: "AnalyzeEntities",
			Handler:    _LanguageService_AnalyzeEntities_Handler,
		},
		{
			MethodName: "AnalyzeEntitySentiment",
			Handler:    _LanguageService_AnalyzeEntitySentiment_Handler,
		},
		{
			MethodName: "AnalyzeSyntax",
			Handler:    _LanguageService_AnalyzeSyntax_Handler,
		},
		{
			MethodName: "ClassifyText",
			Handler:    _LanguageService_ClassifyText_Handler,
		},
		{
			MethodName: "AnnotateText",
			Handler:    _LanguageService_AnnotateText_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "google/cloud/language/v1/language_service.proto",
}
| |
// init registers the gzipped file descriptor for
// google/cloud/language/v1/language_service.proto with the proto package
// so it can be looked up by file name at runtime.
func init() {
	proto.RegisterFile("google/cloud/language/v1/language_service.proto", fileDescriptor_language_service_7142fb5eff0cd389)
}
| |
// fileDescriptor_language_service_7142fb5eff0cd389 is the gzip-compressed
// FileDescriptorProto for language_service.proto, registered in init().
// Generated data — do not edit by hand.
var fileDescriptor_language_service_7142fb5eff0cd389 = []byte{
	// 2967 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcb, 0x73, 0xdb, 0xd6,
	0xd5, 0x37, 0xf8, 0x12, 0x75, 0x28, 0xc9, 0xd7, 0xb0, 0xe3, 0xf0, 0x53, 0x1c, 0xc7, 0x81, 0x63,
	0x47, 0x76, 0x12, 0xca, 0x56, 0xbe, 0xcf, 0x71, 0x6c, 0x7f, 0x89, 0x21, 0xf0, 0x92, 0x82, 0x0c,
	0x02, 0xf0, 0x05, 0x40, 0x2b, 0xd9, 0x70, 0x60, 0x12, 0x62, 0x38, 0x91, 0x08, 0x96, 0x80, 0x3c,
	0x56, 0x36, 0x9d, 0xe9, 0x4c, 0x97, 0x5d, 0x74, 0xda, 0x45, 0x97, 0x9d, 0xe9, 0x63, 0xa6, 0x33,
	0x99, 0xf6, 0x1f, 0xe8, 0x9f, 0xd0, 0x5d, 0xff, 0x84, 0x76, 0xd7, 0x5d, 0x17, 0x9d, 0x76, 0xd1,
	0xc7, 0x9c, 0x7b, 0x01, 0x12, 0x94, 0x25, 0x59, 0x72, 0xd3, 0x69, 0x76, 0xf7, 0x1e, 0x9e, 0xdf,
	0x79, 0xdd, 0xf3, 0xb8, 0xb8, 0x12, 0xac, 0xf6, 0xc3, 0xb0, 0xbf, 0x13, 0xac, 0x76, 0x77, 0xc2,
	0xbd, 0xde, 0xea, 0x8e, 0x3f, 0xec, 0xef, 0xf9, 0xfd, 0x60, 0xf5, 0xd9, 0xed, 0xc9, 0xba, 0x13,
	0x05, 0xe3, 0x67, 0x83, 0x6e, 0x50, 0x1b, 0x8d, 0xc3, 0x38, 0x94, 0xab, 0x02, 0x50, 0xe3, 0x80,
	0x5a, 0xca, 0x54, 0x7b, 0x76, 0x7b, 0xf9, 0x52, 0x22, 0xca, 0x1f, 0x0d, 0x56, 0xfd, 0xe1, 0x30,
	0x8c, 0xfd, 0x78, 0x10, 0x0e, 0x23, 0x81, 0x53, 0xfe, 0x24, 0x41, 0xb9, 0x1e, 0x76, 0xf7, 0x76,
	0x83, 0x61, 0x2c, 0xdf, 0x87, 0x42, 0xbc, 0x3f, 0x0a, 0xaa, 0xd2, 0x15, 0x69, 0x65, 0x69, 0xed,
	0xdd, 0xda, 0x51, 0x32, 0x6b, 0x29, 0xa2, 0xe6, 0xee, 0x8f, 0x02, 0xc6, 0x41, 0xf2, 0x32, 0xcc,
	0x75, 0xc3, 0x61, 0x1c, 0x0c, 0xe3, 0x6a, 0xee, 0x8a, 0xb4, 0x32, 0xbf, 0x71, 0x86, 0xa5, 0x04,
	0x79, 0x05, 0xce, 0xf6, 0xbb, 0x51, 0x27, 0xd9, 0x76, 0xf6, 0xc6, 0x83, 0x6a, 0x3e, 0xe1, 0x59,
	0xec, 0x77, 0x23, 0x4d, 0xd0, 0xbd, 0xf1, 0x40, 0x5e, 0x86, 0x72, 0xaa, 0xa8, 0x5a, 0x40, 0x16,
	0x36, 0xd9, 0x2b, 0x77, 0xa0, 0x80, 0xfa, 0xe4, 0x0b, 0x40, 0xdc, 0xcf, 0x6c, 0xda, 0xf1, 0x4c,
	0xc7, 0xa6, 0x9a, 0xde, 0xd0, 0x69, 0x9d, 0x9c, 0x91, 0x97, 0x00, 0x6c, 0x43, 0xd5, 0xcd, 0x8e,
	0x4b, 0xb7, 0x5c, 0x22, 0xc9, 0x65, 0x28, 0x6c, 0xb8, 0x2d, 0x83, 0xe4, 0xd6, 0xcb, 0x50, 0x8a,
	0xc2, 0xbd, 0x71, 0x37, 0x50, 0xbe, 0x2f, 0x41, 0xd9, 0x09, 0x50, 0x59, 0x37, 0x90, 0xef, 0x40,
	0x21, 0x0e, 0x9e, 0xc7, 0xdc, 0xdb, 0xca, 0x9a, 0x72, 0xb4, 0xb7, 0x6e, 0xf0, 0x3c, 0x76, 0x46,
	0xfe, 0x90, 0x71, 0x7e, 0x59, 0x85, 0xf9, 0x28, 0x18, 0xc6, 0x83, 0xdd, 0xd4, 0xd5, 0xca, 0xda,
	0xd5, 0xa3, 0xc1, 0x4e, 0xca, 0xca, 0xa6, 0x28, 0xe5, 0x9f, 0x79, 0x28, 0xd1, 0x61, 0x3c, 0x88,
	0xf7, 0x65, 0x19, 0x0a, 0x43, 0x7f, 0x57, 0xc4, 0x7c, 0x9e, 0xf1, 0xb5, 0xfc, 0x71, 0x72, 0x0e,
	0x39, 0x7e, 0x0e, 0xd7, 0x8e, 0x16, 0x2e, 0x64, 0x64, 0x4f, 0x61, 0x13, 0xca, 0xbb, 0x41, 0xec,
	0xf7, 0xfc, 0xd8, 0xaf, 0xe6, 0xaf, 0xe4, 0x57, 0x2a, 0x6b, 0xb5, 0x97, 0xc2, 0x5b, 0x09, 0x80,
	0x0e, 0xe3, 0xf1, 0x3e, 0x9b, 0xe0, 0xf1, 0x2c, 0x22, 0x7f, 0x67, 0x80, 0xc1, 0xe2, 0x67, 0x91,
	0x63, 0x93, 0xbd, 0xac, 0xa1, 0x9e, 0x21, 0xcf, 0xa4, 0x6a, 0x91, 0xeb, 0x79, 0xf7, 0x65, 0x7a,
	0x5a, 0x82, 0x9f, 0x4d, 0x80, 0xb3, 0x91, 0x2c, 0xbd, 0x4a, 0x24, 0x97, 0xef, 0xc3, 0xe2, 0x8c,
	0xf9, 0x32, 0x81, 0xfc, 0x97, 0xc1, 0x7e, 0x12, 0x4e, 0x5c, 0xca, 0x17, 0xa0, 0xf8, 0xcc, 0xdf,
	0xd9, 0x13, 0xe1, 0x9c, 0x67, 0x62, 0x73, 0x2f, 0x77, 0x57, 0x52, 0xf6, 0x93, 0x84, 0xaa, 0xc0,
	0x9c, 0x67, 0x3e, 0x32, 0xad, 0x27, 0x26, 0x39, 0x23, 0x03, 0x94, 0x6c, 0xca, 0x1c, 0xcb, 0x24,
	0x92, 0xbc, 0x00, 0x65, 0xc3, 0xd2, 0x54, 0x57, 0xb7, 0x4c, 0x92, 0x93, 0x09, 0x2c, 0x58, 0xac,
	0xa9, 0x9a, 0xfa, 0xe7, 0x82, 0x92, 0x97, 0xe7, 0xa1, 0x48, 0xdb, 0xd4, 0x74, 0x49, 0x41, 0x3e,
	0x0b, 0x95, 0x27, 0x16, 0x7b, 0xd4, 0xb1, 0x1a, 0x1d, 0x95, 0xb9, 0xa4, 0x28, 0x9f, 0x83, 0x45,
	0xcd, 0x32, 0x1d, 0xaf, 0x45, 0x59, 0xa7, 0x69, 0x59, 0x75, 0x52, 0x42, 0x76, 0xcb, 0xdd, 0xa0,
	0x8c, 0xcc, 0x29, 0x7f, 0x91, 0xa0, 0xe8, 0x86, 0x5f, 0x06, 0xc3, 0x57, 0x4e, 0x43, 0x03, 0x96,
	0x46, 0xfe, 0x38, 0xee, 0x84, 0xdb, 0x9d, 0x68, 0x14, 0x04, 0xdd, 0x2f, 0x92, 0x5c, 0xbc, 0x7e,
	0xb4, 0x04, 0xdb, 0x1f, 0xc7, 0xd6, 0xb6, 0xc3, 0xb9, 0xd9, 0xc2, 0x28, 0xb3, 0x93, 0x1f, 0xc3,
	0xd9, 0x5e, 0x30, 0x0a, 0x86, 0xbd, 0x60, 0xd8, 0xdd, 0xef, 0x04, 0xbd, 0x7e, 0xc0, 0x2b, 0xb4,
	0xb2, 0xb6, 0x72, 0x4c, 0x17, 0x98, 0x00, 0x68, 0xaf, 0x1f, 0xb0, 0xa5, 0xde, 0xcc, 0x1e, 0xe3,
	0xbe, 0x13, 0xec, 0xee, 0xfa, 0x49, 0x1d, 0x8b, 0x8d, 0xf2, 0x29, 0xcc, 0x4f, 0x0e, 0x52, 0xbe,
	0x04, 0xf3, 0xbb, 0x7e, 0x7f, 0x38, 0x88, 0xf7, 0x7a, 0xe2, 0x78, 0x72, 0x6c, 0x4a, 0x40, 0x01,
	0x51, 0x37, 0x1c, 0x0b, 0x4b, 0x72, 0x4c, 0x6c, 0x94, 0xbf, 0x13, 0x58, 0xc8, 0x3a, 0x22, 0x3f,
	0x80, 0x7c, 0xec, 0xf7, 0x93, 0xa6, 0x75, 0xf3, 0x64, 0xde, 0xd7, 0x5c, 0xbf, 0xcf, 0x10, 0x26,
	0x53, 0x28, 0xf9, 0xd1, 0x28, 0xe8, 0xc6, 0x49, 0xb5, 0x7d, 0x70, 0x42, 0x01, 0x2a, 0x07, 0xb1,
	0x04, 0x2c, 0x7f, 0x0a, 0x85, 0xae, 0x1f, 0x09, 0x53, 0x97, 0xd6, 0xde, 0x3b, 0xa1, 0x10, 0xcd,
	0x8f, 0x02, 0xc6, 0x81, 0x28, 0x60, 0x3b, 0x1c, 0xef, 0xf2, 0x60, 0x9d, 0x5c, 0x40, 0x23, 0x1c,
	0xef, 0x32, 0x0e, 0x44, 0x47, 0xfa, 0x18, 0xfe, 0x71, 0xb5, 0x78, 0x2a, 0x47, 0x9a, 0x1c, 0xc4,
	0x12, 0x30, 0xda, 0xb1, 0x1b, 0x86, 0x3d, 0x5e, 0x8e, 0x27, 0xb7, 0xa3, 0x15, 0x86, 0x3d, 0xc6,
	0x81, 0x68, 0xc7, 0x70, 0x6f, 0xf7, 0x69, 0x30, 0xae, 0xce, 0x9d, 0xca, 0x0e, 0x93, 0x83, 0x58,
	0x02, 0x46, 0x31, 0xa3, 0x60, 0x1c, 0x85, 0xc3, 0x6a, 0xf9, 0x54, 0x62, 0x6c, 0x0e, 0x62, 0x09,
	0x98, 0x8b, 0x19, 0x87, 0xa3, 0x60, 0x5c, 0x9d, 0x3f, 0x9d, 0x18, 0x0e, 0x62, 0x09, 0x58, 0x76,
	0xa1, 0x32, 0x0e, 0xba, 0x83, 0xd1, 0x38, 0xec, 0x0e, 0xe2, 0xfd, 0x2a, 0x70, 0x59, 0x6b, 0x27,
	0x94, 0xc5, 0xa6, 0x48, 0x96, 0x15, 0x23, 0xaf, 0x43, 0x31, 0x0e, 0x86, 0x51, 0x50, 0xad, 0x70,
	0x79, 0xef, 0x9f, 0x34, 0x77, 0x11, 0xc3, 0x04, 0x14, 0x65, 0x3c, 0x0b, 0x07, 0xdd, 0xa0, 0xba,
	0x70, 0x2a, 0x19, 0x6d, 0xc4, 0x30, 0x01, 0x55, 0x7e, 0x20, 0x41, 0xde, 0xf5, 0xfb, 0xb3, 0x7d,
	0x70, 0x0e, 0xf2, 0x6a, 0x7d, 0x93, 0x48, 0x62, 0x61, 0x93, 0x9c, 0x58, 0xb4, 0x49, 0x1e, 0x47,
	0xab, 0x66, 0x99, 0x9b, 0xa4, 0x80, 0xa4, 0x3a, 0xc5, 0x6e, 0x57, 0x86, 0x82, 0x69, 0x79, 0x26,
	0x29, 0x21, 0xc9, 0xf4, 0x5a, 0x64, 0x0e, 0x49, 0x36, 0xb3, 0x4c, 0x52, 0x46, 0x92, 0xcd, 0x5c,
	0x32, 0x8f, 0x0d, 0xd0, 0xf6, 0x4c, 0xcd, 0x25, 0x80, 0xbf, 0xb6, 0x29, 0x5b, 0x27, 0x15, 0xb9,
	0x08, 0xd2, 0x16, 0x59, 0xc0, 0xdf, 0xd4, 0x46, 0x43, 0xdf, 0x22, 0x8b, 0x8a, 0x05, 0x25, 0x51,
	0x5e, 0xb2, 0x0c, 0x4b, 0x2a, 0x0e, 0x79, 0xb7, 0x33, 0x35, 0x0c, 0x07, 0x3d, 0x65, 0x0d, 0xaa,
	0xb9, 0x7a, 0x9b, 0x12, 0x09, 0xdb, 0xb2, 0xde, 0xca, 0x50, 0x72, 0xd8, 0x8b, 0x6d, 0x66, 0x35,
	0x19, 0x75, 0x1c, 0x24, 0xe4, 0x95, 0xbf, 0x4a, 0x50, 0xc0, 0x5a, 0x43, 0x5e, 0x4d, 0x75, 0xe8,
	0xac, 0x34, 0x55, 0xd3, 0x3c, 0x47, 0x4d, 0xa4, 0x2d, 0xc2, 0xbc, 0x5a, 0x47, 0xcb, 0x74, 0xd5,
	0x20, 0x39, 0xd1, 0xc5, 0x5b, 0xb6, 0x41, 0x5b, 0xd4, 0xe4, 0x1c, 0x79, 0x1c, 0x10, 0x75, 0xc1,
	0x5d, 0xc0, 0x01, 0xd1, 0xa4, 0xa6, 0xce, 0x77, 0x45, 0x6e, 0x89, 0xe9, 0xb8, 0xcc, 0x43, 0x66,
	0xd5, 0x20, 0xa5, 0xe9, 0x00, 0x69, 0x53, 0x32, 0x87, 0xba, 0x4c, 0xab, 0xa5, 0x9b, 0x62, 0x5f,
	0xc6, 0x78, 0x5b, 0xeb, 0x86, 0xfe, 0xd8, 0xa3, 0x64, 0x1e, 0x15, 0xdb, 0x2a, 0x73, 0x85, 0x2c,
	0x40, 0xc5, 0x36, 0xa3, 0xb6, 0xe5, 0xe8, 0x38, 0x6b, 0x54, 0x83, 0x54, 0x30, 0x18, 0x8c, 0x36,
	0x0c, 0xba, 0xa5, 0xb7, 0x69, 0x07, 0xdd, 0x20, 0x0b, 0xc8, 0xc6, 0xa8, 0xc1, 0x05, 0x0a, 0xd2,
	0x22, 0xea, 0x6c, 0xa7, 0x3a, 0x97, 0x94, 0xdf, 0x48, 0x50, 0xc0, 0x2e, 0x81, 0xc6, 0x35, 0x2c,
	0xd6, 0xca, 0xb8, 0xbe, 0x00, 0x65, 0xb5, 0x8e, 0x06, 0xa9, 0x46, 0xe2, 0xb8, 0xb7, 0xa5, 0x1b,
	0xba, 0xca, 0x3e, 0x23, 0x39, 0x54, 0x96, 0x71, 0xfc, 0x73, 0xca, 0x48, 0x9e, 0x8b, 0xd0, 0x4d,
	0xd5, 0xe8, 0x50, 0xb3, 0xae, 0x9b, 0x4d, 0x52, 0xc0, 0x58, 0x34, 0x29, 0xf3, 0xcc, 0x3a, 0x29,
	0xe2, 0x9a, 0x51, 0xd5, 0xd0, 0x1d, 0xe1, 0xb7, 0xce, 0x92, 0xdd, 0x1c, 0x1e, 0xad, 0xb3, 0x61,
	0x31, 0x97, 0x94, 0xf1, 0xd8, 0x0d, 0xcb, 0x6c, 0x8a, 0x5c, 0xb0, 0x58, 0x9d, 0x32, 0x02, 0xc8,
	0x9d, 0xdc, 0xe4, 0x34, 0x52, 0x51, 0x28, 0x94, 0x44, 0x4f, 0x42, 0x1b, 0x9a, 0xd4, 0xac, 0x53,
	0x36, 0x6b, 0x74, 0x83, 0xb6, 0x74, 0x53, 0x37, 0x93, 0xd3, 0x6a, 0xa9, 0x8e, 0xe6, 0x19, 0xb8,
	0xcd, 0xa1, 0x09, 0x26, 0xf5, 0x5c, 0x34, 0x56, 0xf9, 0x2e, 0x14, 0xb0, 0x2b, 0xa1, 0xd1, 0x2d,
	0xcb, 0xaa, 0x67, 0x44, 0x5c, 0x00, 0xa2, 0x59, 0x66, 0x3d, 0x09, 0x6c, 0x07, 0x7f, 0x25, 0x12,
	0x1e, 0x0e, 0x4f, 0x23, 0x35, 0x49, 0x22, 0xdc, 0x9b, 0x75, 0x3d, 0x09, 0x64, 0x1e, 0x23, 0xad,
	0x9b, 0x2e, 0x65, 0xcc, 0x6a, 0xa6, 0xa7, 0x5f, 0x81, 0xb9, 0x4d, 0x4f, 0xe4, 0x58, 0x11, 0x93,
	0xce, 0xf1, 0xd6, 0x37, 0x31, 0xbd, 0x91, 0x50, 0x52, 0x1e, 0x42, 0x49, 0xf4, 0x34, 0xf4, 0xc3,
	0xf4, 0x5a, 0xeb, 0x07, 0xfd, 0x70, 0x74, 0xb3, 0xe9, 0x19, 0x2a, 0x23, 0x12, 0xbf, 0x74, 0x18,
	0x1e, 0xe3, 0x29, 0x57, 0x86, 0x42, 0xdd, 0x53, 0x0d, 0x92, 0x57, 0x5c, 0x28, 0x89, 0x76, 0x86,
	0x12, 0xc4, 0xa5, 0x24, 0x23, 0x61, 0x1e, 0x8a, 0x0d, 0x9d, 0x39, 0xae, 0x80, 0x3b, 0x14, 0x7d,
	0x22, 0x39, 0x24, 0xbb, 0x1b, 0x3a, 0xab, 0x93, 0x3c, 0x3a, 0x3a, 0x4d, 0x98, 0xe4, 0x52, 0x53,
	0x50, 0xee, 0x42, 0x49, 0x74, 0x37, 0x2e, 0x95, 0x59, 0xf6, 0x8c, 0x5d, 0x68, 0x09, 0xa7, 0x89,
	0x90, 0x98, 0x96, 0xdb, 0x49, 0xf6, 0x39, 0x65, 0x13, 0x2a, 0x99, 0x5e, 0x26, 0xbf, 0x0e, 0xe7,
	0x19, 0xd5, 0x74, 0x9b, 0x59, 0x9a, 0xee, 0x7e, 0x36, 0x5b, 0x53, 0xe9, 0x0f, 0x3c, 0xb5, 0xd0,
	0x7f, 0xcb, 0xec, 0x64, 0x68, 0x39, 0x25, 0x82, 0x22, 0xef, 0x63, 0x18, 0x57, 0x97, 0x9a, 0x33,
	0x35, 0xf9, 0x1a, 0x9c, 0xcb, 0x1e, 0x10, 0xff, 0x59, 0x78, 0xd9, 0xf0, 0x5c, 0x8f, 0x51, 0x11,
	0x24, 0x5b, 0x75, 0x5c, 0x92, 0xc7, 0x43, 0xb0, 0x19, 0x75, 0xc4, 0x2d, 0x6c, 0x11, 0xe6, 0x27,
	0xbd, 0x80, 0x14, 0xc5, 0x37, 0x81, 0x97, 0xee, 0x4b, 0xca, 0x3a, 0x14, 0x79, 0xe3, 0x43, 0xa5,
	0x6d, 0x4b, 0xd7, 0xe8, 0xac, 0xe3, 0xaa, 0x36, 0x6d, 0x02, 0x9a, 0x9a, 0xf6, 0x84, 0x1c, 0x57,
	0xa1, 0xa6, 0xbd, 0xe4, 0xeb, 0x32, 0x2c, 0xcd, 0xde, 0x7c, 0xe4, 0x15, 0x20, 0x5f, 0x04, 0x7e,
	0xaf, 0x13, 0xe3, 0x85, 0xae, 0x33, 0x18, 0xf6, 0x82, 0xe7, 0xfc, 0x3a, 0x52, 0x64, 0x4b, 0x48,
	0xe7, 0xf7, 0x3c, 0x1d, 0xa9, 0x72, 0x1d, 0x8a, 0x3b, 0xfe, 0xd3, 0x60, 0x27, 0xb9, 0x6c, 0xd4,
	0x4e, 0x7a, 0xb9, 0xaa, 0x19, 0x88, 0x62, 0x02, 0xac, 0xfc, 0x6a, 0x0e, 0x8a, 0x9c, 0xf0, 0xc2,
	0xcd, 0x55, 0x5d, 0x5f, 0x67, 0xb4, 0x4d, 0x24, 0xde, 0x4d, 0xb1, 0x7e, 0x45, 0x42, 0xa8, 0xf5,
	0xb6, 0x66, 0x88, 0xd6, 0xa5, 0xd6, 0xdb, 0x2d, 0xab, 0x4e, 0x0a, 0x18, 0x41, 0x15, 0x57, 0x45,
	0xce, 0x60, 0xdb, 0x16, 0xd6, 0x2d, 0x12, 0x5d, 0x97, 0x91, 0x39, 0xde, 0xec, 0xbd, 0x2d, 0xd1,
	0xa4, 0x54, 0x6f, 0x0b, 0xfd, 0x27, 0xf3, 0x72, 0x09, 0x72, 0x9a, 0x46, 0x00, 0x21, 0x1a, 0x17,
	0x5f, 0x99, 0x0c, 0x03, 0xde, 0xc1, 0x35, 0x2c, 0x01, 0xb2, 0xc8, 0x03, 0x88, 0x4b, 0x0e, 0x5b,
	0x12, 0x63, 0xc2, 0x26, 0x67, 0xd3, 0x79, 0x41, 0x90, 0xa1, 0xae, 0x3b, 0x9a, 0xe5, 0x31, 0x87,
	0x92, 0x73, 0x3c, 0xe7, 0xad, 0xf5, 0x4d, 0x22, 0xe3, 0x8a, 0x6e, 0xd9, 0x06, 0x39, 0xcf, 0x7b,
	0xab, 0x45, 0x9d, 0x27, 0xba, 0xbb, 0x41, 0x2e, 0x20, 0x5d, 0x47, 0x8e, 0xd7, 0x70, 0xd5, 0x52,
	0xd9, 0x23, 0x72, 0x11, 0xa5, 0xb5, 0x9e, 0x50, 0xf2, 0xba, 0x58, 0xb4, 0x49, 0x95, 0x0f, 0x1f,
	0xda, 0x24, 0xff, 0x83, 0x86, 0x9a, 0x26, 0x59, 0x46, 0x21, 0xa6, 0x9d, 0xf8, 0xfc, 0x06, 0x5a,
	0x68, 0x72, 0x0b, 0x2f, 0xa1, 0x01, 0xe6, 0xc4, 0xc2, 0x37, 0xd3, 0xa9, 0x75, 0x99, 0xb7, 0x10,
	0x5e, 0xab, 0xe4, 0x2d, 0x9c, 0x4c, 0x36, 0xb9, 0x92, 0x74, 0x66, 0xd5, 0x55, 0xb7, 0x74, 0x87,
	0xbc, 0x2d, 0xb2, 0x81, 0xb9, 0x28, 0x51, 0xe1, 0x13, 0x8d, 0x07, 0xe2, 0x2a, 0x4f, 0x49, 0xb4,
	0xf0, 0x1d, 0xb1, 0x72, 0x1c, 0x72, 0x8d, 0xf3, 0x5a, 0x8e, 0x8b, 0x36, 0x5d, 0x4f, 0x32, 0x95,
	0x73, 0xbf, 0x3b, 0xd9, 0x98, 0x9b, 0x64, 0x45, 0x14, 0x1d, 0xc5, 0xc8, 0xdc, 0x10, 0x63, 0x93,
	0x36, 0xc8, 0xcd, 0x64, 0x65, 0x93, 0xf7, 0xb8, 0x16, 0x66, 0x99, 0x06, 0x79, 0x3f, 0x9d, 0xa5,
	0x1f, 0xa0, 0x87, 0xb6, 0x43, 0x6a, 0xe8, 0xe1, 0x63, 0x4f, 0x35, 0xb9, 0x3d, 0xab, 0xc8, 0xc9,
	0x34, 0x5c, 0xde, 0xc2, 0x1f, 0xf8, 0x92, 0x51, 0x83, 0xdc, 0xe6, 0x3f, 0xd4, 0x99, 0x65, 0x93,
	0x35, 0x14, 0x81, 0x0a, 0x3e, 0x44, 0x1b, 0x18, 0x6d, 0x99, 0xaa, 0xe9, 0x92, 0xff, 0x15, 0x45,
	0x8b, 0x7e, 0x9a, 0x75, 0xaf, 0x45, 0xfe, 0x0f, 0xb5, 0x33, 0xcb, 0x72, 0xc9, 0x1d, 0x5c, 0x39,
	0x18, 0x9c, 0x8f, 0xf8, 0xca, 0x6b, 0x34, 0xc8, 0x5d, 0x5c, 0x71, 0x8d, 0x1f, 0xf3, 0x7e, 0x63,
	0xd9, 0xba, 0x46, 0xee, 0xf1, 0x99, 0x8e, 0xc4, 0xfb, 0x33, 0x33, 0xe8, 0x01, 0xb2, 0x6c, 0x71,
	0xb7, 0xff, 0x9f, 0x77, 0x2a, 0x8f, 0x8f, 0xf9, 0x4f, 0x38, 0x52, 0x77, 0x0d, 0x4a, 0x3e, 0x15,
	0xa3, 0xa8, 0x6d, 0x6f, 0x20, 0xfa, 0x61, 0x92, 0x72, 0x58, 0x81, 0x44, 0xe5, 0xd9, 0xe9, 0x6d,
	0xb5, 0xdb, 0x64, 0x1d, 0x97, 0x75, 0xae, 0x55, 0x43, 0x96, 0x86, 0xc5, 0xa8, 0xde, 0x34, 0x49,
	0x1d, 0x43, 0xf1, 0xe8, 0x09, 0xa1, 0x7c, 0xb8, 0xe8, 0x8e, 0x4b, 0x1a, 0xe2, 0x3a, 0xd2, 0xd2,
	0x48, 0x93, 0x27, 0x80, 0xd5, 0x12, 0x79, 0xb9, 0x81, 0xc3, 0x20, 0xdd, 0xf1, 0x83, 0xd7, 0x39,
	0xa7, 0xd7, 0xd2, 0xc8, 0x26, 0x86, 0x45, 0xb3, 0x6c, 0xf2, 0x08, 0x23, 0x51, 0xd7, 0x1d, 0x3e,
	0xb7, 0x69, 0x9d, 0x18, 0xbc, 0x14, 0x1c, 0x9b, 0xb4, 0x90, 0xb7, 0x89, 0xea, 0x4d, 0xbe, 0xc2,
	0xb3, 0xb6, 0xd0, 0x21, 0xdd, 0x6c, 0x20, 0xd5, 0xe6, 0x69, 0x48, 0x1d, 0xf2, 0x98, 0xe7, 0x19,
	0x77, 0x98, 0x29, 0xff, 0x90, 0x60, 0x71, 0xe6, 0xfb, 0xf7, 0x95, 0x3f, 0xf8, 0x1e, 0xce, 0xbc,
	0x0a, 0xbc, 0x7f, 0xc2, 0xcf, 0xed, 0xec, 0xe3, 0xc0, 0xcc, 0xf7, 0x76, 0xfe, 0x95, 0x5e, 0x2e,
	0x6e, 0x25, 0x9f, 0xcc, 0x04, 0x16, 0x92, 0x37, 0x98, 0xc3, 0x06, 0x07, 0x40, 0x49, 0xb3, 0x5a,
	0x2d, 0xfc, 0x6a, 0x56, 0x9a, 0x50, 0x4e, 0x1d, 0x91, 0xab, 0xd3, 0x37, 0x22, 0xf1, 0x81, 0x3e,
	0x79, 0x21, 0x7a, 0x1b, 0x16, 0x9e, 0x06, 0xfd, 0xc1, 0xb0, 0x13, 0x6e, 0x6f, 0x47, 0x81, 0xf8,
	0x18, 0x2b, 0xb2, 0x0a, 0xa7, 0x59, 0x9c, 0xa4, 0x18, 0x70, 0x51, 0xdb, 0xf1, 0xa3, 0x68, 0xb0,
	0x3d, 0xe8, 0xf2, 0x37, 0x2c, 0xcd, 0x8f, 0x83, 0x7e, 0x38, 0x3e, 0xfc, 0x0d, 0xe5, 0x32, 0x40,
	0x37, 0x1c, 0x6e, 0x0f, 0x7a, 0xfc, 0xf9, 0x42, 0x7c, 0x5b, 0x66, 0x28, 0xca, 0x2f, 0x25, 0x78,
	0x5d, 0x1d, 0xfa, 0x3b, 0xfb, 0x5f, 0x05, 0x53, 0x47, 0x83, 0xef, 0xec, 0x05, 0x51, 0x2c, 0x7f,
	0x02, 0xe5, 0x5e, 0xf2, 0xc2, 0xf5, 0xf2, 0x53, 0x4a, 0xdf, 0xc2, 0xd8, 0x04, 0x23, 0x3f, 0x82,
	0xc5, 0x60, 0xd8, 0x0d, 0x7b, 0x83, 0x61, 0xbf, 0x93, 0x39, 0xb2, 0xeb, 0xc7, 0x1d, 0x99, 0x60,
	0xe7, 0x87, 0xb5, 0x10, 0x64, 0x76, 0xca, 0xef, 0x24, 0xa8, 0xbe, 0x68, 0x68, 0x34, 0x0a, 0x71,
	0x78, 0x32, 0x90, 0x53, 0xad, 0x9d, 0xe9, 0xd1, 0x4a, 0x27, 0x3f, 0xda, 0x73, 0x29, 0x7c, 0xfa,
	0x51, 0x9e, 0x7d, 0x82, 0xcb, 0xcd, 0x3e, 0xc1, 0xc9, 0x0f, 0x45, 0x06, 0x61, 0x04, 0xa3, 0xe4,
	0x7d, 0x49, 0x39, 0x5e, 0x0d, 0xb2, 0xb2, 0x29, 0x48, 0xf9, 0x5a, 0x82, 0x37, 0x13, 0x77, 0x44,
	0x9e, 0x7e, 0xbb, 0xa3, 0xff, 0x15, 0x5c, 0x3e, 0xca, 0xda, 0xe4, 0x08, 0x1e, 0x40, 0x19, 0x69,
	0xf1, 0x20, 0x88, 0xaa, 0x12, 0x8f, 0xc8, 0x95, 0x97, 0x95, 0x26, 0x9b, 0x20, 0x8e, 0x0b, 0xb6,
	0xf2, 0x0b, 0x09, 0x2e, 0x66, 0x95, 0x0f, 0x82, 0xe8, 0x5b, 0x19, 0xa3, 0x68, 0x52, 0x49, 0x53,
	0x33, 0xff, 0xe3, 0xc1, 0xf9, 0x99, 0x04, 0x17, 0xd2, 0xb2, 0xd8, 0x1f, 0xc6, 0xfe, 0xf3, 0x6f,
	0x65, 0x68, 0x7e, 0x2d, 0xc1, 0x6b, 0x07, 0xac, 0x4c, 0x22, 0x33, 0x53, 0x49, 0xd2, 0x2b, 0x54,
	0x92, 0xfc, 0x11, 0x94, 0xf8, 0x85, 0x33, 0xaa, 0xe6, 0x38, 0xfc, 0xad, 0x63, 0x26, 0x09, 0xf2,
	0xb1, 0x84, 0x7d, 0x26, 0xac, 0xf9, 0x03, 0x61, 0xf5, 0xe0, 0x7c, 0xd2, 0x64, 0xf7, 0xb1, 0x6b,
	0x7f, 0x43, 0x41, 0x55, 0xbe, 0x80, 0x0b, 0xb3, 0x62, 0x93, 0x28, 0xd8, 0x00, 0x5d, 0xd1, 0xc5,
	0xa7, 0x19, 0x72, 0xeb, 0x68, 0xc9, 0x87, 0xf7, 0x7f, 0x96, 0x91, 0xa1, 0xfc, 0x39, 0x0f, 0xe7,
	0x55, 0xf1, 0x67, 0x8e, 0xe0, 0x1b, 0xf4, 0x40, 0x66, 0x50, 0xde, 0x0e, 0xfc, 0x78, 0x6f, 0x1c,
	0x44, 0xc9, 0x43, 0xeb, 0x9d, 0xa3, 0xf1, 0x87, 0x18, 0x50, 0x6b, 0x24, 0x68, 0x36, 0x91, 0xf3,
	0x62, 0xaa, 0xe5, 0x5f, 0x3d, 0xd5, 0x96, 0xff, 0x26, 0x41, 0x39, 0xd5, 0x21, 0x5f, 0x83, 0xa5,
	0xe0, 0x79, 0x3c, 0xf6, 0xbb, 0x71, 0x27, 0xe2, 0x79, 0xc7, 0x7d, 0x2e, 0xb3, 0xc5, 0x84, 0x2a,
	0x92, 0x51, 0xbe, 0x01, 0x24, 0x65, 0x9b, 0x94, 0x69, 0x8e, 0x33, 0x9e, 0x4d, 0xe8, 0x69, 0x45,
	0xcb, 0x0f, 0x60, 0x39, 0x65, 0x3d, 0x64, 0xe2, 0xe4, 0x39, 0xa8, 0x9a, 0x70, 0xd4, 0x5f, 0x98,
	0x29, 0x77, 0xa1, 0x3a, 0xa3, 0x68, 0x3f, 0x83, 0x2d, 0x70, 0xec, 0xc5, 0xac, 0xc2, 0x69, 0x9b,
	0x95, 0xaf, 0xc2, 0x62, 0x37, 0xc9, 0x9c, 0x0e, 0xbf, 0x36, 0x95, 0x38, 0xfb, 0x42, 0x37, 0x93,
	0x4e, 0xca, 0x0f, 0xf3, 0xd8, 0x0c, 0xb2, 0x31, 0xff, 0xef, 0x57, 0x59, 0xb6, 0xf5, 0xe5, 0x4f,
	0xdd, 0xfa, 0x0e, 0x1f, 0xec, 0x85, 0x6f, 0x6c, 0xb0, 0x17, 0x0f, 0x0c, 0xf6, 0xd9, 0x42, 0x2c,
	0xfd, 0xfb, 0x85, 0x78, 0xf3, 0x2e, 0x2c, 0x64, 0xb3, 0x55, 0x5c, 0xc9, 0x4d, 0x4a, 0xce, 0xe0,
	0xca, 0x73, 0x1b, 0x77, 0xc5, 0x57, 0xaa, 0xe7, 0x36, 0x6e, 0xdf, 0x11, 0x5f, 0xa9, 0x9e, 0xdb,
	0xf8, 0x70, 0x8d, 0xe4, 0xd7, 0xfe, 0x30, 0x07, 0x67, 0x8d, 0x44, 0x99, 0x23, 0xfe, 0xca, 0x29,
	0xff, 0x5c, 0x02, 0x72, 0xf0, 0x16, 0x24, 0xdf, 0x3e, 0xae, 0x02, 0x0f, 0xbd, 0xda, 0x2d, 0xaf,
	0x9d, 0x06, 0x22, 0x92, 0x48, 0xb9, 0xf1, 0xbd, 0xdf, 0xff, 0xf1, 0x47, 0xb9, 0xab, 0xca, 0xe5,
	0xd5, 0x67, 0xb7, 0x57, 0xd3, 0xb0, 0x46, 0xf7, 0xfc, 0x03, 0xfc, 0xf7, 0xa4, 0x9b, 0xf2, 0x4f,
	0x25, 0x38, 0x7b, 0x60, 0x16, 0xca, 0xb7, 0x5e, 0xaa, 0xf2, 0xc0, 0x74, 0x5f, 0xbe, 0x7d, 0x0a,
	0x44, 0x62, 0xe3, 0x0a, 0xb7, 0x51, 0x51, 0xde, 0x3c, 0xd4, 0xc6, 0x94, 0x1d, 0x4d, 0xfc, 0xed,
	0x81, 0x5b, 0x45, 0xa6, 0xd6, 0x3e, 0x3a, 0x99, 0xde, 0x17, 0xae, 0x6c, 0xcb, 0x77, 0x4f, 0x0f,
	0x4c, 0xec, 0x5e, 0xe5, 0x76, 0xdf, 0x50, 0xde, 0x39, 0xda, 0xee, 0xfd, 0x99, 0x08, 0xff, 0x44,
	0x82, 0xc5, 0x99, 0x89, 0x2a, 0xd7, 0x5e, 0x7e, 0xa4, 0xd9, 0x0b, 0xc2, 0xf2, 0xea, 0x89, 0xf9,
	0x13, 0x1b, 0xaf, 0x73, 0x1b, 0xaf, 0x28, 0x6f, 0x1c, 0x7e, 0xfe, 0x9c, 0x19, 0x4d, 0xfb, 0xb1,
	0x04, 0x0b, 0xd9, 0x29, 0x27, 0x7f, 0xf0, 0xd2, 0x02, 0xca, 0x0e, 0xd9, 0xe5, 0xda, 0x49, 0xd9,
	0x13, 0xbb, 0xae, 0x71, 0xbb, 0xde, 0x52, 0x96, 0x67, 0xed, 0xca, 0x76, 0xc6, 0xd4, 0xac, 0x6c,
	0x73, 0x3c, 0xce, 0xac, 0x43, 0x06, 0xd7, 0x72, 0xed, 0xa4, 0xec, 0xc7, 0x9b, 0xe5, 0x67, 0x78,
	0xef, 0x49, 0x37, 0xd7, 0x9f, 0xc3, 0xa5, 0x6e, 0xb8, 0x7b, 0xa4, 0xec, 0xf5, 0x0b, 0x07, 0x5a,
	0x80, 0x3d, 0x0e, 0xe3, 0xd0, 0x96, 0x3e, 0x7f, 0x98, 0x20, 0xfa, 0x21, 0x72, 0xd7, 0xc2, 0x71,
	0x7f, 0xb5, 0x1f, 0x0c, 0xf9, 0x7f, 0x33, 0x24, 0xff, 0x35, 0xe1, 0x8f, 0x06, 0xd1, 0x8b, 0xff,
	0x39, 0x71, 0x3f, 0x5d, 0x3f, 0x2d, 0x71, 0xe6, 0x0f, 0xff, 0x15, 0x00, 0x00, 0xff, 0xff, 0x5f,
	0x87, 0x12, 0xb1, 0x65, 0x21, 0x00, 0x00,
}