| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/automl/v1beta1/io.proto |
| |
| package automl |
| |
| import ( |
| fmt "fmt" |
| math "math" |
| |
| proto "github.com/golang/protobuf/proto" |
| _ "google.golang.org/genproto/googleapis/api/annotations" |
| ) |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ = proto.Marshal |
| var _ = fmt.Errorf |
| var _ = math.Inf |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the proto package it is being compiled against. |
| // A compilation error at this line likely means your copy of the |
| // proto package needs to be updated. |
| const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package |
| |
| // Input configuration for ImportData Action. |
| // |
// The format of input depends on the dataset_metadata of the Dataset into
// which the import is happening. As input source the
// [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source]
// is expected, unless specified otherwise. Additionally any input .CSV file
// by itself must be 100MB or smaller, unless specified otherwise.
// If an "example" file (that is, an image, video, etc.) with identical content
// (even if it has a different GCS_FILE_PATH) is mentioned multiple times, then
// its labels, bounding boxes, etc. are appended. The same file should always
// be provided with the same ML_USE and GCS_FILE_PATH; if it is not, then
// these values are nondeterministically selected from the given ones.
| // |
| // The formats are represented in EBNF with commas being literal and with |
| // non-terminal symbols defined near the end of this comment. The formats are: |
| // |
| // * For Image Classification: |
| // CSV file(s) with each line in format: |
| // ML_USE,GCS_FILE_PATH,LABEL,LABEL,... |
| // GCS_FILE_PATH leads to image of up to 30MB in size. Supported |
| // extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO |
| // For MULTICLASS classification type, at most one LABEL is allowed |
| // per image. If an image has not yet been labeled, then it should be |
| // mentioned just once with no LABEL. |
| // Some sample rows: |
| // TRAIN,gs://folder/image1.jpg,daisy |
| // TEST,gs://folder/image2.jpg,dandelion,tulip,rose |
| // UNASSIGNED,gs://folder/image3.jpg,daisy |
| // UNASSIGNED,gs://folder/image4.jpg |
| // |
| // * For Image Object Detection: |
| // CSV file(s) with each line in format: |
| // ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,) |
| // GCS_FILE_PATH leads to image of up to 30MB in size. Supported |
| // extensions: .JPEG, .GIF, .PNG. |
| // Each image is assumed to be exhaustively labeled. The minimum |
| // allowed BOUNDING_BOX edge length is 0.01, and no more than 500 |
| // BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined |
| // per line). If an image has not yet been labeled, then it should be |
| // mentioned just once with no LABEL and the ",,,,,,," in place of the |
| // BOUNDING_BOX. For images which are known to not contain any |
| // bounding boxes, they should be labelled explictly as |
| // "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the |
| // BOUNDING_BOX. |
| // Sample rows: |
| // TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, |
| // TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, |
| // UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 |
| // TEST,gs://folder/im3.png,,,,,,,,, |
| // TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, |
| // |
| // * For Video Classification: |
| // CSV file(s) with each line in format: |
| // ML_USE,GCS_FILE_PATH |
| // where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH |
| // should lead to another .csv file which describes examples that have |
| // given ML_USE, using the following row format: |
| // GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) |
| // Here GCS_FILE_PATH leads to a video of up to 50GB in size and up |
| // to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. |
| // TIME_SEGMENT_START and TIME_SEGMENT_END must be within the |
| // length of the video, and end has to be after the start. Any segment |
| // of a video which has one or more labels on it, is considered a |
| // hard negative for all other labels. Any segment with no labels on |
| // it is considered to be unknown. If a whole video is unknown, then |
| // it shuold be mentioned just once with ",," in place of LABEL, |
| // TIME_SEGMENT_START,TIME_SEGMENT_END. |
| // Sample top level CSV file: |
| // TRAIN,gs://folder/train_videos.csv |
| // TEST,gs://folder/test_videos.csv |
| // UNASSIGNED,gs://folder/other_videos.csv |
| // Sample rows of a CSV file for a particular ML_USE: |
| // gs://folder/video1.avi,car,120,180.000021 |
| // gs://folder/video1.avi,bike,150,180.000021 |
| // gs://folder/vid2.avi,car,0,60.5 |
| // gs://folder/vid3.avi,,, |
| // |
| // * For Video Object Tracking: |
| // CSV file(s) with each line in format: |
| // ML_USE,GCS_FILE_PATH |
| // where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH |
| // should lead to another .csv file which describes examples that have |
| // given ML_USE, using one of the following row format: |
| // GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX |
| // or |
| // GCS_FILE_PATH,,,,,,,,,, |
| // Here GCS_FILE_PATH leads to a video of up to 50GB in size and up |
| // to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. |
| // Providing INSTANCE_IDs can help to obtain a better model. When |
| // a specific labeled entity leaves the video frame, and shows up |
| // afterwards it is not required, albeit preferable, that the same |
| // INSTANCE_ID is given to it. |
| // TIMESTAMP must be within the length of the video, the |
| // BOUNDING_BOX is assumed to be drawn on the closest video's frame |
| // to the TIMESTAMP. Any mentioned by the TIMESTAMP frame is expected |
| // to be exhaustively labeled and no more than 500 BOUNDING_BOX-es per |
| // frame are allowed. If a whole video is unknown, then it should be |
| // mentioned just once with ",,,,,,,,,," in place of LABEL, |
| // [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. |
| // Sample top level CSV file: |
| // TRAIN,gs://folder/train_videos.csv |
| // TEST,gs://folder/test_videos.csv |
| // UNASSIGNED,gs://folder/other_videos.csv |
| // Seven sample rows of a CSV file for a particular ML_USE: |
| // gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 |
| // gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 |
| // gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 |
| // gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, |
| // gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, |
| // gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, |
| // gs://folder/video2.avi,,,,,,,,,,, |
| // * For Text Extraction: |
| // CSV file(s) with each line in format: |
| // ML_USE,GCS_FILE_PATH |
| // GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which |
| // either imports text in-line or as documents. |
| // The in-line .JSONL file contains, per line, a proto that wraps a |
| // TextSnippet proto (in json representation) followed by one or more |
| // AnnotationPayload protos (called annotations), which have |
| // display_name and text_extraction detail populated. The given text |
| // is expected to be annotated exhaustively, for example, if you look |
| // for animals and text contains "dolphin" that is not labeled, then |
| // "dolphin" is assumed to not be an animal. Any given text snippet |
| // content must have 30,000 characters or less, and also be UTF-8 NFC |
| // encoded (ASCII already is). The document .JSONL file contains, per line, a proto that wraps a |
| // Document proto with input_config set. Only PDF documents are |
| // supported now, and each document may be up to 2MB large. Currently |
| // annotations on documents cannot be specified at import. Any given |
| // .JSONL file must be 100MB or smaller. |
| // Three sample CSV rows: |
| // TRAIN,gs://folder/file1.jsonl |
| // VALIDATE,gs://folder/file2.jsonl |
| // TEST,gs://folder/file3.jsonl |
| // Sample in-line JSON Lines file for entity extraction (presented here |
| // with artificial line breaks, but the only actual line break is |
| // denoted by \n).: |
| // { |
| // "text_snippet": { |
| // "content": "dog car cat" |
| // } "annotations": [ |
| // { |
| // "display_name": "animal", |
| // "text_extraction": { |
| // "text_segment": {"start_offset": 0, "end_offset": 3} |
| // } |
| // }, |
| // { |
| // "display_name": "vehicle", |
| // "text_extraction": { |
| // "text_segment": {"start_offset": 4, "end_offset": 7} |
| // } |
| // }, |
| // { |
| // "display_name": "animal", |
| // "text_extraction": { |
| // "text_segment": {"start_offset": 8, "end_offset": 11} |
| // } |
| // }, |
| // ], |
| // }\n |
| // { |
| // "text_snippet": { |
| // "content": "This dog is good." |
| // }, |
| // "annotations": [ |
| // { |
| // "display_name": "animal", |
| // "text_extraction": { |
| // "text_segment": {"start_offset": 5, "end_offset": 8} |
| // } |
| // } |
| // ] |
| // } |
| // Sample document JSON Lines file (presented here with artificial line |
| // breaks, but the only actual line break is denoted by \n).: |
| // { |
| // "document": { |
| // "input_config": { |
| // "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] |
| // } |
| // } |
| // } |
| // }\n |
| // { |
| // "document": { |
| // "input_config": { |
| // "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] |
| // } |
| // } |
| // } |
| // } |
| // |
| // * For Text Classification: |
| // CSV file(s) with each line in format: |
| // ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... |
| // TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If |
| // the column content is a valid gcs file path, i.e. prefixed by |
| // "gs://", it will be treated as a GCS_FILE_PATH, else if the content |
| // is enclosed within double quotes (""), it is |
| // treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path |
| // must lead to a .txt file with UTF-8 encoding, for example, |
| // "gs://folder/content.txt", and the content in it is extracted |
| // as a text snippet. In TEXT_SNIPPET case, the column content |
| // excluding quotes is treated as to be imported text snippet. In |
| // both cases, the text snippet/file size must be within 128kB. |
| // Maximum 100 unique labels are allowed per CSV row. |
| // Sample rows: |
| // TRAIN,"They have bad food and very rude",RudeService,BadFood |
| // TRAIN,gs://folder/content.txt,SlowService |
| // TEST,"Typically always bad service there.",RudeService |
| // VALIDATE,"Stomach ache to go.",BadFood |
| // |
| // * For Text Sentiment: |
| // CSV file(s) with each line in format: |
| // ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT |
| // TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If |
| // the column content is a valid gcs file path, that is, prefixed by |
| // "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated |
| // as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path |
| // must lead to a .txt file with UTF-8 encoding, for example, |
| // "gs://folder/content.txt", and the content in it is extracted |
| // as a text snippet. In TEXT_SNIPPET case, the column content itself |
| // is treated as to be imported text snippet. In both cases, the |
| // text snippet must be up to 500 characters long. |
| // Sample rows: |
| // TRAIN,"@freewrytin this is way too good for your product",2 |
| // TRAIN,"I need this product so bad",3 |
| // TEST,"Thank you for this product.",4 |
| // VALIDATE,gs://folder/content.txt,2 |
| // |
| // * For Tables: |
| // Either |
| // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or |
| // |
| // [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source] |
| // can be used. All inputs is concatenated into a single |
| // |
| // [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name] |
| // For gcs_source: |
| // CSV file(s), where the first row of the first file is the header, |
| // containing unique column names. If the first row of a subsequent |
| // file is the same as the header, then it is also treated as a |
| // header. All other rows contain values for the corresponding |
| // columns. |
| // Each .CSV file by itself must be 10GB or smaller, and their total |
| // size must be 100GB or smaller. |
| // First three sample rows of a CSV file: |
| // "Id","First Name","Last Name","Dob","Addresses" |
| // |
| // "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" |
| // |
| // "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} |
| // For bigquery_source: |
| // An URI of a BigQuery table. The user data size of the BigQuery |
| // table must be 100GB or smaller. |
| // An imported table must have between 2 and 1,000 columns, inclusive, |
| // and between 1000 and 100,000,000 rows, inclusive. There are at most 5 |
| // import data running in parallel. |
| // Definitions: |
| // ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED" |
| // Describes how the given example (file) should be used for model |
| // training. "UNASSIGNED" can be used when user has no preference. |
| // GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/image1.png". |
| // LABEL = A display name of an object on an image, video etc., e.g. "dog". |
| // Must be up to 32 characters long and can consist only of ASCII |
| // Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9. |
| // For each label an AnnotationSpec is created which display_name |
| // becomes the label; AnnotationSpecs are given back in predictions. |
| // INSTANCE_ID = A positive integer that identifies a specific instance of a |
| // labeled entity on an example. Used e.g. to track two cars on |
| // a video while being able to tell apart which one is which. |
| // BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,, |
| // A rectangle parallel to the frame of the example (image, |
| // video). If 4 vertices are given they are connected by edges |
| // in the order provided, if 2 are given they are recognized |
| // as diagonally opposite vertices of the rectangle. |
| // VERTEX = COORDINATE,COORDINATE |
| // First coordinate is horizontal (x), the second is vertical (y). |
| // COORDINATE = A float in 0 to 1 range, relative to total length of |
| // image or video in given dimension. For fractions the |
| // leading non-decimal 0 can be omitted (i.e. 0.3 = .3). |
| // Point 0,0 is in top left. |
| // TIME_SEGMENT_START = TIME_OFFSET |
| // Expresses a beginning, inclusive, of a time segment |
| // within an example that has a time dimension |
| // (e.g. video). |
| // TIME_SEGMENT_END = TIME_OFFSET |
| // Expresses an end, exclusive, of a time segment within |
| // an example that has a time dimension (e.g. video). |
| // TIME_OFFSET = A number of seconds as measured from the start of an |
| // example (e.g. video). Fractions are allowed, up to a |
| // microsecond precision. "inf" is allowed, and it means the end |
| // of the example. |
| // TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within |
| // double quotes (""). |
| // SENTIMENT = An integer between 0 and |
| // Dataset.text_sentiment_dataset_metadata.sentiment_max |
| // (inclusive). Describes the ordinal of the sentiment - higher |
| // value means a more positive sentiment. All the values are |
| // completely relative, i.e. neither 0 needs to mean a negative or |
| // neutral sentiment nor sentiment_max needs to mean a positive one |
| // - it is just required that 0 is the least positive sentiment |
| // in the data, and sentiment_max is the most positive one. |
| // The SENTIMENT shouldn't be confused with "score" or "magnitude" |
| // from the previous Natural Language Sentiment Analysis API. |
| // All SENTIMENT values between 0 and sentiment_max must be |
| // represented in the imported data. On prediction the same 0 to |
| // sentiment_max range will be used. The difference between |
| // neighboring sentiment values needs not to be uniform, e.g. 1 and |
| // 2 may be similar whereas the difference between 2 and 3 may be |
| // huge. |
| // |
| // Errors: |
// If any of the provided CSV files can't be parsed or if more than a certain
// percent of CSV rows cannot be processed then the operation fails and
// nothing is imported. Regardless of overall success or failure the per-row
// failures, up to a certain count cap, are listed in
// Operation.metadata.partial_failures.
| // |
| type InputConfig struct { |
| // The source of the input. |
| // |
| // Types that are valid to be assigned to Source: |
| // *InputConfig_GcsSource |
| // *InputConfig_BigquerySource |
| Source isInputConfig_Source `protobuf_oneof:"source"` |
| // Additional domain-specific parameters describing the semantic of the |
| // imported data, any string must be up to 25000 |
| // characters long. |
| // |
| // * For Tables: |
| // `schema_inference_version` - (integer) Required. The version of the |
| // algorithm that should be used for the initial inference of the |
| // schema (columns' DataTypes) of the table the data is being imported |
| // into. Allowed values: "1". |
| Params map[string]string `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *InputConfig) Reset() { *m = InputConfig{} } |
| func (m *InputConfig) String() string { return proto.CompactTextString(m) } |
| func (*InputConfig) ProtoMessage() {} |
| func (*InputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{0} |
| } |
| |
| func (m *InputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_InputConfig.Unmarshal(m, b) |
| } |
| func (m *InputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_InputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *InputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_InputConfig.Merge(m, src) |
| } |
| func (m *InputConfig) XXX_Size() int { |
| return xxx_messageInfo_InputConfig.Size(m) |
| } |
| func (m *InputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_InputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_InputConfig proto.InternalMessageInfo |
| |
| type isInputConfig_Source interface { |
| isInputConfig_Source() |
| } |
| |
| type InputConfig_GcsSource struct { |
| GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"` |
| } |
| |
| type InputConfig_BigquerySource struct { |
| BigquerySource *BigQuerySource `protobuf:"bytes,3,opt,name=bigquery_source,json=bigquerySource,proto3,oneof"` |
| } |
| |
| func (*InputConfig_GcsSource) isInputConfig_Source() {} |
| |
| func (*InputConfig_BigquerySource) isInputConfig_Source() {} |
| |
| func (m *InputConfig) GetSource() isInputConfig_Source { |
| if m != nil { |
| return m.Source |
| } |
| return nil |
| } |
| |
| func (m *InputConfig) GetGcsSource() *GcsSource { |
| if x, ok := m.GetSource().(*InputConfig_GcsSource); ok { |
| return x.GcsSource |
| } |
| return nil |
| } |
| |
| func (m *InputConfig) GetBigquerySource() *BigQuerySource { |
| if x, ok := m.GetSource().(*InputConfig_BigquerySource); ok { |
| return x.BigquerySource |
| } |
| return nil |
| } |
| |
| func (m *InputConfig) GetParams() map[string]string { |
| if m != nil { |
| return m.Params |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*InputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*InputConfig_GcsSource)(nil), |
| (*InputConfig_BigquerySource)(nil), |
| } |
| } |
| |
| // Input configuration for BatchPredict Action. |
| // |
| // The format of input depends on the ML problem of the model used for |
| // prediction. As input source the |
| // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] |
| // is expected, unless specified otherwise. |
| // |
| // The formats are represented in EBNF with commas being literal and with |
| // non-terminal symbols defined near the end of this comment. The formats |
| // are: |
| // |
| // * For Image Classification: |
| // CSV file(s) with each line having just a single column: |
| // GCS_FILE_PATH |
| // which leads to image of up to 30MB in size. Supported |
| // extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in |
| // the Batch predict output. |
| // Three sample rows: |
| // gs://folder/image1.jpeg |
| // gs://folder/image2.gif |
| // gs://folder/image3.png |
| // |
| // * For Image Object Detection: |
| // CSV file(s) with each line having just a single column: |
| // GCS_FILE_PATH |
| // which leads to image of up to 30MB in size. Supported |
| // extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in |
| // the Batch predict output. |
| // Three sample rows: |
| // gs://folder/image1.jpeg |
| // gs://folder/image2.gif |
| // gs://folder/image3.png |
| // * For Video Classification: |
| // CSV file(s) with each line in format: |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END |
| // GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h |
| // duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. |
| // TIME_SEGMENT_START and TIME_SEGMENT_END must be within the |
| // length of the video, and end has to be after the start. |
| // Three sample rows: |
| // gs://folder/video1.mp4,10,40 |
| // gs://folder/video1.mp4,20,60 |
| // gs://folder/vid2.mov,0,inf |
| // |
| // * For Video Object Tracking: |
| // CSV file(s) with each line in format: |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END |
| // GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h |
| // duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. |
| // TIME_SEGMENT_START and TIME_SEGMENT_END must be within the |
| // length of the video, and end has to be after the start. |
| // Three sample rows: |
| // gs://folder/video1.mp4,10,240 |
| // gs://folder/video1.mp4,300,360 |
| // gs://folder/vid2.mov,0,inf |
| // * For Text Classification: |
| // CSV file(s) with each line having just a single column: |
| // GCS_FILE_PATH | TEXT_SNIPPET |
| // Any given text file can have size upto 128kB. |
| // Any given text snippet content must have 60,000 characters or less. |
| // Three sample rows: |
| // gs://folder/text1.txt |
| // "Some text content to predict" |
| // gs://folder/text3.pdf |
| // Supported file extensions: .txt, .pdf |
| // |
| // * For Text Sentiment: |
| // CSV file(s) with each line having just a single column: |
| // GCS_FILE_PATH | TEXT_SNIPPET |
| // Any given text file can have size upto 128kB. |
| // Any given text snippet content must have 500 characters or less. |
| // Three sample rows: |
| // gs://folder/text1.txt |
| // "Some text content to predict" |
| // gs://folder/text3.pdf |
| // Supported file extensions: .txt, .pdf |
| // |
| // * For Text Extraction |
| // .JSONL (i.e. JSON Lines) file(s) which either provide text in-line or |
| // as documents (for a single BatchPredict call only one of the these |
| // formats may be used). |
| // The in-line .JSONL file(s) contain per line a proto that |
| // wraps a temporary user-assigned TextSnippet ID (string up to 2000 |
| // characters long) called "id", a TextSnippet proto (in |
| // json representation) and zero or more TextFeature protos. Any given |
| // text snippet content must have 30,000 characters or less, and also |
| // be UTF-8 NFC encoded (ASCII already is). The IDs provided should be |
| // unique. |
| // The document .JSONL file(s) contain, per line, a proto that wraps a |
| // Document proto with input_config set. Only PDF documents are |
| // supported now, and each document must be up to 2MB large. |
| // Any given .JSONL file must be 100MB or smaller, and no more than 20 |
| // files may be given. |
| // Sample in-line JSON Lines file (presented here with artificial line |
| // breaks, but the only actual line break is denoted by \n): |
| // { |
| // "id": "my_first_id", |
| // "text_snippet": { "content": "dog car cat"}, |
| // "text_features": [ |
| // { |
| // "text_segment": {"start_offset": 4, "end_offset": 6}, |
| // "structural_type": PARAGRAPH, |
| // "bounding_poly": { |
| // "normalized_vertices": [ |
| // {"x": 0.1, "y": 0.1}, |
| // {"x": 0.1, "y": 0.3}, |
| // {"x": 0.3, "y": 0.3}, |
| // {"x": 0.3, "y": 0.1}, |
| // ] |
| // }, |
| // } |
| // ], |
| // }\n |
| // { |
| // "id": "2", |
| // "text_snippet": { |
| // "content": "An elaborate content", |
| // "mime_type": "text/plain" |
| // } |
| // } |
| // Sample document JSON Lines file (presented here with artificial line |
| // breaks, but the only actual line break is denoted by \n).: |
| // { |
| // "document": { |
| // "input_config": { |
| // "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] |
| // } |
| // } |
| // } |
| // }\n |
| // { |
| // "document": { |
| // "input_config": { |
| // "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] |
| // } |
| // } |
| // } |
| // } |
| // |
| // * For Tables: |
| // Either |
| // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or |
| // |
| // [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]. |
| // GCS case: |
| // CSV file(s), each by itself 10GB or smaller and total size must be |
| // 100GB or smaller, where first file must have a header containing |
| // column names. If the first row of a subsequent file is the same as |
| // the header, then it is also treated as a header. All other rows |
| // contain values for the corresponding columns. |
| // The column names must contain the model's |
| // |
| // [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] |
| // |
| // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] |
| // (order doesn't matter). The columns corresponding to the model's |
| // input feature column specs must contain values compatible with the |
| // column spec's data types. Prediction on all the rows, i.e. the CSV |
| // lines, will be attempted. For FORECASTING |
| // |
| // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: |
| // all columns having |
| // |
| // [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] |
| // type will be ignored. |
| // First three sample rows of a CSV file: |
| // "First Name","Last Name","Dob","Addresses" |
| // |
| // "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" |
| // |
| // "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} |
| // BigQuery case: |
| // An URI of a BigQuery table. The user data size of the BigQuery |
| // table must be 100GB or smaller. |
| // The column names must contain the model's |
| // |
| // [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] |
| // |
| // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] |
| // (order doesn't matter). The columns corresponding to the model's |
| // input feature column specs must contain values compatible with the |
| // column spec's data types. Prediction on all the rows of the table |
| // will be attempted. For FORECASTING |
| // |
| // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: |
| // all columns having |
| // |
| // [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] |
| // type will be ignored. |
| // |
| // Definitions: |
| // GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/video.avi". |
| // TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed within |
| // double quotes ("") |
| // TIME_SEGMENT_START = TIME_OFFSET |
| // Expresses a beginning, inclusive, of a time segment |
| // within an |
| // example that has a time dimension (e.g. video). |
| // TIME_SEGMENT_END = TIME_OFFSET |
| // Expresses an end, exclusive, of a time segment within |
| // an example that has a time dimension (e.g. video). |
| // TIME_OFFSET = A number of seconds as measured from the start of an |
| // example (e.g. video). Fractions are allowed, up to a |
| // microsecond precision. "inf" is allowed and it means the end |
| // of the example. |
| // |
| // Errors: |
// If any of the provided CSV files can't be parsed or if more than a certain
// percent of CSV rows cannot be processed then the operation fails and
// prediction does not happen. Regardless of overall success or failure the
// per-row failures, up to a certain count cap, will be listed in
// Operation.metadata.partial_failures.
| type BatchPredictInputConfig struct { |
| // Required. The source of the input. |
| // |
| // Types that are valid to be assigned to Source: |
| // *BatchPredictInputConfig_GcsSource |
| // *BatchPredictInputConfig_BigquerySource |
| Source isBatchPredictInputConfig_Source `protobuf_oneof:"source"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BatchPredictInputConfig) Reset() { *m = BatchPredictInputConfig{} } |
| func (m *BatchPredictInputConfig) String() string { return proto.CompactTextString(m) } |
| func (*BatchPredictInputConfig) ProtoMessage() {} |
| func (*BatchPredictInputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{1} |
| } |
| |
| func (m *BatchPredictInputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BatchPredictInputConfig.Unmarshal(m, b) |
| } |
| func (m *BatchPredictInputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BatchPredictInputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *BatchPredictInputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BatchPredictInputConfig.Merge(m, src) |
| } |
| func (m *BatchPredictInputConfig) XXX_Size() int { |
| return xxx_messageInfo_BatchPredictInputConfig.Size(m) |
| } |
| func (m *BatchPredictInputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_BatchPredictInputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BatchPredictInputConfig proto.InternalMessageInfo |
| |
| type isBatchPredictInputConfig_Source interface { |
| isBatchPredictInputConfig_Source() |
| } |
| |
| type BatchPredictInputConfig_GcsSource struct { |
| GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"` |
| } |
| |
| type BatchPredictInputConfig_BigquerySource struct { |
| BigquerySource *BigQuerySource `protobuf:"bytes,2,opt,name=bigquery_source,json=bigquerySource,proto3,oneof"` |
| } |
| |
| func (*BatchPredictInputConfig_GcsSource) isBatchPredictInputConfig_Source() {} |
| |
| func (*BatchPredictInputConfig_BigquerySource) isBatchPredictInputConfig_Source() {} |
| |
| func (m *BatchPredictInputConfig) GetSource() isBatchPredictInputConfig_Source { |
| if m != nil { |
| return m.Source |
| } |
| return nil |
| } |
| |
| func (m *BatchPredictInputConfig) GetGcsSource() *GcsSource { |
| if x, ok := m.GetSource().(*BatchPredictInputConfig_GcsSource); ok { |
| return x.GcsSource |
| } |
| return nil |
| } |
| |
| func (m *BatchPredictInputConfig) GetBigquerySource() *BigQuerySource { |
| if x, ok := m.GetSource().(*BatchPredictInputConfig_BigquerySource); ok { |
| return x.BigquerySource |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*BatchPredictInputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*BatchPredictInputConfig_GcsSource)(nil), |
| (*BatchPredictInputConfig_BigquerySource)(nil), |
| } |
| } |
| |
| // Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. |
| type DocumentInputConfig struct { |
| // The Google Cloud Storage location of the document file. Only a single path |
| // should be given. |
| // Max supported size: 512MB. |
| // Supported extensions: .PDF. |
| GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3" json:"gcs_source,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *DocumentInputConfig) Reset() { *m = DocumentInputConfig{} } |
| func (m *DocumentInputConfig) String() string { return proto.CompactTextString(m) } |
| func (*DocumentInputConfig) ProtoMessage() {} |
| func (*DocumentInputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{2} |
| } |
| |
| func (m *DocumentInputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_DocumentInputConfig.Unmarshal(m, b) |
| } |
| func (m *DocumentInputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_DocumentInputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *DocumentInputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_DocumentInputConfig.Merge(m, src) |
| } |
| func (m *DocumentInputConfig) XXX_Size() int { |
| return xxx_messageInfo_DocumentInputConfig.Size(m) |
| } |
| func (m *DocumentInputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_DocumentInputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_DocumentInputConfig proto.InternalMessageInfo |
| |
| func (m *DocumentInputConfig) GetGcsSource() *GcsSource { |
| if m != nil { |
| return m.GcsSource |
| } |
| return nil |
| } |
| |
| // * For Translation: |
| // CSV file `translation.csv`, with each line in format: |
| // ML_USE,GCS_FILE_PATH |
| // GCS_FILE_PATH leads to a .TSV file which describes examples that have |
| // given ML_USE, using the following row format per line: |
| // TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target |
| // language) |
| // |
| // * For Tables: |
| // Output depends on whether the dataset was imported from GCS or |
| // BigQuery. |
| // GCS case: |
| // |
| // [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination] |
| // must be set. Exported are CSV file(s) `tables_1.csv`, |
| // `tables_2.csv`,...,`tables_N.csv` with each having as header line |
| // the table's column names, and all other lines contain values for |
| // the header columns. |
| // BigQuery case: |
| // |
| // [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] |
| // pointing to a BigQuery project must be set. In the given project a |
| // new dataset will be created with name |
| // |
| // `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>` |
| // where <automl-dataset-display-name> will be made |
| // BigQuery-dataset-name compatible (e.g. most special characters will |
| // become underscores), and timestamp will be in |
| // YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that |
| // dataset a new table called `primary_table` will be created, and |
| // filled with precisely the same data as this obtained on import. |
| type OutputConfig struct { |
| // Required. The destination of the output. |
| // |
| // Types that are valid to be assigned to Destination: |
| // *OutputConfig_GcsDestination |
| // *OutputConfig_BigqueryDestination |
| Destination isOutputConfig_Destination `protobuf_oneof:"destination"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *OutputConfig) Reset() { *m = OutputConfig{} } |
| func (m *OutputConfig) String() string { return proto.CompactTextString(m) } |
| func (*OutputConfig) ProtoMessage() {} |
| func (*OutputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{3} |
| } |
| |
| func (m *OutputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_OutputConfig.Unmarshal(m, b) |
| } |
| func (m *OutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_OutputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *OutputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_OutputConfig.Merge(m, src) |
| } |
| func (m *OutputConfig) XXX_Size() int { |
| return xxx_messageInfo_OutputConfig.Size(m) |
| } |
| func (m *OutputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_OutputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_OutputConfig proto.InternalMessageInfo |
| |
| type isOutputConfig_Destination interface { |
| isOutputConfig_Destination() |
| } |
| |
| type OutputConfig_GcsDestination struct { |
| GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"` |
| } |
| |
| type OutputConfig_BigqueryDestination struct { |
| BigqueryDestination *BigQueryDestination `protobuf:"bytes,2,opt,name=bigquery_destination,json=bigqueryDestination,proto3,oneof"` |
| } |
| |
| func (*OutputConfig_GcsDestination) isOutputConfig_Destination() {} |
| |
| func (*OutputConfig_BigqueryDestination) isOutputConfig_Destination() {} |
| |
| func (m *OutputConfig) GetDestination() isOutputConfig_Destination { |
| if m != nil { |
| return m.Destination |
| } |
| return nil |
| } |
| |
| func (m *OutputConfig) GetGcsDestination() *GcsDestination { |
| if x, ok := m.GetDestination().(*OutputConfig_GcsDestination); ok { |
| return x.GcsDestination |
| } |
| return nil |
| } |
| |
| func (m *OutputConfig) GetBigqueryDestination() *BigQueryDestination { |
| if x, ok := m.GetDestination().(*OutputConfig_BigqueryDestination); ok { |
| return x.BigqueryDestination |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*OutputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*OutputConfig_GcsDestination)(nil), |
| (*OutputConfig_BigqueryDestination)(nil), |
| } |
| } |
| |
| // Output configuration for BatchPredict Action. |
| // |
| // As destination the |
| // |
| // [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] |
// must be set unless specified otherwise for a domain. If gcs_destination is
// set then in the given directory a new directory is created. Its name
// will be
// "prediction-<model-display-name>-<timestamp-of-prediction-call>",
// where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Its
// contents depend on the ML problem the predictions are made for.
| // |
| // * For Image Classification: |
| // In the created directory files `image_classification_1.jsonl`, |
| // `image_classification_2.jsonl`,...,`image_classification_N.jsonl` |
| // will be created, where N may be 1, and depends on the |
| // total number of the successfully predicted images and annotations. |
| // A single image will be listed only once with all its annotations, |
| // and its annotations will never be split across files. |
| // Each .JSONL file will contain, per line, a JSON representation of a |
| // proto that wraps image's "ID" : "<id_value>" followed by a list of |
| // zero or more AnnotationPayload protos (called annotations), which |
| // have classification detail populated. |
| // If prediction for any image failed (partially or completely), then an |
| // additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` |
| // files will be created (N depends on total number of failed |
| // predictions). These files will have a JSON representation of a proto |
| // that wraps the same "ID" : "<id_value>" but here followed by |
| // exactly one |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // containing only `code` and `message`fields. |
| // |
| // * For Image Object Detection: |
| // In the created directory files `image_object_detection_1.jsonl`, |
| // `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` |
| // will be created, where N may be 1, and depends on the |
| // total number of the successfully predicted images and annotations. |
| // Each .JSONL file will contain, per line, a JSON representation of a |
| // proto that wraps image's "ID" : "<id_value>" followed by a list of |
| // zero or more AnnotationPayload protos (called annotations), which |
| // have image_object_detection detail populated. A single image will |
| // be listed only once with all its annotations, and its annotations |
| // will never be split across files. |
| // If prediction for any image failed (partially or completely), then |
| // additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` |
| // files will be created (N depends on total number of failed |
| // predictions). These files will have a JSON representation of a proto |
| // that wraps the same "ID" : "<id_value>" but here followed by |
| // exactly one |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // containing only `code` and `message`fields. |
| // * For Video Classification: |
| // In the created directory a video_classification.csv file, and a .JSON |
| // file per each video classification requested in the input (i.e. each |
| // line in given CSV(s)), will be created. |
| // |
| // The format of video_classification.csv is: |
| // |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS |
| // where: |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 |
| // the prediction input lines (i.e. video_classification.csv has |
| // precisely the same number of lines as the prediction input had.) |
| // JSON_FILE_NAME = Name of .JSON file in the output directory, which |
| // contains prediction responses for the video time segment. |
| // STATUS = "OK" if prediction completed successfully, or an error code |
| // with message otherwise. If STATUS is not "OK" then the .JSON file |
| // for that line may not exist or be empty. |
| // |
| // Each .JSON file, assuming STATUS is "OK", will contain a list of |
| // AnnotationPayload protos in JSON format, which are the predictions |
| // for the video time segment the file is assigned to in the |
| // video_classification.csv. All AnnotationPayload protos will have |
| // video_classification field set, and will be sorted by |
| // video_classification.type field (note that the returned types are |
| // governed by `classifaction_types` parameter in |
| // [PredictService.BatchPredictRequest.params][]). |
| // |
| // * For Video Object Tracking: |
| // In the created directory a video_object_tracking.csv file will be |
| // created, and multiple files video_object_trackinng_1.json, |
| // video_object_trackinng_2.json,..., video_object_trackinng_N.json, |
| // where N is the number of requests in the input (i.e. the number of |
| // lines in given CSV(s)). |
| // |
| // The format of video_object_tracking.csv is: |
| // |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS |
| // where: |
| // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 |
| // the prediction input lines (i.e. video_object_tracking.csv has |
| // precisely the same number of lines as the prediction input had.) |
| // JSON_FILE_NAME = Name of .JSON file in the output directory, which |
| // contains prediction responses for the video time segment. |
| // STATUS = "OK" if prediction completed successfully, or an error |
| // code with message otherwise. If STATUS is not "OK" then the .JSON |
| // file for that line may not exist or be empty. |
| // |
| // Each .JSON file, assuming STATUS is "OK", will contain a list of |
| // AnnotationPayload protos in JSON format, which are the predictions |
| // for each frame of the video time segment the file is assigned to in |
| // video_object_tracking.csv. All AnnotationPayload protos will have |
| // video_object_tracking field set. |
| // * For Text Classification: |
| // In the created directory files `text_classification_1.jsonl`, |
| // `text_classification_2.jsonl`,...,`text_classification_N.jsonl` |
| // will be created, where N may be 1, and depends on the |
| // total number of inputs and annotations found. |
| // |
| // Each .JSONL file will contain, per line, a JSON representation of a |
| // proto that wraps input text snippet or input text file and a list of |
| // zero or more AnnotationPayload protos (called annotations), which |
| // have classification detail populated. A single text snippet or file |
| // will be listed only once with all its annotations, and its |
| // annotations will never be split across files. |
| // |
| // If prediction for any text snippet or file failed (partially or |
| // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., |
| // `errors_N.jsonl` files will be created (N depends on total number of |
| // failed predictions). These files will have a JSON representation of a |
| // proto that wraps input text snippet or input text file followed by |
| // exactly one |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // containing only `code` and `message`. |
| // |
| // * For Text Sentiment: |
| // In the created directory files `text_sentiment_1.jsonl`, |
| // `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl` |
| // will be created, where N may be 1, and depends on the |
| // total number of inputs and annotations found. |
| // |
| // Each .JSONL file will contain, per line, a JSON representation of a |
| // proto that wraps input text snippet or input text file and a list of |
| // zero or more AnnotationPayload protos (called annotations), which |
| // have text_sentiment detail populated. A single text snippet or file |
| // will be listed only once with all its annotations, and its |
| // annotations will never be split across files. |
| // |
| // If prediction for any text snippet or file failed (partially or |
| // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., |
| // `errors_N.jsonl` files will be created (N depends on total number of |
| // failed predictions). These files will have a JSON representation of a |
| // proto that wraps input text snippet or input text file followed by |
| // exactly one |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // containing only `code` and `message`. |
| // |
| // * For Text Extraction: |
| // In the created directory files `text_extraction_1.jsonl`, |
| // `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl` |
| // will be created, where N may be 1, and depends on the |
| // total number of inputs and annotations found. |
| // The contents of these .JSONL file(s) depend on whether the input |
| // used inline text, or documents. |
| // If input was inline, then each .JSONL file will contain, per line, |
| // a JSON representation of a proto that wraps given in request text |
| // snippet's "id" (if specified), followed by input text snippet, |
| // and a list of zero or more |
| // AnnotationPayload protos (called annotations), which have |
| // text_extraction detail populated. A single text snippet will be |
| // listed only once with all its annotations, and its annotations will |
| // never be split across files. |
| // If input used documents, then each .JSONL file will contain, per |
| // line, a JSON representation of a proto that wraps given in request |
| // document proto, followed by its OCR-ed representation in the form |
| // of a text snippet, finally followed by a list of zero or more |
| // AnnotationPayload protos (called annotations), which have |
| // text_extraction detail populated and refer, via their indices, to |
| // the OCR-ed text snippet. A single document (and its text snippet) |
| // will be listed only once with all its annotations, and its |
| // annotations will never be split across files. |
| // If prediction for any text snippet failed (partially or completely), |
| // then additional `errors_1.jsonl`, `errors_2.jsonl`,..., |
| // `errors_N.jsonl` files will be created (N depends on total number of |
| // failed predictions). These files will have a JSON representation of a |
| // proto that wraps either the "id" : "<id_value>" (in case of inline) |
| // or the document proto (in case of document) but here followed by |
| // exactly one |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // containing only `code` and `message`. |
| // |
| // * For Tables: |
| // Output depends on whether |
| // |
| // [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination] |
| // or |
| // |
| // [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination] |
| // is set (either is allowed). |
| // GCS case: |
| // In the created directory files `tables_1.csv`, `tables_2.csv`,..., |
| // `tables_N.csv` will be created, where N may be 1, and depends on |
| // the total number of the successfully predicted rows. |
| // For all CLASSIFICATION |
| // |
| // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: |
| // Each .csv file will contain a header, listing all columns' |
| // |
| // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] |
| // given on input followed by M target column names in the format of |
| // |
| // "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] |
| // |
| // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>_<target |
| // value>_score" where M is the number of distinct target values, |
| // i.e. number of distinct values in the target column of the table |
| // used to train the model. Subsequent lines will contain the |
| // respective values of successfully predicted rows, with the last, |
| // i.e. the target, columns having the corresponding prediction |
| // [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. |
| // For REGRESSION and FORECASTING |
| // |
| // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]: |
| // Each .csv file will contain a header, listing all columns' |
| // [display_name-s][google.cloud.automl.v1beta1.display_name] given |
| // on input followed by the predicted target column with name in the |
| // format of |
| // |
| // "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] |
| // |
| // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" |
| // Subsequent lines will contain the respective values of |
| // successfully predicted rows, with the last, i.e. the target, |
| // column having the predicted target value. |
| // If prediction for any rows failed, then an additional |
| // `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be |
| // created (N depends on total number of failed rows). These files |
| // will have analogous format as `tables_*.csv`, but always with a |
| // single target column having |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // represented as a JSON string, and containing only `code` and |
| // `message`. |
| // BigQuery case: |
| // |
| // [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] |
| // pointing to a BigQuery project must be set. In the given project a |
| // new dataset will be created with name |
| // `prediction_<model-display-name>_<timestamp-of-prediction-call>` |
| // where <model-display-name> will be made |
| // BigQuery-dataset-name compatible (e.g. most special characters will |
| // become underscores), and timestamp will be in |
| // YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset |
| // two tables will be created, `predictions`, and `errors`. |
| // The `predictions` table's column names will be the input columns' |
| // |
| // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] |
| // followed by the target column with name in the format of |
| // |
| // "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] |
| // |
| // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" |
| // The input feature columns will contain the respective values of |
| // successfully predicted rows, with the target column having an |
| // ARRAY of |
| // |
| // [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], |
| // represented as STRUCT-s, containing |
| // [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. |
| // The `errors` table contains rows for which the prediction has |
| // failed, it has analogous input columns while the target column name |
| // is in the format of |
| // |
| // "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] |
| // |
| // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", |
| // and as a value has |
| // |
| // [`google.rpc.Status`](https: |
| // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) |
| // represented as a STRUCT, and containing only `code` and `message`. |
| type BatchPredictOutputConfig struct { |
| // Required. The destination of the output. |
| // |
| // Types that are valid to be assigned to Destination: |
| // *BatchPredictOutputConfig_GcsDestination |
| // *BatchPredictOutputConfig_BigqueryDestination |
| Destination isBatchPredictOutputConfig_Destination `protobuf_oneof:"destination"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BatchPredictOutputConfig) Reset() { *m = BatchPredictOutputConfig{} } |
| func (m *BatchPredictOutputConfig) String() string { return proto.CompactTextString(m) } |
| func (*BatchPredictOutputConfig) ProtoMessage() {} |
| func (*BatchPredictOutputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{4} |
| } |
| |
| func (m *BatchPredictOutputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BatchPredictOutputConfig.Unmarshal(m, b) |
| } |
| func (m *BatchPredictOutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BatchPredictOutputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *BatchPredictOutputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BatchPredictOutputConfig.Merge(m, src) |
| } |
| func (m *BatchPredictOutputConfig) XXX_Size() int { |
| return xxx_messageInfo_BatchPredictOutputConfig.Size(m) |
| } |
| func (m *BatchPredictOutputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_BatchPredictOutputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BatchPredictOutputConfig proto.InternalMessageInfo |
| |
| type isBatchPredictOutputConfig_Destination interface { |
| isBatchPredictOutputConfig_Destination() |
| } |
| |
| type BatchPredictOutputConfig_GcsDestination struct { |
| GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"` |
| } |
| |
| type BatchPredictOutputConfig_BigqueryDestination struct { |
| BigqueryDestination *BigQueryDestination `protobuf:"bytes,2,opt,name=bigquery_destination,json=bigqueryDestination,proto3,oneof"` |
| } |
| |
| func (*BatchPredictOutputConfig_GcsDestination) isBatchPredictOutputConfig_Destination() {} |
| |
| func (*BatchPredictOutputConfig_BigqueryDestination) isBatchPredictOutputConfig_Destination() {} |
| |
| func (m *BatchPredictOutputConfig) GetDestination() isBatchPredictOutputConfig_Destination { |
| if m != nil { |
| return m.Destination |
| } |
| return nil |
| } |
| |
| func (m *BatchPredictOutputConfig) GetGcsDestination() *GcsDestination { |
| if x, ok := m.GetDestination().(*BatchPredictOutputConfig_GcsDestination); ok { |
| return x.GcsDestination |
| } |
| return nil |
| } |
| |
| func (m *BatchPredictOutputConfig) GetBigqueryDestination() *BigQueryDestination { |
| if x, ok := m.GetDestination().(*BatchPredictOutputConfig_BigqueryDestination); ok { |
| return x.BigqueryDestination |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*BatchPredictOutputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*BatchPredictOutputConfig_GcsDestination)(nil), |
| (*BatchPredictOutputConfig_BigqueryDestination)(nil), |
| } |
| } |
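| 
| // newBatchPredictGcsOutput is an editor's sketch, not generated code: a
| // minimal illustration of how the Destination oneof is populated and read
| // back. The oneof is set by assigning one of its wrapper types; the
| // generated getters return nil unless their wrapper is the one currently
| // held. The bucket path is hypothetical.
| func newBatchPredictGcsOutput() *BatchPredictOutputConfig {
| 	cfg := &BatchPredictOutputConfig{
| 		Destination: &BatchPredictOutputConfig_GcsDestination{
| 			GcsDestination: &GcsDestination{OutputUriPrefix: "gs://my-bucket/batch-results"},
| 		},
| 	}
| 	// GetBigqueryDestination returns nil here: the GCS wrapper is held instead.
| 	_ = cfg.GetBigqueryDestination()
| 	return cfg
| }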
| |
| // Output configuration for ModelExport Action. |
| type ModelExportOutputConfig struct { |
| // Required. The destination of the output. |
| // |
| // Types that are valid to be assigned to Destination: |
| // *ModelExportOutputConfig_GcsDestination |
| // *ModelExportOutputConfig_GcrDestination |
| Destination isModelExportOutputConfig_Destination `protobuf_oneof:"destination"` |
| // The format in which the model must be exported. The available, and
| // default, formats depend on the problem and model type (if a given
| // problem and type combination doesn't have a format listed, its models
| // are not exportable):
| // |
| // * For Image Classification mobile-low-latency-1, mobile-versatile-1, |
| // mobile-high-accuracy-1: |
| // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "docker". |
| // |
| // * For Image Classification mobile-core-ml-low-latency-1, |
| // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1: |
| // "core_ml" (default). |
| // Formats description: |
| // |
| // * tflite - Used for Android mobile devices. |
| // * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/) |
| // devices. |
| // * tf_saved_model - A TensorFlow model in SavedModel format.
| // * docker - Used for Docker containers. Use the params field to customize
| // the container. The container is verified to work correctly on the
| // Ubuntu 16.04 operating system. See more at the
| // [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart).
| // * core_ml - Used for iOS mobile devices. |
| ModelFormat string `protobuf:"bytes,4,opt,name=model_format,json=modelFormat,proto3" json:"model_format,omitempty"` |
| // Additional model-type and format specific parameters describing the
| // requirements for the model files to be exported; any string must be
| // up to 25000 characters long.
| // |
| // * For `docker` format: |
| // `cpu_architecture` - (string) "x86_64" (default). |
| // `gpu_architecture` - (string) "none" (default), "nvidia". |
| Params map[string]string `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ModelExportOutputConfig) Reset() { *m = ModelExportOutputConfig{} } |
| func (m *ModelExportOutputConfig) String() string { return proto.CompactTextString(m) } |
| func (*ModelExportOutputConfig) ProtoMessage() {} |
| func (*ModelExportOutputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{5} |
| } |
| |
| func (m *ModelExportOutputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ModelExportOutputConfig.Unmarshal(m, b) |
| } |
| func (m *ModelExportOutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ModelExportOutputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *ModelExportOutputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ModelExportOutputConfig.Merge(m, src) |
| } |
| func (m *ModelExportOutputConfig) XXX_Size() int { |
| return xxx_messageInfo_ModelExportOutputConfig.Size(m) |
| } |
| func (m *ModelExportOutputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_ModelExportOutputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ModelExportOutputConfig proto.InternalMessageInfo |
| |
| type isModelExportOutputConfig_Destination interface { |
| isModelExportOutputConfig_Destination() |
| } |
| |
| type ModelExportOutputConfig_GcsDestination struct { |
| GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"` |
| } |
| |
| type ModelExportOutputConfig_GcrDestination struct { |
| GcrDestination *GcrDestination `protobuf:"bytes,3,opt,name=gcr_destination,json=gcrDestination,proto3,oneof"` |
| } |
| |
| func (*ModelExportOutputConfig_GcsDestination) isModelExportOutputConfig_Destination() {} |
| |
| func (*ModelExportOutputConfig_GcrDestination) isModelExportOutputConfig_Destination() {} |
| |
| func (m *ModelExportOutputConfig) GetDestination() isModelExportOutputConfig_Destination { |
| if m != nil { |
| return m.Destination |
| } |
| return nil |
| } |
| |
| func (m *ModelExportOutputConfig) GetGcsDestination() *GcsDestination { |
| if x, ok := m.GetDestination().(*ModelExportOutputConfig_GcsDestination); ok { |
| return x.GcsDestination |
| } |
| return nil |
| } |
| |
| func (m *ModelExportOutputConfig) GetGcrDestination() *GcrDestination { |
| if x, ok := m.GetDestination().(*ModelExportOutputConfig_GcrDestination); ok { |
| return x.GcrDestination |
| } |
| return nil |
| } |
| |
| func (m *ModelExportOutputConfig) GetModelFormat() string { |
| if m != nil { |
| return m.ModelFormat |
| } |
| return "" |
| } |
| |
| func (m *ModelExportOutputConfig) GetParams() map[string]string { |
| if m != nil { |
| return m.Params |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*ModelExportOutputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*ModelExportOutputConfig_GcsDestination)(nil), |
| (*ModelExportOutputConfig_GcrDestination)(nil), |
| } |
| } |
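| 
| // newDockerModelExport is an editor's sketch, not generated code: it wires
| // together the Destination oneof, ModelFormat, and Params fields for a
| // "docker" export as described in the comments above. The GCS prefix is
| // hypothetical; the cpu_architecture/gpu_architecture keys and their
| // values come from the Params documentation.
| func newDockerModelExport() *ModelExportOutputConfig {
| 	return &ModelExportOutputConfig{
| 		Destination: &ModelExportOutputConfig_GcsDestination{
| 			GcsDestination: &GcsDestination{OutputUriPrefix: "gs://my-bucket/exported-model"},
| 		},
| 		ModelFormat: "docker",
| 		Params: map[string]string{
| 			"cpu_architecture": "x86_64",
| 			"gpu_architecture": "nvidia",
| 		},
| 	}
| }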
| |
| // Output configuration for ExportEvaluatedExamples Action. Note that this call
| // is available only for 30 days after the model was evaluated.
| // The output depends on the domain, as follows (note that only examples from |
| // the TEST set are exported): |
| // |
| // * For Tables: |
| // |
| // [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination] |
| // pointing to a BigQuery project must be set. In the given project a |
| // new dataset will be created with name |
| // |
| // `export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>` |
| // where <model-display-name> will be made BigQuery-dataset-name |
| // compatible (e.g. most special characters will become underscores), |
| // and the timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ format, based on
| // ISO-8601. In the dataset an `evaluated_examples` table will be
| // created. It will have all the same columns as the |
| // |
| // [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id] |
| // of the |
| // [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which |
| // the model was created, as they were at the moment of model's |
| // evaluation (this includes the target column with its ground |
| // truth), followed by a column called "predicted_<target_column>". That |
| // last column will contain the model's prediction result for each |
| // respective row, given as ARRAY of |
| // [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], |
| // represented as STRUCT-s, containing |
| // [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. |
| type ExportEvaluatedExamplesOutputConfig struct { |
| // Required. The destination of the output. |
| // |
| // Types that are valid to be assigned to Destination: |
| // *ExportEvaluatedExamplesOutputConfig_BigqueryDestination |
| Destination isExportEvaluatedExamplesOutputConfig_Destination `protobuf_oneof:"destination"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ExportEvaluatedExamplesOutputConfig) Reset() { *m = ExportEvaluatedExamplesOutputConfig{} } |
| func (m *ExportEvaluatedExamplesOutputConfig) String() string { return proto.CompactTextString(m) } |
| func (*ExportEvaluatedExamplesOutputConfig) ProtoMessage() {} |
| func (*ExportEvaluatedExamplesOutputConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{6} |
| } |
| |
| func (m *ExportEvaluatedExamplesOutputConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ExportEvaluatedExamplesOutputConfig.Unmarshal(m, b) |
| } |
| func (m *ExportEvaluatedExamplesOutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ExportEvaluatedExamplesOutputConfig.Marshal(b, m, deterministic) |
| } |
| func (m *ExportEvaluatedExamplesOutputConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ExportEvaluatedExamplesOutputConfig.Merge(m, src) |
| } |
| func (m *ExportEvaluatedExamplesOutputConfig) XXX_Size() int { |
| return xxx_messageInfo_ExportEvaluatedExamplesOutputConfig.Size(m) |
| } |
| func (m *ExportEvaluatedExamplesOutputConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_ExportEvaluatedExamplesOutputConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ExportEvaluatedExamplesOutputConfig proto.InternalMessageInfo |
| |
| type isExportEvaluatedExamplesOutputConfig_Destination interface { |
| isExportEvaluatedExamplesOutputConfig_Destination() |
| } |
| |
| type ExportEvaluatedExamplesOutputConfig_BigqueryDestination struct { |
| BigqueryDestination *BigQueryDestination `protobuf:"bytes,2,opt,name=bigquery_destination,json=bigqueryDestination,proto3,oneof"` |
| } |
| |
| func (*ExportEvaluatedExamplesOutputConfig_BigqueryDestination) isExportEvaluatedExamplesOutputConfig_Destination() { |
| } |
| |
| func (m *ExportEvaluatedExamplesOutputConfig) GetDestination() isExportEvaluatedExamplesOutputConfig_Destination { |
| if m != nil { |
| return m.Destination |
| } |
| return nil |
| } |
| |
| func (m *ExportEvaluatedExamplesOutputConfig) GetBigqueryDestination() *BigQueryDestination { |
| if x, ok := m.GetDestination().(*ExportEvaluatedExamplesOutputConfig_BigqueryDestination); ok { |
| return x.BigqueryDestination |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*ExportEvaluatedExamplesOutputConfig) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*ExportEvaluatedExamplesOutputConfig_BigqueryDestination)(nil), |
| } |
| } |
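| 
| // newEvaluatedExamplesExport is an editor's sketch, not generated code:
| // since BigQuery is the only valid destination for this config, the oneof
| // has a single wrapper type. The project ID is hypothetical.
| func newEvaluatedExamplesExport() *ExportEvaluatedExamplesOutputConfig {
| 	return &ExportEvaluatedExamplesOutputConfig{
| 		Destination: &ExportEvaluatedExamplesOutputConfig_BigqueryDestination{
| 			BigqueryDestination: &BigQueryDestination{OutputUri: "bq://my-project"},
| 		},
| 	}
| }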
| |
| // The Google Cloud Storage location for the input content. |
| type GcsSource struct { |
| // Required. Google Cloud Storage URIs to input files, up to 2000 characters |
| // long. Accepted forms: |
| // * Full object path, e.g. gs://bucket/directory/object.csv |
| InputUris []string `protobuf:"bytes,1,rep,name=input_uris,json=inputUris,proto3" json:"input_uris,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *GcsSource) Reset() { *m = GcsSource{} } |
| func (m *GcsSource) String() string { return proto.CompactTextString(m) } |
| func (*GcsSource) ProtoMessage() {} |
| func (*GcsSource) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{7} |
| } |
| |
| func (m *GcsSource) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_GcsSource.Unmarshal(m, b) |
| } |
| func (m *GcsSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_GcsSource.Marshal(b, m, deterministic) |
| } |
| func (m *GcsSource) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_GcsSource.Merge(m, src) |
| } |
| func (m *GcsSource) XXX_Size() int { |
| return xxx_messageInfo_GcsSource.Size(m) |
| } |
| func (m *GcsSource) XXX_DiscardUnknown() { |
| xxx_messageInfo_GcsSource.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_GcsSource proto.InternalMessageInfo |
| |
| func (m *GcsSource) GetInputUris() []string { |
| if m != nil { |
| return m.InputUris |
| } |
| return nil |
| } |
| |
| // The BigQuery location for the input content. |
| type BigQuerySource struct { |
| // Required. BigQuery URI to a table, up to 2000 characters long. |
| // Accepted forms: |
| // * BigQuery path, e.g. bq://projectId.bqDatasetId.bqTableId
| InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BigQuerySource) Reset() { *m = BigQuerySource{} } |
| func (m *BigQuerySource) String() string { return proto.CompactTextString(m) } |
| func (*BigQuerySource) ProtoMessage() {} |
| func (*BigQuerySource) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{8} |
| } |
| |
| func (m *BigQuerySource) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BigQuerySource.Unmarshal(m, b) |
| } |
| func (m *BigQuerySource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BigQuerySource.Marshal(b, m, deterministic) |
| } |
| func (m *BigQuerySource) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BigQuerySource.Merge(m, src) |
| } |
| func (m *BigQuerySource) XXX_Size() int { |
| return xxx_messageInfo_BigQuerySource.Size(m) |
| } |
| func (m *BigQuerySource) XXX_DiscardUnknown() { |
| xxx_messageInfo_BigQuerySource.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BigQuerySource proto.InternalMessageInfo |
| |
| func (m *BigQuerySource) GetInputUri() string { |
| if m != nil { |
| return m.InputUri |
| } |
| return "" |
| } |
| |
| // The Google Cloud Storage location where the output is to be written to. |
| type GcsDestination struct { |
| // Required. Google Cloud Storage URI to output directory, up to 2000 |
| // characters long. |
| // Accepted forms: |
| // * Prefix path: gs://bucket/directory |
| // The requesting user must have write permission to the bucket. |
| // The directory is created if it doesn't exist. |
| OutputUriPrefix string `protobuf:"bytes,1,opt,name=output_uri_prefix,json=outputUriPrefix,proto3" json:"output_uri_prefix,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *GcsDestination) Reset() { *m = GcsDestination{} } |
| func (m *GcsDestination) String() string { return proto.CompactTextString(m) } |
| func (*GcsDestination) ProtoMessage() {} |
| func (*GcsDestination) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{9} |
| } |
| |
| func (m *GcsDestination) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_GcsDestination.Unmarshal(m, b) |
| } |
| func (m *GcsDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_GcsDestination.Marshal(b, m, deterministic) |
| } |
| func (m *GcsDestination) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_GcsDestination.Merge(m, src) |
| } |
| func (m *GcsDestination) XXX_Size() int { |
| return xxx_messageInfo_GcsDestination.Size(m) |
| } |
| func (m *GcsDestination) XXX_DiscardUnknown() { |
| xxx_messageInfo_GcsDestination.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_GcsDestination proto.InternalMessageInfo |
| |
| func (m *GcsDestination) GetOutputUriPrefix() string { |
| if m != nil { |
| return m.OutputUriPrefix |
| } |
| return "" |
| } |
| |
| // The BigQuery location for the output content. |
| type BigQueryDestination struct { |
| // Required. BigQuery URI to a project, up to 2000 characters long. |
| // Accepted forms: |
| // * BigQuery path, e.g. bq://projectId
| OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *BigQueryDestination) Reset() { *m = BigQueryDestination{} } |
| func (m *BigQueryDestination) String() string { return proto.CompactTextString(m) } |
| func (*BigQueryDestination) ProtoMessage() {} |
| func (*BigQueryDestination) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{10} |
| } |
| |
| func (m *BigQueryDestination) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_BigQueryDestination.Unmarshal(m, b) |
| } |
| func (m *BigQueryDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_BigQueryDestination.Marshal(b, m, deterministic) |
| } |
| func (m *BigQueryDestination) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_BigQueryDestination.Merge(m, src) |
| } |
| func (m *BigQueryDestination) XXX_Size() int { |
| return xxx_messageInfo_BigQueryDestination.Size(m) |
| } |
| func (m *BigQueryDestination) XXX_DiscardUnknown() { |
| xxx_messageInfo_BigQueryDestination.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_BigQueryDestination proto.InternalMessageInfo |
| |
| func (m *BigQueryDestination) GetOutputUri() string { |
| if m != nil { |
| return m.OutputUri |
| } |
| return "" |
| } |
| |
| // The GCR location to which the image must be pushed.
| type GcrDestination struct { |
| // Required. Google Container Registry URI of the new image, up to 2000
| // characters long. See
| // https://cloud.google.com/container-registry/docs/pushing-and-pulling#pushing_an_image_to_a_registry
| // Accepted forms: |
| // * [HOSTNAME]/[PROJECT-ID]/[IMAGE] |
| // * [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG] |
| // |
| // The requesting user must have permission to push images to the project.
| OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *GcrDestination) Reset() { *m = GcrDestination{} } |
| func (m *GcrDestination) String() string { return proto.CompactTextString(m) } |
| func (*GcrDestination) ProtoMessage() {} |
| func (*GcrDestination) Descriptor() ([]byte, []int) { |
| return fileDescriptor_6e2d768504aa30d7, []int{11} |
| } |
| |
| func (m *GcrDestination) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_GcrDestination.Unmarshal(m, b) |
| } |
| func (m *GcrDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_GcrDestination.Marshal(b, m, deterministic) |
| } |
| func (m *GcrDestination) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_GcrDestination.Merge(m, src) |
| } |
| func (m *GcrDestination) XXX_Size() int { |
| return xxx_messageInfo_GcrDestination.Size(m) |
| } |
| func (m *GcrDestination) XXX_DiscardUnknown() { |
| xxx_messageInfo_GcrDestination.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_GcrDestination proto.InternalMessageInfo |
| |
| func (m *GcrDestination) GetOutputUri() string { |
| if m != nil { |
| return m.OutputUri |
| } |
| return "" |
| } |
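| 
| // exampleLocations is an editor's sketch, not generated code: it collects
| // the accepted URI forms of the leaf source and destination messages
| // documented above in one place. All project, bucket, and image names are
| // hypothetical.
| func exampleLocations() {
| 	_ = &GcsSource{InputUris: []string{"gs://my-bucket/dir/data.csv"}}    // full object path
| 	_ = &BigQuerySource{InputUri: "bq://myProject.myDataset.myTable"}    // BigQuery table path
| 	_ = &GcsDestination{OutputUriPrefix: "gs://my-bucket/output-dir"}    // prefix path
| 	_ = &BigQueryDestination{OutputUri: "bq://myProject"}                // BigQuery project path
| 	_ = &GcrDestination{OutputUri: "gcr.io/my-project/my-image:latest"}  // [HOSTNAME]/[PROJECT-ID]/[IMAGE]:[TAG]
| }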
| |
| func init() { |
| proto.RegisterType((*InputConfig)(nil), "google.cloud.automl.v1beta1.InputConfig") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.automl.v1beta1.InputConfig.ParamsEntry") |
| proto.RegisterType((*BatchPredictInputConfig)(nil), "google.cloud.automl.v1beta1.BatchPredictInputConfig") |
| proto.RegisterType((*DocumentInputConfig)(nil), "google.cloud.automl.v1beta1.DocumentInputConfig") |
| proto.RegisterType((*OutputConfig)(nil), "google.cloud.automl.v1beta1.OutputConfig") |
| proto.RegisterType((*BatchPredictOutputConfig)(nil), "google.cloud.automl.v1beta1.BatchPredictOutputConfig") |
| proto.RegisterType((*ModelExportOutputConfig)(nil), "google.cloud.automl.v1beta1.ModelExportOutputConfig") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry") |
| proto.RegisterType((*ExportEvaluatedExamplesOutputConfig)(nil), "google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig") |
| proto.RegisterType((*GcsSource)(nil), "google.cloud.automl.v1beta1.GcsSource") |
| proto.RegisterType((*BigQuerySource)(nil), "google.cloud.automl.v1beta1.BigQuerySource") |
| proto.RegisterType((*GcsDestination)(nil), "google.cloud.automl.v1beta1.GcsDestination") |
| proto.RegisterType((*BigQueryDestination)(nil), "google.cloud.automl.v1beta1.BigQueryDestination") |
| proto.RegisterType((*GcrDestination)(nil), "google.cloud.automl.v1beta1.GcrDestination") |
| } |
| |
| func init() { |
| proto.RegisterFile("google/cloud/automl/v1beta1/io.proto", fileDescriptor_6e2d768504aa30d7) |
| } |
| |
| var fileDescriptor_6e2d768504aa30d7 = []byte{ |
| // 647 bytes of a gzipped FileDescriptorProto |
| 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x96, 0xdf, 0x4e, 0xd4, 0x4e, |
| 0x14, 0xc7, 0x7f, 0x6d, 0x7f, 0x12, 0x7a, 0x8a, 0xa0, 0x85, 0x84, 0x06, 0xfc, 0x83, 0xd5, 0x18, |
| 0x02, 0xb1, 0x15, 0xe4, 0x42, 0xab, 0x17, 0xb2, 0xb0, 0xa2, 0x09, 0xc4, 0x75, 0x0d, 0xc4, 0x98, |
| 0x4d, 0x36, 0x43, 0x77, 0x18, 0x27, 0xb6, 0x9d, 0x3a, 0x9d, 0x12, 0x78, 0x0b, 0xef, 0xbd, 0xf6, |
| 0x41, 0x4c, 0xbc, 0xf2, 0x19, 0x7c, 0x00, 0xe3, 0x53, 0x98, 0xce, 0x74, 0x97, 0x16, 0x71, 0x25, |
| 0x86, 0x60, 0xe2, 0xdd, 0xce, 0x39, 0xdf, 0xf3, 0x39, 0xfd, 0x9e, 0xce, 0x9e, 0x5d, 0xb8, 0x45, |
| 0x18, 0x23, 0x11, 0xf6, 0xc3, 0x88, 0xe5, 0x3d, 0x1f, 0xe5, 0x82, 0xc5, 0x91, 0xbf, 0xbf, 0xb4, |
| 0x8b, 0x05, 0x5a, 0xf2, 0x29, 0xf3, 0x52, 0xce, 0x04, 0xb3, 0x67, 0x95, 0xca, 0x93, 0x2a, 0x4f, |
| 0xa9, 0xbc, 0x52, 0x35, 0x73, 0xa5, 0x44, 0xa0, 0x94, 0xfa, 0x28, 0x49, 0x98, 0x40, 0x82, 0xb2, |
| 0x24, 0x53, 0xa5, 0xee, 0x27, 0x1d, 0xac, 0x67, 0x49, 0x9a, 0x8b, 0x35, 0x96, 0xec, 0x51, 0x62, |
| 0x6f, 0x00, 0x90, 0x30, 0xeb, 0x66, 0x2c, 0xe7, 0x21, 0x76, 0xb4, 0x39, 0x6d, 0xde, 0x5a, 0xbe, |
| 0xed, 0x0d, 0xe1, 0x7b, 0x1b, 0x61, 0xf6, 0x52, 0xaa, 0x9f, 0xfe, 0xd7, 0x36, 0x49, 0xff, 0x60, |
| 0xef, 0xc0, 0xc4, 0x2e, 0x25, 0xef, 0x72, 0xcc, 0x0f, 0xfb, 0x34, 0x43, 0xd2, 0x16, 0x87, 0xd2, |
| 0x1a, 0x94, 0xbc, 0x28, 0x6a, 0x06, 0xc8, 0xf1, 0x3e, 0xa5, 0xe4, 0x6e, 0xc2, 0x48, 0x8a, 0x38, |
| 0x8a, 0x33, 0x47, 0x9f, 0x33, 0xe6, 0xad, 0xe5, 0x95, 0xa1, 0xb8, 0x8a, 0x35, 0xaf, 0x25, 0xcb, |
| 0x9a, 0x89, 0xe0, 0x87, 0xed, 0x92, 0x31, 0xf3, 0x00, 0xac, 0x4a, 0xd8, 0xbe, 0x04, 0xc6, 0x5b, |
| 0x7c, 0x28, 0x6d, 0x9b, 0xed, 0xe2, 0xa3, 0x3d, 0x05, 0x17, 0xf6, 0x51, 0x94, 0x63, 0x47, 0x97, |
| 0x31, 0x75, 0x08, 0xf4, 0xfb, 0x5a, 0x63, 0x14, 0x46, 0x94, 0x2f, 0xf7, 0xb3, 0x06, 0xd3, 0x0d, |
| 0x24, 0xc2, 0x37, 0x2d, 0x8e, 0x7b, 0x34, 0x14, 0xe7, 0x35, 0x4f, 0xfd, 0x0c, 0xe6, 0x59, 0xb1, |
| 0xd1, 0x81, 0xc9, 0x75, 0x16, 0xe6, 0x31, 0x4e, 0x6a, 0x0e, 0x9a, 0x7f, 0xee, 0xa0, 0xf2, 0xfc, |
| 0xee, 0x57, 0x0d, 0xc6, 0x9e, 0xe7, 0xe2, 0x88, 0xbb, 0x03, 0x13, 0x05, 0xb7, 0x87, 0x33, 0x41, |
| 0x13, 0x79, 0x27, 0x4b, 0xf8, 0xe2, 0xef, 0xe0, 0xeb, 0x47, 0x25, 0x85, 0x21, 0x52, 0x8b, 0xd8, |
| 0x18, 0xa6, 0x06, 0x83, 0xaa, 0xc2, 0xd5, 0xb4, 0xee, 0x9e, 0x6a, 0x5a, 0xf5, 0x0e, 0x93, 0x7d, |
| 0x5e, 0x25, 0xdc, 0xb8, 0x08, 0x56, 0x85, 0xee, 0x7e, 0xd3, 0xc0, 0xa9, 0xde, 0x81, 0x7f, 0xd8, |
| 0xea, 0x7b, 0x03, 0xa6, 0xb7, 0x58, 0x0f, 0x47, 0xcd, 0x83, 0x94, 0xf1, 0xf3, 0x71, 0x2a, 0xb9, |
| 0xbc, 0xc6, 0x35, 0x4e, 0xc5, 0xe5, 0x3f, 0x71, 0xab, 0x11, 0xfb, 0x06, 0x8c, 0xc5, 0x85, 0x95, |
| 0xee, 0x1e, 0xe3, 0x31, 0x12, 0xce, 0xff, 0xf2, 0x5b, 0x6e, 0xc9, 0xd8, 0x13, 0x19, 0xb2, 0x5f, |
| 0x1d, 0x5b, 0x38, 0x8f, 0x87, 0x76, 0xfc, 0xc5, 0x60, 0xce, 0x7a, 0xf9, 0x1c, 0x7b, 0x25, 0x1f, |
| 0x34, 0xb8, 0xa9, 0x9a, 0x36, 0x0b, 0x0d, 0x12, 0xb8, 0xd7, 0x3c, 0x40, 0x71, 0x1a, 0xe1, 0xac, |
| 0xf6, 0x7a, 0xfe, 0xce, 0x85, 0x59, 0x00, 0x73, 0xb0, 0x12, 0xec, 0xab, 0x00, 0xb4, 0xd8, 0x2e, |
| 0xdd, 0x9c, 0xd3, 0xcc, 0xd1, 0xe6, 0x8c, 0x79, 0xb3, 0x6d, 0xca, 0xc8, 0x36, 0xa7, 0x99, 0x7b, |
| 0x07, 0xc6, 0xeb, 0x2b, 0xcb, 0x9e, 0x05, 0x73, 0x50, 0x50, 0x0e, 0x67, 0xb4, 0xaf, 0x77, 0x1f, |
| 0xc1, 0x78, 0xfd, 0xee, 0xd8, 0x0b, 0x70, 0x99, 0x49, 0xcb, 0x85, 0xbe, 0x9b, 0x72, 0xbc, 0x47, |
| 0x0f, 0xca, 0xb2, 0x09, 0x95, 0xd8, 0xe6, 0xb4, 0x25, 0xc3, 0xee, 0x0a, 0x4c, 0x9e, 0xe0, 0xaa, |
| 0x78, 0xc4, 0x23, 0x44, 0x59, 0x6b, 0x0e, 0x6a, 0x5d, 0xbf, 0xe8, 0xc9, 0x4f, 0x5f, 0xd0, 0xf8, |
| 0xa8, 0xc1, 0xf5, 0x90, 0xc5, 0xc3, 0xa6, 0xdb, 0xd2, 0x5e, 0xaf, 0x96, 0x69, 0xc2, 0x22, 0x94, |
| 0x10, 0x8f, 0x71, 0xe2, 0x13, 0x9c, 0xc8, 0x5f, 0x69, 0x5f, 0xa5, 0x50, 0x4a, 0xb3, 0x13, 0xff, |
| 0x09, 0x3c, 0x54, 0xc7, 0x2f, 0xfa, 0xec, 0x86, 0x14, 0x76, 0xd6, 0x0a, 0x51, 0x67, 0x35, 0x17, |
| 0x6c, 0x2b, 0xea, 0xec, 0x28, 0xd1, 0x77, 0xfd, 0x9a, 0xca, 0x06, 0x81, 0x4c, 0x07, 0x81, 0xcc, |
| 0x6f, 0x06, 0x41, 0x29, 0xd8, 0x1d, 0x91, 0xcd, 0xee, 0xfd, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc0, |
| 0x26, 0xdb, 0xd5, 0x75, 0x08, 0x00, 0x00, |
| } |