| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/dataproc/v1beta2/jobs.proto |
| |
| package dataproc |
| |
| import ( |
| context "context" |
| fmt "fmt" |
| math "math" |
| |
| proto "github.com/golang/protobuf/proto" |
| empty "github.com/golang/protobuf/ptypes/empty" |
| timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| _ "google.golang.org/genproto/googleapis/api/annotations" |
| field_mask "google.golang.org/genproto/protobuf/field_mask" |
| grpc "google.golang.org/grpc" |
| codes "google.golang.org/grpc/codes" |
| status "google.golang.org/grpc/status" |
| ) |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ = proto.Marshal |
| var _ = fmt.Errorf |
| var _ = math.Inf |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the proto package it is being compiled against. |
| // A compilation error at this line likely means your copy of the |
| // proto package needs to be updated. |
| const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package |
| |
| // The Log4j level for job execution. When running an |
| // [Apache Hive](http://hive.apache.org/) job, Cloud |
| // Dataproc configures the Hive client to an equivalent verbosity level. |
| type LoggingConfig_Level int32 |
| |
| const ( |
| // Level is unspecified. Use default level for log4j. |
| LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0 |
| // Use ALL level for log4j. |
| LoggingConfig_ALL LoggingConfig_Level = 1 |
| // Use TRACE level for log4j. |
| LoggingConfig_TRACE LoggingConfig_Level = 2 |
| // Use DEBUG level for log4j. |
| LoggingConfig_DEBUG LoggingConfig_Level = 3 |
| // Use INFO level for log4j. |
| LoggingConfig_INFO LoggingConfig_Level = 4 |
| // Use WARN level for log4j. |
| LoggingConfig_WARN LoggingConfig_Level = 5 |
| // Use ERROR level for log4j. |
| LoggingConfig_ERROR LoggingConfig_Level = 6 |
| // Use FATAL level for log4j. |
| LoggingConfig_FATAL LoggingConfig_Level = 7 |
| // Turn off log4j. |
| LoggingConfig_OFF LoggingConfig_Level = 8 |
| ) |
| |
| var LoggingConfig_Level_name = map[int32]string{ |
| 0: "LEVEL_UNSPECIFIED", |
| 1: "ALL", |
| 2: "TRACE", |
| 3: "DEBUG", |
| 4: "INFO", |
| 5: "WARN", |
| 6: "ERROR", |
| 7: "FATAL", |
| 8: "OFF", |
| } |
| |
| var LoggingConfig_Level_value = map[string]int32{ |
| "LEVEL_UNSPECIFIED": 0, |
| "ALL": 1, |
| "TRACE": 2, |
| "DEBUG": 3, |
| "INFO": 4, |
| "WARN": 5, |
| "ERROR": 6, |
| "FATAL": 7, |
| "OFF": 8, |
| } |
| |
| func (x LoggingConfig_Level) String() string { |
| return proto.EnumName(LoggingConfig_Level_name, int32(x)) |
| } |
| |
| func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{0, 0} |
| } |
| |
| // The job state. |
| type JobStatus_State int32 |
| |
| const ( |
| // The job state is unknown. |
| JobStatus_STATE_UNSPECIFIED JobStatus_State = 0 |
| // The job is pending; it has been submitted, but is not yet running. |
| JobStatus_PENDING JobStatus_State = 1 |
| // Job has been received by the service and completed initial setup; |
| // it will soon be submitted to the cluster. |
| JobStatus_SETUP_DONE JobStatus_State = 8 |
| // The job is running on the cluster. |
| JobStatus_RUNNING JobStatus_State = 2 |
| // A CancelJob request has been received, but is pending. |
| JobStatus_CANCEL_PENDING JobStatus_State = 3 |
| // Transient in-flight resources have been canceled, and the request to |
| // cancel the running job has been issued to the cluster. |
| JobStatus_CANCEL_STARTED JobStatus_State = 7 |
| // The job cancellation was successful. |
| JobStatus_CANCELLED JobStatus_State = 4 |
| // The job has completed successfully. |
| JobStatus_DONE JobStatus_State = 5 |
| // The job has completed, but encountered an error. |
| JobStatus_ERROR JobStatus_State = 6 |
| // Job attempt has failed. The detail field contains failure details for |
| // this attempt. |
| // |
| // Applies to restartable jobs only. |
| JobStatus_ATTEMPT_FAILURE JobStatus_State = 9 |
| ) |
| |
| var JobStatus_State_name = map[int32]string{ |
| 0: "STATE_UNSPECIFIED", |
| 1: "PENDING", |
| 8: "SETUP_DONE", |
| 2: "RUNNING", |
| 3: "CANCEL_PENDING", |
| 7: "CANCEL_STARTED", |
| 4: "CANCELLED", |
| 5: "DONE", |
| 6: "ERROR", |
| 9: "ATTEMPT_FAILURE", |
| } |
| |
| var JobStatus_State_value = map[string]int32{ |
| "STATE_UNSPECIFIED": 0, |
| "PENDING": 1, |
| "SETUP_DONE": 8, |
| "RUNNING": 2, |
| "CANCEL_PENDING": 3, |
| "CANCEL_STARTED": 7, |
| "CANCELLED": 4, |
| "DONE": 5, |
| "ERROR": 6, |
| "ATTEMPT_FAILURE": 9, |
| } |
| |
| func (x JobStatus_State) String() string { |
| return proto.EnumName(JobStatus_State_name, int32(x)) |
| } |
| |
| func (JobStatus_State) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{10, 0} |
| } |
| |
| // The job substate. |
| type JobStatus_Substate int32 |
| |
| const ( |
| // The job substate is unknown. |
| JobStatus_UNSPECIFIED JobStatus_Substate = 0 |
| // The Job is submitted to the agent. |
| // |
| // Applies to RUNNING state. |
| JobStatus_SUBMITTED JobStatus_Substate = 1 |
| // The Job has been received and is awaiting execution (it may be waiting |
| // for a condition to be met). See the "details" field for the reason for |
| // the delay. |
| // |
| // Applies to RUNNING state. |
| JobStatus_QUEUED JobStatus_Substate = 2 |
| // The agent-reported status is out of date, which may be caused by a |
| // loss of communication between the agent and Cloud Dataproc. If the |
| // agent does not send a timely update, the job will fail. |
| // |
| // Applies to RUNNING state. |
| JobStatus_STALE_STATUS JobStatus_Substate = 3 |
| ) |
| |
| var JobStatus_Substate_name = map[int32]string{ |
| 0: "UNSPECIFIED", |
| 1: "SUBMITTED", |
| 2: "QUEUED", |
| 3: "STALE_STATUS", |
| } |
| |
| var JobStatus_Substate_value = map[string]int32{ |
| "UNSPECIFIED": 0, |
| "SUBMITTED": 1, |
| "QUEUED": 2, |
| "STALE_STATUS": 3, |
| } |
| |
| func (x JobStatus_Substate) String() string { |
| return proto.EnumName(JobStatus_Substate_name, int32(x)) |
| } |
| |
| func (JobStatus_Substate) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{10, 1} |
| } |
| |
| // The application state, corresponding to |
| // <code>YarnProtos.YarnApplicationStateProto</code>. |
| type YarnApplication_State int32 |
| |
| const ( |
| // Status is unspecified. |
| YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0 |
| // Status is NEW. |
| YarnApplication_NEW YarnApplication_State = 1 |
| // Status is NEW_SAVING. |
| YarnApplication_NEW_SAVING YarnApplication_State = 2 |
| // Status is SUBMITTED. |
| YarnApplication_SUBMITTED YarnApplication_State = 3 |
| // Status is ACCEPTED. |
| YarnApplication_ACCEPTED YarnApplication_State = 4 |
| // Status is RUNNING. |
| YarnApplication_RUNNING YarnApplication_State = 5 |
| // Status is FINISHED. |
| YarnApplication_FINISHED YarnApplication_State = 6 |
| // Status is FAILED. |
| YarnApplication_FAILED YarnApplication_State = 7 |
| // Status is KILLED. |
| YarnApplication_KILLED YarnApplication_State = 8 |
| ) |
| |
| var YarnApplication_State_name = map[int32]string{ |
| 0: "STATE_UNSPECIFIED", |
| 1: "NEW", |
| 2: "NEW_SAVING", |
| 3: "SUBMITTED", |
| 4: "ACCEPTED", |
| 5: "RUNNING", |
| 6: "FINISHED", |
| 7: "FAILED", |
| 8: "KILLED", |
| } |
| |
| var YarnApplication_State_value = map[string]int32{ |
| "STATE_UNSPECIFIED": 0, |
| "NEW": 1, |
| "NEW_SAVING": 2, |
| "SUBMITTED": 3, |
| "ACCEPTED": 4, |
| "RUNNING": 5, |
| "FINISHED": 6, |
| "FAILED": 7, |
| "KILLED": 8, |
| } |
| |
| func (x YarnApplication_State) String() string { |
| return proto.EnumName(YarnApplication_State_name, int32(x)) |
| } |
| |
| func (YarnApplication_State) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{12, 0} |
| } |
| |
| // A matcher that specifies categories of job states. |
| type ListJobsRequest_JobStateMatcher int32 |
| |
| const ( |
| // Match all jobs, regardless of state. |
| ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0 |
| // Only match jobs in non-terminal states: PENDING, RUNNING, or |
| // CANCEL_PENDING. |
| ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1 |
| // Only match jobs in terminal states: CANCELLED, DONE, or ERROR. |
| ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2 |
| ) |
| |
| var ListJobsRequest_JobStateMatcher_name = map[int32]string{ |
| 0: "ALL", |
| 1: "ACTIVE", |
| 2: "NON_ACTIVE", |
| } |
| |
| var ListJobsRequest_JobStateMatcher_value = map[string]int32{ |
| "ALL": 0, |
| "ACTIVE": 1, |
| "NON_ACTIVE": 2, |
| } |
| |
| func (x ListJobsRequest_JobStateMatcher) String() string { |
| return proto.EnumName(ListJobsRequest_JobStateMatcher_name, int32(x)) |
| } |
| |
| func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{17, 0} |
| } |
| |
| // The runtime logging config of the job. |
| type LoggingConfig struct { |
	// The per-package log levels for the driver. This may include the
	// "root" package name to configure rootLogger.
| // Examples: |
| // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' |
| DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels,proto3" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=google.cloud.dataproc.v1beta2.LoggingConfig_Level"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *LoggingConfig) Reset() { *m = LoggingConfig{} } |
| func (m *LoggingConfig) String() string { return proto.CompactTextString(m) } |
| func (*LoggingConfig) ProtoMessage() {} |
| func (*LoggingConfig) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{0} |
| } |
| |
| func (m *LoggingConfig) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_LoggingConfig.Unmarshal(m, b) |
| } |
| func (m *LoggingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_LoggingConfig.Marshal(b, m, deterministic) |
| } |
| func (m *LoggingConfig) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_LoggingConfig.Merge(m, src) |
| } |
| func (m *LoggingConfig) XXX_Size() int { |
| return xxx_messageInfo_LoggingConfig.Size(m) |
| } |
| func (m *LoggingConfig) XXX_DiscardUnknown() { |
| xxx_messageInfo_LoggingConfig.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_LoggingConfig proto.InternalMessageInfo |
| |
| func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level { |
| if m != nil { |
| return m.DriverLogLevels |
| } |
| return nil |
| } |
| |
| // A Cloud Dataproc job for running |
| // [Apache Hadoop |
| // MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) |
| // jobs on [Apache Hadoop |
| // YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). |
| type HadoopJob struct { |
| // Required. Indicates the location of the driver's main class. Specify |
| // either the jar file that contains the main class or the main class name. |
| // To specify both, add the jar file to `jar_file_uris`, and then specify |
| // the main class name in this property. |
| // |
| // Types that are valid to be assigned to Driver: |
| // *HadoopJob_MainJarFileUri |
| // *HadoopJob_MainClass |
| Driver isHadoopJob_Driver `protobuf_oneof:"driver"` |
| // Optional. The arguments to pass to the driver. Do not |
| // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as |
| // job properties, since a collision may occur that causes an incorrect job |
| // submission. |
| Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` |
| // Optional. Jar file URIs to add to the CLASSPATHs of the |
| // Hadoop driver and tasks. |
| JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied |
| // to the working directory of Hadoop drivers and distributed tasks. Useful |
| // for naively parallel tasks. |
| FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"` |
| // Optional. HCFS URIs of archives to be extracted in the working directory of |
| // Hadoop drivers and tasks. Supported file types: |
| // .jar, .tar, .tar.gz, .tgz, or .zip. |
| ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"` |
| // Optional. A mapping of property names to values, used to configure Hadoop. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in /etc/hadoop/conf/*-site and |
| // classes in user code. |
| Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *HadoopJob) Reset() { *m = HadoopJob{} } |
| func (m *HadoopJob) String() string { return proto.CompactTextString(m) } |
| func (*HadoopJob) ProtoMessage() {} |
| func (*HadoopJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{1} |
| } |
| |
| func (m *HadoopJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_HadoopJob.Unmarshal(m, b) |
| } |
| func (m *HadoopJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_HadoopJob.Marshal(b, m, deterministic) |
| } |
| func (m *HadoopJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_HadoopJob.Merge(m, src) |
| } |
| func (m *HadoopJob) XXX_Size() int { |
| return xxx_messageInfo_HadoopJob.Size(m) |
| } |
| func (m *HadoopJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_HadoopJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_HadoopJob proto.InternalMessageInfo |
| |
| type isHadoopJob_Driver interface { |
| isHadoopJob_Driver() |
| } |
| |
| type HadoopJob_MainJarFileUri struct { |
| MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"` |
| } |
| |
| type HadoopJob_MainClass struct { |
| MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"` |
| } |
| |
| func (*HadoopJob_MainJarFileUri) isHadoopJob_Driver() {} |
| |
| func (*HadoopJob_MainClass) isHadoopJob_Driver() {} |
| |
| func (m *HadoopJob) GetDriver() isHadoopJob_Driver { |
| if m != nil { |
| return m.Driver |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetMainJarFileUri() string { |
| if x, ok := m.GetDriver().(*HadoopJob_MainJarFileUri); ok { |
| return x.MainJarFileUri |
| } |
| return "" |
| } |
| |
| func (m *HadoopJob) GetMainClass() string { |
| if x, ok := m.GetDriver().(*HadoopJob_MainClass); ok { |
| return x.MainClass |
| } |
| return "" |
| } |
| |
| func (m *HadoopJob) GetArgs() []string { |
| if m != nil { |
| return m.Args |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetFileUris() []string { |
| if m != nil { |
| return m.FileUris |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetArchiveUris() []string { |
| if m != nil { |
| return m.ArchiveUris |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *HadoopJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*HadoopJob) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*HadoopJob_MainJarFileUri)(nil), |
| (*HadoopJob_MainClass)(nil), |
| } |
| } |
| |
// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
// applications on YARN.
type SparkJob struct {
	// Required. The specification of the main method to call to drive the job.
	// Specify either the jar file that contains the main class or the main class
	// name. To pass both a main jar and a main class in that jar, add the jar to
	// `CommonJob.jar_file_uris`, and then specify the main class name in
	// `main_class`.
	//
	// Types that are valid to be assigned to Driver:
	// *SparkJob_MainJarFileUri
	// *SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
| // Optional. The arguments to pass to the driver. Do not include arguments, |
| // such as `--conf`, that can be set as job properties, since a collision may |
| // occur that causes an incorrect job submission. |
| Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` |
| // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the |
| // Spark driver and tasks. |
| JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| // Optional. HCFS URIs of files to be copied to the working directory of |
| // Spark drivers and distributed tasks. Useful for naively parallel tasks. |
| FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"` |
| // Optional. HCFS URIs of archives to be extracted in the working directory |
| // of Spark drivers and tasks. Supported file types: |
| // .jar, .tar, .tar.gz, .tgz, and .zip. |
| ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"` |
| // Optional. A mapping of property names to values, used to configure Spark. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in |
| // /etc/spark/conf/spark-defaults.conf and classes in user code. |
| Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SparkJob) Reset() { *m = SparkJob{} } |
| func (m *SparkJob) String() string { return proto.CompactTextString(m) } |
| func (*SparkJob) ProtoMessage() {} |
| func (*SparkJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{2} |
| } |
| |
| func (m *SparkJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SparkJob.Unmarshal(m, b) |
| } |
| func (m *SparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SparkJob.Marshal(b, m, deterministic) |
| } |
| func (m *SparkJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SparkJob.Merge(m, src) |
| } |
| func (m *SparkJob) XXX_Size() int { |
| return xxx_messageInfo_SparkJob.Size(m) |
| } |
| func (m *SparkJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_SparkJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SparkJob proto.InternalMessageInfo |
| |
| type isSparkJob_Driver interface { |
| isSparkJob_Driver() |
| } |
| |
| type SparkJob_MainJarFileUri struct { |
| MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"` |
| } |
| |
| type SparkJob_MainClass struct { |
| MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"` |
| } |
| |
| func (*SparkJob_MainJarFileUri) isSparkJob_Driver() {} |
| |
| func (*SparkJob_MainClass) isSparkJob_Driver() {} |
| |
| func (m *SparkJob) GetDriver() isSparkJob_Driver { |
| if m != nil { |
| return m.Driver |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetMainJarFileUri() string { |
| if x, ok := m.GetDriver().(*SparkJob_MainJarFileUri); ok { |
| return x.MainJarFileUri |
| } |
| return "" |
| } |
| |
| func (m *SparkJob) GetMainClass() string { |
| if x, ok := m.GetDriver().(*SparkJob_MainClass); ok { |
| return x.MainClass |
| } |
| return "" |
| } |
| |
| func (m *SparkJob) GetArgs() []string { |
| if m != nil { |
| return m.Args |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetFileUris() []string { |
| if m != nil { |
| return m.FileUris |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetArchiveUris() []string { |
| if m != nil { |
| return m.ArchiveUris |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *SparkJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*SparkJob) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*SparkJob_MainJarFileUri)(nil), |
| (*SparkJob_MainClass)(nil), |
| } |
| } |
| |
| // A Cloud Dataproc job for running |
| // [Apache |
| // PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
| // applications on YARN. |
| type PySparkJob struct { |
| // Required. The HCFS URI of the main Python file to use as the driver. Must |
| // be a .py file. |
| MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"` |
| // Optional. The arguments to pass to the driver. Do not include arguments, |
| // such as `--conf`, that can be set as job properties, since a collision may |
| // occur that causes an incorrect job submission. |
| Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` |
| // Optional. HCFS file URIs of Python files to pass to the PySpark |
| // framework. Supported file types: .py, .egg, and .zip. |
| PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"` |
| // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the |
| // Python driver and tasks. |
| JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| // Optional. HCFS URIs of files to be copied to the working directory of |
| // Python drivers and distributed tasks. Useful for naively parallel tasks. |
| FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"` |
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Python drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
| ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"` |
| // Optional. A mapping of property names to values, used to configure PySpark. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in |
| // /etc/spark/conf/spark-defaults.conf and classes in user code. |
| Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *PySparkJob) Reset() { *m = PySparkJob{} } |
| func (m *PySparkJob) String() string { return proto.CompactTextString(m) } |
| func (*PySparkJob) ProtoMessage() {} |
| func (*PySparkJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{3} |
| } |
| |
| func (m *PySparkJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_PySparkJob.Unmarshal(m, b) |
| } |
| func (m *PySparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_PySparkJob.Marshal(b, m, deterministic) |
| } |
| func (m *PySparkJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_PySparkJob.Merge(m, src) |
| } |
| func (m *PySparkJob) XXX_Size() int { |
| return xxx_messageInfo_PySparkJob.Size(m) |
| } |
| func (m *PySparkJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_PySparkJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_PySparkJob proto.InternalMessageInfo |
| |
| func (m *PySparkJob) GetMainPythonFileUri() string { |
| if m != nil { |
| return m.MainPythonFileUri |
| } |
| return "" |
| } |
| |
| func (m *PySparkJob) GetArgs() []string { |
| if m != nil { |
| return m.Args |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetPythonFileUris() []string { |
| if m != nil { |
| return m.PythonFileUris |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetFileUris() []string { |
| if m != nil { |
| return m.FileUris |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetArchiveUris() []string { |
| if m != nil { |
| return m.ArchiveUris |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *PySparkJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // A list of queries to run on a cluster. |
| type QueryList struct { |
| // Required. The queries to execute. You do not need to terminate a query |
| // with a semicolon. Multiple queries can be specified in one string |
// by separating each with a semicolon. Here is an example of a Cloud
| // Dataproc API snippet that uses a QueryList to specify a HiveJob: |
| // |
| // "hiveJob": { |
| // "queryList": { |
| // "queries": [ |
| // "query1", |
| // "query2", |
| // "query3;query4", |
| // ] |
| // } |
| // } |
| Queries []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *QueryList) Reset() { *m = QueryList{} } |
| func (m *QueryList) String() string { return proto.CompactTextString(m) } |
| func (*QueryList) ProtoMessage() {} |
| func (*QueryList) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{4} |
| } |
| |
| func (m *QueryList) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_QueryList.Unmarshal(m, b) |
| } |
| func (m *QueryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_QueryList.Marshal(b, m, deterministic) |
| } |
| func (m *QueryList) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_QueryList.Merge(m, src) |
| } |
| func (m *QueryList) XXX_Size() int { |
| return xxx_messageInfo_QueryList.Size(m) |
| } |
| func (m *QueryList) XXX_DiscardUnknown() { |
| xxx_messageInfo_QueryList.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_QueryList proto.InternalMessageInfo |
| |
| func (m *QueryList) GetQueries() []string { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| // A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) |
| // queries on YARN. |
| type HiveJob struct { |
| // Required. The sequence of Hive queries to execute, specified as either |
| // an HCFS file URI or a list of queries. |
| // |
| // Types that are valid to be assigned to Queries: |
| // *HiveJob_QueryFileUri |
| // *HiveJob_QueryList |
| Queries isHiveJob_Queries `protobuf_oneof:"queries"` |
| // Optional. Whether to continue executing queries if a query fails. |
| // The default value is `false`. Setting to `true` can be useful when |
| // executing independent parallel queries. |
| ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"` |
| // Optional. Mapping of query variable names to values (equivalent to the |
| // Hive command: `SET name="value";`). |
| ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. A mapping of property names and values, used to configure Hive. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, |
| // /etc/hive/conf/hive-site.xml, and classes in user code. |
| Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. HCFS URIs of jar files to add to the CLASSPATH of the |
| // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes |
| // and UDFs. |
| JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *HiveJob) Reset() { *m = HiveJob{} } |
| func (m *HiveJob) String() string { return proto.CompactTextString(m) } |
| func (*HiveJob) ProtoMessage() {} |
| func (*HiveJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{5} |
| } |
| |
| func (m *HiveJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_HiveJob.Unmarshal(m, b) |
| } |
| func (m *HiveJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_HiveJob.Marshal(b, m, deterministic) |
| } |
| func (m *HiveJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_HiveJob.Merge(m, src) |
| } |
| func (m *HiveJob) XXX_Size() int { |
| return xxx_messageInfo_HiveJob.Size(m) |
| } |
| func (m *HiveJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_HiveJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_HiveJob proto.InternalMessageInfo |
| |
| type isHiveJob_Queries interface { |
| isHiveJob_Queries() |
| } |
| |
| type HiveJob_QueryFileUri struct { |
| QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"` |
| } |
| |
| type HiveJob_QueryList struct { |
| QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"` |
| } |
| |
| func (*HiveJob_QueryFileUri) isHiveJob_Queries() {} |
| |
| func (*HiveJob_QueryList) isHiveJob_Queries() {} |
| |
| func (m *HiveJob) GetQueries() isHiveJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*HiveJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *HiveJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*HiveJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetContinueOnFailure() bool { |
| if m != nil { |
| return m.ContinueOnFailure |
| } |
| return false |
| } |
| |
| func (m *HiveJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *HiveJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*HiveJob) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*HiveJob_QueryFileUri)(nil), |
| (*HiveJob_QueryList)(nil), |
| } |
| } |
| |
| // A Cloud Dataproc job for running [Apache Spark |
| // SQL](http://spark.apache.org/sql/) queries. |
| type SparkSqlJob struct { |
| // Required. The sequence of Spark SQL queries to execute, specified as |
| // either an HCFS file URI or as a list of queries. |
| // |
| // Types that are valid to be assigned to Queries: |
| // *SparkSqlJob_QueryFileUri |
| // *SparkSqlJob_QueryList |
| Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"` |
| // Optional. Mapping of query variable names to values (equivalent to the |
// Spark SQL command: `SET name="value";`).
| ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. A mapping of property names to values, used to configure |
| // Spark SQL's SparkConf. Properties that conflict with values set by the |
| // Cloud Dataproc API may be overwritten. |
| Properties map[string]string `protobuf:"bytes,4,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. |
| JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SparkSqlJob) Reset() { *m = SparkSqlJob{} } |
| func (m *SparkSqlJob) String() string { return proto.CompactTextString(m) } |
| func (*SparkSqlJob) ProtoMessage() {} |
| func (*SparkSqlJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{6} |
| } |
| |
| func (m *SparkSqlJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SparkSqlJob.Unmarshal(m, b) |
| } |
| func (m *SparkSqlJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SparkSqlJob.Marshal(b, m, deterministic) |
| } |
| func (m *SparkSqlJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SparkSqlJob.Merge(m, src) |
| } |
| func (m *SparkSqlJob) XXX_Size() int { |
| return xxx_messageInfo_SparkSqlJob.Size(m) |
| } |
| func (m *SparkSqlJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_SparkSqlJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SparkSqlJob proto.InternalMessageInfo |
| |
| type isSparkSqlJob_Queries interface { |
| isSparkSqlJob_Queries() |
| } |
| |
| type SparkSqlJob_QueryFileUri struct { |
| QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"` |
| } |
| |
| type SparkSqlJob_QueryList struct { |
| QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"` |
| } |
| |
| func (*SparkSqlJob_QueryFileUri) isSparkSqlJob_Queries() {} |
| |
| func (*SparkSqlJob_QueryList) isSparkSqlJob_Queries() {} |
| |
| func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*SparkSqlJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *SparkSqlJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*SparkSqlJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*SparkSqlJob) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*SparkSqlJob_QueryFileUri)(nil), |
| (*SparkSqlJob_QueryList)(nil), |
| } |
| } |
| |
| // A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) |
| // queries on YARN. |
| type PigJob struct { |
| // Required. The sequence of Pig queries to execute, specified as an HCFS |
| // file URI or a list of queries. |
| // |
| // Types that are valid to be assigned to Queries: |
| // *PigJob_QueryFileUri |
| // *PigJob_QueryList |
| Queries isPigJob_Queries `protobuf_oneof:"queries"` |
| // Optional. Whether to continue executing queries if a query fails. |
| // The default value is `false`. Setting to `true` can be useful when |
| // executing independent parallel queries. |
| ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"` |
| // Optional. Mapping of query variable names to values (equivalent to the Pig |
| // command: `name=[value]`). |
| ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. A mapping of property names to values, used to configure Pig. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, |
| // /etc/pig/conf/pig.properties, and classes in user code. |
| Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. HCFS URIs of jar files to add to the CLASSPATH of |
| // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. |
| JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *PigJob) Reset() { *m = PigJob{} } |
| func (m *PigJob) String() string { return proto.CompactTextString(m) } |
| func (*PigJob) ProtoMessage() {} |
| func (*PigJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{7} |
| } |
| |
| func (m *PigJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_PigJob.Unmarshal(m, b) |
| } |
| func (m *PigJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_PigJob.Marshal(b, m, deterministic) |
| } |
| func (m *PigJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_PigJob.Merge(m, src) |
| } |
| func (m *PigJob) XXX_Size() int { |
| return xxx_messageInfo_PigJob.Size(m) |
| } |
| func (m *PigJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_PigJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_PigJob proto.InternalMessageInfo |
| |
| type isPigJob_Queries interface { |
| isPigJob_Queries() |
| } |
| |
| type PigJob_QueryFileUri struct { |
| QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"` |
| } |
| |
| type PigJob_QueryList struct { |
| QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"` |
| } |
| |
| func (*PigJob_QueryFileUri) isPigJob_Queries() {} |
| |
| func (*PigJob_QueryList) isPigJob_Queries() {} |
| |
| func (m *PigJob) GetQueries() isPigJob_Queries { |
| if m != nil { |
| return m.Queries |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetQueryFileUri() string { |
| if x, ok := m.GetQueries().(*PigJob_QueryFileUri); ok { |
| return x.QueryFileUri |
| } |
| return "" |
| } |
| |
| func (m *PigJob) GetQueryList() *QueryList { |
| if x, ok := m.GetQueries().(*PigJob_QueryList); ok { |
| return x.QueryList |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetContinueOnFailure() bool { |
| if m != nil { |
| return m.ContinueOnFailure |
| } |
| return false |
| } |
| |
| func (m *PigJob) GetScriptVariables() map[string]string { |
| if m != nil { |
| return m.ScriptVariables |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetJarFileUris() []string { |
| if m != nil { |
| return m.JarFileUris |
| } |
| return nil |
| } |
| |
| func (m *PigJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*PigJob) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*PigJob_QueryFileUri)(nil), |
| (*PigJob_QueryList)(nil), |
| } |
| } |
| |
| // A Cloud Dataproc job for running |
| // [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) |
| // applications on YARN. |
| type SparkRJob struct { |
| // Required. The HCFS URI of the main R file to use as the driver. |
| // Must be a .R file. |
| MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"` |
| // Optional. The arguments to pass to the driver. Do not include arguments, |
| // such as `--conf`, that can be set as job properties, since a collision may |
| // occur that causes an incorrect job submission. |
| Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` |
| // Optional. HCFS URIs of files to be copied to the working directory of |
| // R drivers and distributed tasks. Useful for naively parallel tasks. |
| FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"` |
| // Optional. HCFS URIs of archives to be extracted in the working directory of |
| // Spark drivers and tasks. Supported file types: |
| // .jar, .tar, .tar.gz, .tgz, and .zip. |
| ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"` |
| // Optional. A mapping of property names to values, used to configure SparkR. |
| // Properties that conflict with values set by the Cloud Dataproc API may be |
| // overwritten. Can include properties set in |
| // /etc/spark/conf/spark-defaults.conf and classes in user code. |
| Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. The runtime log config for job execution. |
| LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SparkRJob) Reset() { *m = SparkRJob{} } |
| func (m *SparkRJob) String() string { return proto.CompactTextString(m) } |
| func (*SparkRJob) ProtoMessage() {} |
| func (*SparkRJob) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{8} |
| } |
| |
| func (m *SparkRJob) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SparkRJob.Unmarshal(m, b) |
| } |
| func (m *SparkRJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SparkRJob.Marshal(b, m, deterministic) |
| } |
| func (m *SparkRJob) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SparkRJob.Merge(m, src) |
| } |
| func (m *SparkRJob) XXX_Size() int { |
| return xxx_messageInfo_SparkRJob.Size(m) |
| } |
| func (m *SparkRJob) XXX_DiscardUnknown() { |
| xxx_messageInfo_SparkRJob.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SparkRJob proto.InternalMessageInfo |
| |
| func (m *SparkRJob) GetMainRFileUri() string { |
| if m != nil { |
| return m.MainRFileUri |
| } |
| return "" |
| } |
| |
| func (m *SparkRJob) GetArgs() []string { |
| if m != nil { |
| return m.Args |
| } |
| return nil |
| } |
| |
| func (m *SparkRJob) GetFileUris() []string { |
| if m != nil { |
| return m.FileUris |
| } |
| return nil |
| } |
| |
| func (m *SparkRJob) GetArchiveUris() []string { |
| if m != nil { |
| return m.ArchiveUris |
| } |
| return nil |
| } |
| |
| func (m *SparkRJob) GetProperties() map[string]string { |
| if m != nil { |
| return m.Properties |
| } |
| return nil |
| } |
| |
| func (m *SparkRJob) GetLoggingConfig() *LoggingConfig { |
| if m != nil { |
| return m.LoggingConfig |
| } |
| return nil |
| } |
| |
| // Cloud Dataproc job config. |
| type JobPlacement struct { |
| // Required. The name of the cluster where the job will be submitted. |
| ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` |
| // Output only. A cluster UUID generated by the Cloud Dataproc service when |
| // the job is submitted. |
| ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *JobPlacement) Reset() { *m = JobPlacement{} } |
| func (m *JobPlacement) String() string { return proto.CompactTextString(m) } |
| func (*JobPlacement) ProtoMessage() {} |
| func (*JobPlacement) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{9} |
| } |
| |
| func (m *JobPlacement) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_JobPlacement.Unmarshal(m, b) |
| } |
| func (m *JobPlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_JobPlacement.Marshal(b, m, deterministic) |
| } |
| func (m *JobPlacement) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_JobPlacement.Merge(m, src) |
| } |
| func (m *JobPlacement) XXX_Size() int { |
| return xxx_messageInfo_JobPlacement.Size(m) |
| } |
| func (m *JobPlacement) XXX_DiscardUnknown() { |
| xxx_messageInfo_JobPlacement.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_JobPlacement proto.InternalMessageInfo |
| |
| func (m *JobPlacement) GetClusterName() string { |
| if m != nil { |
| return m.ClusterName |
| } |
| return "" |
| } |
| |
| func (m *JobPlacement) GetClusterUuid() string { |
| if m != nil { |
| return m.ClusterUuid |
| } |
| return "" |
| } |
| |
| // Cloud Dataproc job status. |
| type JobStatus struct { |
| // Output only. A state message specifying the overall job state. |
| State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"` |
| // Output only. Optional job state details, such as an error |
| // description if the state is <code>ERROR</code>. |
| Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` |
| // Output only. The time when this state was entered. |
| StateStartTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"` |
| // Output only. Additional state information, which includes |
| // status reported by the agent. |
| Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *JobStatus) Reset() { *m = JobStatus{} } |
| func (m *JobStatus) String() string { return proto.CompactTextString(m) } |
| func (*JobStatus) ProtoMessage() {} |
| func (*JobStatus) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{10} |
| } |
| |
| func (m *JobStatus) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_JobStatus.Unmarshal(m, b) |
| } |
| func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_JobStatus.Marshal(b, m, deterministic) |
| } |
| func (m *JobStatus) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_JobStatus.Merge(m, src) |
| } |
| func (m *JobStatus) XXX_Size() int { |
| return xxx_messageInfo_JobStatus.Size(m) |
| } |
| func (m *JobStatus) XXX_DiscardUnknown() { |
| xxx_messageInfo_JobStatus.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_JobStatus proto.InternalMessageInfo |
| |
| func (m *JobStatus) GetState() JobStatus_State { |
| if m != nil { |
| return m.State |
| } |
| return JobStatus_STATE_UNSPECIFIED |
| } |
| |
| func (m *JobStatus) GetDetails() string { |
| if m != nil { |
| return m.Details |
| } |
| return "" |
| } |
| |
| func (m *JobStatus) GetStateStartTime() *timestamp.Timestamp { |
| if m != nil { |
| return m.StateStartTime |
| } |
| return nil |
| } |
| |
| func (m *JobStatus) GetSubstate() JobStatus_Substate { |
| if m != nil { |
| return m.Substate |
| } |
| return JobStatus_UNSPECIFIED |
| } |
| |
| // Encapsulates the full scoping used to reference a job. |
| type JobReference struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Optional. The job ID, which must be unique within the project. |
| // |
| // The ID must contain only letters (a-z, A-Z), numbers (0-9), |
| // underscores (_), or hyphens (-). The maximum length is 100 characters. |
| // |
| // If not specified by the caller, the job ID will be provided by the server. |
| JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *JobReference) Reset() { *m = JobReference{} } |
| func (m *JobReference) String() string { return proto.CompactTextString(m) } |
| func (*JobReference) ProtoMessage() {} |
| func (*JobReference) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{11} |
| } |
| |
| func (m *JobReference) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_JobReference.Unmarshal(m, b) |
| } |
| func (m *JobReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_JobReference.Marshal(b, m, deterministic) |
| } |
| func (m *JobReference) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_JobReference.Merge(m, src) |
| } |
| func (m *JobReference) XXX_Size() int { |
| return xxx_messageInfo_JobReference.Size(m) |
| } |
| func (m *JobReference) XXX_DiscardUnknown() { |
| xxx_messageInfo_JobReference.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_JobReference proto.InternalMessageInfo |
| |
| func (m *JobReference) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *JobReference) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| // A YARN application created by a job. Application information is a subset of |
| // <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>. |
| // |
| // **Beta Feature**: This report is available for testing purposes only. It may |
| // be changed before final release. |
| type YarnApplication struct { |
| // Output only. The application name. |
| Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` |
| // Output only. The application state. |
| State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"` |
| // Output only. The numerical progress of the application, from 1 to 100. |
| Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"` |
| // Optional. Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or |
| // TimelineServer that provides application-specific information. The URL uses |
| // the internal hostname, and requires a proxy server for resolution and, |
| // possibly, access. |
| TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *YarnApplication) Reset() { *m = YarnApplication{} } |
| func (m *YarnApplication) String() string { return proto.CompactTextString(m) } |
| func (*YarnApplication) ProtoMessage() {} |
| func (*YarnApplication) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{12} |
| } |
| |
| func (m *YarnApplication) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_YarnApplication.Unmarshal(m, b) |
| } |
| func (m *YarnApplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_YarnApplication.Marshal(b, m, deterministic) |
| } |
| func (m *YarnApplication) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_YarnApplication.Merge(m, src) |
| } |
| func (m *YarnApplication) XXX_Size() int { |
| return xxx_messageInfo_YarnApplication.Size(m) |
| } |
| func (m *YarnApplication) XXX_DiscardUnknown() { |
| xxx_messageInfo_YarnApplication.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_YarnApplication proto.InternalMessageInfo |
| |
| func (m *YarnApplication) GetName() string { |
| if m != nil { |
| return m.Name |
| } |
| return "" |
| } |
| |
| func (m *YarnApplication) GetState() YarnApplication_State { |
| if m != nil { |
| return m.State |
| } |
| return YarnApplication_STATE_UNSPECIFIED |
| } |
| |
| func (m *YarnApplication) GetProgress() float32 { |
| if m != nil { |
| return m.Progress |
| } |
| return 0 |
| } |
| |
| func (m *YarnApplication) GetTrackingUrl() string { |
| if m != nil { |
| return m.TrackingUrl |
| } |
| return "" |
| } |
| |
| // A Cloud Dataproc job resource. |
| type Job struct { |
| // Optional. The fully qualified reference to the job, which can be used to |
| // obtain the equivalent REST path of the job resource. If this property |
| // is not specified when a job is created, the server generates a |
| // <code>job_id</code>. |
| Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` |
| // Required. Job information, including how, when, and where to |
| // run the job. |
| Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"` |
| // Required. The application/framework-specific portion of the job. |
| // |
| // Types that are valid to be assigned to TypeJob: |
| // *Job_HadoopJob |
| // *Job_SparkJob |
| // *Job_PysparkJob |
| // *Job_HiveJob |
| // *Job_PigJob |
| // *Job_SparkRJob |
| // *Job_SparkSqlJob |
| TypeJob isJob_TypeJob `protobuf_oneof:"type_job"` |
| // Output only. The job status. Additional application-specific |
| // status information may be contained in the <code>type_job</code> |
| // and <code>yarn_applications</code> fields. |
| Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"` |
| // Output only. The previous job status. |
| StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"` |
| // Output only. The collection of YARN applications spun up by this job. |
| // |
| // **Beta** Feature: This report is available for testing purposes only. It |
| // may be changed before final release. |
| YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"` |
| // Output only. The email address of the user submitting the job. For jobs |
| // submitted on the cluster, the address is <code>username@hostname</code>. |
| SubmittedBy string `protobuf:"bytes,10,opt,name=submitted_by,json=submittedBy,proto3" json:"submitted_by,omitempty"` |
| // Output only. A URI pointing to the location of the stdout of the job's |
| // driver program. |
| DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri,proto3" json:"driver_output_resource_uri,omitempty"` |
| // Output only. If present, the location of miscellaneous control files |
| // which may be used as part of job setup and handling. If not present, |
| // control files may be placed in the same location as `driver_output_uri`. |
| DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri,proto3" json:"driver_control_files_uri,omitempty"` |
| // Optional. The labels to associate with this job. |
| // Label **keys** must contain 1 to 63 characters, and must conform to |
| // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). |
| // Label **values** may be empty, but, if present, must contain 1 to 63 |
| // characters, and must conform to [RFC |
| // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
| // associated with a job. |
| Labels map[string]string `protobuf:"bytes,18,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` |
| // Optional. Job scheduling configuration. |
| Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"` |
| // Output only. A UUID that uniquely identifies a job within the project |
| // over time. This is in contrast to a user-settable reference.job_id that |
| // may be reused over time. |
| JobUuid string `protobuf:"bytes,22,opt,name=job_uuid,json=jobUuid,proto3" json:"job_uuid,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *Job) Reset() { *m = Job{} } |
| func (m *Job) String() string { return proto.CompactTextString(m) } |
| func (*Job) ProtoMessage() {} |
| func (*Job) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{13} |
| } |
| |
| func (m *Job) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_Job.Unmarshal(m, b) |
| } |
| func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_Job.Marshal(b, m, deterministic) |
| } |
| func (m *Job) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_Job.Merge(m, src) |
| } |
| func (m *Job) XXX_Size() int { |
| return xxx_messageInfo_Job.Size(m) |
| } |
| func (m *Job) XXX_DiscardUnknown() { |
| xxx_messageInfo_Job.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_Job proto.InternalMessageInfo |
| |
| func (m *Job) GetReference() *JobReference { |
| if m != nil { |
| return m.Reference |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPlacement() *JobPlacement { |
| if m != nil { |
| return m.Placement |
| } |
| return nil |
| } |
| |
| type isJob_TypeJob interface { |
| isJob_TypeJob() |
| } |
| |
| type Job_HadoopJob struct { |
| HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"` |
| } |
| |
| type Job_SparkJob struct { |
| SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"` |
| } |
| |
| type Job_PysparkJob struct { |
| PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"` |
| } |
| |
| type Job_HiveJob struct { |
| HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"` |
| } |
| |
| type Job_PigJob struct { |
| PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"` |
| } |
| |
| type Job_SparkRJob struct { |
| SparkRJob *SparkRJob `protobuf:"bytes,21,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"` |
| } |
| |
| type Job_SparkSqlJob struct { |
| SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"` |
| } |
| |
| func (*Job_HadoopJob) isJob_TypeJob() {} |
| |
| func (*Job_SparkJob) isJob_TypeJob() {} |
| |
| func (*Job_PysparkJob) isJob_TypeJob() {} |
| |
| func (*Job_HiveJob) isJob_TypeJob() {} |
| |
| func (*Job_PigJob) isJob_TypeJob() {} |
| |
| func (*Job_SparkRJob) isJob_TypeJob() {} |
| |
| func (*Job_SparkSqlJob) isJob_TypeJob() {} |
| |
| func (m *Job) GetTypeJob() isJob_TypeJob { |
| if m != nil { |
| return m.TypeJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetHadoopJob() *HadoopJob { |
| if x, ok := m.GetTypeJob().(*Job_HadoopJob); ok { |
| return x.HadoopJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSparkJob() *SparkJob { |
| if x, ok := m.GetTypeJob().(*Job_SparkJob); ok { |
| return x.SparkJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPysparkJob() *PySparkJob { |
| if x, ok := m.GetTypeJob().(*Job_PysparkJob); ok { |
| return x.PysparkJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetHiveJob() *HiveJob { |
| if x, ok := m.GetTypeJob().(*Job_HiveJob); ok { |
| return x.HiveJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetPigJob() *PigJob { |
| if x, ok := m.GetTypeJob().(*Job_PigJob); ok { |
| return x.PigJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSparkRJob() *SparkRJob { |
| if x, ok := m.GetTypeJob().(*Job_SparkRJob); ok { |
| return x.SparkRJob |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSparkSqlJob() *SparkSqlJob { |
| if x, ok := m.GetTypeJob().(*Job_SparkSqlJob); ok { |
| return x.SparkSqlJob |
| } |
| return nil |
| } |
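| |
| // Editor's sketch (not generated code): the idiomatic way to inspect the |
| // type_job oneof is a type switch over the wrapper types returned by |
| // GetTypeJob. |
| func jobTypeName(j *Job) string { |
| switch j.GetTypeJob().(type) { |
| case *Job_HadoopJob: |
| return "hadoop" |
| case *Job_SparkJob: |
| return "spark" |
| case *Job_PysparkJob: |
| return "pyspark" |
| case *Job_HiveJob: |
| return "hive" |
| case *Job_PigJob: |
| return "pig" |
| case *Job_SparkRJob: |
| return "spark_r" |
| case *Job_SparkSqlJob: |
| return "spark_sql" |
| default: |
| return "unset" |
| } |
| } |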
| |
| func (m *Job) GetStatus() *JobStatus { |
| if m != nil { |
| return m.Status |
| } |
| return nil |
| } |
| |
| func (m *Job) GetStatusHistory() []*JobStatus { |
| if m != nil { |
| return m.StatusHistory |
| } |
| return nil |
| } |
| |
| func (m *Job) GetYarnApplications() []*YarnApplication { |
| if m != nil { |
| return m.YarnApplications |
| } |
| return nil |
| } |
| |
| func (m *Job) GetSubmittedBy() string { |
| if m != nil { |
| return m.SubmittedBy |
| } |
| return "" |
| } |
| |
| func (m *Job) GetDriverOutputResourceUri() string { |
| if m != nil { |
| return m.DriverOutputResourceUri |
| } |
| return "" |
| } |
| |
| func (m *Job) GetDriverControlFilesUri() string { |
| if m != nil { |
| return m.DriverControlFilesUri |
| } |
| return "" |
| } |
| |
| func (m *Job) GetLabels() map[string]string { |
| if m != nil { |
| return m.Labels |
| } |
| return nil |
| } |
| |
| func (m *Job) GetScheduling() *JobScheduling { |
| if m != nil { |
| return m.Scheduling |
| } |
| return nil |
| } |
| |
| func (m *Job) GetJobUuid() string { |
| if m != nil { |
| return m.JobUuid |
| } |
| return "" |
| } |
| |
| // XXX_OneofWrappers is for the internal use of the proto package. |
| func (*Job) XXX_OneofWrappers() []interface{} { |
| return []interface{}{ |
| (*Job_HadoopJob)(nil), |
| (*Job_SparkJob)(nil), |
| (*Job_PysparkJob)(nil), |
| (*Job_HiveJob)(nil), |
| (*Job_PigJob)(nil), |
| (*Job_SparkRJob)(nil), |
| (*Job_SparkSqlJob)(nil), |
| } |
| } |
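| |
| // Editor's sketch (not generated code): populating the oneof means assigning |
| // exactly one wrapper struct to TypeJob. The empty &SparkJob{} is a |
| // placeholder; its fields are declared earlier in this file. |
| func exampleSparkJobResource() *Job { |
| return &Job{ |
| TypeJob: &Job_SparkJob{SparkJob: &SparkJob{}}, |
| Labels: map[string]string{"env": "staging"}, |
| } |
| } |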
| |
| // Job scheduling options. |
| type JobScheduling struct { |
| // Optional. Maximum number of times per hour a driver may be restarted as |
| // a result of the driver terminating with a non-zero code before the job is |
| // reported failed. |
| // |
| // A job may be reported as thrashing if the driver exits with a non-zero |
| // code 4 times within a 10-minute window. |
| // |
| // Maximum value is 10. |
| MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *JobScheduling) Reset() { *m = JobScheduling{} } |
| func (m *JobScheduling) String() string { return proto.CompactTextString(m) } |
| func (*JobScheduling) ProtoMessage() {} |
| func (*JobScheduling) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{14} |
| } |
| |
| func (m *JobScheduling) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_JobScheduling.Unmarshal(m, b) |
| } |
| func (m *JobScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_JobScheduling.Marshal(b, m, deterministic) |
| } |
| func (m *JobScheduling) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_JobScheduling.Merge(m, src) |
| } |
| func (m *JobScheduling) XXX_Size() int { |
| return xxx_messageInfo_JobScheduling.Size(m) |
| } |
| func (m *JobScheduling) XXX_DiscardUnknown() { |
| xxx_messageInfo_JobScheduling.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_JobScheduling proto.InternalMessageInfo |
| |
| func (m *JobScheduling) GetMaxFailuresPerHour() int32 { |
| if m != nil { |
| return m.MaxFailuresPerHour |
| } |
| return 0 |
| } |
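| |
| // Editor's sketch (not generated code): the field doc above caps |
| // max_failures_per_hour at 10, so a client might clamp input before building |
| // the message. This clamping helper is the editor's addition, not API. |
| func exampleScheduling(requested int32) *JobScheduling { |
| if requested > 10 { |
| requested = 10 // documented maximum |
| } |
| return &JobScheduling{MaxFailuresPerHour: requested} |
| } |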
| |
| // A request to submit a job. |
| type SubmitJobRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` |
| // Required. The job resource. |
| Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"` |
| // Optional. A unique id used to identify the request. If the server |
| // receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same |
| // id, then the second request will be ignored and the |
| // first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend |
| // is returned. |
| // |
| // It is recommended to always set this value to a |
| // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). |
| // |
| // The id must contain only letters (a-z, A-Z), numbers (0-9), |
| // underscores (_), and hyphens (-). The maximum length is 40 characters. |
| RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *SubmitJobRequest) Reset() { *m = SubmitJobRequest{} } |
| func (m *SubmitJobRequest) String() string { return proto.CompactTextString(m) } |
| func (*SubmitJobRequest) ProtoMessage() {} |
| func (*SubmitJobRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{15} |
| } |
| |
| func (m *SubmitJobRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_SubmitJobRequest.Unmarshal(m, b) |
| } |
| func (m *SubmitJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_SubmitJobRequest.Marshal(b, m, deterministic) |
| } |
| func (m *SubmitJobRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_SubmitJobRequest.Merge(m, src) |
| } |
| func (m *SubmitJobRequest) XXX_Size() int { |
| return xxx_messageInfo_SubmitJobRequest.Size(m) |
| } |
| func (m *SubmitJobRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_SubmitJobRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_SubmitJobRequest proto.InternalMessageInfo |
| |
| func (m *SubmitJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *SubmitJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *SubmitJobRequest) GetJob() *Job { |
| if m != nil { |
| return m.Job |
| } |
| return nil |
| } |
| |
| func (m *SubmitJobRequest) GetRequestId() string { |
| if m != nil { |
| return m.RequestId |
| } |
| return "" |
| } |
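| |
| // Editor's sketch (not generated code) of the request_id constraints |
| // documented above: letters, digits, underscores, and hyphens only, at most |
| // 40 characters. An empty id is treated as unset. |
| func validRequestId(id string) bool { |
| if id == "" { |
| return true // unset; the server simply performs no deduplication |
| } |
| if len(id) > 40 { |
| return false |
| } |
| for i := 0; i < len(id); i++ { |
| c := id[i] |
| switch { |
| case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9', c == '_', c == '-': |
| default: |
| return false |
| } |
| } |
| return true |
| } |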
| |
| // A request to get the resource representation for a job in a project. |
| type GetJobRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` |
| // Required. The job ID. |
| JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } |
| func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } |
| func (*GetJobRequest) ProtoMessage() {} |
| func (*GetJobRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{16} |
| } |
| |
| func (m *GetJobRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_GetJobRequest.Unmarshal(m, b) |
| } |
| func (m *GetJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_GetJobRequest.Marshal(b, m, deterministic) |
| } |
| func (m *GetJobRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_GetJobRequest.Merge(m, src) |
| } |
| func (m *GetJobRequest) XXX_Size() int { |
| return xxx_messageInfo_GetJobRequest.Size(m) |
| } |
| func (m *GetJobRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_GetJobRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_GetJobRequest proto.InternalMessageInfo |
| |
| func (m *GetJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *GetJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *GetJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| // A request to list jobs in a project. |
| type ListJobsRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"` |
| // Optional. The number of results to return in each response. |
| PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` |
| // Optional. The page token, returned by a previous call, to request the |
| // next page of results. |
| PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` |
| // Optional. If set, the returned jobs list includes only jobs that were |
| // submitted to the named cluster. |
| ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` |
| // Optional. Specifies enumerated categories of jobs to list |
| // (default = match ALL jobs). |
| // |
| // If `filter` is provided, `jobStateMatcher` will be ignored. |
| JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,proto3,enum=google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"` |
| // Optional. A filter constraining the jobs to list. Filters are |
| // case-sensitive and have the following syntax: |
| // |
| // [field = value] AND [field [= value]] ... |
| // |
| // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label |
| // key. **value** can be `*` to match all values. |
| // `status.state` can be either `ACTIVE` or `NON_ACTIVE`. |
| // Only the logical `AND` operator is supported; space-separated items are |
| // treated as having an implicit `AND` operator. |
| // |
| // Example filter: |
| // |
| // status.state = ACTIVE AND labels.env = staging AND labels.starred = * |
| Filter string `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } |
| func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } |
| func (*ListJobsRequest) ProtoMessage() {} |
| func (*ListJobsRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{17} |
| } |
| |
| func (m *ListJobsRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ListJobsRequest.Unmarshal(m, b) |
| } |
| func (m *ListJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ListJobsRequest.Marshal(b, m, deterministic) |
| } |
| func (m *ListJobsRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ListJobsRequest.Merge(m, src) |
| } |
| func (m *ListJobsRequest) XXX_Size() int { |
| return xxx_messageInfo_ListJobsRequest.Size(m) |
| } |
| func (m *ListJobsRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_ListJobsRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ListJobsRequest proto.InternalMessageInfo |
| |
| func (m *ListJobsRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetPageSize() int32 { |
| if m != nil { |
| return m.PageSize |
| } |
| return 0 |
| } |
| |
| func (m *ListJobsRequest) GetPageToken() string { |
| if m != nil { |
| return m.PageToken |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetClusterName() string { |
| if m != nil { |
| return m.ClusterName |
| } |
| return "" |
| } |
| |
| func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher { |
| if m != nil { |
| return m.JobStateMatcher |
| } |
| return ListJobsRequest_ALL |
| } |
| |
| func (m *ListJobsRequest) GetFilter() string { |
| if m != nil { |
| return m.Filter |
| } |
| return "" |
| } |
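| |
| // Editor's sketch (not generated code): a request using the documented |
| // filter syntax. Note the comment above: a non-empty filter causes |
| // jobStateMatcher to be ignored. |
| func exampleActiveStagingJobs(projectId, region string) *ListJobsRequest { |
| return &ListJobsRequest{ |
| ProjectId: projectId, |
| Region: region, |
| Filter: "status.state = ACTIVE AND labels.env = staging", |
| } |
| } |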
| |
| // A request to update a job. |
| type UpdateJobRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"` |
| // Required. The job ID. |
| JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` |
| // Required. The changes to the job. |
| Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"` |
| // Required. Specifies the path, relative to <code>Job</code>, of |
| // the field to update. For example, to update the labels of a Job the |
| // <code>update_mask</code> parameter would be specified as |
| // <code>labels</code>, and the `PATCH` request body would specify the new |
| // value. <strong>Note:</strong> Currently, <code>labels</code> is the only |
| // field that can be updated. |
| UpdateMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *UpdateJobRequest) Reset() { *m = UpdateJobRequest{} } |
| func (m *UpdateJobRequest) String() string { return proto.CompactTextString(m) } |
| func (*UpdateJobRequest) ProtoMessage() {} |
| func (*UpdateJobRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{18} |
| } |
| |
| func (m *UpdateJobRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_UpdateJobRequest.Unmarshal(m, b) |
| } |
| func (m *UpdateJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_UpdateJobRequest.Marshal(b, m, deterministic) |
| } |
| func (m *UpdateJobRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_UpdateJobRequest.Merge(m, src) |
| } |
| func (m *UpdateJobRequest) XXX_Size() int { |
| return xxx_messageInfo_UpdateJobRequest.Size(m) |
| } |
| func (m *UpdateJobRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_UpdateJobRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_UpdateJobRequest proto.InternalMessageInfo |
| |
| func (m *UpdateJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| func (m *UpdateJobRequest) GetJob() *Job { |
| if m != nil { |
| return m.Job |
| } |
| return nil |
| } |
| |
| func (m *UpdateJobRequest) GetUpdateMask() *field_mask.FieldMask { |
| if m != nil { |
| return m.UpdateMask |
| } |
| return nil |
| } |
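| |
| // Editor's sketch (not generated code): per the update_mask doc above, |
| // labels is currently the only updatable field, so the mask names that |
| // single path. |
| func exampleUpdateLabels(projectId, region, jobId string, labels map[string]string) *UpdateJobRequest { |
| return &UpdateJobRequest{ |
| ProjectId: projectId, |
| Region: region, |
| JobId: jobId, |
| Job: &Job{Labels: labels}, |
| UpdateMask: &field_mask.FieldMask{Paths: []string{"labels"}}, |
| } |
| } |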
| |
| // A list of jobs in a project. |
| type ListJobsResponse struct { |
| // Output only. The list of jobs. |
| Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"` |
| // Optional. This token is included in the response if there are more results |
| // to fetch. To fetch additional results, provide this value as the |
| // `page_token` in a subsequent <code>ListJobsRequest</code>. |
| NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } |
| func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } |
| func (*ListJobsResponse) ProtoMessage() {} |
| func (*ListJobsResponse) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{19} |
| } |
| |
| func (m *ListJobsResponse) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_ListJobsResponse.Unmarshal(m, b) |
| } |
| func (m *ListJobsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_ListJobsResponse.Marshal(b, m, deterministic) |
| } |
| func (m *ListJobsResponse) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_ListJobsResponse.Merge(m, src) |
| } |
| func (m *ListJobsResponse) XXX_Size() int { |
| return xxx_messageInfo_ListJobsResponse.Size(m) |
| } |
| func (m *ListJobsResponse) XXX_DiscardUnknown() { |
| xxx_messageInfo_ListJobsResponse.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_ListJobsResponse proto.InternalMessageInfo |
| |
| func (m *ListJobsResponse) GetJobs() []*Job { |
| if m != nil { |
| return m.Jobs |
| } |
| return nil |
| } |
| |
| func (m *ListJobsResponse) GetNextPageToken() string { |
| if m != nil { |
| return m.NextPageToken |
| } |
| return "" |
| } |
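| |
| // Editor's sketch (not generated code) of the page-token loop implied by |
| // next_page_token: re-issue the request with the returned token until it |
| // comes back empty. JobControllerClient is declared later in this file. |
| func listAllJobs(ctx context.Context, c JobControllerClient, req *ListJobsRequest) ([]*Job, error) { |
| var all []*Job |
| for { |
| resp, err := c.ListJobs(ctx, req) |
| if err != nil { |
| return nil, err |
| } |
| all = append(all, resp.GetJobs()...) |
| if resp.GetNextPageToken() == "" { |
| return all, nil |
| } |
| req.PageToken = resp.GetNextPageToken() // mutates the caller's request |
| } |
| } |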
| |
| // A request to cancel a job. |
| type CancelJobRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` |
| // Required. The job ID. |
| JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } |
| func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } |
| func (*CancelJobRequest) ProtoMessage() {} |
| func (*CancelJobRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{20} |
| } |
| |
| func (m *CancelJobRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_CancelJobRequest.Unmarshal(m, b) |
| } |
| func (m *CancelJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_CancelJobRequest.Marshal(b, m, deterministic) |
| } |
| func (m *CancelJobRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_CancelJobRequest.Merge(m, src) |
| } |
| func (m *CancelJobRequest) XXX_Size() int { |
| return xxx_messageInfo_CancelJobRequest.Size(m) |
| } |
| func (m *CancelJobRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_CancelJobRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_CancelJobRequest proto.InternalMessageInfo |
| |
| func (m *CancelJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *CancelJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *CancelJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| // A request to delete a job. |
| type DeleteJobRequest struct { |
| // Required. The ID of the Google Cloud Platform project that the job |
| // belongs to. |
| ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` |
| // Required. The Cloud Dataproc region in which to handle the request. |
| Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` |
| // Required. The job ID. |
| JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` |
| XXX_NoUnkeyedLiteral struct{} `json:"-"` |
| XXX_unrecognized []byte `json:"-"` |
| XXX_sizecache int32 `json:"-"` |
| } |
| |
| func (m *DeleteJobRequest) Reset() { *m = DeleteJobRequest{} } |
| func (m *DeleteJobRequest) String() string { return proto.CompactTextString(m) } |
| func (*DeleteJobRequest) ProtoMessage() {} |
| func (*DeleteJobRequest) Descriptor() ([]byte, []int) { |
| return fileDescriptor_20fb118582e1d7de, []int{21} |
| } |
| |
| func (m *DeleteJobRequest) XXX_Unmarshal(b []byte) error { |
| return xxx_messageInfo_DeleteJobRequest.Unmarshal(m, b) |
| } |
| func (m *DeleteJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { |
| return xxx_messageInfo_DeleteJobRequest.Marshal(b, m, deterministic) |
| } |
| func (m *DeleteJobRequest) XXX_Merge(src proto.Message) { |
| xxx_messageInfo_DeleteJobRequest.Merge(m, src) |
| } |
| func (m *DeleteJobRequest) XXX_Size() int { |
| return xxx_messageInfo_DeleteJobRequest.Size(m) |
| } |
| func (m *DeleteJobRequest) XXX_DiscardUnknown() { |
| xxx_messageInfo_DeleteJobRequest.DiscardUnknown(m) |
| } |
| |
| var xxx_messageInfo_DeleteJobRequest proto.InternalMessageInfo |
| |
| func (m *DeleteJobRequest) GetProjectId() string { |
| if m != nil { |
| return m.ProjectId |
| } |
| return "" |
| } |
| |
| func (m *DeleteJobRequest) GetRegion() string { |
| if m != nil { |
| return m.Region |
| } |
| return "" |
| } |
| |
| func (m *DeleteJobRequest) GetJobId() string { |
| if m != nil { |
| return m.JobId |
| } |
| return "" |
| } |
| |
| func init() { |
| proto.RegisterEnum("google.cloud.dataproc.v1beta2.LoggingConfig_Level", LoggingConfig_Level_name, LoggingConfig_Level_value) |
| proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_State", JobStatus_State_name, JobStatus_State_value) |
| proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_Substate", JobStatus_Substate_name, JobStatus_Substate_value) |
| proto.RegisterEnum("google.cloud.dataproc.v1beta2.YarnApplication_State", YarnApplication_State_name, YarnApplication_State_value) |
| proto.RegisterEnum("google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher", ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value) |
| proto.RegisterType((*LoggingConfig)(nil), "google.cloud.dataproc.v1beta2.LoggingConfig") |
| proto.RegisterMapType((map[string]LoggingConfig_Level)(nil), "google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry") |
| proto.RegisterType((*HadoopJob)(nil), "google.cloud.dataproc.v1beta2.HadoopJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry") |
| proto.RegisterType((*SparkJob)(nil), "google.cloud.dataproc.v1beta2.SparkJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry") |
| proto.RegisterType((*PySparkJob)(nil), "google.cloud.dataproc.v1beta2.PySparkJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry") |
| proto.RegisterType((*QueryList)(nil), "google.cloud.dataproc.v1beta2.QueryList") |
| proto.RegisterType((*HiveJob)(nil), "google.cloud.dataproc.v1beta2.HiveJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry") |
| proto.RegisterType((*SparkSqlJob)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry") |
| proto.RegisterType((*PigJob)(nil), "google.cloud.dataproc.v1beta2.PigJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry") |
| proto.RegisterType((*SparkRJob)(nil), "google.cloud.dataproc.v1beta2.SparkRJob") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry") |
| proto.RegisterType((*JobPlacement)(nil), "google.cloud.dataproc.v1beta2.JobPlacement") |
| proto.RegisterType((*JobStatus)(nil), "google.cloud.dataproc.v1beta2.JobStatus") |
| proto.RegisterType((*JobReference)(nil), "google.cloud.dataproc.v1beta2.JobReference") |
| proto.RegisterType((*YarnApplication)(nil), "google.cloud.dataproc.v1beta2.YarnApplication") |
| proto.RegisterType((*Job)(nil), "google.cloud.dataproc.v1beta2.Job") |
| proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.Job.LabelsEntry") |
| proto.RegisterType((*JobScheduling)(nil), "google.cloud.dataproc.v1beta2.JobScheduling") |
| proto.RegisterType((*SubmitJobRequest)(nil), "google.cloud.dataproc.v1beta2.SubmitJobRequest") |
| proto.RegisterType((*GetJobRequest)(nil), "google.cloud.dataproc.v1beta2.GetJobRequest") |
| proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.dataproc.v1beta2.ListJobsRequest") |
| proto.RegisterType((*UpdateJobRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateJobRequest") |
| proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.dataproc.v1beta2.ListJobsResponse") |
| proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.dataproc.v1beta2.CancelJobRequest") |
| proto.RegisterType((*DeleteJobRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteJobRequest") |
| } |
| |
| func init() { |
| proto.RegisterFile("google/cloud/dataproc/v1beta2/jobs.proto", fileDescriptor_20fb118582e1d7de) |
| } |
| |
| var fileDescriptor_20fb118582e1d7de = []byte{ |
| // 2550 bytes of a gzipped FileDescriptorProto |
| 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcf, 0x93, 0x1b, 0x47, |
| 0xf5, 0x5f, 0xfd, 0xd6, 0x3c, 0xed, 0x8f, 0xd9, 0x8e, 0x1d, 0xeb, 0x2b, 0x7f, 0x53, 0xd9, 0x4c, |
| 0x15, 0x66, 0x71, 0x82, 0x54, 0x11, 0xc1, 0x49, 0xec, 0x0a, 0xb1, 0x56, 0x1a, 0x45, 0xda, 0xc8, |
| 0x5a, 0x65, 0x24, 0xd9, 0x24, 0x1c, 0x86, 0x91, 0xd4, 0x2b, 0x8d, 0x3c, 0x9a, 0x19, 0xf7, 0xcc, |
| 0xac, 0xa3, 0xa4, 0x72, 0xe1, 0xc2, 0x1f, 0xc0, 0x9d, 0x2b, 0x45, 0x15, 0xdc, 0x39, 0x71, 0xe0, |
| 0x42, 0xc1, 0x21, 0x54, 0x91, 0x5b, 0xb8, 0x70, 0xe0, 0xc0, 0x01, 0xaa, 0x38, 0x51, 0x45, 0x71, |
| 0xa1, 0xba, 0x7b, 0x66, 0x56, 0xd2, 0xae, 0xad, 0x59, 0x2f, 0x95, 0x90, 0x9c, 0x3c, 0x7a, 0xaf, |
| 0xdf, 0xeb, 0xf7, 0xfa, 0xf3, 0xe9, 0xd7, 0xaf, 0x7b, 0x0d, 0xfb, 0x63, 0xcb, 0x1a, 0x1b, 0xb8, |
| 0x34, 0x34, 0x2c, 0x6f, 0x54, 0x1a, 0x69, 0xae, 0x66, 0x13, 0x6b, 0x58, 0x3a, 0x79, 0x75, 0x80, |
| 0x5d, 0xad, 0x5c, 0x9a, 0x5a, 0x03, 0xa7, 0x68, 0x13, 0xcb, 0xb5, 0xd0, 0x0b, 0x7c, 0x64, 0x91, |
| 0x8d, 0x2c, 0x06, 0x23, 0x8b, 0xfe, 0xc8, 0xc2, 0xff, 0xfb, 0x8e, 0x34, 0x5b, 0x2f, 0x69, 0xa6, |
| 0x69, 0xb9, 0x9a, 0xab, 0x5b, 0xa6, 0x6f, 0x5c, 0xb8, 0xb6, 0xa0, 0x1d, 0x1a, 0x3a, 0x36, 0x5d, |
| 0x5f, 0xf1, 0xe2, 0x82, 0xe2, 0x58, 0xc7, 0xc6, 0x48, 0x1d, 0xe0, 0x89, 0x76, 0xa2, 0x5b, 0xc4, |
| 0x1f, 0x70, 0xdd, 0x1f, 0xc0, 0x7e, 0x0d, 0xbc, 0xe3, 0x12, 0x9e, 0xd9, 0xee, 0xdc, 0x57, 0xee, |
| 0xad, 0x2a, 0xb9, 0x8b, 0x99, 0xe6, 0x3c, 0x5c, 0xf1, 0x1f, 0x8e, 0x70, 0xf5, 0x19, 0x76, 0x5c, |
| 0x6d, 0x66, 0xf3, 0x01, 0xd2, 0x5f, 0xe3, 0xb0, 0xd5, 0xb2, 0xc6, 0x63, 0xdd, 0x1c, 0x57, 0x2d, |
| 0xf3, 0x58, 0x1f, 0xa3, 0x19, 0xec, 0x8e, 0x88, 0x7e, 0x82, 0x89, 0x6a, 0x58, 0x63, 0xd5, 0xc0, |
| 0x27, 0xd8, 0x70, 0xf2, 0xf1, 0xbd, 0xc4, 0x7e, 0xae, 0x5c, 0x29, 0x3e, 0x75, 0x11, 0x8a, 0x4b, |
| 0x8e, 0x8a, 0x35, 0xe6, 0xa5, 0x65, 0x8d, 0x5b, 0xcc, 0x87, 0x6c, 0xba, 0x64, 0xae, 0xec, 0x8c, |
| 0x96, 0xa5, 0x85, 0x13, 0xb8, 0x72, 0xde, 0x40, 0x24, 0x42, 0xe2, 0x21, 0x9e, 0xe7, 0x63, 0x7b, |
| 0xb1, 0x7d, 0x41, 0xa1, 0x9f, 0xa8, 0x01, 0xa9, 0x13, 0xcd, 0xf0, 0x70, 0x3e, 0xbe, 0x17, 0xdb, |
| 0xdf, 0x2e, 0x97, 0x2f, 0x14, 0x0c, 0x73, 0xad, 0x70, 0x07, 0xb7, 0xe3, 0x6f, 0xc4, 0x24, 0x1b, |
| 0x52, 0x4c, 0x86, 0xae, 0xc2, 0x6e, 0x4b, 0xbe, 0x2f, 0xb7, 0xd4, 0x7e, 0xbb, 0xdb, 0x91, 0xab, |
| 0xcd, 0x7a, 0x53, 0xae, 0x89, 0x1b, 0x28, 0x03, 0x89, 0x4a, 0xab, 0x25, 0xc6, 0x90, 0x00, 0xa9, |
| 0x9e, 0x52, 0xa9, 0xca, 0x62, 0x9c, 0x7e, 0xd6, 0xe4, 0x83, 0xfe, 0x3b, 0x62, 0x02, 0x65, 0x21, |
| 0xd9, 0x6c, 0xd7, 0x8f, 0xc4, 0x24, 0xfd, 0x7a, 0x50, 0x51, 0xda, 0x62, 0x8a, 0xaa, 0x65, 0x45, |
| 0x39, 0x52, 0xc4, 0x34, 0xfd, 0xac, 0x57, 0x7a, 0x95, 0x96, 0x98, 0xa1, 0x8e, 0x8e, 0xea, 0x75, |
| 0x31, 0x2b, 0xfd, 0x2e, 0x01, 0x42, 0x43, 0x1b, 0x59, 0x96, 0x7d, 0x68, 0x0d, 0xd0, 0xcb, 0xb0, |
| 0x3b, 0xd3, 0x74, 0x53, 0x9d, 0x6a, 0x44, 0x3d, 0xd6, 0x0d, 0xac, 0x7a, 0x44, 0xe7, 0xd9, 0x36, |
| 0x36, 0x94, 0x6d, 0xaa, 0x3a, 0xd4, 0x48, 0x5d, 0x37, 0x70, 0x9f, 0xe8, 0xe8, 0x45, 0x00, 0x36, |
| 0x78, 0x68, 0x68, 0x8e, 0xc3, 0xf2, 0xa7, 0xa3, 0x04, 0x2a, 0xab, 0x52, 0x11, 0x42, 0x90, 0xd4, |
| 0xc8, 0xd8, 0xc9, 0x27, 0xf6, 0x12, 0xfb, 0x82, 0xc2, 0xbe, 0x91, 0x04, 0x5b, 0x8b, 0xce, 0x9d, |
| 0x7c, 0x92, 0x29, 0x73, 0xd3, 0xd0, 0xaf, 0x83, 0xae, 0x83, 0x70, 0xaa, 0x4f, 0x31, 0x7d, 0xf6, |
| 0x38, 0x50, 0xbe, 0x04, 0x9b, 0x1a, 0x19, 0x4e, 0xf4, 0x13, 0x5f, 0x9f, 0xe6, 0xf6, 0xbe, 0x8c, |
| 0x0d, 0xf9, 0x3e, 0x80, 0x4d, 0x2c, 0x1b, 0x13, 0x57, 0xc7, 0x4e, 0x3e, 0xc3, 0x58, 0xf2, 0xc6, |
| 0x1a, 0x60, 0xc2, 0x35, 0x28, 0x76, 0x42, 0x53, 0x4e, 0x8e, 0x05, 0x5f, 0xa8, 0x0b, 0xdb, 0x06, |
| 0x47, 0x50, 0x1d, 0x32, 0x08, 0xf3, 0xd9, 0xbd, 0xd8, 0x7e, 0xae, 0xfc, 0xca, 0x45, 0x60, 0x57, |
| 0xb6, 0x8c, 0xc5, 0x9f, 0x85, 0xb7, 0x60, 0x67, 0x65, 0xce, 0x73, 0x78, 0x76, 0x65, 0x91, 0x67, |
| 0xc2, 0x02, 0x67, 0x0e, 0xb2, 0x90, 0xe6, 0xf4, 0x95, 0x7e, 0x9b, 0x80, 0x6c, 0xd7, 0xd6, 0xc8, |
| 0xc3, 0xaf, 0x0f, 0x94, 0x0f, 0xce, 0x81, 0xf2, 0xf5, 0x35, 0x8b, 0x1d, 0x2c, 0xc1, 0x57, 0x18, |
| 0xc9, 0xcf, 0x12, 0x00, 0x9d, 0x79, 0x88, 0xe5, 0x6b, 0x70, 0x85, 0xc1, 0x63, 0xcf, 0xdd, 0x89, |
| 0x65, 0xae, 0xc0, 0x79, 0x90, 0xf8, 0x73, 0x25, 0xae, 0x30, 0xb0, 0x3b, 0x4c, 0x1f, 0x80, 0x1a, |
| 0x60, 0x16, 0x5f, 0xc0, 0x6c, 0x1f, 0xc4, 0x15, 0x27, 0x01, 0xa6, 0xdb, 0xf6, 0xa2, 0xf1, 0x17, |
| 0x83, 0xee, 0xfb, 0xe7, 0xa0, 0xfb, 0xe6, 0x1a, 0x00, 0x4e, 0x97, 0xe5, 0xab, 0x86, 0xaf, 0x74, |
| 0x13, 0x84, 0xf7, 0x3c, 0x4c, 0xe6, 0x2d, 0xdd, 0x71, 0xd1, 0x0b, 0x90, 0x79, 0xe4, 0x61, 0x42, |
| 0x13, 0x8f, 0xd1, 0x95, 0xe1, 0x30, 0x06, 0x32, 0xe9, 0xa7, 0x49, 0xc8, 0x34, 0xf4, 0x13, 0x4c, |
| 0xe1, 0xbf, 0x01, 0xdb, 0x54, 0x3c, 0x3f, 0xbb, 0x8f, 0x37, 0x99, 0x3c, 0x00, 0xbc, 0x09, 0xc0, |
| 0xc7, 0x19, 0xba, 0xe3, 0xb2, 0xe9, 0x73, 0xe5, 0xfd, 0x35, 0xf9, 0x86, 0x01, 0xd1, 0xfd, 0xfe, |
| 0x28, 0x8c, 0xae, 0x08, 0xcf, 0x0d, 0x2d, 0xd3, 0xd5, 0x4d, 0x0f, 0xab, 0x94, 0x2c, 0x9a, 0x6e, |
| 0x78, 0x04, 0xe7, 0x13, 0x7b, 0xb1, 0xfd, 0xac, 0xb2, 0x1b, 0xa8, 0x8e, 0xcc, 0x3a, 0x57, 0xa0, |
| 0x63, 0x10, 0x9d, 0x21, 0xd1, 0x6d, 0x57, 0x3d, 0xd1, 0x88, 0xae, 0x0d, 0x0c, 0xcc, 0x09, 0x93, |
| 0x2b, 0xdf, 0x59, 0x57, 0x78, 0x79, 0x92, 0xc5, 0x2e, 0x33, 0xbf, 0x1f, 0x58, 0xfb, 0x07, 0xb3, |
| 0xb3, 0x2c, 0x45, 0xf7, 0x97, 0x18, 0x93, 0x62, 0x33, 0xdc, 0x8a, 0x38, 0xc3, 0xd3, 0xe8, 0x72, |
| 0x86, 0xed, 0xe9, 0x33, 0x6c, 0x2f, 0x1c, 0xc0, 0x95, 0xf3, 0x82, 0xbc, 0x08, 0x05, 0x2e, 0x5b, |
| 0x21, 0x84, 0x90, 0x34, 0xd2, 0x1f, 0x92, 0x90, 0x63, 0x3b, 0xa1, 0xfb, 0xc8, 0xf8, 0x92, 0x48, |
| 0x32, 0x3d, 0x07, 0xf4, 0x04, 0x83, 0xe4, 0xed, 0x28, 0x25, 0x9a, 0x07, 0x1e, 0x11, 0xf8, 0x0f, |
| 0x96, 0x80, 0xe7, 0xd4, 0xba, 0x7d, 0x81, 0x59, 0x2e, 0x04, 0xfe, 0x1b, 0x67, 0x4b, 0xdd, 0xd9, |
| 0x7a, 0x92, 0xbe, 0x7c, 0x3d, 0xf9, 0xdf, 0x62, 0xd4, 0x3f, 0x92, 0x90, 0xee, 0xe8, 0xe3, 0xaf, |
| 0x48, 0xc5, 0xc1, 0x4f, 0xac, 0x38, 0xeb, 0x68, 0xc1, 0x73, 0x8c, 0xc8, 0xbb, 0xfe, 0x39, 0x05, |
| 0xe7, 0xbb, 0xd1, 0x26, 0xb8, 0x64, 0xbd, 0x39, 0x87, 0x72, 0x99, 0xaf, 0x1b, 0xe5, 0xfe, 0x15, |
| 0x07, 0x81, 0xed, 0x52, 0x85, 0xb2, 0xee, 0x26, 0xec, 0xb0, 0x36, 0x87, 0x9c, 0xdb, 0xe1, 0x6c, |
| 0x52, 0x9d, 0xf2, 0xb4, 0xe6, 0x66, 0xa9, 0x1d, 0x49, 0xac, 0x69, 0x47, 0x92, 0xeb, 0xee, 0x0d, |
| 0xa9, 0x48, 0xf7, 0x86, 0x30, 0xfa, 0x0b, 0x76, 0x23, 0xe9, 0x2f, 0xbd, 0x1b, 0x79, 0x1f, 0x36, |
| 0x0f, 0xad, 0x41, 0xc7, 0xd0, 0x86, 0x78, 0x86, 0x4d, 0x17, 0xdd, 0x80, 0xcd, 0xa1, 0xe1, 0x39, |
| 0x2e, 0x26, 0xaa, 0xa9, 0xcd, 0xf0, 0xe2, 0xd2, 0xe7, 0x7c, 0x45, 0x5b, 0x9b, 0x61, 0xba, 0x90, |
| 0xc1, 0x38, 0xcf, 0xd3, 0x47, 0xbe, 0xe3, 0x60, 0x48, 0xdf, 0xd3, 0x47, 0xd2, 0x3f, 0x13, 0x20, |
| 0x1c, 0x5a, 0x83, 0xae, 0xab, 0xb9, 0x9e, 0x83, 0x6a, 0x90, 0x72, 0x5c, 0xcd, 0xe5, 0x1e, 0xb7, |
| 0xcb, 0xc5, 0x35, 0x39, 0x87, 0x86, 0x45, 0xfa, 0x0f, 0x56, 0xb8, 0x31, 0xca, 0x43, 0x66, 0x84, |
| 0x5d, 0x4d, 0x37, 0xfc, 0xfb, 0x89, 0x12, 0xfc, 0x44, 0x35, 0x10, 0xd9, 0x10, 0xd5, 0x71, 0x35, |
| 0xe2, 0xaa, 0xae, 0x3e, 0xc3, 0xfe, 0xf2, 0x16, 0x82, 0xa9, 0x82, 0x97, 0x86, 0x62, 0x2f, 0x78, |
| 0x69, 0x50, 0xb6, 0x99, 0x4d, 0x97, 0x9a, 0x50, 0x21, 0xba, 0x07, 0x59, 0xc7, 0x1b, 0xf0, 0x40, |
| 0x33, 0x2c, 0xd0, 0x57, 0xa3, 0x07, 0xea, 0x1b, 0x2a, 0xa1, 0x0b, 0xe9, 0xe7, 0x31, 0x48, 0xb1, |
| 0xf8, 0xe9, 0x55, 0xbe, 0xdb, 0xab, 0xf4, 0xe4, 0x95, 0xab, 0x7c, 0x0e, 0x32, 0x1d, 0xb9, 0x5d, |
| 0x6b, 0xb6, 0xdf, 0x11, 0x63, 0x68, 0x1b, 0xa0, 0x2b, 0xf7, 0xfa, 0x1d, 0xb5, 0x76, 0xd4, 0x96, |
| 0xc5, 0x2c, 0x55, 0x2a, 0xfd, 0x76, 0x9b, 0x2a, 0xe3, 0x08, 0xc1, 0x76, 0xb5, 0xd2, 0xae, 0xca, |
| 0x2d, 0x35, 0x30, 0x48, 0x2c, 0xc8, 0xba, 0xbd, 0x8a, 0xd2, 0x93, 0x6b, 0x62, 0x06, 0x6d, 0x81, |
| 0xc0, 0x65, 0x2d, 0xb9, 0xc6, 0x9f, 0x00, 0x98, 0xb7, 0xa5, 0x27, 0x80, 0xe7, 0x60, 0xa7, 0xd2, |
| 0xeb, 0xc9, 0xf7, 0x3a, 0x3d, 0xb5, 0x5e, 0x69, 0xb6, 0xfa, 0x8a, 0x2c, 0x0a, 0x52, 0x03, 0xb2, |
| 0x41, 0x06, 0x68, 0x07, 0x72, 0xcb, 0x71, 0x6e, 0x81, 0xd0, 0xed, 0x1f, 0xdc, 0x6b, 0xf6, 0xe8, |
| 0x24, 0x31, 0x04, 0x90, 0x7e, 0xaf, 0x2f, 0xf7, 0xe5, 0x9a, 0x18, 0x47, 0x22, 0x6c, 0x76, 0x7b, |
| 0x95, 0x96, 0x4c, 0x63, 0xe8, 0xf5, 0xbb, 0x62, 0x42, 0x6a, 0x32, 0x4e, 0x29, 0xf8, 0x18, 0x13, |
| 0x6c, 0x0e, 0x31, 0x92, 0xd8, 0x8e, 0x9a, 0xe2, 0xa1, 0xab, 0xea, 0xa3, 0x45, 0x46, 0x09, 0xbe, |
| 0xb8, 0x39, 0x42, 0x57, 0x21, 0x3d, 0xb5, 0x06, 0x6a, 0xc8, 0xa4, 0xd4, 0xd4, 0x1a, 0x34, 0x47, |
| 0xd2, 0xa7, 0x71, 0xd8, 0x79, 0x5f, 0x23, 0x66, 0xc5, 0xb6, 0x0d, 0x7d, 0xc8, 0x1e, 0xae, 0xd0, |
| 0x35, 0x48, 0x2e, 0x53, 0x33, 0xa1, 0x30, 0x01, 0x3a, 0x0a, 0x28, 0xc6, 0x5f, 0x61, 0x5e, 0x5b, |
| 0x83, 0xdc, 0x8a, 0x5f, 0x4e, 0x34, 0xee, 0xcf, 0x67, 0xdb, 0x8b, 0x90, 0xb5, 0x89, 0x35, 0x26, |
| 0xd8, 0x71, 0xd8, 0x11, 0x14, 0xe7, 0xda, 0x50, 0x48, 0x77, 0x8b, 0x4b, 0xb4, 0xe1, 0x43, 0xba, |
| 0xa5, 0x3d, 0x62, 0xe4, 0x93, 0xa7, 0x21, 0xe5, 0x02, 0x45, 0x9f, 0x18, 0xd2, 0x8f, 0xd7, 0xf1, |
| 0x20, 0x03, 0x89, 0xb6, 0xfc, 0x80, 0x73, 0xa0, 0x2d, 0x3f, 0x50, 0xbb, 0x95, 0xfb, 0x1c, 0xf6, |
| 0xa5, 0x85, 0x4f, 0xa0, 0x4d, 0xc8, 0x56, 0xaa, 0x55, 0xb9, 0xd3, 0x63, 0xe0, 0x2e, 0x10, 0x24, |
| 0x45, 0x55, 0xf5, 0x66, 0xbb, 0xd9, 0x6d, 0xc8, 0x35, 0x31, 0x4d, 0x11, 0xa2, 0xd0, 0x32, 0x4a, |
| 0x00, 0xa4, 0xdf, 0x6d, 0x32, 0x3e, 0x64, 0xa5, 0x9f, 0x01, 0x24, 0x68, 0x95, 0x6d, 0x82, 0x40, |
| 0x02, 0x80, 0xd8, 0x4a, 0xe6, 0xca, 0x2f, 0xaf, 0x67, 0x7a, 0x88, 0xa9, 0x72, 0x6a, 0x8d, 0xda, |
| 0x20, 0xd8, 0x41, 0xfd, 0xf0, 0x4f, 0xff, 0x08, 0xae, 0xc2, 0x92, 0x13, 0x50, 0x21, 0x2c, 0x41, |
| 0x4d, 0x80, 0x09, 0x7b, 0x87, 0x51, 0xa7, 0xd6, 0x80, 0xad, 0xfb, 0xfa, 0x76, 0x22, 0x7c, 0xb8, |
| 0xa1, 0xed, 0xc4, 0x24, 0x7c, 0xc9, 0xaa, 0x83, 0xe0, 0xd0, 0xd2, 0xcc, 0x3c, 0x25, 0x99, 0xa7, |
| 0x6f, 0x46, 0x7c, 0x37, 0x68, 0x6c, 0x28, 0x59, 0x27, 0xb8, 0x7a, 0xb7, 0x20, 0x67, 0xcf, 0x4f, |
| 0x3d, 0xa5, 0x98, 0xa7, 0x6f, 0x45, 0xbe, 0xa3, 0x36, 0x36, 0x14, 0xf0, 0xed, 0xa9, 0xb7, 0x2a, |
| 0x64, 0xd9, 0x09, 0x44, 0x5d, 0xf1, 0x12, 0x75, 0x23, 0xda, 0xe5, 0xa5, 0xb1, 0xa1, 0x64, 0x26, |
| 0xfe, 0x75, 0xf0, 0x2e, 0x64, 0x6c, 0x7d, 0xcc, 0x7c, 0xf0, 0x86, 0xe0, 0x1b, 0x91, 0xfa, 0x91, |
| 0xc6, 0x86, 0x92, 0xb6, 0x79, 0x7b, 0x77, 0x08, 0x39, 0x9e, 0x12, 0x61, 0x5e, 0xae, 0x46, 0x5a, |
| 0xe8, 0xf0, 0xa4, 0xa3, 0x0b, 0xed, 0x84, 0x87, 0x76, 0x07, 0xb6, 0xb8, 0x2f, 0xe7, 0x91, 0xc1, |
| 0xbc, 0x6d, 0x32, 0x6f, 0x37, 0xa3, 0xf7, 0xe6, 0x8d, 0x0d, 0x85, 0x87, 0xe3, 0xdf, 0x64, 0xee, |
| 0x42, 0xda, 0x61, 0x75, 0xd5, 0xbf, 0xb2, 0xef, 0x47, 0xad, 0xc3, 0x8a, 0x6f, 0x87, 0x8e, 0x60, |
| 0x9b, 0x7f, 0xa9, 0x13, 0xdd, 0x71, 0x2d, 0x32, 0xcf, 0x6f, 0xb1, 0xc3, 0x3c, 0xba, 0xa7, 0x2d, |
| 0x6e, 0xdf, 0xe0, 0xe6, 0xe8, 0x07, 0xb0, 0x3b, 0xd7, 0x88, 0xa9, 0x6a, 0xa7, 0x45, 0xc3, 0xc9, |
| 0x0b, 0xcc, 0x67, 0xf1, 0x62, 0xb5, 0x46, 0x11, 0xe7, 0xcb, 0x02, 0xd6, 0x99, 0x38, 0xde, 0x60, |
| 0xa6, 0xbb, 0x2e, 0x1e, 0xa9, 0x83, 0x79, 0x1e, 0xf8, 0x81, 0x1a, 0xca, 0x0e, 0xe6, 0xe8, 0x0e, |
| 0x14, 0xfc, 0xe7, 0x6f, 0xcb, 0x73, 0x6d, 0xcf, 0x55, 0x09, 0x76, 0x2c, 0x8f, 0x0c, 0x79, 0x93, |
| 0xb4, 0xcb, 0x0c, 0xae, 0xf1, 0x11, 0x47, 0x6c, 0x80, 0xe2, 0xeb, 0x69, 0xab, 0xf4, 0x3a, 0xe4, |
| 0x7d, 0x63, 0xda, 0x45, 0x13, 0xcb, 0x60, 0xed, 0x95, 0xc3, 0x4c, 0x77, 0x98, 0xe9, 0x55, 0xae, |
| 0xaf, 0x72, 0x35, 0x6d, 0xb1, 0x1c, 0x6a, 0x58, 0x87, 0xb4, 0xa1, 0x0d, 0xb0, 0xe1, 0xe4, 0x51, |
| 0xa4, 0x54, 0x69, 0x17, 0xd4, 0x62, 0x06, 0xbc, 0x03, 0xf2, 0xad, 0x51, 0x0b, 0xc0, 0x19, 0x4e, |
| 0xf0, 0xc8, 0x33, 0x74, 0x73, 0x9c, 0xbf, 0x12, 0xa9, 0xf3, 0xa1, 0x50, 0x84, 0x36, 0xca, 0x82, |
| 0x3d, 0xfa, 0x3f, 0xc8, 0xd2, 0xf3, 0x82, 0xf5, 0x1e, 0xcf, 0xf3, 0x4e, 0x60, 0x6a, 0x0d, 0x68, |
| 0xdf, 0x51, 0x78, 0x13, 0x72, 0x0b, 0xf3, 0x5f, 0xa8, 0x29, 0x05, 0xc8, 0xba, 0x73, 0x9b, 0xed, |
| 0x4c, 0xe9, 0x00, 0xb6, 0x96, 0xa6, 0x47, 0xaf, 0xc2, 0xd5, 0x99, 0xf6, 0x61, 0x70, 0x27, 0x71, |
| 0x54, 0x1b, 0x13, 0x75, 0x62, 0x79, 0x84, 0xb9, 0x4e, 0x29, 0x68, 0xa6, 0x7d, 0xe8, 0x5f, 0x4b, |
| 0x9c, 0x0e, 0x26, 0x0d, 0xcb, 0x23, 0xd2, 0x2f, 0x63, 0x20, 0x76, 0x19, 0x82, 0xac, 0x78, 0x3e, |
| 0xf2, 0xb0, 0xe3, 0x46, 0x3a, 0x0e, 0xaf, 0x43, 0x9a, 0xe0, 0xb1, 0x6e, 0x99, 0xac, 0xfe, 0xf9, |
| 0x7a, 0x5f, 0x84, 0xde, 0x84, 0x04, 0xdd, 0x62, 0xbc, 0xd4, 0x4a, 0xeb, 0x97, 0x90, 0x5b, 0x53, |
| 0x1b, 0xf4, 0x02, 0x00, 0xe1, 0x61, 0xd0, 0xb9, 0xd9, 0x71, 0x45, 0x4b, 0x39, 0x93, 0x34, 0x47, |
| 0x92, 0x01, 0x5b, 0xef, 0xe0, 0xff, 0x6a, 0xac, 0x85, 0xe5, 0x73, 0x9d, 0x2b, 0xfd, 0xc3, 0xfd, |
| 0xef, 0x71, 0xd8, 0xa1, 0xb7, 0xbe, 0x43, 0x6b, 0xe0, 0x3c, 0xdb, 0x84, 0xe9, 0xb3, 0x13, 0x5e, |
| 0x07, 0xc1, 0xd6, 0xc6, 0x58, 0x75, 0xf4, 0x8f, 0x38, 0xc0, 0x29, 0x25, 0x4b, 0x05, 0x5d, 0xfd, |
| 0x23, 0x4c, 0xd3, 0x67, 0x4a, 0xd7, 0x7a, 0x88, 0xfd, 0x70, 0x15, 0x36, 0xbc, 0x47, 0x05, 0x8b, |
| 0x4d, 0x2d, 0xeb, 0x30, 0x92, 0x4b, 0x4d, 0x2d, 0xeb, 0x7b, 0xa7, 0xb0, 0x4b, 0xf3, 0xe1, 0xad, |
| 0xe6, 0x4c, 0x73, 0x87, 0x13, 0x4c, 0xd8, 0x79, 0xb0, 0x5d, 0xfe, 0xde, 0xba, 0x36, 0x7e, 0x39, |
| 0xd5, 0xa0, 0xce, 0xe0, 0x7b, 0xdc, 0x8b, 0xb2, 0x33, 0x5d, 0x16, 0xa0, 0xe7, 0x21, 0x7d, 0xac, |
| 0x1b, 0x2e, 0x26, 0xac, 0xc2, 0x0b, 0x8a, 0xff, 0x4b, 0xba, 0x05, 0x3b, 0x2b, 0xb6, 0xc1, 0x9f, |
| 0x84, 0x36, 0xe8, 0x59, 0x5f, 0xa9, 0xf6, 0x9a, 0xf7, 0x65, 0xbf, 0x97, 0x38, 0x6a, 0xab, 0xfe, |
| 0xef, 0xb8, 0xf4, 0xb7, 0x18, 0x88, 0x7d, 0x7b, 0xa4, 0xb9, 0xf8, 0x99, 0x11, 0x8e, 0x3f, 0x0d, |
| 0xe1, 0xc4, 0x2a, 0xc2, 0x01, 0x53, 0x93, 0xcf, 0xc0, 0xd4, 0xbb, 0x90, 0xf3, 0x58, 0xac, 0xec, |
| 0x6f, 0x86, 0xfe, 0x91, 0x7b, 0xb6, 0x95, 0xaf, 0xeb, 0xd8, 0x18, 0xdd, 0xd3, 0x9c, 0x87, 0xdc, |
| 0x14, 0xb8, 0x0d, 0x15, 0x48, 0x04, 0xc4, 0xd3, 0x25, 0x77, 0x6c, 0xcb, 0x74, 0x30, 0xba, 0x05, |
| 0xc9, 0xa9, 0x35, 0xe0, 0x8f, 0xad, 0x91, 0x22, 0x52, 0xd8, 0x78, 0x74, 0x03, 0x76, 0x4c, 0xfc, |
| 0xa1, 0xab, 0x2e, 0xb0, 0x87, 0x17, 0x8f, 0x2d, 0x2a, 0xee, 0x04, 0x0c, 0x92, 0x2c, 0x10, 0xab, |
| 0x9a, 0x39, 0xc4, 0xc6, 0x17, 0xb5, 0x87, 0x2c, 0x10, 0x6b, 0xd8, 0xc0, 0x97, 0x80, 0xf4, 0x62, |
| 0x13, 0x96, 0xff, 0x2d, 0xb0, 0xba, 0xe8, 0x9f, 0x12, 0x06, 0x26, 0xe8, 0x37, 0x31, 0x10, 0xc2, |
| 0x22, 0x87, 0x4a, 0xeb, 0x8e, 0xfc, 0x95, 0x72, 0x58, 0x88, 0x00, 0x82, 0xf4, 0xc3, 0xcf, 0x2b, |
| 0xd7, 0x4e, 0x53, 0x7a, 0x65, 0x8f, 0xc7, 0xf9, 0xca, 0xde, 0xd4, 0x1a, 0xfc, 0xe8, 0xb3, 0xbf, |
| 0xfc, 0x24, 0x7e, 0x57, 0xba, 0x13, 0xfe, 0x61, 0xdc, 0x1f, 0xe7, 0x94, 0x3e, 0x3e, 0xb5, 0xf8, |
| 0xa4, 0xc4, 0x2d, 0x9c, 0xd2, 0xc7, 0xfc, 0xe3, 0x13, 0xf6, 0xf7, 0xf3, 0xdb, 0xfc, 0x74, 0xbd, |
| 0x1d, 0xbb, 0x89, 0x7e, 0x1d, 0x83, 0x34, 0x2f, 0x7d, 0x68, 0xdd, 0xa1, 0xb4, 0x54, 0x21, 0x23, |
| 0x85, 0x3f, 0xf8, 0xbc, 0x52, 0x78, 0x42, 0xf8, 0xaa, 0x3e, 0x62, 0x19, 0xbc, 0x8d, 0xde, 0x7a, |
| 0x96, 0x0c, 0x4a, 0x1f, 0x73, 0x1f, 0x9f, 0xa0, 0x3f, 0xc5, 0x20, 0x1b, 0xd0, 0x1d, 0x15, 0x2f, |
| 0x56, 0x8a, 0x0a, 0xa5, 0xc8, 0xe3, 0xf9, 0x3e, 0x92, 0xcc, 0xcf, 0x2b, 0xe8, 0x6c, 0x46, 0x4f, |
| 0x48, 0x93, 0x97, 0x2d, 0x96, 0xe6, 0x2d, 0xf4, 0xda, 0xb3, 0xa4, 0x89, 0x7e, 0x11, 0x03, 0x21, |
| 0x2c, 0x5d, 0x6b, 0x39, 0xb6, 0x5a, 0xe4, 0x22, 0x81, 0x74, 0xc8, 0x02, 0xac, 0x95, 0x2f, 0x87, |
| 0xc3, 0x6d, 0x56, 0xbc, 0x3e, 0x8d, 0x81, 0x10, 0xd6, 0x81, 0xb5, 0xe1, 0xae, 0x56, 0x8c, 0x48, |
| 0xe1, 0x1a, 0xeb, 0x39, 0xd5, 0x94, 0x6a, 0x97, 0xcb, 0x65, 0xc8, 0x02, 0xa3, 0xdb, 0xe3, 0x57, |
| 0x31, 0x10, 0xc2, 0x3a, 0xb3, 0x36, 0xa1, 0xd5, 0x8a, 0x54, 0x78, 0xfe, 0x4c, 0xdd, 0x96, 0x67, |
| 0xb6, 0x3b, 0x8f, 0xb4, 0x31, 0x6e, 0x5e, 0x0e, 0x90, 0xc2, 0xbb, 0xbf, 0xaf, 0x5c, 0x0b, 0x23, |
| 0xe4, 0x61, 0x68, 0xb6, 0xee, 0x14, 0x87, 0xd6, 0xec, 0x8f, 0x95, 0xe2, 0xc4, 0x75, 0x6d, 0xe7, |
| 0x76, 0xa9, 0xf4, 0xf8, 0xf1, 0xe3, 0x15, 0x65, 0x49, 0xf3, 0xdc, 0x09, 0xff, 0xdf, 0x37, 0xdf, |
| 0xb6, 0x0d, 0xcd, 0x3d, 0xb6, 0xc8, 0xec, 0xe0, 0x31, 0xbc, 0x34, 0xb4, 0x66, 0x4f, 0x4f, 0xff, |
| 0x40, 0xa0, 0x5b, 0xa5, 0x43, 0x33, 0xed, 0xc4, 0x3e, 0x90, 0xfd, 0xb1, 0x63, 0xcb, 0xd0, 0xcc, |
| 0x71, 0xd1, 0x22, 0xe3, 0xd2, 0x18, 0x9b, 0x6c, 0x1d, 0x4a, 0xa7, 0x13, 0x3e, 0xe1, 0x7f, 0xf9, |
| 0xdc, 0x09, 0x04, 0x83, 0x34, 0xb3, 0xf8, 0xce, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x12, 0x5b, |
| 0x36, 0x2b, 0x16, 0x24, 0x00, 0x00, |
| } |
| |
| // Reference imports to suppress errors if they are not otherwise used. |
| var _ context.Context |
| var _ grpc.ClientConnInterface |
| |
| // This is a compile-time assertion to ensure that this generated file |
| // is compatible with the grpc package it is being compiled against. |
| const _ = grpc.SupportPackageIsVersion6 |
| |
| // JobControllerClient is the client API for JobController service. |
| // |
| // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. |
| type JobControllerClient interface { |
| // Submits a job to a cluster. |
| SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) |
| // Gets the resource representation for a job in a project. |
| GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) |
| // Lists regions/{region}/jobs in a project. |
| ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) |
| // Updates a job in a project. |
| UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) |
| // Starts a job cancellation request. To access the job resource |
| // after cancellation, call |
| // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) |
| // or |
| // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). |
| CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) |
| // Deletes the job from the project. If the job is active, the delete fails, |
| // and the response returns `FAILED_PRECONDITION`. |
| DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error) |
| } |
| |
| type jobControllerClient struct { |
| cc grpc.ClientConnInterface |
| } |
| |
| func NewJobControllerClient(cc grpc.ClientConnInterface) JobControllerClient { |
| return &jobControllerClient{cc} |
| } |
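| |
| // Editor's sketch (not generated code): wiring a client from a dialed |
| // connection. The endpoint is an assumption, and grpc.WithInsecure is for |
| // illustration only; real callers should supply transport credentials. |
| func exampleDialJobController(ctx context.Context) (JobControllerClient, error) { |
| conn, err := grpc.DialContext(ctx, "dataproc.googleapis.com:443", grpc.WithInsecure()) |
| if err != nil { |
| return nil, err |
| } |
| return NewJobControllerClient(conn), nil |
| } |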
| |
| func (c *jobControllerClient) SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/GetJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { |
| out := new(ListJobsResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/ListJobs", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) { |
| out := new(Job) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/CancelJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *jobControllerClient) DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| // JobControllerServer is the server API for JobController service. |
| type JobControllerServer interface { |
| // Submits a job to a cluster. |
| SubmitJob(context.Context, *SubmitJobRequest) (*Job, error) |
| // Gets the resource representation for a job in a project. |
| GetJob(context.Context, *GetJobRequest) (*Job, error) |
| // Lists regions/{region}/jobs in a project. |
| ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) |
| // Updates a job in a project. |
| UpdateJob(context.Context, *UpdateJobRequest) (*Job, error) |
| // Starts a job cancellation request. To access the job resource |
| // after cancellation, call |
| // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) |
| // or |
| // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). |
| CancelJob(context.Context, *CancelJobRequest) (*Job, error) |
| // Deletes the job from the project. If the job is active, the delete fails, |
| // and the response returns `FAILED_PRECONDITION`. |
| DeleteJob(context.Context, *DeleteJobRequest) (*empty.Empty, error) |
| } |
| |
| // UnimplementedJobControllerServer can be embedded to have forward compatible implementations. |
| type UnimplementedJobControllerServer struct { |
| } |
| |
| func (*UnimplementedJobControllerServer) SubmitJob(ctx context.Context, req *SubmitJobRequest) (*Job, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method SubmitJob not implemented") |
| } |
| func (*UnimplementedJobControllerServer) GetJob(ctx context.Context, req *GetJobRequest) (*Job, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method GetJob not implemented") |
| } |
| func (*UnimplementedJobControllerServer) ListJobs(ctx context.Context, req *ListJobsRequest) (*ListJobsResponse, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method ListJobs not implemented") |
| } |
| func (*UnimplementedJobControllerServer) UpdateJob(ctx context.Context, req *UpdateJobRequest) (*Job, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method UpdateJob not implemented") |
| } |
| func (*UnimplementedJobControllerServer) CancelJob(ctx context.Context, req *CancelJobRequest) (*Job, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method CancelJob not implemented") |
| } |
| func (*UnimplementedJobControllerServer) DeleteJob(ctx context.Context, req *DeleteJobRequest) (*empty.Empty, error) { |
| return nil, status.Errorf(codes.Unimplemented, "method DeleteJob not implemented") |
| } |
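| |
| // Editor's sketch (not generated code): embedding |
| // UnimplementedJobControllerServer keeps a custom server forward compatible; |
| // only overridden methods change behavior, and every other RPC still returns |
| // codes.Unimplemented. Register with: |
| // RegisterJobControllerServer(grpcServer, &stubJobServer{}) |
| type stubJobServer struct { |
| UnimplementedJobControllerServer |
| } |
| |
| func (s *stubJobServer) GetJob(ctx context.Context, req *GetJobRequest) (*Job, error) { |
| fmt.Printf("GetJob %s/%s/%s\n", req.GetProjectId(), req.GetRegion(), req.GetJobId()) |
| return nil, status.Errorf(codes.NotFound, "job %q not found", req.GetJobId()) |
| } |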
| |
| func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer) { |
| s.RegisterService(&_JobController_serviceDesc, srv) |
| } |
| |
| func _JobController_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(SubmitJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).SubmitJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).SubmitJob(ctx, req.(*SubmitJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(GetJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).GetJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/GetJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).GetJob(ctx, req.(*GetJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(ListJobsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).ListJobs(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/ListJobs", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).ListJobs(ctx, req.(*ListJobsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_UpdateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(UpdateJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).UpdateJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).UpdateJob(ctx, req.(*UpdateJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CancelJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).CancelJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/CancelJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).CancelJob(ctx, req.(*CancelJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _JobController_DeleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(DeleteJobRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(JobControllerServer).DeleteJob(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(JobControllerServer).DeleteJob(ctx, req.(*DeleteJobRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| var _JobController_serviceDesc = grpc.ServiceDesc{ |
| ServiceName: "google.cloud.dataproc.v1beta2.JobController", |
| HandlerType: (*JobControllerServer)(nil), |
| Methods: []grpc.MethodDesc{ |
| { |
| MethodName: "SubmitJob", |
| Handler: _JobController_SubmitJob_Handler, |
| }, |
| { |
| MethodName: "GetJob", |
| Handler: _JobController_GetJob_Handler, |
| }, |
| { |
| MethodName: "ListJobs", |
| Handler: _JobController_ListJobs_Handler, |
| }, |
| { |
| MethodName: "UpdateJob", |
| Handler: _JobController_UpdateJob_Handler, |
| }, |
| { |
| MethodName: "CancelJob", |
| Handler: _JobController_CancelJob_Handler, |
| }, |
| { |
| MethodName: "DeleteJob", |
| Handler: _JobController_DeleteJob_Handler, |
| }, |
| }, |
| Streams: []grpc.StreamDesc{}, |
| Metadata: "google/cloud/dataproc/v1beta2/jobs.proto", |
| } |