2 changes: 1 addition & 1 deletion .codegen/_openapi_sha
@@ -1 +1 @@
8f5eedbc991c4f04ce1284406577b0c92d59a224
8ed8c222c5478fb2513d4e23a2c451480b1f53cc
6,367 changes: 3,191 additions & 3,176 deletions .gitattributes

Large diffs are not rendered by default.

12 changes: 12 additions & 0 deletions NEXT_CHANGELOG.md
@@ -13,3 +13,15 @@
### Internal Changes

### API Changes
* Add `workspaceClient.workspaceEntityTagAssignments()` service.
* Add `clone()` method for `workspaceClient.pipelines()` service.
* Add `datasetCatalog` and `datasetSchema` fields for `com.databricks.sdk.service.dashboards.CreateDashboardRequest`.
* Add `datasetCatalog` and `datasetSchema` fields for `com.databricks.sdk.service.dashboards.UpdateDashboardRequest`.
* Add `purgeData` field for `com.databricks.sdk.service.database.DeleteSyncedDatabaseTableRequest`.
* Add `cronSchedule` field for `com.databricks.sdk.service.ml.MaterializedFeature`.
* Add `truncation` field for `com.databricks.sdk.service.pipelines.PipelineEvent`.
* Add `gcpServiceAccount` field for `com.databricks.sdk.service.provisioning.CreateGcpKeyInfo`.
* Add `gcpServiceAccount` field for `com.databricks.sdk.service.provisioning.GcpKeyInfo`.
* Add `FOREIGN_TABLE` and `VOLUME` enum values for `com.databricks.sdk.service.sharing.SharedDataObjectDataObjectType`.
* [Breaking] Change `timeWindow` field for `com.databricks.sdk.service.ml.Feature` to no longer be required.
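
A minimal sketch of reaching the new workspace-level surfaces from a `WorkspaceClient`. The two service accessor names come from the entries above; the commented-out `clone()` call uses a hypothetical request shape, since the method's signature is not part of this diff.

```java
import com.databricks.sdk.WorkspaceClient;

public class NewSurfacesSketch {
  public static void main(String[] args) {
    // Authenticates via the SDK's usual environment/config chain.
    WorkspaceClient w = new WorkspaceClient();

    // New service accessor from the changelog; the operations it exposes
    // are not shown in this diff.
    System.out.println(w.workspaceEntityTagAssignments());

    // New clone() method on the pipelines service -- hypothetical request
    // shape, check the generated class for the real signature:
    // w.pipelines().clone(new ClonePipelineRequest().setPipelineId("..."));
  }
}
```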

Some generated files are not rendered by default.

ConnectionType.java
@@ -4,7 +4,7 @@

import com.databricks.sdk.support.Generated;

/** Next Id: 48 */
/** Next Id: 50 */
@Generated
public enum ConnectionType {
BIGQUERY,
SecurableKind.java
@@ -4,7 +4,7 @@

import com.databricks.sdk.support.Generated;

/** Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274 */
/** Latest kind: CONNECTION_KAFKA_SASL = 279; Next id: 280 */
@Generated
public enum SecurableKind {
TABLE_DB_STORAGE,
ClusterAttributes.java
@@ -96,8 +96,7 @@ public class ClusterAttributes

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
ClusterDetails.java
@@ -160,8 +160,7 @@ public class ClusterDetails

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
ClusterSpec.java
@@ -110,8 +110,7 @@ public class ClusterSpec

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
CreateCluster.java
@@ -113,8 +113,7 @@ public class CreateCluster

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
EditCluster.java
@@ -110,8 +110,7 @@ public class EditCluster

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
UpdateClusterResource.java
@@ -99,8 +99,7 @@ public class UpdateClusterResource

/**
* Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
* space when its Spark workers are running low on disk space. This feature requires specific AWS
* permissions to function correctly - refer to the User Guide for more details.
* space when its Spark workers are running low on disk space.
*/
@JsonProperty("enable_elastic_disk")
private Boolean enableElasticDisk;
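
The same javadoc trim (dropping the AWS-permissions caveat) repeats across ClusterAttributes, ClusterDetails, ClusterSpec, CreateCluster, EditCluster, and UpdateClusterResource above. For context, a minimal sketch of setting the field on a create request; `setEnableElasticDisk` follows the generated setter pattern for the `enable_elastic_disk` property, and the other values are placeholders.

```java
import com.databricks.sdk.service.compute.CreateCluster;

public class ElasticDiskSketch {
  public static void main(String[] args) {
    // Autoscaling local storage: the cluster acquires extra disk space when
    // its Spark workers run low. Placeholder values throughout.
    CreateCluster spec =
        new CreateCluster()
            .setClusterName("elastic-disk-example")
            .setSparkVersion("13.3.x-scala2.12")
            .setNodeTypeId("i3.xlarge")
            .setNumWorkers(2L)
            .setEnableElasticDisk(true);
    System.out.println(spec);
  }
}
```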
CreateDashboardRequest.java
@@ -3,7 +3,9 @@
package com.databricks.sdk.service.dashboards;

import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;

@@ -13,6 +15,24 @@ public class CreateDashboardRequest {
@JsonProperty("dashboard")
private Dashboard dashboard;

/**
* Sets the default catalog for all datasets in this dashboard. Does not impact table references
* that use fully qualified catalog names (ex: samples.nyctaxi.trips). Leave blank to keep each
* dataset’s existing configuration.
*/
@JsonIgnore
@QueryParam("dataset_catalog")
private String datasetCatalog;

/**
* Sets the default schema for all datasets in this dashboard. Does not impact table references
* that use fully qualified schema names (ex: nyctaxi.trips). Leave blank to keep each dataset’s
* existing configuration.
*/
@JsonIgnore
@QueryParam("dataset_schema")
private String datasetSchema;

public CreateDashboardRequest setDashboard(Dashboard dashboard) {
this.dashboard = dashboard;
return this;
@@ -22,21 +42,45 @@ public Dashboard getDashboard() {
return dashboard;
}

public CreateDashboardRequest setDatasetCatalog(String datasetCatalog) {
this.datasetCatalog = datasetCatalog;
return this;
}

public String getDatasetCatalog() {
return datasetCatalog;
}

public CreateDashboardRequest setDatasetSchema(String datasetSchema) {
this.datasetSchema = datasetSchema;
return this;
}

public String getDatasetSchema() {
return datasetSchema;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateDashboardRequest that = (CreateDashboardRequest) o;
return Objects.equals(dashboard, that.dashboard);
return Objects.equals(dashboard, that.dashboard)
&& Objects.equals(datasetCatalog, that.datasetCatalog)
&& Objects.equals(datasetSchema, that.datasetSchema);
}

@Override
public int hashCode() {
return Objects.hash(dashboard);
return Objects.hash(dashboard, datasetCatalog, datasetSchema);
}

@Override
public String toString() {
return new ToStringer(CreateDashboardRequest.class).add("dashboard", dashboard).toString();
return new ToStringer(CreateDashboardRequest.class)
.add("dashboard", dashboard)
.add("datasetCatalog", datasetCatalog)
.add("datasetSchema", datasetSchema)
.toString();
}
}
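
A usage sketch of the two new request fields: the setters below are exactly the ones added in this diff, and since both are annotated `@QueryParam` they travel as `dataset_catalog`/`dataset_schema` query parameters rather than in the dashboard body. The `lakeview()` accessor and the `Dashboard` payload are assumptions about the surrounding service, not shown here; values echo the javadoc's samples.nyctaxi example.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.dashboards.CreateDashboardRequest;
import com.databricks.sdk.service.dashboards.Dashboard;

public class DashboardDatasetDefaultsSketch {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    // Default catalog/schema applied to every dataset in the dashboard;
    // fully qualified table references are unaffected.
    CreateDashboardRequest req =
        new CreateDashboardRequest()
            .setDashboard(new Dashboard().setDisplayName("NYC Taxi"))
            .setDatasetCatalog("samples")
            .setDatasetSchema("nyctaxi");

    // Assumed service accessor: dashboards are normally reached through the
    // Lakeview service on the workspace client; verify for your SDK version.
    w.lakeview().create(req);
  }
}
```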
UpdateDashboardRequest.java
@@ -3,6 +3,7 @@
package com.databricks.sdk.service.dashboards;

import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
@@ -17,6 +18,24 @@ public class UpdateDashboardRequest {
/** UUID identifying the dashboard. */
@JsonIgnore private String dashboardId;

/**
* Sets the default catalog for all datasets in this dashboard. Does not impact table references
* that use fully qualified catalog names (ex: samples.nyctaxi.trips). Leave blank to keep each
* dataset’s existing configuration.
*/
@JsonIgnore
@QueryParam("dataset_catalog")
private String datasetCatalog;

/**
* Sets the default schema for all datasets in this dashboard. Does not impact table references
* that use fully qualified schema names (ex: nyctaxi.trips). Leave blank to keep each dataset’s
* existing configuration.
*/
@JsonIgnore
@QueryParam("dataset_schema")
private String datasetSchema;

public UpdateDashboardRequest setDashboard(Dashboard dashboard) {
this.dashboard = dashboard;
return this;
@@ -35,25 +54,47 @@ public String getDashboardId() {
return dashboardId;
}

public UpdateDashboardRequest setDatasetCatalog(String datasetCatalog) {
this.datasetCatalog = datasetCatalog;
return this;
}

public String getDatasetCatalog() {
return datasetCatalog;
}

public UpdateDashboardRequest setDatasetSchema(String datasetSchema) {
this.datasetSchema = datasetSchema;
return this;
}

public String getDatasetSchema() {
return datasetSchema;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UpdateDashboardRequest that = (UpdateDashboardRequest) o;
return Objects.equals(dashboard, that.dashboard)
&& Objects.equals(dashboardId, that.dashboardId);
&& Objects.equals(dashboardId, that.dashboardId)
&& Objects.equals(datasetCatalog, that.datasetCatalog)
&& Objects.equals(datasetSchema, that.datasetSchema);
}

@Override
public int hashCode() {
return Objects.hash(dashboard, dashboardId);
return Objects.hash(dashboard, dashboardId, datasetCatalog, datasetSchema);
}

@Override
public String toString() {
return new ToStringer(UpdateDashboardRequest.class)
.add("dashboard", dashboard)
.add("dashboardId", dashboardId)
.add("datasetCatalog", datasetCatalog)
.add("datasetSchema", datasetSchema)
.toString();
}
}
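
The update request mirrors the create request. A brief sketch, reusing `w` and the assumed `lakeview()` accessor from the create example above; the dashboard ID is a placeholder.

```java
// Apply new dataset defaults to an existing dashboard.
w.lakeview()
    .update(
        new UpdateDashboardRequest()
            .setDashboardId("<dashboard-uuid>") // placeholder UUID
            .setDatasetCatalog("samples")
            .setDatasetSchema("nyctaxi"));
```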
DeleteSyncedDatabaseTableRequest.java
@@ -3,6 +3,7 @@
package com.databricks.sdk.service.database;

import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.Objects;
@@ -12,6 +13,11 @@ public class DeleteSyncedDatabaseTableRequest {
/** */
@JsonIgnore private String name;

/** Optional. When set to true, the actual PostgreSQL table will be dropped from the database. */
@JsonIgnore
@QueryParam("purge_data")
private Boolean purgeData;

public DeleteSyncedDatabaseTableRequest setName(String name) {
this.name = name;
return this;
@@ -21,21 +27,33 @@ public String getName() {
return name;
}

public DeleteSyncedDatabaseTableRequest setPurgeData(Boolean purgeData) {
this.purgeData = purgeData;
return this;
}

public Boolean getPurgeData() {
return purgeData;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DeleteSyncedDatabaseTableRequest that = (DeleteSyncedDatabaseTableRequest) o;
return Objects.equals(name, that.name);
return Objects.equals(name, that.name) && Objects.equals(purgeData, that.purgeData);
}

@Override
public int hashCode() {
return Objects.hash(name);
return Objects.hash(name, purgeData);
}

@Override
public String toString() {
return new ToStringer(DeleteSyncedDatabaseTableRequest.class).add("name", name).toString();
return new ToStringer(DeleteSyncedDatabaseTableRequest.class)
.add("name", name)
.add("purgeData", purgeData)
.toString();
}
}
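
A sketch of the new purge flag. The `setPurgeData` setter and its `purge_data` query-parameter mapping are from this diff; the `database()` accessor and `deleteSyncedDatabaseTable` method name are assumptions following the SDK's generated naming, and the table name is a placeholder.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.database.DeleteSyncedDatabaseTableRequest;

public class PurgeSyncedTableSketch {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    // purgeData = true also drops the underlying PostgreSQL table, per the
    // new field's javadoc; it is sent as ?purge_data=true.
    w.database()
        .deleteSyncedDatabaseTable(
            new DeleteSyncedDatabaseTableRequest()
                .setName("catalog.schema.my_synced_table")
                .setPurgeData(true));
  }
}
```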
CreateJob.java
@@ -129,7 +129,8 @@ public class CreateJob

/**
* The performance mode on a serverless job. This field determines the level of compute
* performance or cost-efficiency for the run.
* performance or cost-efficiency for the run. The performance target does not apply to tasks that
* run on Serverless GPU compute.
*
* <p>* `STANDARD`: Enables cost-efficient execution of serverless workloads. *
* `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
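
For reference, a sketch of choosing the performance mode on a new job. The `PerformanceTarget` enum values come from the javadoc above; the exact `setPerformanceTarget` setter name is an assumption following the SDK's generated pattern.

```java
import com.databricks.sdk.service.jobs.CreateJob;
import com.databricks.sdk.service.jobs.PerformanceTarget;

public class PerformanceModeSketch {
  public static void main(String[] args) {
    // Per the doc change above, this target is ignored for tasks running
    // on Serverless GPU compute.
    CreateJob job =
        new CreateJob()
            .setName("nightly-etl")
            .setPerformanceTarget(PerformanceTarget.PERFORMANCE_OPTIMIZED);
    System.out.println(job);
  }
}
```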