Actually it works: I declared all my cluster configs in the `variables` field.

```yaml
permissions:
  - level: CAN_VIEW
    group_name: data-engineer
  - level: CAN_RUN
    group_name: data-engineer
  - level: CAN_MANAGE
    group_name: data-engineer
  - level: CAN_RUN
    user_name: toto@company.com
variables:
  environnement:
    default: dev
  job_policy_id:
    default:
  notification_emails:
    default:
      - toto@company.com
  sp-cicd-dev-id:
    default:
  sp-cicd-qa-id:
    default:
  sp-cicd-preprod-id:
    default:
  sp-cicd-prod-id:
    default:
  databricks_utils:
    default: ""
  personal_compute:
    default:
  use_autoloader:
    default: true
  xs_cluster:
    description: "very small cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 1
      node_type_id: m5d.large
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
  s_cluster:
    description: "small cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 2
      node_type_id: m5d.large
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
  m_cluster:
    description: "medium cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 1
      node_type_id: m5d.xlarge
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
  l_cluster:
    description: "large cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 2
      node_type_id: m5d.xlarge
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
  xl_cluster:
    description: "extra large cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 4
      node_type_id: m5d.xlarge
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
  xxl_cluster:
    description: "extra extra large cluster"
    type: complex
    default:
      spark_version: "16.4.x-scala2.12"
      spark_conf:
        spark.sql.sources.partitionOverwriteMode: dynamic
        delta.autoOptimize.optimizeWrite: true
        delta.autoOptimize.autoCompact: true
        spark.sql.streaming.stateStore.providerClass: org.apache.spark.sql.execution.streaming.state.RocksDBStateStoreProvider
        spark.sql.streaming.stateStore.rocksdb.changelogCheckpointing.enabled: true
      aws_attributes:
        first_on_demand: 1
        # availability: SPOT_WITH_FALLBACK
      num_workers: 4
      node_type_id: m5d.2xlarge
      driver_node_type_id: m5d.large
      data_security_mode: SINGLE_USER
      runtime_engine: STANDARD
      single_user_name: ${workspace.current_user.userName}
      policy_id: ${var.job_policy_id}
targets:
  dev:
    # The default target uses 'mode: development' to create a development copy.
    # - Deployed resources get prefixed with '[dev my_user_name]'
    # - Any job schedules and triggers are paused by default.
    # See also https://docs.databricks.com/dev-tools/bundles/deployment-modes.html.
    mode: development
    default: true
    workspace:
      host:
      root_path: /Workspace/Users/${workspace.current_user.userName}/.bundle/${bundle.name}/${bundle.target}
    variables:
      environnement: dev
      job_policy_id:
    permissions:
      - level: CAN_MANAGE
        user_name: ${workspace.current_user.userName}
      - level: CAN_VIEW
        service_principal_name: ${var.sp-cicd-dev-id}
      - level: CAN_MANAGE
        service_principal_name: ${var.sp-cicd-dev-id}
      - level: CAN_RUN
        user_name: ${workspace.current_user.userName}
      - level: CAN_VIEW
        user_name: ${workspace.current_user.userName}
    run_as:
      user_name: ${workspace.current_user.userName}
    sync:
      include:
        - config/
```
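
For reference, here is a minimal sketch of how one of these complex variables gets consumed in a job definition; the job name, task key, and notebook path below are placeholders, not from my actual bundle:

```yaml
resources:
  jobs:
    example_job:  # hypothetical job name, for illustration only
      name: example_job_${var.environnement}
      tasks:
        - task_key: ingest  # placeholder task key
          # The entire cluster spec is injected from the complex variable
          new_cluster: ${var.xs_cluster}
          notebook_task:
            notebook_path: ../src/ingest.py  # placeholder path
      email_notifications:
        on_failure: ${var.notification_emails}
```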
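
And if another environment needs a different size, you can override the whole complex variable under that target. As far as I know, a target-level assignment replaces the entire default value rather than merging with it, so repeat every field you need; the `prod` target below is just an illustration:

```yaml
targets:
  prod:
    mode: production
    variables:
      environnement: prod
      xs_cluster:  # full replacement of the default, not a merge
        spark_version: "16.4.x-scala2.12"
        num_workers: 2  # bumped from 1 in dev
        node_type_id: m5d.large
        driver_node_type_id: m5d.large
        data_security_mode: SINGLE_USER
        runtime_engine: STANDARD
        policy_id: ${var.job_policy_id}
```

Running `databricks bundle validate -t dev` before deploying confirms that all the `${var.*}` references resolve.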