Below is the cluster policy JSON with all the fixes I have applied so far, but the issue still persists. The changes are highlighted in the comments.
{
  "cluster_type": {
    "type": "fixed",
    "value": "job"
  },
  "spark_conf.spark.databricks.cluster.profile": {
    "type": "forbidden",
    "hidden": true
  },
  "spark_version": {
    "type": "unlimited",
    "defaultValue": "auto:latest-lts"
  },
  // FIX #2 — node_type_id must be forbidden when using instance pools
  "node_type_id": {
    "type": "forbidden",  // <-- CHANGED: was "unlimited"
    "hidden": true        // <-- added to avoid UI prompts
  },
  "num_workers": {
    "type": "unlimited",
    "defaultValue": 4,
    "isOptional": true
  },
  "azure_attributes.availability": {
    "type": "unlimited",
    "defaultValue": "ON_DEMAND_AZURE"
  },
  // FIX #1 — spot_bid_max_price must NOT be hidden AND must be explicitly set
  "azure_attributes.spot_bid_max_price": {
    "type": "fixed",
    "value": -1,          // <-- same value, but valid only if exposed
    "hidden": false       // <-- CHANGED: was hidden by default
  },
  // Using your existing pool
  "instance_pool_id": {
    "type": "fixed",
    "value": "0716-064827-deft298-pool-xzsz66cy",
    "hidden": true
  },
  // Driver also uses the pool
  "driver_instance_pool_id": {
    "type": "fixed",
    "value": "0716-064827-deft298-pool-xzsz66cy",
    "hidden": true
  },
  "autoscale.min_workers": {
    "type": "unlimited",
    "defaultValue": 1
  },
  "autoscale.max_workers": {
    "type": "unlimited",
    "defaultValue": 6
  }
}
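One thing worth ruling out before digging into the policy semantics: the `//` comments above are for illustration only, and policy definitions must be strict JSON, so they need to be stripped before the definition is submitted to the Policies API or pasted into the policy editor. Below is a minimal Python sketch of that workflow; the workspace URL, token, policy ID, and policy name are all placeholders, and the definition is abbreviated. It strips the comments, validates the JSON locally, and pushes the definition through the `POST /api/2.0/policies/clusters/edit` endpoint, which expects the definition as a JSON string rather than a nested object.

import json
import re

import requests

# Placeholders: substitute your workspace URL, PAT token, and policy ID.
HOST = "https://adb-1234567890123456.7.azuredatabricks.net"
TOKEN = "dapi..."            # personal access token (assumed auth method)
POLICY_ID = "ABC123DEF456"   # hypothetical policy ID

# Abbreviated definition; paste the full policy JSON from above here.
raw_definition = """
{
  "cluster_type": { "type": "fixed", "value": "job" },
  // comments like this one must be stripped before submission
  "instance_pool_id": {
    "type": "fixed",
    "value": "0716-064827-deft298-pool-xzsz66cy",
    "hidden": true
  }
}
"""

# Naive comment strip: safe here because no string value in this
# policy contains "//", but a real JSON-with-comments parser is safer.
clean = re.sub(r"//[^\n]*", "", raw_definition)
definition = json.loads(clean)  # fails fast if the JSON is still invalid

resp = requests.post(
    f"{HOST}/api/2.0/policies/clusters/edit",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={
        "policy_id": POLICY_ID,
        "name": "job-pool-policy",  # hypothetical policy name
        # The API takes the definition as a JSON *string*, not an object.
        "definition": json.dumps(definition),
    },
)
resp.raise_for_status()
print("Policy updated")

If `json.loads` raises, its error message points at whatever is still syntactically invalid, which is a quick way to separate a malformed definition from a genuine policy-evaluation problem.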