Thanks @szymon_dybczak - this worked perfectly. Here's what I ended up throwing together to pull out the details I was after:
import requests

from databricks.sdk import WorkspaceClient
from pyspark.sql import Row

# Let the SDK resolve workspace auth, then reuse its host and
# auth headers for plain requests calls
wc = WorkspaceClient()
DATABRICKS_HOST = wc.config.host
headers = wc.config.authenticate()

# List the pipelines in the workspace
url = f"{DATABRICKS_HOST}/api/2.0/pipelines"
response = requests.get(url, headers=headers)
payload = response.json()
pipeline_ids = [item.get("pipeline_id") for item in payload.get("statuses", []) if "pipeline_id" in item]
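One caveat: /api/2.0/pipelines is paginated, so a single GET only returns the first page of statuses. If you have more pipelines than fit on one page, collecting them all looks roughly like this (a sketch assuming the standard page_token / next_page_token contract on that endpoint):

all_statuses = []
page_token = None
while True:
    # Pass the token from the previous page, if any
    params = {"page_token": page_token} if page_token else {}
    resp = requests.get(url, headers=headers, params=params)
    resp.raise_for_status()
    body = resp.json()
    all_statuses.extend(body.get("statuses", []))
    page_token = body.get("next_page_token")
    if not page_token:
        break
pipeline_ids = [s["pipeline_id"] for s in all_statuses if "pipeline_id" in s]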
pipeline_payloads = []
# Fetch the full definition for each pipeline
for pid in pipeline_ids:
    detail_url = f"{DATABRICKS_HOST}/api/2.0/pipelines/{pid}"
    detail_response = requests.get(detail_url, headers=headers)
    detail_payload = detail_response.json()
    pipeline_payloads.append(detail_payload)
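If you run this against a busy workspace, it's worth guarding the detail calls: a pipeline deleted between the list and the detail request returns a non-200, and .json() would happily append the error body. A small variant of the loop that skips failed lookups (nothing Databricks-specific here, just the standard requests status check):

pipeline_payloads = []
for pid in pipeline_ids:
    detail_response = requests.get(f"{DATABRICKS_HOST}/api/2.0/pipelines/{pid}", headers=headers)
    if detail_response.status_code != 200:
        # Skip pipelines we can't read rather than storing an error payload
        print(f"Skipping {pid}: HTTP {detail_response.status_code}")
        continue
    pipeline_payloads.append(detail_response.json())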
# Flatten each payload into a Row for the summary DataFrame
rows = [
    Row(
        pipeline_id=detail.get("pipeline_id"),
        name=detail.get("spec", {}).get("name"),
        ingestion_gateway_id=detail.get("spec", {}).get("ingestion_definition", {}).get("ingestion_gateway_id"),
    )
    for detail in pipeline_payloads
]
df_pipelines = spark.createDataFrame(rows)
display(df_pipelines)
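For what it's worth, the SDK can do all of this without hand-rolling the REST calls. A sketch using wc.pipelines (I'm assuming a recent databricks-sdk where PipelineSpec exposes ingestion_definition; field names may differ by version):

from databricks.sdk import WorkspaceClient
from pyspark.sql import Row

wc = WorkspaceClient()
rows = []
# list_pipelines() handles pagination and yields PipelineStateInfo objects
for p in wc.pipelines.list_pipelines():
    detail = wc.pipelines.get(pipeline_id=p.pipeline_id)
    spec = detail.spec
    ingestion = spec.ingestion_definition if spec else None
    rows.append(Row(
        pipeline_id=p.pipeline_id,
        name=spec.name if spec else None,
        ingestion_gateway_id=ingestion.ingestion_gateway_id if ingestion else None,
    ))
display(spark.createDataFrame(rows))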