I'm not sure what you are trying to achieve with this approach, but you can read the
source of a SQL Databricks notebook and extract the query in each cell using this script.
from databricks_api import DatabricksAPI
import base64
import re
# Authenticate against the same workspace this notebook is running in by
# reusing the current notebook's context (API URL + ephemeral token).
notebook_context = dbutils.notebook.entry_point.getDbutils().notebook().getContext()

databricks_api_instance = DatabricksAPI(
    host=notebook_context.apiUrl().getOrElse(None),
    token=notebook_context.apiToken().getOrElse(None),
)

# Export the target notebook in SOURCE format. The API returns the raw file
# contents base64-encoded under the "content" key.
# (f-string prefix removed: the literal contains no placeholders.)
response = databricks_api_instance.workspace.export_workspace(
    "<PATH OF NOTEBOOK A IN WORKSPACE>",
    format="SOURCE",
    direct_download=None,
    headers=None,
)
notebook_content = base64.b64decode(response["content"]).decode("utf-8")

# Lines to drop from the exported source: the "Databricks notebook source"
# header and any "-- MAGIC" (e.g. %md / %python) lines.
exclude = [".*Databricks notebook source", ".*MAGIC.*"]
regex = re.compile("|".join(exclude))

# Remove the excluded lines (use the compiled pattern's own .sub), split on
# the cell separator, and keep only non-empty cells — one SQL query per cell.
queries = [
    cell
    for cell in (
        raw.strip()
        for raw in regex.sub("", notebook_content).split("-- COMMAND ----------")
    )
    if cell
]

# Uncomment to run every extracted query and display its result:
# [spark.sql(query).display() for query in queries]