<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Writing data from Azure Databrick to Azure SQL Database in Administration &amp; Architecture</title>
    <link>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/137739#M4393</link>
    <description>&lt;P&gt;Good day,&lt;/P&gt;&lt;P&gt;We have a customer who wanted to query an Azure SQL Database from a Databricks notebook. We have configured a connection and a catalog for him and he is able to query the Azure SQL Database. Now he has a new request. He also wants to write data back to the Azure SQL Database and that seems not to be supported by federated queries.&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Schermafbeelding 2025-11-05 115405.jpg" style="width: 999px;"&gt;&lt;img src="https://community.databricks.com/t5/image/serverpage/image-id/21371iC8F4CC70F39AF608/image-size/large?v=v2&amp;amp;px=999" role="button" title="Schermafbeelding 2025-11-05 115405.jpg" alt="Schermafbeelding 2025-11-05 115405.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;&lt;P&gt;What is currently the best practice for writing data back to an Azure SQL Database from a Databricks notebook?&lt;/P&gt;&lt;P&gt;Kind Regards,&lt;BR /&gt;Marco&lt;/P&gt;</description>
    <pubDate>Wed, 05 Nov 2025 10:56:30 GMT</pubDate>
    <dc:creator>Marco37</dc:creator>
    <dc:date>2025-11-05T10:56:30Z</dc:date>
    <item>
      <title>Writing data from Azure Databrick to Azure SQL Database</title>
      <link>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/137739#M4393</link>
      <description>&lt;P&gt;Good day,&lt;/P&gt;&lt;P&gt;We have a customer who wanted to query an Azure SQL Database from a Databricks notebook. We have configured a connection and a catalog for him and he is able to query the Azure SQL Database. Now he has a new request. He also wants to write data back to the Azure SQL Database and that seems not to be supported by federated queries.&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Schermafbeelding 2025-11-05 115405.jpg" style="width: 999px;"&gt;&lt;img src="https://community.databricks.com/t5/image/serverpage/image-id/21371iC8F4CC70F39AF608/image-size/large?v=v2&amp;amp;px=999" role="button" title="Schermafbeelding 2025-11-05 115405.jpg" alt="Schermafbeelding 2025-11-05 115405.jpg" /&gt;&lt;/span&gt;&lt;/P&gt;&lt;P&gt;What is currently the best practice for writing data back to an Azure SQL Database from a Databricks notebook?&lt;/P&gt;&lt;P&gt;Kind Regards,&lt;BR /&gt;Marco&lt;/P&gt;</description>
      <pubDate>Wed, 05 Nov 2025 10:56:30 GMT</pubDate>
      <guid>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/137739#M4393</guid>
      <dc:creator>Marco37</dc:creator>
      <dc:date>2025-11-05T10:56:30Z</dc:date>
    </item>
    <item>
      <title>Re: Writing data from Azure Databrick to Azure SQL Database</title>
      <link>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/137744#M4395</link>
      <description>&lt;P&gt;Yes, &lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/114724"&gt;@Marco37&lt;/a&gt;&amp;nbsp;, you are right that currently, federated queries in Databricks only support reading data from external sources like Azure SQL Database—writing data back is not supported through that connection.One practice I'm aware of for writing data from a Databricks notebook to an Azure SQL Database is to use a JDBC connection. This allows you to write a Spark DataFrame directly into an Azure SQL table.Another option is to write data to ADLS (Azure Data Lake Storage), and then use ADF (Azure Data Factory) to load the data onto the Azure SQL DB.&lt;BR /&gt;&lt;BR /&gt;Hopefully, this helps you in your case.&lt;BR /&gt;Thanks!&lt;BR /&gt;&lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/186444"&gt;@Sat_8&lt;/a&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Wed, 05 Nov 2025 12:09:03 GMT</pubDate>
      <guid>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/137744#M4395</guid>
      <dc:creator>Sat_8</dc:creator>
      <dc:date>2025-11-05T12:09:03Z</dc:date>
    </item>
    <item>
      <title>Re: Writing data from Azure Databrick to Azure SQL Database</title>
      <link>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/138413#M4442</link>
      <description>&lt;P&gt;Thanks Sat_8&amp;nbsp;&lt;span class="lia-unicode-emoji" title=":slightly_smiling_face:"&gt;🙂&lt;/span&gt;&lt;/P&gt;</description>
      <pubDate>Mon, 10 Nov 2025 13:07:03 GMT</pubDate>
      <guid>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/138413#M4442</guid>
      <dc:creator>Marco37</dc:creator>
      <dc:date>2025-11-10T13:07:03Z</dc:date>
    </item>
    <item>
      <title>Re: Writing data from Azure Databrick to Azure SQL Database</title>
      <link>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/138422#M4443</link>
      <description>&lt;DIV&gt;&lt;DIV&gt;&lt;SPAN&gt;&lt;SPAN&gt;You can customize the below code, that makes use of Spark SQL Server access connector, as per your needs:&lt;/SPAN&gt;&lt;/SPAN&gt;&lt;DIV&gt;&amp;nbsp;&lt;/DIV&gt;&lt;/DIV&gt;&lt;/DIV&gt;&lt;LI-CODE lang="python"&gt;def PersistRemoteSQLTableFromDF(
    df: DataFrame,
    databaseName: str,
    tableName: str,
    mode: str = "overwrite",
    schemaName: str = "",
    tableLock: bool = True,
) -&amp;gt; None:
    """
    Persist dataframe into remote SQL table using the SQL Server Spark connector.

    Improvements:
      - input validation and clearer errors
      - tolerant handling of mode casing
      - avoids stray separators when joining schema and table names
      - normalizes schema handling
      - ensures tableLock is passed as string expected by the connector
    ARGS:
        - df (DataFrame): dataframe to persist
        - databaseName (str): database name (non-empty)
        - tableName (str): table name (non-empty)
        - mode (str): 'overwrite', 'append', 'ignore', 'error', 'errorifexists', 'default'
        - schemaName (str): optional schema name (no trailing dot required)
        - tableLock (bool): if True enables tableLock for better write performance
    RAISES:
        - TypeError, ValueError on invalid args
    """
    # Basic type checks
    if not isinstance(df, DataFrame):
        raise TypeError("Argument Error - 'df' must be a pyspark.sql.DataFrame")
    if not isinstance(databaseName, str) or not isinstance(tableName, str):
        raise TypeError(
            "Argument Error - 'databaseName' and 'tableName' must be strings"
        )

    # Normalize and validate string args
    mode = (mode or "").lower()
    allowed_modes = {
        "overwrite",
        "append",
        "ignore",
        "error",
        "errorifexists",
        "default",
    }
    if mode not in allowed_modes:
        raise ValueError(
            f"Argument Error - Mode '{mode}' is not valid. Accepted save modes: {sorted(allowed_modes)}"
        )

    databaseName = databaseName.strip()
    tableName = tableName.strip()
    if not databaseName or not tableName:
        raise ValueError(
            "Argument Error - 'databaseName' and 'tableName' must be non-empty"
        )

    # Normalize schema name (remove trailing dots)
    schemaName = (schemaName or "").strip().rstrip(".")

    dbtable = f"{schemaName + '.' if schemaName else ''}{tableName}"

    # tableLock option on connector expects "true"/"false" (string)
    tableLock_opt = "true" if bool(tableLock) else "false"

    try:
        (
            df.write.format("sqlserver")
            .mode(mode)
            .option("host", os.getenv("DEFAULT_SQL_SERVER_NAME"))
            .option("port", "1433")
            .option("database", databaseName)
            .option("dbtable", dbtable)
            .option("tableLock", tableLock_opt)
            .option("user", os.getenv("DEFAULT_SQL_SERVER_USERNAME"))
            .option("password", os.getenv("DEFAULT_SQL_SERVER_PASSWORD"))
            .save()
        )
    except Exception as exc:
        # Surface clearer context while preserving original exception
        raise RuntimeError(
            f"Failed to persist DataFrame to remote table '{dbtable}' in database '{databaseName}': {exc}"
        ) from exc&lt;/LI-CODE&gt;</description>
      <pubDate>Mon, 10 Nov 2025 14:06:18 GMT</pubDate>
      <guid>https://community.databricks.com/t5/administration-architecture/writing-data-from-azure-databrick-to-azure-sql-database/m-p/138422#M4443</guid>
      <dc:creator>Coffee77</dc:creator>
      <dc:date>2025-11-10T14:06:18Z</dc:date>
    </item>
  </channel>
</rss>

