<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: SQL Server To Databricks Table Migration in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78447#M35529</link>
    <description>&lt;P&gt;I hope you are asking for a SQL version of the pyspark code ? Can you please explain the advantages of having it in SQL as compared to pyspark ? There are some options, best would be federated queries against sql server ? select as if it were a databricks table and write it to the target ? alternatively you could create a view against the sql server table and then use that as insert into the databricks table...but in my limited understanding (could be wrong)&amp;nbsp; all of it would basically get optimized similarly in the background and serves no additional benefits to be rewritten in SQL...&lt;/P&gt;</description>
    <pubDate>Fri, 12 Jul 2024 07:04:03 GMT</pubDate>
    <dc:creator>ranged_coop</dc:creator>
    <dc:date>2024-07-12T07:04:03Z</dc:date>
    <item>
      <title>SQL Server To Databricks Table Migration</title>
      <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78423#M35523</link>
      <description>&lt;P&gt;Hello,&lt;/P&gt;&lt;P&gt;Is there an equivalent SQL code for the following Pyspark code? I'm trying to copy a table from SQL Server to Databricks and save it as a managed delta table.&lt;/P&gt;&lt;LI-CODE lang="python"&gt;jdbcHostname = "your_sql_server_hostname"
jdbcPort = 1433
jdbcDatabase = "your_database_name"
jdbcUsername = "your_username"
jdbcPassword = "your_password"

# JDBC URL format for SQL Server
jdbcUrl = f"jdbc:sqlserver://{jdbcHostname}:{jdbcPort};database={jdbcDatabase}"

# Connection properties
connectionProperties = {
  "user" : jdbcUsername,
  "password" : jdbcPassword,
  "driver" : "com.microsoft.sqlserver.jdbc.SQLServerDriver"
}
df = spark.read.jdbc(url=jdbcUrl, table=query, properties=connectionProperties)
df.write.format("delta").mode("overwrite").saveAsTable("table_name")&lt;/LI-CODE&gt;</description>
      <pubDate>Thu, 11 Jul 2024 22:55:59 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78423#M35523</guid>
      <dc:creator>YS1</dc:creator>
      <dc:date>2024-07-11T22:55:59Z</dc:date>
    </item>
    <item>
      <title>Re: SQL Server To Databricks Table Migration</title>
      <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78447#M35529</link>
      <description>&lt;P&gt;I hope you are asking for a SQL version of the pyspark code ? Can you please explain the advantages of having it in SQL as compared to pyspark ? There are some options, best would be federated queries against sql server ? select as if it were a databricks table and write it to the target ? alternatively you could create a view against the sql server table and then use that as insert into the databricks table...but in my limited understanding (could be wrong)&amp;nbsp; all of it would basically get optimized similarly in the background and serves no additional benefits to be rewritten in SQL...&lt;/P&gt;</description>
      <pubDate>Fri, 12 Jul 2024 07:04:03 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78447#M35529</guid>
      <dc:creator>ranged_coop</dc:creator>
      <dc:date>2024-07-12T07:04:03Z</dc:date>
    </item>
    <item>
      <title>Re: SQL Server To Databricks Table Migration</title>
      <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78449#M35530</link>
      <description>&lt;P&gt;The only option to have it in Databricks SQL is lakehouse federation with a SQL Server connection.&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Fri, 12 Jul 2024 07:08:47 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78449#M35530</guid>
      <dc:creator>jacovangelder</dc:creator>
      <dc:date>2024-07-12T07:08:47Z</dc:date>
    </item>
    <item>
      <title>Re: SQL Server To Databricks Table Migration</title>
      <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78869#M35626</link>
      <description>&lt;P&gt;&lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/47980"&gt;@ranged_coop&lt;/a&gt;&amp;nbsp;Yes, I'm asking for a SQL version of my Pyspark code. The only reason is to give it to a person who only codes in SQL, which would make it easier for them to understand. Thanks for the suggested solution!&lt;/P&gt;</description>
      <pubDate>Mon, 15 Jul 2024 20:19:21 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78869#M35626</guid>
      <dc:creator>YS1</dc:creator>
      <dc:date>2024-07-15T20:19:21Z</dc:date>
    </item>
    <item>
      <title>Re: SQL Server To Databricks Table Migration</title>
      <link>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78870#M35627</link>
      <description>&lt;P&gt;Thank you&amp;nbsp;&lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/102253"&gt;@jacovangelder&lt;/a&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Mon, 15 Jul 2024 20:19:51 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/sql-server-to-databricks-table-migration/m-p/78870#M35627</guid>
      <dc:creator>YS1</dc:creator>
      <dc:date>2024-07-15T20:19:51Z</dc:date>
    </item>
  </channel>
</rss>

