<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic spark properties files in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/spark-properties-files/m-p/24610#M17131</link>
    <description>&lt;P&gt;I am trying to migrate a Spark job from an on-premises Hadoop cluster to Databricks on Azure. Currently, we are keeping many values in the properties file. When executing spark-submit we pass the parameter --properties /prop.file.txt, and inside the Spark code we use spark.conf.get("spark.param1") to get individual parameter values. How can we implement a properties file in a Databricks notebook?&lt;/P&gt;</description>
    <pubDate>Fri, 25 Mar 2022 18:04:54 GMT</pubDate>
    <dc:creator>dataguy73</dc:creator>
    <dc:date>2022-03-25T18:04:54Z</dc:date>
    <item>
      <title>spark properties files</title>
      <link>https://community.databricks.com/t5/data-engineering/spark-properties-files/m-p/24610#M17131</link>
      <description>&lt;P&gt;I am trying to migrate a Spark job from an on-premises Hadoop cluster to Databricks on Azure. Currently, we are keeping many values in the properties file. When executing spark-submit we pass the parameter --properties /prop.file.txt, and inside the Spark code we use spark.conf.get("spark.param1") to get individual parameter values. How can we implement a properties file in a Databricks notebook?&lt;/P&gt;</description>
      <pubDate>Fri, 25 Mar 2022 18:04:54 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/spark-properties-files/m-p/24610#M17131</guid>
      <dc:creator>dataguy73</dc:creator>
      <dc:date>2022-03-25T18:04:54Z</dc:date>
    </item>
    <item>
      <title>Re: spark properties files</title>
      <link>https://community.databricks.com/t5/data-engineering/spark-properties-files/m-p/24611#M17132</link>
      <description>&lt;P&gt;I use JSON files and .conf files which reside on the data lake or in the FileStore of DBFS.&lt;/P&gt;&lt;P&gt;Then read those files using Python/Scala.&lt;/P&gt;</description>
      <pubDate>Mon, 28 Mar 2022 08:46:22 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/spark-properties-files/m-p/24611#M17132</guid>
      <dc:creator>-werners-</dc:creator>
      <dc:date>2022-03-28T08:46:22Z</dc:date>
    </item>
  </channel>
</rss>

