<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>topic Encapsulate Databricks Pyspark/SparkSql code in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29244#M20990</link>
    <description>&lt;P&gt;Hi All ,&lt;/P&gt;&lt;P&gt;I have Custom code ( Pyspark &amp;amp; SparkSQL) (notebooks) which I want to deploy at customer location and  encapsulate so that end customers don't see the actual code. Currently we have all code in Notebooks (Pyspark/spark sql). Could you please let me know&lt;/P&gt;&lt;P&gt;1)  if there is any way to run the code as executables/jars.&lt;/P&gt;&lt;P&gt;2) How to convert the code as executables.&lt;/P&gt;&lt;P&gt;Thanks a lot for the help.&lt;/P&gt;</description>
    <pubDate>Mon, 07 Feb 2022 12:53:18 GMT</pubDate>
    <dc:creator>Databricks_7045</dc:creator>
    <dc:date>2022-02-07T12:53:18Z</dc:date>
    <item>
      <title>Encapsulate Databricks Pyspark/SparkSql code</title>
      <link>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29244#M20990</link>
      <description>&lt;P&gt;Hi All ,&lt;/P&gt;&lt;P&gt;I have Custom code ( Pyspark &amp;amp; SparkSQL) (notebooks) which I want to deploy at customer location and  encapsulate so that end customers don't see the actual code. Currently we have all code in Notebooks (Pyspark/spark sql). Could you please let me know&lt;/P&gt;&lt;P&gt;1)  if there is any way to run the code as executables/jars.&lt;/P&gt;&lt;P&gt;2) How to convert the code as executables.&lt;/P&gt;&lt;P&gt;Thanks a lot for the help.&lt;/P&gt;</description>
      <pubDate>Mon, 07 Feb 2022 12:53:18 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29244#M20990</guid>
      <dc:creator>Databricks_7045</dc:creator>
      <dc:date>2022-02-07T12:53:18Z</dc:date>
    </item>
    <item>
      <title>Re: Encapsulate Databricks Pyspark/SparkSql code</title>
      <link>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29246#M20992</link>
      <description>&lt;P&gt;With notebooks that is not possible.&lt;/P&gt;&lt;P&gt;You can write your code in scala/java and build a jar, which you then run with spark-submit.&lt;/P&gt;&lt;P&gt;(&lt;A href="https://mungingdata.com/apache-spark/building-jar-sbt/" alt="https://mungingdata.com/apache-spark/building-jar-sbt/" target="_blank"&gt;example&lt;/A&gt;)&lt;/P&gt;&lt;P&gt;Or use python and deploy a wheel.&lt;/P&gt;&lt;P&gt;(&lt;A href="https://mungingdata.com/pyspark/poetry-dependency-management-wheel/" alt="https://mungingdata.com/pyspark/poetry-dependency-management-wheel/" target="_blank"&gt;example&lt;/A&gt;)&lt;/P&gt;&lt;P&gt;This can become quite complex when you have dependencies.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Also: a jar etc can be decompiled rendering your usecase useless.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Tue, 08 Feb 2022 10:33:56 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29246#M20992</guid>
      <dc:creator>-werners-</dc:creator>
      <dc:date>2022-02-08T10:33:56Z</dc:date>
    </item>
    <item>
      <title>Re: Encapsulate Databricks Pyspark/SparkSql code</title>
      <link>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29247#M20993</link>
      <description>&lt;P&gt;Hi , Thanks for the reply . Is it possible to restrict the view access by some premium plan ? please let me know . Thank You&lt;/P&gt;</description>
      <pubDate>Tue, 08 Feb 2022 10:48:08 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29247#M20993</guid>
      <dc:creator>Databricks_7045</dc:creator>
      <dc:date>2022-02-08T10:48:08Z</dc:date>
    </item>
    <item>
      <title>Re: Encapsulate Databricks Pyspark/SparkSql code</title>
      <link>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29248#M20994</link>
      <description>&lt;P&gt;yes certainly.&lt;/P&gt;&lt;P&gt;You can put permissions on notebooks and folders.&lt;/P&gt;&lt;P&gt;But probably the customer will be admin of the databricks workspace.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Tue, 08 Feb 2022 10:50:11 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/encapsulate-databricks-pyspark-sparksql-code/m-p/29248#M20994</guid>
      <dc:creator>-werners-</dc:creator>
      <dc:date>2022-02-08T10:50:11Z</dc:date>
    </item>
  </channel>
</rss>

