<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>topic Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ? in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11672#M6616</link>
    <description>&lt;P&gt;@Raymund Beltran​&amp;nbsp;- So everything works as expected now? Is that right? If yes, would you be happy to mark your answer as best so others can find it easily?&lt;/P&gt;</description>
    <pubDate>Wed, 03 Nov 2021 20:04:55 GMT</pubDate>
    <dc:creator>Anonymous</dc:creator>
    <dc:date>2021-11-03T20:04:55Z</dc:date>
    <item>
      <title>Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11671#M6615</link>
      <description>&lt;P&gt;Using Databricks spark submit job, setting new cluster&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;1] "spark_version": "8.2.x-scala2.12" =&amp;gt; OK, works fine&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;2] "spark_version": "9.1.x-scala2.12" =&amp;gt; FAIL, with errors&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;Exception in thread "main" java.lang.ExceptionInInitializerError
	at com.databricks.backend.daemon.driver.WSFSCredentialForwardingHelper.withWSFSCredentials(WorkspaceLocalFileSystem.scala:156)
	at com.databricks.backend.daemon.driver.WSFSCredentialForwardingHelper.withWSFSCredentials$(WorkspaceLocalFileSystem.scala:155)
	at com.databricks.backend.daemon.driver.WorkspaceLocalFileSystem.withWSFSCredentials(WorkspaceLocalFileSystem.scala:30)
	at com.databricks.backend.daemon.driver.WorkspaceLocalFileSystem.getFileStatus(WorkspaceLocalFileSystem.scala:63)
	at org.apache.hadoop.fs.Globber.getFileStatus(Globber.java:57)
	at org.apache.hadoop.fs.Globber.glob(Globber.java:252)
	at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:1657)
	at org.apache.spark.deploy.DependencyUtils$.resolveGlobPath(DependencyUtils.scala:192)
	at org.apache.spark.deploy.DependencyUtils$.$anonfun$resolveGlobPaths$2(DependencyUtils.scala:147)
	at org.apache.spark.deploy.DependencyUtils$.$anonfun$resolveGlobPaths$2$adapted(DependencyUtils.scala:145)
	at scala.collection.TraversableLike.$anonfun$flatMap$1(TraversableLike.scala:245)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at scala.collection.TraversableLike.flatMap(TraversableLike.scala:245)
	at scala.collection.TraversableLike.flatMap$(TraversableLike.scala:242)
	at scala.collection.AbstractTraversable.flatMap(Traversable.scala:108)
	at org.apache.spark.deploy.DependencyUtils$.resolveGlobPaths(DependencyUtils.scala:145)
	at org.apache.spark.deploy.SparkSubmit.$anonfun$prepareSubmitEnvironment$4(SparkSubmit.scala:363)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.deploy.SparkSubmit.prepareSubmitEnvironment(SparkSubmit.scala:363)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:894)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1039)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1048)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.NullPointerException
	at com.databricks.backend.daemon.driver.WsfsDriverHttpClient.&amp;lt;init&amp;gt;(WSFSDriverHttpClient.scala:26)
	at com.databricks.backend.daemon.driver.WSFSCredentialForwardingHelper$.&amp;lt;init&amp;gt;(WorkspaceLocalFileSystem.scala:277)
	at com.databricks.backend.daemon.driver.WSFSCredentialForwardingHelper$.&amp;lt;clinit&amp;gt;(WorkspaceLocalFileSystem.scala)
	... 28 more&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Wed, 03 Nov 2021 17:31:22 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11671#M6615</guid>
      <dc:creator>raymund</dc:creator>
      <dc:date>2021-11-03T17:31:22Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11672#M6616</link>
      <description>&lt;P&gt;@Raymund Beltran​&amp;nbsp;- So everything works as expected now? Is that right? If yes, would you be happy to mark your answer as best so others can find it easily?&lt;/P&gt;</description>
      <pubDate>Wed, 03 Nov 2021 20:04:55 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11672#M6616</guid>
      <dc:creator>Anonymous</dc:creator>
      <dc:date>2021-11-03T20:04:55Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11673#M6617</link>
      <description>&lt;P&gt;@Piper Wilson​&amp;nbsp; I removed the comment that it's working. The issue still exists with 9.1: the Kafka streaming jars don't exist. It throws an error when Kafka streaming jars are not provided and the PySpark code uses Kafka streaming. When the Kafka streaming jars are explicitly provided in PySpark packages, it throws the same error as the original issue above&lt;/P&gt;</description>
      <pubDate>Mon, 08 Nov 2021 15:37:33 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11673#M6617</guid>
      <dc:creator>raymund</dc:creator>
      <dc:date>2021-11-08T15:37:33Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11674#M6618</link>
      <description>&lt;P&gt;@Raymund Beltran​&amp;nbsp;- Thanks for letting us know. Let's see what the community has to say about this. We'll circle back if we need to. &lt;/P&gt;</description>
      <pubDate>Mon, 08 Nov 2021 16:18:27 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11674#M6618</guid>
      <dc:creator>Anonymous</dc:creator>
      <dc:date>2021-11-08T16:18:27Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11675#M6619</link>
      <description>&lt;P&gt;Additional info:&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Using databricks spark-submit api with pyspark&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;"spark_submit_task": {&lt;/P&gt;&lt;P&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;"parameters": [&lt;/P&gt;&lt;P&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;"--packages",&lt;/P&gt;&lt;P&gt;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;"org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1",&lt;/P&gt;&lt;P&gt;....&lt;/P&gt;</description>
      <pubDate>Tue, 09 Nov 2021 20:39:49 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11675#M6619</guid>
      <dc:creator>raymund</dc:creator>
      <dc:date>2021-11-09T20:39:49Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11676#M6620</link>
      <description>&lt;P&gt;Thank you. I'm passing the information on. Thanks for your patience!&lt;/P&gt;</description>
      <pubDate>Tue, 09 Nov 2021 21:12:03 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11676#M6620</guid>
      <dc:creator>Anonymous</dc:creator>
      <dc:date>2021-11-09T21:12:03Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11677#M6621</link>
      <description>&lt;P&gt;this has been resolved by adding the following spark_conf (not thru --conf)&lt;/P&gt;&lt;P&gt;&amp;nbsp;"spark.hadoop.fs.file.impl": "org.apache.hadoop.fs.LocalFileSystem"&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;example:&lt;/P&gt;&lt;P&gt;------&lt;/P&gt;&lt;P&gt;"new_cluster": {&lt;/P&gt;&lt;P&gt;  "spark_version": "9.1.x-scala2.12",&lt;/P&gt;&lt;P&gt;   ...&lt;/P&gt;&lt;P&gt;  "spark_conf": {&lt;/P&gt;&lt;P&gt;            "spark.hadoop.fs.file.impl": "org.apache.hadoop.fs.LocalFileSystem"&lt;/P&gt;&lt;P&gt;  }&lt;/P&gt;&lt;P&gt;},&lt;/P&gt;&lt;P&gt;"spark_submit_task": {&lt;/P&gt;&lt;P&gt;        "parameters": [&lt;/P&gt;&lt;P&gt;            "--packages",&lt;/P&gt;&lt;P&gt;            "org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1",&lt;/P&gt;&lt;P&gt;...&lt;/P&gt;&lt;P&gt;------------&lt;/P&gt;</description>
      <pubDate>Wed, 10 Nov 2021 22:14:42 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11677#M6621</guid>
      <dc:creator>raymund</dc:creator>
      <dc:date>2021-11-10T22:14:42Z</dc:date>
    </item>
    <item>
      <title>Re: Why adding the package 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1' failed in runtime 9.1.x-scala2.12 but was successful using runtime 8.2.x-scala2.12 ?</title>
      <link>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11678#M6622</link>
      <description>&lt;P&gt;Thank you for sharing the solution to this issue. I think I saw another question with the same error message. &lt;/P&gt;</description>
      <pubDate>Thu, 18 Nov 2021 18:01:43 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/why-adding-the-package-org-apache-spark-spark-sql-kafka-0-10-2/m-p/11678#M6622</guid>
      <dc:creator>jose_gonzalez</dc:creator>
      <dc:date>2021-11-18T18:01:43Z</dc:date>
    </item>
  </channel>
</rss>

