<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: Error writing data to Google Bigquery in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/81774#M36410</link>
    <description>&lt;P&gt;&lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/33963"&gt;@Prabakar&lt;/a&gt;&amp;nbsp;Hello, Can you give me a sample specifically for writing in Pyspark? I'm also getting the same error.&lt;/P&gt;</description>
    <pubDate>Sun, 04 Aug 2024 22:10:19 GMT</pubDate>
    <dc:creator>ambar2595</dc:creator>
    <dc:date>2024-08-04T22:10:19Z</dc:date>
    <item>
      <title>Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14685#M9149</link>
      <description>&lt;P&gt;Hello, I'm facing some problems while writing data to Google BigQuery. I'm able to read data from the same table, but when I try to append data I get the following error.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;B&gt;Error getting access token from metadata server at: &lt;A href="http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" target="test_blank"&gt;http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token&lt;/A&gt;&lt;/B&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;df.write.format("bigquery").mode("append") \
&amp;nbsp;
 .option("credentialsFile", "credentialfile.json") \
&amp;nbsp;
 .option("temporaryGcsBucket", "gs://bucketname/")\
&amp;nbsp;
 .option("parentProject", "projectId") \
&amp;nbsp;
 .option("project", "project") \
&amp;nbsp;
 .option("table","tablename") \
&amp;nbsp;
 .save()&lt;/CODE&gt;&lt;/PRE&gt;&lt;PRE&gt;&lt;CODE&gt;java.io.IOException: Error getting access token from metadata server at: &lt;A href="http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" target="test_blank"&gt;http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token&lt;/A&gt;
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.util.CredentialFactory.getCredentialFromMetadataServiceAccount(CredentialFactory.java:259)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.util.CredentialFactory.getCredential(CredentialFactory.java:477)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.getCredential(GoogleHadoopFileSystemBase.java:1601)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.createGcsFs(GoogleHadoopFileSystemBase.java:1738)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.configure(GoogleHadoopFileSystemBase.java:1711)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.initialize(GoogleHadoopFileSystemBase.java:562)
	at shaded.databricks.V2_1_4.com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.initialize(GoogleHadoopFileSystemBase.java:515)
	at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2669)
	at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:370)
	at org.apache.hadoop.fs.Path.getFileSystem(Path.java:295)
	at com.google.cloud.spark.bigquery.BigQueryWriteHelper.&amp;lt;init&amp;gt;(BigQueryWriteHelper.scala:62)
	at com.google.cloud.spark.bigquery.BigQueryInsertableRelation.insert(BigQueryInsertableRelation.scala:41)
	at com.google.cloud.spark.bigquery.BigQueryRelationProvider.createRelation(BigQueryRelationProvider.scala:109)
	at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:48)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:73)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:71)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:94)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:196)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:240)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:165)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:236)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:192)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:165)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:164)
	at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:1079)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withCustomExecutionEnv$5(SQLExecution.scala:126)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:267)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withCustomExecutionEnv$1(SQLExecution.scala:104)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:852)
	at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:77)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:217)
	at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:1079)
	at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:468)
	at org.apache.spark.sql.DataFrameWriter.saveInternal(DataFrameWriter.scala:438)
	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:311)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
	at py4j.Gateway.invoke(Gateway.java:295)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:251)
	at java.lang.Thread.run(Thread.java:748)
Caused by: shaded.databricks.V2_1_4.com.google.api.client.http.HttpResponseException: 404 Not Found
GET &lt;A href="http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" target="test_blank"&gt;http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token&lt;/A&gt;
&amp;lt;!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"&amp;gt;
&amp;lt;html xmlns="http://www.w3.org/1999/xhtml"&amp;gt;
&amp;lt;head&amp;gt;
&amp;lt;meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/&amp;gt;
&amp;lt;title&amp;gt;404 - File or directory not found.&amp;lt;/title&amp;gt;
&amp;lt;style type="text/css"&amp;gt;
&amp;lt;!--
body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}
fieldset{padding:0 15px 10px 15px;} 
h1{font-size:2.4em;margin:0;color:#FFF;}
h2{font-size:1.7em;margin:0;color:#CC0000;} 
h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} 
#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;
background-color:#555555;}
#content{margin:0 0 0 2%;position:relative;}
.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}
--&amp;gt;
&amp;lt;/style&amp;gt;
&amp;lt;/head&amp;gt;
&amp;lt;body&amp;gt;
&amp;lt;div id="header"&amp;gt;&amp;lt;h1&amp;gt;Server Error&amp;lt;/h1&amp;gt;&amp;lt;/div&amp;gt;
&amp;lt;div id="content"&amp;gt;
 &amp;lt;div class="content-container"&amp;gt;&amp;lt;fieldset&amp;gt;
  &amp;lt;h2&amp;gt;404 - File or directory not found.&amp;lt;/h2&amp;gt;
  &amp;lt;h3&amp;gt;The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.&amp;lt;/h3&amp;gt;
 &amp;lt;/fieldset&amp;gt;&amp;lt;/div&amp;gt;
&amp;lt;/div&amp;gt;
&amp;lt;/body&amp;gt;
&amp;lt;/html&amp;gt;&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Thu, 23 Sep 2021 14:37:38 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14685#M9149</guid>
      <dc:creator>Fernando_Messas</dc:creator>
      <dc:date>2021-09-23T14:37:38Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14687#M9151</link>
      <description>&lt;P&gt;Thanks @Kaniz Fatma​&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Thu, 23 Sep 2021 18:49:00 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14687#M9151</guid>
      <dc:creator>Fernando_Messas</dc:creator>
      <dc:date>2021-09-23T18:49:00Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14688#M9152</link>
      <description>&lt;P&gt;could you please check the config value specified in &lt;B&gt;spark.hadoop.fs.gs.auth.service.account.private.key&amp;nbsp;?&lt;/B&gt;&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;println(spark.conf.get("spark.hadoop.fs.gs.auth.service.account.private.key"))&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Mon, 11 Oct 2021 17:19:24 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14688#M9152</guid>
      <dc:creator>shan_chandra</dc:creator>
      <dc:date>2021-10-11T17:19:24Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14689#M9153</link>
      <description>&lt;P&gt;@Fernando Messas​&amp;nbsp;, if you are using the credential file in the notebook then you need to encode the base64 format.&lt;/P&gt;&lt;P&gt;You can try something like below:&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;val contentCred = "/dbfs/FileStore/keys/gcp/myCredentials.json"
val jsonBytes = Source.fromFile(contentCred).getLines.mkString
val myEvents = spark.read.format("bigquery")
   .option("credentials", new String(java.util.Base64.getEncoder.encode(jsonBytes.getBytes)))
   .option("parentProject", "myTestProj")
   .option("project", "project1") 
   .option("table","project1.my_data.events")
.load()
display(myEvents)&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Tue, 12 Oct 2021 07:10:23 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14689#M9153</guid>
      <dc:creator>Prabakar</dc:creator>
      <dc:date>2021-10-12T07:10:23Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14690#M9154</link>
      <description>&lt;P&gt;Hi @Fernando Messas​&amp;nbsp;,&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Did Prabakar's or Shan's response fully answer your question? If so, would you be happy to mark their answer as best so that others can quickly find the solution?&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Please let us know if none of these solutions works.&lt;/P&gt;</description>
      <pubDate>Fri, 29 Oct 2021 22:42:53 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14690#M9154</guid>
      <dc:creator>jose_gonzalez</dc:creator>
      <dc:date>2021-10-29T22:42:53Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14691#M9155</link>
      <description>&lt;P&gt;Sometimes this error occurs when your private key or service account key is not included in the request header. So if you are using Spark or Databricks, you have to configure the JSON key in the Spark config so that it will be added to the request header.&lt;/P&gt;</description>
      <pubDate>Thu, 17 Nov 2022 08:18:39 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/14691#M9155</guid>
      <dc:creator>asif5494</dc:creator>
      <dc:date>2022-11-17T08:18:39Z</dc:date>
    </item>
    <item>
      <title>Re: Error writing data to Google Bigquery</title>
      <link>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/81774#M36410</link>
      <description>&lt;P&gt;&lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/33963"&gt;@Prabakar&lt;/a&gt;&amp;nbsp;Hello, Can you give me a sample specifically for writing in Pyspark? I'm also getting the same error.&lt;/P&gt;</description>
      <pubDate>Sun, 04 Aug 2024 22:10:19 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/error-writing-data-to-google-bigquery/m-p/81774#M36410</guid>
      <dc:creator>ambar2595</dc:creator>
      <dc:date>2024-08-04T22:10:19Z</dc:date>
    </item>
  </channel>
</rss>

