<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: issue with group by in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32206#M23475</link>
    <description>&lt;P&gt;Hi @Shivers Robert​&amp;nbsp;&lt;/P&gt;&lt;P&gt;Try to use something like that&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;import pyspark.sql.functions as F
&amp;nbsp;
def year_sum(year, column_year, column_sum):
  return F.when(
    F.col(column_year) == year, F.col(column_sum)
  ).otherwise(F.lit(None))
  
display(df.select(*[F.sum(year_sum(i, 'year', 'your_column_variable')).alias(str(i)) for i in [2018, 2019]]))
#### OR you can use the pivot method
display(df.groupby(F.lit('fake')).pivot('year').agg(F.sum('your_column_variable')).drop('fake'))&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;let me know if it works.&lt;/P&gt;</description>
    <pubDate>Mon, 10 Jan 2022 08:23:54 GMT</pubDate>
    <dc:creator>Pholo</dc:creator>
    <dc:date>2022-01-10T08:23:54Z</dc:date>
    <item>
      <title>issue with group by</title>
      <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32202#M23471</link>
      <description>&lt;P&gt;I am trying to group by a data frame by "PRODUCT", "MARKET" and aggregate the remaining ones specified in col_list. There are many more columns in the list but for simplification let's take the example below.&lt;/P&gt;&lt;P&gt;Unfortunately I am getting the error:&lt;/P&gt;&lt;P&gt;&lt;B&gt;&lt;I&gt;"TypeError: unhashable type: 'Column'"&lt;/I&gt;&lt;/B&gt;&lt;/P&gt;&lt;P&gt;on the line with expr&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;col_list = ["value", "units"]
&amp;nbsp;
exprs = {sum(x).alias(x) for x in col_list}
df2 = df1.groupBy("PRODUCT", "MARKET").agg(exprs)&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;TIA&lt;/P&gt;</description>
      <pubDate>Wed, 05 Jan 2022 10:58:54 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32202#M23471</guid>
      <dc:creator>Braxx</dc:creator>
      <dc:date>2022-01-05T10:58:54Z</dc:date>
    </item>
    <item>
      <title>Re: issue with group by</title>
      <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32203#M23472</link>
      <description>&lt;P&gt;I think you'll need to comma separate each sum within the aggregate. I've never seen a list comprehension in the aggregate before. &lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;.agg(sum(y).alias(y),
         sum(x).alias(x),
       .....)&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Wed, 05 Jan 2022 12:38:00 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32203#M23472</guid>
      <dc:creator>Anonymous</dc:creator>
      <dc:date>2022-01-05T12:38:00Z</dc:date>
    </item>
    <item>
      <title>Re: issue with group by</title>
      <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32204#M23473</link>
      <description>&lt;P&gt;You should replace "{" with "["&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;exprs = [sum(x).alias(x) for x in col_list]&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Thu, 06 Jan 2022 20:14:05 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32204#M23473</guid>
      <dc:creator>Reza</dc:creator>
      <dc:date>2022-01-06T20:14:05Z</dc:date>
    </item>
    <item>
      <title>Re: issue with group by</title>
      <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32206#M23475</link>
      <description>&lt;P&gt;Hi @Shivers Robert​&amp;nbsp;&lt;/P&gt;&lt;P&gt;Try to use something like that&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;import pyspark.sql.functions as F
&amp;nbsp;
def year_sum(year, column_year, column_sum):
  return F.when(
    F.col(column_year) == year, F.col(column_sum)
  ).otherwise(F.lit(None))
  
display(df.select(*[F.sum(year_sum(i, 'year', 'your_column_variable')).alias(str(i)) for i in [2018, 2019]]))
#### OR you can use the pivot method
display(df.groupby(F.lit('fake')).pivot('year').agg(F.sum('your_column_variable')).drop('fake'))&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;let me know if it works.&lt;/P&gt;</description>
      <pubDate>Mon, 10 Jan 2022 08:23:54 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32206#M23475</guid>
      <dc:creator>Pholo</dc:creator>
      <dc:date>2022-01-10T08:23:54Z</dc:date>
    </item>
    <item>
      <title>Re: issue with group by</title>
      <link>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32207#M23476</link>
      <description>&lt;P&gt;ya, Thanks. that's one thing. Another one was a missing "*".&lt;/P&gt;&lt;P&gt;Complete answer:&lt;/P&gt;&lt;PRE&gt;&lt;CODE&gt;col_list = ["value", "units"]
 
exprs = [sum(x).alias(x) for x in col_list]
df2 = df1.groupBy("PRODUCT", "MARKET").agg(*exprs)&lt;/CODE&gt;&lt;/PRE&gt;&lt;P&gt;&lt;/P&gt;</description>
      <pubDate>Mon, 10 Jan 2022 12:11:43 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/issue-with-group-by/m-p/32207#M23476</guid>
      <dc:creator>Braxx</dc:creator>
      <dc:date>2022-01-10T12:11:43Z</dc:date>
    </item>
  </channel>
</rss>

