Sure, here's a snippet that reads the meterId values from the JSON files and prints them as a single comma-separated string:
%python
from pyspark.sql.types import *

schema = StructType([
    StructField('meterDateTime', StringType(), True),
    StructField('meterId', LongType(), True),
    StructField('meteringState', StringType(), True),
    StructField('value', DoubleType(), True),
    StructField('versionTimestamp', StringType(), True),
    StructField('file_name', StringType(), False),
    StructField('file_modification_time', TimestampType(), False),
])
# Read every matching JSON file with the explicit schema, keeping only meterId
# (path_sep is assumed to be defined earlier in the notebook)
df = (
    spark.read
    .format("json")
    .schema(schema)
    .load(f'{path_sep}/*/*/*/*.json')
    .select("meterId")
)
# A Spark DataFrame has no tolist(); collect the rows and join the meterId values
result = ', '.join(str(row['meterId']) for row in df.collect())
print(result)
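
If the list of IDs could be large, you can also build the comma-separated string on the Spark side instead of collecting every row and joining in Python. This is a minimal sketch assuming the same df as above; it uses concat_ws with collect_list and only brings a single row back to the driver:

from pyspark.sql.functions import col, concat_ws, collect_list

# Aggregate all meterId values into one comma-separated string column
ids_df = df.agg(
    concat_ws(', ', collect_list(col('meterId').cast('string'))).alias('meter_ids')
)
result = ids_df.first()['meter_ids']
print(result)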