fix: add safety check for _jvm and _jsc in _list_hdfs_files
Signed-off-by: Chimey Rock <trinhvanthoai99@gmail.com>
chimeyrock999 committed Sep 26, 2025
commit df17b70c78a52c285190ce340a2bac369565dd2b
@@ -442,7 +442,6 @@ def persist(
             self.to_spark_df().write.format(file_format).saveAsTable(table_name)
         else:
             self.to_spark_df().createOrReplaceTempView(table_name)
-
 
     def _has_remote_warehouse_in_config(self) -> bool:
         """
@@ -506,7 +505,7 @@ def to_remote_storage(self) -> List[str]:
                 spark_session = get_spark_session_or_start_new_with_repoconfig(
                     store_config=self._config.offline_store
                 )
-                return self._list_hdfs_files(spark_session, output_uri)
+                return _list_hdfs_files(spark_session, output_uri)
             else:
                 raise NotImplementedError(
                     "to_remote_storage is only implemented for file://, s3:// and hdfs:// uri schemes"
@@ -515,18 +514,6 @@ def to_remote_storage(self) -> List[str]:
         else:
             raise NotImplementedError()
 
-    def _list_hdfs_files(self, spark_session: SparkSession, uri: str) -> List[str]:
-        jvm = spark_session._jvm
-        conf = spark_session._jsc.hadoopConfiguration()
-        path = jvm.org.apache.hadoop.fs.Path(uri)
-        fs = jvm.org.apache.hadoop.fs.FileSystem.get(path.toUri(), conf)
-        statuses = fs.listStatus(path)
-        files = []
-        for f in statuses:
-            if f.isFile():
-                files.append(f.getPath().toString())
-        return files
-
     @property
     def metadata(self) -> Optional[RetrievalMetadata]:
         """
@@ -650,6 +637,22 @@ def _list_files_in_folder(folder):
     return files
 
 
+def _list_hdfs_files(spark_session: SparkSession, uri: str) -> List[str]:
+    jvm = spark_session._jvm
+    jsc = spark_session._jsc
+    if jvm is None or jsc is None:
+        raise RuntimeError("Spark JVM or JavaSparkContext is not available")
+    conf = jsc.hadoopConfiguration()
+    path = jvm.org.apache.hadoop.fs.Path(uri)
+    fs = jvm.org.apache.hadoop.fs.FileSystem.get(path.toUri(), conf)
+    statuses = fs.listStatus(path)
+    files = []
+    for f in statuses:
+        if f.isFile():
+            files.append(f.getPath().toString())
+    return files
+
+
 def _cast_data_frame(
     df_new: pyspark.sql.DataFrame, df_existing: pyspark.sql.DataFrame
 ) -> pyspark.sql.DataFrame:
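
For context, a minimal usage sketch (not part of the diff) of the relocated helper, assuming a plain JVM-backed local session; the HDFS URI and print statements are hypothetical:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
try:
    # Lists only the files (not subdirectories) directly under the path.
    files = _list_hdfs_files(spark, "hdfs://namenode:8020/feast/out")
    print(files)
except RuntimeError:
    # With the new guard, a session whose JVM gateway is unavailable
    # (spark._jvm or spark._jsc is None) fails with this explicit error
    # instead of an AttributeError on NoneType.
    print("Spark JVM or JavaSparkContext is not available")

Raising early keeps the failure mode explicit at the call site rather than surfacing as an opaque NoneType attribute error inside the Hadoop FileSystem lookup.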