Posted to reviews@spark.apache.org by "ueshin (via GitHub)" <gi...@apache.org> on 2023/09/28 00:01:55 UTC
[GitHub] [spark] ueshin commented on a diff in pull request #43156: [SPARK-45362][PYTHON] Project out PARTITION BY expressions before Python UDTF 'eval' method consumes them
ueshin commented on code in PR #43156:
URL: https://github.com/apache/spark/pull/43156#discussion_r1339345417
##########
python/pyspark/worker.py:
##########
@@ -752,17 +757,28 @@ def _check_partition_boundaries(self, arguments: list) -> bool:
         prev_table_arg = self._get_table_arg(self._prev_arguments)
         cur_partitions_args = []
         prev_partitions_args = []
-        for i in partition_child_indexes:
+        for i in self._partition_child_indexes:
             cur_partitions_args.append(cur_table_arg[i])
             prev_partitions_args.append(prev_table_arg[i])
-        self._prev_arguments = arguments
         result = any(k != v for k, v in zip(cur_partitions_args, prev_partitions_args))
         self._prev_arguments = arguments
         return result
 
     def _get_table_arg(self, inputs: list) -> Row:
         return [x for x in inputs if type(x) is Row][0]
 
+    def _remove_partition_by_exprs(self, arg: Any) -> Any:
+        if type(arg) is Row:
Review Comment:
```suggestion
        if isinstance(arg, Row):
```
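For context, a minimal sketch (not part of the PR) of why `isinstance` is the safer check: `type(arg) is Row` rejects subclasses of `Row`, while `isinstance` accepts them. The `NamedRow` subclass below is hypothetical, purely for illustration:

```python
from pyspark.sql import Row

class NamedRow(Row):  # hypothetical subclass, for illustration only
    pass

r = NamedRow(a=1, b=2)
print(type(r) is Row)      # False: the exact-type check rejects subclasses
print(isinstance(r, Row))  # True: isinstance matches Row and any subclass
```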
##########
python/pyspark/worker.py:
##########
@@ -752,17 +757,28 @@ def _check_partition_boundaries(self, arguments: list) -> bool:
         prev_table_arg = self._get_table_arg(self._prev_arguments)
         cur_partitions_args = []
         prev_partitions_args = []
-        for i in partition_child_indexes:
+        for i in self._partition_child_indexes:
             cur_partitions_args.append(cur_table_arg[i])
             prev_partitions_args.append(prev_table_arg[i])
-        self._prev_arguments = arguments
         result = any(k != v for k, v in zip(cur_partitions_args, prev_partitions_args))
         self._prev_arguments = arguments
         return result
 
     def _get_table_arg(self, inputs: list) -> Row:
         return [x for x in inputs if type(x) is Row][0]
 
+    def _remove_partition_by_exprs(self, arg: Any) -> Any:
+        if type(arg) is Row:
+            new_row_keys = []
+            new_row_values = []
+            for (i, (key, value)) in enumerate(arg.asDict().items()):
+                if i not in self._partition_child_indexes:
+                    new_row_keys.append(key)
+                    new_row_values.append(value)
+            return Row(*new_row_keys)(*new_row_values)
Review Comment:
```suggestion
            for (i, (key, value)) in enumerate(zip(arg.__fields__, arg)):
                if i not in self._partition_child_indexes:
                    new_row_keys.append(key)
                    new_row_values.append(value)
            return _create_row(new_row_keys, new_row_values)
```
`arg.asDict()` can cause some issues:
- `dict.items()` may not guarantee the field order
- `arg` may contain multiple fields with the same name, and `asDict()` keeps only one of them.
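A quick repro of the second point, assuming only `pyspark` (note that `_create_row` is a private helper in `pyspark.sql.types`, as used in the suggestion above):

```python
from pyspark.sql import Row
from pyspark.sql.types import _create_row

# A Row whose two fields share the same name, as can happen when a projected
# PARTITION BY expression duplicates an input column.
row = Row("x", "x")(1, 2)

# asDict() collapses the duplicate name: one value is silently lost.
print(row.asDict())  # {'x': 2}

# zip(__fields__, row) keeps every (name, value) pair in declaration order.
pairs = list(zip(row.__fields__, row))
print(pairs)  # [('x', 1), ('x', 2)]

# _create_row rebuilds a Row from parallel name/value lists, duplicates intact.
rebuilt = _create_row([k for k, _ in pairs], [v for _, v in pairs])
print(rebuilt)  # Row(x=1, x=2)
```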
##########
python/pyspark/worker.py:
##########
@@ -735,7 +735,15 @@ def eval(self, *args, **kwargs) -> Iterator:
                         yield row
             self._udtf = self._create_udtf()
         if self._udtf.eval is not None:
-            result = self._udtf.eval(*args, **kwargs)
+            # Filter the arguments to exclude projected PARTITION BY values added by Catalyst.
+            filtered_args = [self._remove_partition_by_exprs(arg) for arg in args]
+            filtered_kwargs = dict(
+                [
+                    (key, self._remove_partition_by_exprs(value))
+                    for (key, value) in kwargs.items()
+                ]
+            )
Review Comment:
```suggestion
            filtered_kwargs = {
                key: self._remove_partition_by_exprs(value)
                for key, value in kwargs.items()
            }
```
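The two forms produce the same mapping; the comprehension just avoids building an intermediate list of tuples. A standalone sketch, with a hypothetical `strip_partition_cols` standing in for `self._remove_partition_by_exprs`:

```python
# Hypothetical stand-in for self._remove_partition_by_exprs, for the demo only.
def strip_partition_cols(value):
    return value

kwargs = {"a": 1, "b": 2}

# Original form: dict() over a list comprehension builds an intermediate list.
via_dict = dict([(key, strip_partition_cols(value)) for (key, value) in kwargs.items()])

# Suggested form: a dict comprehension produces the same mapping directly.
via_comp = {key: strip_partition_cols(value) for key, value in kwargs.items()}

assert via_dict == via_comp
```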
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org