Posted to commits@impala.apache.org by bh...@apache.org on 2018/10/08 17:11:57 UTC
[1/6] impala git commit: IMPALA-7661: Increase the sleep time in test_reconnect
Repository: impala
Updated Branches:
refs/heads/master 0e1de31ba -> d48ffc2d4
IMPALA-7661: Increase the sleep time in test_reconnect
test_reconnect is flaky under ASAN because the time waited for the
impala shell to connect to impalad is not long enough. This patch
increases the sleep time from 2 secs to 5 secs.
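For context, the check the test relies on is a fixed sleep followed by a
session-count assertion; below is a minimal sketch of that pattern (the helper
and parameter names are illustrative, not taken from the test file):

from time import sleep

def wait_and_check_connection(get_num_open_sessions, impala_service,
                              sessions_before, wait_secs=5):
    """Sleep for a fixed interval, then assert exactly one new session opened."""
    sleep(wait_secs)  # raised from 2 to 5 because ASAN builds connect more slowly
    assert get_num_open_sessions(impala_service) == sessions_before + 1, \
        "Shell did not connect within %s seconds" % wait_secs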
Change-Id: Ia009808adac0da1cfa00b9e9dd41cc276d49c6eb
Reviewed-on: http://gerrit.cloudera.org:8080/11589
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/f8b2eb58
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/f8b2eb58
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/f8b2eb58
Branch: refs/heads/master
Commit: f8b2eb585ad4c5a57763e07a88266a3a757432a2
Parents: 0e1de31
Author: Tianyi Wang <tw...@cloudera.com>
Authored: Thu Oct 4 18:07:21 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Fri Oct 5 22:05:24 2018 +0000
----------------------------------------------------------------------
tests/shell/test_shell_interactive.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/f8b2eb58/tests/shell/test_shell_interactive.py
----------------------------------------------------------------------
diff --git a/tests/shell/test_shell_interactive.py b/tests/shell/test_shell_interactive.py
index 860803d..4d071de 100755
--- a/tests/shell/test_shell_interactive.py
+++ b/tests/shell/test_shell_interactive.py
@@ -231,14 +231,14 @@ class TestImpalaShellInteractive(object):
num_sessions_target = get_num_open_sessions(target_impala_service)
# Connect to localhost:21000 (default)
p = ImpalaShell()
- sleep(2)
+ sleep(5)
# Make sure we're connected <hostname>:21000
assert get_num_open_sessions(initial_impala_service) == num_sessions_initial + 1, \
"Not connected to %s:21000" % hostname
p.send_cmd("connect %s:21001" % hostname)
# Wait for a little while
- sleep(2)
+ sleep(5)
# The number of sessions on the target impalad should have been incremented.
assert get_num_open_sessions(target_impala_service) == num_sessions_target + 1, \
"Not connected to %s:21001" % hostname
[3/6] impala git commit: IMPALA-7643: report # queries actually executing in stress test
Posted by bh...@apache.org.
IMPALA-7643: report # queries actually executing in stress test
With admission control it's interesting to separate out two categories
of queries:
1. Queries that have started up and are executing
2. Queries that have not made it that far yet, e.g. are waiting to
establish a client connection (hitting --fe_service_threads limit),
are in the planner, are queued in admission control or are starting
up.
We now call 1+2 "Active" and 1 "Executing".
Example output:
Done | Active | Executing | Mem Lmt Ex | AC Reject | AC Timeout | Time Out | Cancel | Err | Incorrect | Next Qry Mem Lmt | Tot Qry Mem Lmt | Tracked Mem | RSS Mem
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | |
0 | 10 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 3922 | 158 | 4541
0 | 20 | 10 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 8534 | 570 | 4517
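The two new columns are simple differences of the cumulative counters kept by
the stress runner; a minimal sketch of the arithmetic (counter names follow the
patch below, this is not the full class):

class StressCounters(object):
    """Hedged sketch of how "Active" and "Executing" are derived."""

    def __init__(self):
        self.num_queries_submitted = 0                     # handed to a query runner
        self.num_queries_started_running_or_cancelled = 0  # got past admission control
        self.num_queries_finished = 0                       # completed, for any reason

    @property
    def active(self):
        # Categories 1 + 2: submitted to a runner but not yet finished.
        return self.num_queries_submitted - self.num_queries_finished

    @property
    def executing(self):
        # Category 1 only: entered the RUNNING state (or was cancelled / hit an
        # error) and has not yet finished.
        return (self.num_queries_started_running_or_cancelled -
                self.num_queries_finished)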
Refactored QueryRunner.run_query() to reduce nesting and make it more
readable.
Testing:
Ran local stress tests with and without --test_admission_control set.
Change-Id: I5692e8e5ba3224becefc24437197bf5a5b450335
Reviewed-on: http://gerrit.cloudera.org:8080/11587
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/81c58d5d
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/81c58d5d
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/81c58d5d
Branch: refs/heads/master
Commit: 81c58d5de0d0295b5535ff15afb284bccb6b0026
Parents: d3db326
Author: Tim Armstrong <ta...@cloudera.com>
Authored: Tue Oct 2 17:26:51 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Oct 6 03:13:42 2018 +0000
----------------------------------------------------------------------
tests/stress/concurrent_select.py | 175 ++++++++++++++++++++++-----------
1 file changed, 117 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/81c58d5d/tests/stress/concurrent_select.py
----------------------------------------------------------------------
diff --git a/tests/stress/concurrent_select.py b/tests/stress/concurrent_select.py
index 80a2386..844c245 100755
--- a/tests/stress/concurrent_select.py
+++ b/tests/stress/concurrent_select.py
@@ -427,9 +427,9 @@ class StressRunner(object):
self._mem_mb_needed_for_next_query = Value("i", 0)
# This lock provides a way to stop new queries from running. This lock must be
- # acquired before writing to _num_queries_started. Before query submission
- # _num_queries_started must be incremented. Reading _num_queries_started is allowed
- # without taking this lock.
+ # acquired before writing to _num_queries_submitted. Before query submission
+ # _num_queries_submitted must be incremented. Reading _num_queries_submitted is
+ # allowed without taking this lock.
self._submit_query_lock = Lock()
self.leak_check_interval_mins = None
@@ -439,7 +439,11 @@ class StressRunner(object):
# All values below are cumulative.
self._num_queries_dequeued = Value("i", 0)
- self._num_queries_started = Value("i", 0)
+ # The number of queries that were submitted to a query runner.
+ self._num_queries_submitted = Value("i", 0)
+ # The number of queries that have entered the RUNNING state (i.e. got through Impala's
+ # admission control and started executing) or were cancelled or hit an error.
+ self._num_queries_started_running_or_cancelled = Value("i", 0)
self._num_queries_finished = Value("i", 0)
self._num_queries_exceeded_mem_limit = Value("i", 0)
self._num_queries_ac_rejected = Value("i", 0)
@@ -458,9 +462,9 @@ class StressRunner(object):
self.results_dir = gettempdir()
self._status_headers = [
- "Done", "Running", "Mem Lmt Ex", "AC Reject", "AC Timeout", "Time Out", "Cancel",
- "Err", "Incorrect", "Next Qry Mem Lmt", "Tot Qry Mem Lmt", "Tracked Mem",
- "RSS Mem"]
+ "Done", "Active", "Executing", "Mem Lmt Ex", "AC Reject", "AC Timeout",
+ "Time Out", "Cancel", "Err", "Incorrect", "Next Qry Mem Lmt",
+ "Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
@@ -563,11 +567,11 @@ class StressRunner(object):
def _start_consuming_queries(self, impala):
def start_additional_runners_if_needed():
try:
- while self._num_queries_started.value < self._num_queries_to_run:
+ while self._num_queries_submitted.value < self._num_queries_to_run:
sleep(1.0 / self.startup_queries_per_sec)
# Remember num dequeued/started are cumulative.
with self._submit_query_lock:
- if self._num_queries_dequeued.value != self._num_queries_started.value:
+ if self._num_queries_dequeued.value != self._num_queries_submitted.value:
# Assume dequeued queries are stuck waiting for cluster resources so there
# is no point in starting an additional runner.
continue
@@ -597,10 +601,10 @@ class StressRunner(object):
# while no queries were running.
ready_to_unlock = None
try:
- while self._num_queries_started.value < self._num_queries_to_run:
+ while self._num_queries_submitted.value < self._num_queries_to_run:
if ready_to_unlock:
assert query_sumbission_is_locked, "Query submission not yet locked"
- assert not self._num_queries_running, "Queries are still running"
+ assert not self._num_queries_active, "Queries are still running"
LOG.debug("Resuming query submission")
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
@@ -613,7 +617,7 @@ class StressRunner(object):
self.leak_check_interval_mins and
time() > self._next_leak_check_unix_time.value
):
- assert self._num_queries_running <= len(self._query_runners), \
+ assert self._num_queries_active <= len(self._query_runners), \
"Each running query should belong to a runner"
LOG.debug("Stopping query submission")
self._submit_query_lock.acquire()
@@ -638,7 +642,7 @@ class StressRunner(object):
max_actual = -1
self._set_mem_usage_values(max_reported, max_actual)
- if query_sumbission_is_locked and not self._num_queries_running:
+ if query_sumbission_is_locked and not self._num_queries_active:
if ready_to_unlock is None:
ready_to_unlock = False
else:
@@ -670,17 +674,33 @@ class StressRunner(object):
self._max_mem_mb_usage.value = actual
@property
- def _num_queries_running(self):
- num_running = self._num_queries_started.value - self._num_queries_finished.value
+ def _num_queries_active(self):
+ """The number of queries that are currently active (i.e. submitted to a query runner
+ and haven't yet completed)."""
+ num_running = self._num_queries_submitted.value - self._num_queries_finished.value
assert num_running >= 0, "The number of running queries is negative"
return num_running
+ @property
+ def _num_queries_executing(self):
+ """The number of queries that are currently executing (i.e. entered the RUNNING state
+ and haven't yet completed)."""
+ num_executing = (self._num_queries_started_running_or_cancelled.value -
+ self._num_queries_finished.value)
+ assert num_executing >= 0, "The number of executing queries is negative"
+ return num_executing
+
+ def increment_num_queries_started_running_or_cancelled(self):
+ """Called by query runner to increment _num_queries_started_running_or_cancelled."""
+ increment(self._num_queries_started_running_or_cancelled)
+
+
def _start_single_runner(self, impalad):
"""Consumer function to take a query of the queue and run it. This is intended to
run in a separate process so validating the result set can use a full CPU.
"""
LOG.debug("New query runner started")
- runner = QueryRunner()
+ runner = QueryRunner(self)
runner.impalad = impalad
runner.results_dir = self.results_dir
runner.use_kerberos = self.use_kerberos
@@ -714,7 +734,7 @@ class StressRunner(object):
solo_runtime = query.solo_runtime_secs_with_spilling
LOG.debug("Waiting for other query runners to start their queries")
- while query_idx > self._num_queries_started.value:
+ while query_idx > self._num_queries_submitted.value:
sleep(0.1)
self._mem_mb_needed_for_next_query.value = mem_limit
@@ -723,13 +743,13 @@ class StressRunner(object):
with self._mem_broker.reserve_mem_mb(mem_limit) as reservation_id:
LOG.debug("Received memory reservation")
with self._submit_query_lock:
- increment(self._num_queries_started)
+ increment(self._num_queries_submitted)
should_cancel = self.cancel_probability > random()
if should_cancel:
timeout = randrange(1, max(int(solo_runtime), 2))
else:
timeout = solo_runtime * max(
- 10, self._num_queries_started.value - self._num_queries_finished.value)
+ 10, self._num_queries_submitted.value - self._num_queries_finished.value)
report = runner.run_query(query, timeout, mem_limit, should_cancel=should_cancel)
LOG.debug("Got execution report for query")
if report.timed_out and should_cancel:
@@ -811,8 +831,10 @@ class StressRunner(object):
print(status_format % (
# Done
self._num_queries_finished.value,
- # Running
- self._num_queries_started.value - self._num_queries_finished.value,
+ # Active
+ self._num_queries_active,
+ # Executing
+ self._num_queries_executing,
# Mem Lmt Ex
self._num_queries_exceeded_mem_limit.value,
# AC Rejected
@@ -1010,7 +1032,11 @@ class QueryRunner(object):
SPILLED_PATTERNS = [re.compile("ExecOption:.*Spilled"), re.compile("SpilledRuns: [^0]")]
BATCH_SIZE = 1024
- def __init__(self):
+ def __init__(self, stress_runner=None):
+ """Creates a new instance. The caller must fill in the below fields. stress_runner
+ must be provided if this is running in the context of a stress run, so that statistics
+ can be updated."""
+ self.stress_runner = stress_runner
self.impalad = None
self.impalad_conn = None
self.use_kerberos = False
@@ -1043,29 +1069,7 @@ class QueryRunner(object):
try:
with self.impalad_conn.cursor() as cursor:
start_time = time()
- if query.db_name:
- LOG.debug("Using %s database", query.db_name)
- cursor.execute("USE %s" % query.db_name)
- if run_set_up and query.set_up_sql:
- LOG.debug("Running set up query:\n%s", self.set_up_sql)
- cursor.execute(query.set_up_sql)
- for query_option, value in self.common_query_options.iteritems():
- cursor.execute(
- "SET {query_option}={value}".format(query_option=query_option, value=value))
- for query_option, value in query.options.iteritems():
- cursor.execute(
- "SET {query_option}={value}".format(query_option=query_option, value=value))
- cursor.execute("SET ABORT_ON_ERROR=1")
- if self.test_admission_control:
- LOG.debug(
- "Running query without mem limit at %s with timeout secs %s:\n%s",
- self.impalad.host_name, timeout_secs, query.sql)
- else:
- LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
- cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
- LOG.debug(
- "Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
- mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
+ self._set_db_and_options(cursor, query, run_set_up, mem_limit_mb, timeout_secs)
error = None
try:
cursor.execute_async(
@@ -1074,19 +1078,10 @@ class QueryRunner(object):
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Query id is %s", report.query_id)
- sleep_secs = 0.1
- secs_since_log = 0
- while cursor.is_executing():
- if time() > timeout_unix_time:
- if not should_cancel:
- fetch_and_set_profile(cursor, report)
- self._cancel(cursor, report)
- return report
- if secs_since_log > 5:
- secs_since_log = 0
- LOG.debug("Waiting for query to execute")
- sleep(sleep_secs)
- secs_since_log += sleep_secs
+ if not self._wait_until_fetchable(cursor, report, timeout_unix_time,
+ should_cancel):
+ return report
+
if query.query_type == QueryType.SELECT:
try:
report.result_hash = self._hash_result(cursor, timeout_unix_time, query)
@@ -1118,6 +1113,70 @@ class QueryRunner(object):
report.other_error = error
return report
+ def _set_db_and_options(self, cursor, query, run_set_up, mem_limit_mb, timeout_secs):
+ """Set up a new cursor for running a query by switching to the correct database and
+ setting query options."""
+ if query.db_name:
+ LOG.debug("Using %s database", query.db_name)
+ cursor.execute("USE %s" % query.db_name)
+ if run_set_up and query.set_up_sql:
+ LOG.debug("Running set up query:\n%s", query.set_up_sql)
+ cursor.execute(query.set_up_sql)
+ for query_option, value in self.common_query_options.iteritems():
+ cursor.execute(
+ "SET {query_option}={value}".format(query_option=query_option, value=value))
+ for query_option, value in query.options.iteritems():
+ cursor.execute(
+ "SET {query_option}={value}".format(query_option=query_option, value=value))
+ cursor.execute("SET ABORT_ON_ERROR=1")
+ if self.test_admission_control:
+ LOG.debug(
+ "Running query without mem limit at %s with timeout secs %s:\n%s",
+ self.impalad.host_name, timeout_secs, query.sql)
+ else:
+ LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
+ cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
+ LOG.debug(
+ "Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
+ mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
+
+ def _wait_until_fetchable(self, cursor, report, timeout_unix_time, should_cancel):
+ """Wait up until timeout_unix_time until the query results can be fetched (if it's
+ a SELECT query) or until it has finished executing (if it's a different query type
+ like DML). If the timeout expires we either cancel the query or report the timeout.
+ Return True in the first case or False in the second (timeout) case."""
+ # Loop until the query gets to the right state or a timeout expires.
+ sleep_secs = 0.1
+ secs_since_log = 0
+ # True if we incremented num_queries_started_running_or_cancelled for this query.
+ started_running_or_cancelled = False
+ while True:
+ query_state = cursor.status()
+ # Check if the query got past the PENDING/INITIALIZED states, either because
+ # it's executing or hit an error.
+ if (not started_running_or_cancelled and query_state not in ('PENDING_STATE',
+ 'INITIALIZED_STATE')):
+ started_running_or_cancelled = True
+ if self.stress_runner:
+ self.stress_runner.increment_num_queries_started_running_or_cancelled()
+ # Return if we're ready to fetch results (in the FINISHED state) or we are in
+ # another terminal state like EXCEPTION.
+ if query_state not in ('PENDING_STATE', 'INITIALIZED_STATE', 'RUNNING_STATE'):
+ return True
+
+ if time() > timeout_unix_time:
+ if not should_cancel:
+ fetch_and_set_profile(cursor, report)
+ self._cancel(cursor, report)
+ if not started_running_or_cancelled and self.stress_runner:
+ self.stress_runner.increment_num_queries_started_running_or_cancelled()
+ return False
+ if secs_since_log > 5:
+ secs_since_log = 0
+ LOG.debug("Waiting for query to execute")
+ sleep(sleep_secs)
+ secs_since_log += sleep_secs
+
def _cancel(self, cursor, report):
report.timed_out = True
[4/6] impala git commit: IMPALA-7671: Fix broken SHOW GRANT USER ON <object>
Posted by bh...@apache.org.
IMPALA-7671: Fix broken SHOW GRANT USER ON <object>
This patch fixes the broken SHOW GRANT USER ON <object>, which always
showed an empty result due to an incorrect comparison between the
TPrivilege used as the filter and the TPrivilege of the actual
privilege: the comparison should not consider the "grantoption" field.
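The gist of the fix, sketched here in Python rather than the Java of the actual
patch (the dict fields and helper are illustrative stand-ins): before comparing
the filter against a privilege, copy over the fields that must not affect the
match so they cannot cause a spurious mismatch.

def build_privilege_name(p):
    # Simplified stand-in for PrincipalPrivilege.buildPrivilegeName().
    return ".".join(str(p[k]).lower() for k in sorted(p))

def is_privilege_filtered(filter_priv, privilege):
    """Return True if 'privilege' does NOT match 'filter_priv'. The privilege
    level and grant option are copied from the privilege so they do not
    influence the comparison -- the bug fixed by this patch."""
    normalized = dict(filter_priv,
                      privilege_level=privilege["privilege_level"],
                      has_grant_opt=privilege["has_grant_opt"])
    return build_privilege_name(normalized) != build_privilege_name(privilege)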
Testing:
- Added new E2E tests
- Ran all FE tests
- Ran all authorization E2E tests
Change-Id: I7adc403caddd18e5a954cf6affd5d1d555b9f5f0
Reviewed-on: http://gerrit.cloudera.org:8080/11598
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/e5c502e4
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/e5c502e4
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/e5c502e4
Branch: refs/heads/master
Commit: e5c502e4e428bd1cd5b04f06d72eba8fba61e918
Parents: 81c58d5
Author: Fredy Wijaya <fw...@cloudera.com>
Authored: Fri Oct 5 12:13:44 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Oct 6 07:25:49 2018 +0000
----------------------------------------------------------------------
.../apache/impala/catalog/AuthorizationPolicy.java | 3 +++
.../queries/QueryTest/show_grant_user.test | 16 ++++++++++++++++
tests/common/impala_test_suite.py | 6 ++++--
3 files changed, 23 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/e5c502e4/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
index e06818c..4819079 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
@@ -491,7 +491,10 @@ public class AuthorizationPolicy implements PrivilegeCache {
* Check if the filter matches the privilege.
*/
private boolean isPrivilegeFiltered(TPrivilege filter, TPrivilege privilege) {
+ // Set the filter with privilege level and has grant option from the given privilege
+ // since those two fields don't matter for the filter.
filter.setPrivilege_level(privilege.getPrivilege_level());
+ filter.setHas_grant_opt(privilege.isHas_grant_opt());
String privName = PrincipalPrivilege.buildPrivilegeName(filter);
return !privName.equalsIgnoreCase(PrincipalPrivilege.buildPrivilegeName(privilege));
}
http://git-wip-us.apache.org/repos/asf/impala/blob/e5c502e4/testdata/workloads/functional-query/queries/QueryTest/show_grant_user.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/show_grant_user.test b/testdata/workloads/functional-query/queries/QueryTest/show_grant_user.test
index 8dd86fe..55ba28f 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/show_grant_user.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/show_grant_user.test
@@ -1,4 +1,11 @@
====
+---- QUERY
+show grant user $USER on database $DATABASE
+---- RESULTS
+'USER','$USER','database','$DATABASE','','','','owner',true,regex:.+
+---- TYPES
+STRING, STRING, STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
+====
---- USER
does_not_exist
---- QUERY
@@ -134,6 +141,15 @@ show grant user user2_shared2
---- TYPES
STRING, STRING, STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
====
+---- USER
+user_1group
+---- QUERY
+show grant user user_1group on table $DATABASE.user_1group_tbl
+---- RESULTS
+'USER','user_1group','table','$DATABASE','user_1group_tbl','','','owner',true,regex:.+
+---- TYPES
+STRING, STRING, STRING, STRING, STRING, STRING, STRING, STRING, BOOLEAN, STRING
+====
---- QUERY
create role sgu_test_role1_group1;
grant role sgu_test_role1_group1 to group group_1;
http://git-wip-us.apache.org/repos/asf/impala/blob/e5c502e4/tests/common/impala_test_suite.py
----------------------------------------------------------------------
diff --git a/tests/common/impala_test_suite.py b/tests/common/impala_test_suite.py
index 979bff5..0f18dea 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -373,11 +373,13 @@ class ImpalaTestSuite(BaseTestSuite):
.replace('$GROUP_NAME', group_name)
.replace('$IMPALA_HOME', IMPALA_HOME)
.replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX)
- .replace('$SECONDARY_FILESYSTEM', os.getenv("SECONDARY_FILESYSTEM") or str()))
+ .replace('$SECONDARY_FILESYSTEM', os.getenv("SECONDARY_FILESYSTEM") or str())
+ .replace('$USER', getuser()))
if use_db: query = query.replace('$DATABASE', use_db)
reserved_keywords = ["$DATABASE", "$FILESYSTEM_PREFIX", "$GROUP_NAME",
- "$IMPALA_HOME", "$NAMENODE", "$QUERY", "$SECONDARY_FILESYSTEM"]
+ "$IMPALA_HOME", "$NAMENODE", "$QUERY", "$SECONDARY_FILESYSTEM",
+ "$USER"]
if test_file_vars:
for key, value in test_file_vars.iteritems():
[5/6] impala git commit: IMPALA-7644: Hide Parquet page index writing with feature flag
Posted by bh...@apache.org.
IMPALA-7644: Hide Parquet page index writing with feature flag
This commit adds the command-line flag enable_parquet_page_index_writing
to the Impala daemon, which toggles Impala's ability to write the
Parquet page index. By default the flag is false, i.e. Impala doesn't
write the page index.
This flag is only temporary; we plan to remove it once Impala is able to
read the page index and has better testing around it.
Because of this change I had to move test_parquet_page_index.py to the
custom_cluster test suite, since I need to set this command-line flag in
order to test the functionality. I also merged most of the test cases
because we don't want to restart the cluster too many times.
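For reference, this is why the tests now live under custom_cluster: the flag is
a daemon startup option, so each test restarts the cluster with it via the
with_args decorator. The fragment below mirrors the new test file in the diff
further down (the test body is elided here):

from tests.common.custom_cluster_test_suite import CustomClusterTestSuite

class TestHdfsParquetTableIndexWriter(CustomClusterTestSuite):

    @CustomClusterTestSuite.with_args("--enable_parquet_page_index_writing_debug_only")
    def test_ctas_tables(self, vector, unique_database, tmpdir):
        # The cluster is restarted with the page-index flag before this body
        # runs, so the CTAS statements exercised here produce files that
        # actually contain a page index.
        pass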
Change-Id: If9994882aa59cbaf3ae464100caa8211598287bc
Reviewed-on: http://gerrit.cloudera.org:8080/11563
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/843683ed
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/843683ed
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/843683ed
Branch: refs/heads/master
Commit: 843683ed6c2ef41c7c25e9fa4af68801dbdd1a78
Parents: e5c502e
Author: Zoltan Borok-Nagy <bo...@cloudera.com>
Authored: Tue Oct 2 14:11:58 2018 +0200
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Mon Oct 8 13:27:49 2018 +0000
----------------------------------------------------------------------
be/src/common/global-flags.cc | 6 +
be/src/exec/hdfs-parquet-table-writer.cc | 100 +++--
.../queries/QueryTest/stats-extrapolation.test | 14 +-
tests/custom_cluster/test_parquet_page_index.py | 371 ++++++++++++++++++
tests/query_test/test_parquet_page_index.py | 372 -------------------
5 files changed, 446 insertions(+), 417 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/843683ed/be/src/common/global-flags.cc
----------------------------------------------------------------------
diff --git a/be/src/common/global-flags.cc b/be/src/common/global-flags.cc
index 2ea1ca5..ac76b53 100644
--- a/be/src/common/global-flags.cc
+++ b/be/src/common/global-flags.cc
@@ -239,6 +239,12 @@ DEFINE_double_hidden(invalidate_tables_fraction_on_memory_pressure, 0.1,
"The fraction of tables to invalidate when CatalogdTableInvalidator considers the "
"old GC generation to be almost full.");
+DEFINE_bool_hidden(enable_parquet_page_index_writing_debug_only, false, "If true, Impala "
+ "will write the Parquet page index. It is not advised to use it in a production "
+ "environment, only for testing and development. This flag is meant to be temporary. "
+ "We plan to remove this flag once Impala is able to read the page index and has "
+ "better test coverage around it.");
+
// ++========================++
// || Startup flag graveyard ||
// ++========================++
http://git-wip-us.apache.org/repos/asf/impala/blob/843683ed/be/src/exec/hdfs-parquet-table-writer.cc
----------------------------------------------------------------------
diff --git a/be/src/exec/hdfs-parquet-table-writer.cc b/be/src/exec/hdfs-parquet-table-writer.cc
index 8aa4f7a..13137e5 100644
--- a/be/src/exec/hdfs-parquet-table-writer.cc
+++ b/be/src/exec/hdfs-parquet-table-writer.cc
@@ -83,6 +83,8 @@ using namespace apache::thrift;
// the columns and run that function over row batches.
// TODO: we need to pass in the compression from the FE/metadata
+DECLARE_bool(enable_parquet_page_index_writing_debug_only);
+
namespace impala {
// Base class for column writers. This contains most of the logic except for
@@ -205,6 +207,58 @@ class HdfsParquetTableWriter::BaseColumnWriter {
protected:
friend class HdfsParquetTableWriter;
+ Status AddMemoryConsumptionForPageIndex(int64_t new_memory_allocation) {
+ if (UNLIKELY(!table_sink_mem_tracker_->TryConsume(new_memory_allocation))) {
+ return table_sink_mem_tracker_->MemLimitExceeded(parent_->state_,
+ "Failed to allocate memory for Parquet page index.", new_memory_allocation);
+ }
+ page_index_memory_consumption_ += new_memory_allocation;
+ return Status::OK();
+ }
+
+ Status ReserveOffsetIndex(int64_t capacity) {
+ if (!FLAGS_enable_parquet_page_index_writing_debug_only) return Status::OK();
+ RETURN_IF_ERROR(
+ AddMemoryConsumptionForPageIndex(capacity * sizeof(parquet::PageLocation)));
+ offset_index_.page_locations.reserve(capacity);
+ return Status::OK();
+ }
+
+ void AddLocationToOffsetIndex(const parquet::PageLocation& location) {
+ if (!FLAGS_enable_parquet_page_index_writing_debug_only) return;
+ offset_index_.page_locations.push_back(location);
+ }
+
+ Status AddPageStatsToColumnIndex() {
+ if (!FLAGS_enable_parquet_page_index_writing_debug_only) return Status::OK();
+ parquet::Statistics page_stats;
+ page_stats_base_->EncodeToThrift(&page_stats);
+ // If pages_stats contains min_value and max_value, then append them to min_values_
+ // and max_values_ and also mark the page as not null. In case min and max values are
+ // not set, push empty strings to maintain the consistency of the index and mark the
+ // page as null. Always push the null_count.
+ string min_val;
+ string max_val;
+ if ((page_stats.__isset.min_value) && (page_stats.__isset.max_value)) {
+ Status s_min = TruncateDown(page_stats.min_value, PAGE_INDEX_MAX_STRING_LENGTH,
+ &min_val);
+ Status s_max = TruncateUp(page_stats.max_value, PAGE_INDEX_MAX_STRING_LENGTH,
+ &max_val);
+ if (!s_min.ok() || !s_max.ok()) valid_column_index_ = false;
+ column_index_.null_pages.push_back(false);
+ } else {
+ DCHECK(!page_stats.__isset.min_value && !page_stats.__isset.max_value);
+ column_index_.null_pages.push_back(true);
+ DCHECK_EQ(page_stats.null_count, num_values_);
+ }
+ RETURN_IF_ERROR(
+ AddMemoryConsumptionForPageIndex(min_val.capacity() + max_val.capacity()));
+ column_index_.min_values.emplace_back(std::move(min_val));
+ column_index_.max_values.emplace_back(std::move(max_val));
+ column_index_.null_counts.push_back(page_stats.null_count);
+ return Status::OK();
+ }
+
// Encodes value into the current page output buffer and updates the column statistics
// aggregates. Returns true if the value was appended successfully to the current page.
// Returns false if the value was not appended to the current page and the caller can
@@ -645,11 +699,10 @@ Status HdfsParquetTableWriter::BaseColumnWriter::Flush(int64_t* file_pos,
*first_data_page = *file_pos;
int64_t current_row_group_index = 0;
- offset_index_.page_locations.resize(num_data_pages_);
+ RETURN_IF_ERROR(ReserveOffsetIndex(num_data_pages_));
// Write data pages
- for (int i = 0; i < num_data_pages_; ++i) {
- DataPage& page = pages_[i];
+ for (const DataPage& page : pages_) {
parquet::PageLocation location;
if (page.header.data_page_header.num_values == 0) {
@@ -657,7 +710,7 @@ Status HdfsParquetTableWriter::BaseColumnWriter::Flush(int64_t* file_pos,
location.offset = -1;
location.compressed_page_size = 0;
location.first_row_index = -1;
- offset_index_.page_locations[i] = location;
+ AddLocationToOffsetIndex(location);
continue;
}
@@ -677,7 +730,7 @@ Status HdfsParquetTableWriter::BaseColumnWriter::Flush(int64_t* file_pos,
// its name suggests. On the other hand, parquet::PageLocation::compressed_page_size
// also includes the size of the page header.
location.compressed_page_size = page.header.compressed_page_size + len;
- offset_index_.page_locations[i] = location;
+ AddLocationToOffsetIndex(location);
// Write the page data
RETURN_IF_ERROR(parent_->Write(page.data, page.header.compressed_page_size));
@@ -754,37 +807,7 @@ Status HdfsParquetTableWriter::BaseColumnWriter::FinalizeCurrentPage() {
}
DCHECK(page_stats_base_ != nullptr);
- parquet::Statistics page_stats;
- page_stats_base_->EncodeToThrift(&page_stats);
- {
- // If pages_stats contains min_value and max_value, then append them to min_values_
- // and max_values_ and also mark the page as not null. In case min and max values are
- // not set, push empty strings to maintain the consistency of the index and mark the
- // page as null. Always push the null_count.
- string min_val;
- string max_val;
- if ((page_stats.__isset.min_value) && (page_stats.__isset.max_value)) {
- Status s_min = TruncateDown(page_stats.min_value, PAGE_INDEX_MAX_STRING_LENGTH,
- &min_val);
- Status s_max = TruncateUp(page_stats.max_value, PAGE_INDEX_MAX_STRING_LENGTH,
- &max_val);
- if (!s_min.ok() || !s_max.ok()) valid_column_index_ = false;
- column_index_.null_pages.push_back(false);
- } else {
- DCHECK(!page_stats.__isset.min_value && !page_stats.__isset.max_value);
- column_index_.null_pages.push_back(true);
- DCHECK_EQ(page_stats.null_count, num_values_);
- }
- int64_t new_memory_allocation = min_val.capacity() + max_val.capacity();
- if (UNLIKELY(!table_sink_mem_tracker_->TryConsume(new_memory_allocation))) {
- return table_sink_mem_tracker_->MemLimitExceeded(parent_->state_,
- "Failed to allocate memory for Parquet page index.", new_memory_allocation);
- }
- page_index_memory_consumption_ += new_memory_allocation;
- column_index_.min_values.emplace_back(std::move(min_val));
- column_index_.max_values.emplace_back(std::move(max_val));
- column_index_.null_counts.push_back(page_stats.null_count);
- }
+ RETURN_IF_ERROR(AddPageStatsToColumnIndex());
// Update row group statistics from page statistics.
DCHECK(row_group_stats_base_ != nullptr);
@@ -1137,6 +1160,7 @@ Status HdfsParquetTableWriter::Finalize() {
RETURN_IF_ERROR(FlushCurrentRowGroup());
RETURN_IF_ERROR(WritePageIndex());
+ for (auto& column : columns_) column->Reset();
RETURN_IF_ERROR(WriteFileFooter());
stats_.__set_parquet_stats(parquet_insert_stats_);
COUNTER_ADD(parent_->rows_inserted_counter(), row_count_);
@@ -1249,6 +1273,8 @@ Status HdfsParquetTableWriter::FlushCurrentRowGroup() {
}
Status HdfsParquetTableWriter::WritePageIndex() {
+ if (!FLAGS_enable_parquet_page_index_writing_debug_only) return Status::OK();
+
// Currently Impala only write Parquet files with a single row group. The current
// page index logic depends on this behavior as it only keeps one row group's
// statistics in memory.
@@ -1284,8 +1310,6 @@ Status HdfsParquetTableWriter::WritePageIndex() {
row_group->columns[i].__set_offset_index_length(len);
file_pos_ += len;
}
- // Reset column writers.
- for (auto& column : columns_) column->Reset();
return Status::OK();
}
http://git-wip-us.apache.org/repos/asf/impala/blob/843683ed/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
index 8e95168..3b25427 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/stats-extrapolation.test
@@ -33,17 +33,17 @@ show table stats alltypes
YEAR, MONTH, #ROWS, EXTRAP #ROWS, #FILES, SIZE, BYTES CACHED, CACHE REPLICATION, FORMAT, INCREMENTAL STATS, LOCATION
---- RESULTS
'2009','1',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=1'
-'2009','2',-1,289,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=2'
-'2009','3',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=3'
+'2009','2',-1,288,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=2'
+'2009','3',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=3'
'2009','4',-1,302,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=4'
-'2009','5',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=5'
+'2009','5',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=5'
'2009','6',-1,302,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=6'
-'2009','7',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=7'
-'2009','8',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=8'
+'2009','7',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=7'
+'2009','8',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=8'
'2009','9',-1,302,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=9'
-'2009','10',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=10'
+'2009','10',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=10'
'2009','11',-1,302,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=11'
-'2009','12',-1,307,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=12'
+'2009','12',-1,308,1,regex:.*B,'NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/alltypes/year=2009/month=12'
'Total','',3650,3650,12,regex:.*B,'0B','','','',''
---- TYPES
STRING,STRING,BIGINT,BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
http://git-wip-us.apache.org/repos/asf/impala/blob/843683ed/tests/custom_cluster/test_parquet_page_index.py
----------------------------------------------------------------------
diff --git a/tests/custom_cluster/test_parquet_page_index.py b/tests/custom_cluster/test_parquet_page_index.py
new file mode 100644
index 0000000..0d2a750
--- /dev/null
+++ b/tests/custom_cluster/test_parquet_page_index.py
@@ -0,0 +1,371 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Targeted Impala insert tests
+
+import os
+
+from collections import namedtuple
+from subprocess import check_call
+from parquet.ttypes import BoundaryOrder, ColumnIndex, OffsetIndex, PageHeader, PageType
+
+from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.skip import SkipIfLocal
+from tests.util.filesystem_utils import get_fs_path
+from tests.util.get_parquet_metadata import (
+ decode_stats_value,
+ get_parquet_metadata,
+ read_serialized_object
+)
+
+PAGE_INDEX_MAX_STRING_LENGTH = 64
+
+
+@SkipIfLocal.parquet_file_size
+class TestHdfsParquetTableIndexWriter(CustomClusterTestSuite):
+ """Since PARQUET-922 page statistics can be written before the footer.
+ The tests in this class checks if Impala writes the page indices correctly.
+ It is temporarily a custom cluster test suite because we need to set the
+ enable_parquet_page_index_writing command-line flag for the Impala daemon
+ in order to make it write the page index.
+ TODO: IMPALA-5843 Once Impala is able to read the page index and also write it by
+ default, this test suite should be moved back to query tests.
+ """
+ @classmethod
+ def get_workload(cls):
+ return 'functional-query'
+
+ @classmethod
+ def add_test_dimensions(cls):
+ super(CustomClusterTestSuite, cls).add_test_dimensions()
+ cls.ImpalaTestMatrix.add_constraint(
+ lambda v: v.get_value('table_format').file_format == 'parquet')
+
+ def _get_row_group_from_file(self, parquet_file):
+ """Returns namedtuples that contain the schema, stats, offset_index, column_index,
+ and page_headers for each column in the first row group in file 'parquet_file'. Fails
+ if the file contains multiple row groups.
+ """
+ ColumnInfo = namedtuple('ColumnInfo', ['schema', 'stats', 'offset_index',
+ 'column_index', 'page_headers'])
+
+ file_meta_data = get_parquet_metadata(parquet_file)
+ assert len(file_meta_data.row_groups) == 1
+ # We only support flat schemas, the additional element is the root element.
+ schemas = file_meta_data.schema[1:]
+ row_group = file_meta_data.row_groups[0]
+ assert len(schemas) == len(row_group.columns)
+ row_group_index = []
+ with open(parquet_file) as file_handle:
+ for column, schema in zip(row_group.columns, schemas):
+ column_index_offset = column.column_index_offset
+ column_index_length = column.column_index_length
+ column_index = None
+ if column_index_offset and column_index_length:
+ column_index = read_serialized_object(ColumnIndex, file_handle,
+ column_index_offset, column_index_length)
+ column_meta_data = column.meta_data
+ stats = None
+ if column_meta_data:
+ stats = column_meta_data.statistics
+
+ offset_index_offset = column.offset_index_offset
+ offset_index_length = column.offset_index_length
+ offset_index = None
+ page_headers = []
+ if offset_index_offset and offset_index_length:
+ offset_index = read_serialized_object(OffsetIndex, file_handle,
+ offset_index_offset, offset_index_length)
+ for page_loc in offset_index.page_locations:
+ page_header = read_serialized_object(PageHeader, file_handle, page_loc.offset,
+ page_loc.compressed_page_size)
+ page_headers.append(page_header)
+
+ column_info = ColumnInfo(schema, stats, offset_index, column_index, page_headers)
+ row_group_index.append(column_info)
+ return row_group_index
+
+ def _get_row_groups_from_hdfs_folder(self, hdfs_path, tmpdir):
+ """Returns a list of column infos (containing the schema, stats, offset_index,
+ column_index, and page_headers) for the first row group in all parquet files in
+ 'hdfs_path'.
+ """
+ row_group_indexes = []
+ check_call(['hdfs', 'dfs', '-get', hdfs_path, tmpdir.strpath])
+ for root, subdirs, files in os.walk(tmpdir.strpath):
+ for f in files:
+ parquet_file = os.path.join(root, str(f))
+ row_group_indexes.append(self._get_row_group_from_file(parquet_file))
+ return row_group_indexes
+
+ def _validate_page_locations(self, page_locations):
+ """Validate that the page locations are in order."""
+ for previous_loc, current_loc in zip(page_locations[:-1], page_locations[1:]):
+ assert previous_loc.offset < current_loc.offset
+ assert previous_loc.first_row_index < current_loc.first_row_index
+
+ def _validate_null_stats(self, index_size, column_info):
+ """Validates the statistics stored in null_pages and null_counts."""
+ column_index = column_info.column_index
+ column_stats = column_info.stats
+ assert column_index.null_pages is not None
+ assert len(column_index.null_pages) == index_size
+ assert column_index.null_counts is not None
+ assert len(column_index.null_counts) == index_size
+
+ for page_is_null, null_count, page_header in zip(column_index.null_pages,
+ column_index.null_counts, column_info.page_headers):
+ assert page_header.type == PageType.DATA_PAGE
+ num_values = page_header.data_page_header.num_values
+ assert not page_is_null or null_count == num_values
+
+ if column_stats:
+ assert column_stats.null_count == sum(column_index.null_counts)
+
+ def _validate_min_max_values(self, index_size, column_info):
+ """Validate min/max values of the pages in a column chunk."""
+ column_index = column_info.column_index
+ min_values = column_info.column_index.min_values
+ assert len(min_values) == index_size
+ max_values = column_info.column_index.max_values
+ assert len(max_values) == index_size
+
+ if not column_info.stats:
+ return
+
+ column_min_value_str = column_info.stats.min_value
+ column_max_value_str = column_info.stats.max_value
+ if column_min_value_str is None or column_max_value_str is None:
+ # If either is None, then both need to be None.
+ assert column_min_value_str is None and column_max_value_str is None
+ # No min and max value, all pages need to be null
+ for idx, null_page in enumerate(column_index.null_pages):
+ assert null_page, "Page {} of column {} is not null, \
+ but doesn't have min and max values!".format(idx, column_index.schema.name)
+ # Everything is None, no further checks needed.
+ return
+
+ column_min_value = decode_stats_value(column_info.schema, column_min_value_str)
+ for null_page, page_min_str in zip(column_index.null_pages, min_values):
+ if not null_page:
+ page_min_value = decode_stats_value(column_info.schema, page_min_str)
+ # If type is str, page_min_value might have been truncated.
+ if isinstance(page_min_value, basestring):
+ assert page_min_value >= column_min_value[:len(page_min_value)]
+ else:
+ assert page_min_value >= column_min_value
+
+ column_max_value = decode_stats_value(column_info.schema, column_max_value_str)
+ for null_page, page_max_str in zip(column_index.null_pages, max_values):
+ if not null_page:
+ page_max_value = decode_stats_value(column_info.schema, page_max_str)
+ # If type is str, page_max_value might have been truncated and incremented.
+ if (isinstance(page_max_value, basestring) and
+ len(page_max_value) == PAGE_INDEX_MAX_STRING_LENGTH):
+ max_val_prefix = page_max_value.rstrip('\0')
+ assert max_val_prefix[:-1] <= column_max_value
+ else:
+ assert page_max_value <= column_max_value
+
+ def _validate_ordering(self, ordering, schema, null_pages, min_values, max_values):
+ """Check if the ordering of the values reflects the value of 'ordering'."""
+
+ def is_sorted(l, reverse=False):
+ if not reverse:
+ return all(a <= b for a, b in zip(l, l[1:]))
+ else:
+ return all(a >= b for a, b in zip(l, l[1:]))
+
+ # Filter out null pages and decode the actual min/max values.
+ actual_min_values = [decode_stats_value(schema, min_val)
+ for min_val, is_null in zip(min_values, null_pages)
+ if not is_null]
+ actual_max_values = [decode_stats_value(schema, max_val)
+ for max_val, is_null in zip(max_values, null_pages)
+ if not is_null]
+
+ # For ASCENDING and DESCENDING, both min and max values need to be sorted.
+ if ordering == BoundaryOrder.ASCENDING:
+ assert is_sorted(actual_min_values)
+ assert is_sorted(actual_max_values)
+ elif ordering == BoundaryOrder.DESCENDING:
+ assert is_sorted(actual_min_values, reverse=True)
+ assert is_sorted(actual_max_values, reverse=True)
+ else:
+ assert ordering == BoundaryOrder.UNORDERED
+ # For UNORDERED, min and max values cannot be both sorted.
+ assert not is_sorted(actual_min_values) or not is_sorted(actual_max_values)
+ assert (not is_sorted(actual_min_values, reverse=True) or
+ not is_sorted(actual_max_values, reverse=True))
+
+ def _validate_boundary_order(self, column_info):
+ """Validate that min/max values are really in the order specified by
+ boundary order.
+ """
+ column_index = column_info.column_index
+ self._validate_ordering(column_index.boundary_order, column_info.schema,
+ column_index.null_pages, column_index.min_values, column_index.max_values)
+
+ def _validate_parquet_page_index(self, hdfs_path, tmpdir):
+ """Validates that 'hdfs_path' contains exactly one parquet file and that the rowgroup
+ index in that file is in the valid format.
+ """
+ row_group_indexes = self._get_row_groups_from_hdfs_folder(hdfs_path, tmpdir)
+ for columns in row_group_indexes:
+ for column_info in columns:
+ try:
+ index_size = len(column_info.offset_index.page_locations)
+ assert index_size > 0
+ self._validate_page_locations(column_info.offset_index.page_locations)
+ # IMPALA-7304: Impala doesn't write column index for floating-point columns
+ # until PARQUET-1222 is resolved.
+ if column_info.schema.type in [4, 5]:
+ assert column_info.column_index is None
+ continue
+ self._validate_null_stats(index_size, column_info)
+ self._validate_min_max_values(index_size, column_info)
+ self._validate_boundary_order(column_info)
+ except AssertionError as e:
+ e.args += ("Validation failed on column {}.".format(column_info.schema.name),)
+ raise
+
+ def _ctas_table_and_verify_index(self, vector, unique_database, source_table,
+ tmpdir, sorting_column=None):
+ """Copies 'source_table' into a parquet table and makes sure that the index
+ in the resulting parquet file is valid.
+ """
+ table_name = "test_hdfs_parquet_table_writer"
+ qualified_table_name = "{0}.{1}".format(unique_database, table_name)
+ hdfs_path = get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
+ table_name))
+ # Setting num_nodes = 1 ensures that the query is executed on the coordinator,
+ # resulting in a single parquet file being written.
+ vector.get_value('exec_option')['num_nodes'] = 1
+ self.execute_query("drop table if exists {0}".format(qualified_table_name))
+ if sorting_column is None:
+ query = ("create table {0} stored as parquet as select * from {1}").format(
+ qualified_table_name, source_table)
+ else:
+ query = ("create table {0} sort by({1}) stored as parquet as select * from {2}"
+ ).format(qualified_table_name, sorting_column, source_table)
+ self.execute_query(query, vector.get_value('exec_option'))
+ self._validate_parquet_page_index(hdfs_path, tmpdir.join(source_table))
+
+ def _create_string_table_with_values(self, vector, unique_database, table_name,
+ values_sql):
+ """Creates a parquet table that has a single string column, then invokes an insert
+ statement on it with the 'values_sql' parameter. E.g. 'values_sql' is "('asdf')".
+ It returns the HDFS path for the table.
+ """
+ qualified_table_name = "{0}.{1}".format(unique_database, table_name)
+ self.execute_query("drop table if exists {0}".format(qualified_table_name))
+ vector.get_value('exec_option')['num_nodes'] = 1
+ query = ("create table {0} (str string) stored as parquet").format(
+ qualified_table_name)
+ self.execute_query(query, vector.get_value('exec_option'))
+ self.execute_query("insert into {0} values {1}".format(qualified_table_name,
+ values_sql), vector.get_value('exec_option'))
+ return get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
+ table_name))
+
+ @CustomClusterTestSuite.with_args("--enable_parquet_page_index_writing_debug_only")
+ def test_ctas_tables(self, vector, unique_database, tmpdir):
+ """Test different Parquet files created via CTAS statements."""
+
+ # Test that writing a parquet file populates the rowgroup indexes with the correct
+ # values.
+ self._ctas_table_and_verify_index(vector, unique_database, "functional.alltypes",
+ tmpdir)
+
+ # Test that writing a parquet file populates the rowgroup indexes with the correct
+ # values, using decimal types.
+ self._ctas_table_and_verify_index(vector, unique_database, "functional.decimal_tbl",
+ tmpdir)
+
+ # Test that writing a parquet file populates the rowgroup indexes with the correct
+ # values, using char types.
+ self._ctas_table_and_verify_index(vector, unique_database, "functional.chars_formats",
+ tmpdir)
+
+ # Test that we don't write min/max values in the index for null columns.
+ # Ensure null_count is set for columns with null values.
+ self._ctas_table_and_verify_index(vector, unique_database, "functional.nulltable",
+ tmpdir)
+
+ # Test that when a ColumnChunk is written across multiple pages, the index is
+ # valid.
+ self._ctas_table_and_verify_index(vector, unique_database, "tpch.customer",
+ tmpdir)
+ self._ctas_table_and_verify_index(vector, unique_database, "tpch.orders",
+ tmpdir)
+
+ # Test that when the schema has a sorting column, the index is valid.
+ self._ctas_table_and_verify_index(vector, unique_database,
+ "functional_parquet.zipcode_incomes", tmpdir, "id")
+
+ # Test table with wide row.
+ self._ctas_table_and_verify_index(vector, unique_database,
+ "functional_parquet.widerow", tmpdir)
+
+ # Test tables with wide rows and many columns.
+ self._ctas_table_and_verify_index(vector, unique_database,
+ "functional_parquet.widetable_250_cols", tmpdir)
+ self._ctas_table_and_verify_index(vector, unique_database,
+ "functional_parquet.widetable_500_cols", tmpdir)
+ self._ctas_table_and_verify_index(vector, unique_database,
+ "functional_parquet.widetable_1000_cols", tmpdir)
+
+ @CustomClusterTestSuite.with_args("--enable_parquet_page_index_writing_debug_only")
+ def test_max_string_values(self, vector, unique_database, tmpdir):
+ """Test string values that are all 0xFFs or end with 0xFFs."""
+
+ # String value is all of 0xFFs but its length is less than PAGE_INDEX_TRUNCATE_LENGTH.
+ short_tbl = "short_tbl"
+ short_hdfs_path = self._create_string_table_with_values(vector, unique_database,
+ short_tbl, "(rpad('', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH - 1))
+ self._validate_parquet_page_index(short_hdfs_path, tmpdir.join(short_tbl))
+
+ # String value is all of 0xFFs and its length is PAGE_INDEX_TRUNCATE_LENGTH.
+ fit_tbl = "fit_tbl"
+ fit_hdfs_path = self._create_string_table_with_values(vector, unique_database,
+ fit_tbl, "(rpad('', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH))
+ self._validate_parquet_page_index(fit_hdfs_path, tmpdir.join(fit_tbl))
+
+ # All bytes are 0xFFs and the string is longer then PAGE_INDEX_TRUNCATE_LENGTH, so we
+ # should not write page statistics.
+ too_long_tbl = "too_long_tbl"
+ too_long_hdfs_path = self._create_string_table_with_values(vector, unique_database,
+ too_long_tbl, "(rpad('', {0}, chr(255)))".format(
+ PAGE_INDEX_MAX_STRING_LENGTH + 1))
+ row_group_indexes = self._get_row_groups_from_hdfs_folder(too_long_hdfs_path,
+ tmpdir.join(too_long_tbl))
+ column = row_group_indexes[0][0]
+ assert column.column_index is None
+ # We always write the offset index
+ assert column.offset_index is not None
+
+ # Test string with value that starts with 'aaa' following with 0xFFs and its length is
+ # greater than PAGE_INDEX_TRUNCATE_LENGTH. Max value should be 'aab'.
+ aaa_tbl = "aaa_tbl"
+ aaa_hdfs_path = self._create_string_table_with_values(vector, unique_database,
+ aaa_tbl, "(rpad('aaa', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH + 1))
+ row_group_indexes = self._get_row_groups_from_hdfs_folder(aaa_hdfs_path,
+ tmpdir.join(aaa_tbl))
+ column = row_group_indexes[0][0]
+ assert len(column.column_index.max_values) == 1
+ max_value = column.column_index.max_values[0]
+ assert max_value == 'aab'
http://git-wip-us.apache.org/repos/asf/impala/blob/843683ed/tests/query_test/test_parquet_page_index.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_parquet_page_index.py b/tests/query_test/test_parquet_page_index.py
deleted file mode 100644
index 6235819..0000000
--- a/tests/query_test/test_parquet_page_index.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Targeted Impala insert tests
-
-import os
-
-from collections import namedtuple
-from subprocess import check_call
-from parquet.ttypes import BoundaryOrder, ColumnIndex, OffsetIndex, PageHeader, PageType
-
-from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfLocal
-from tests.util.filesystem_utils import get_fs_path
-from tests.util.get_parquet_metadata import (
- decode_stats_value,
- get_parquet_metadata,
- read_serialized_object
-)
-
-PAGE_INDEX_MAX_STRING_LENGTH = 64
-
-
-@SkipIfLocal.parquet_file_size
-class TestHdfsParquetTableIndexWriter(ImpalaTestSuite):
- """Since PARQUET-922 page statistics can be written before the footer.
- The tests in this class checks if Impala writes the page indices correctly.
- """
- @classmethod
- def get_workload(cls):
- return 'functional-query'
-
- @classmethod
- def add_test_dimensions(cls):
- super(TestHdfsParquetTableIndexWriter, cls).add_test_dimensions()
- cls.ImpalaTestMatrix.add_constraint(
- lambda v: v.get_value('table_format').file_format == 'parquet')
-
- def _get_row_group_from_file(self, parquet_file):
- """Returns namedtuples that contain the schema, stats, offset_index, column_index,
- and page_headers for each column in the first row group in file 'parquet_file'. Fails
- if the file contains multiple row groups.
- """
- ColumnInfo = namedtuple('ColumnInfo', ['schema', 'stats', 'offset_index',
- 'column_index', 'page_headers'])
-
- file_meta_data = get_parquet_metadata(parquet_file)
- assert len(file_meta_data.row_groups) == 1
- # We only support flat schemas, the additional element is the root element.
- schemas = file_meta_data.schema[1:]
- row_group = file_meta_data.row_groups[0]
- assert len(schemas) == len(row_group.columns)
- row_group_index = []
- with open(parquet_file) as file_handle:
- for column, schema in zip(row_group.columns, schemas):
- column_index_offset = column.column_index_offset
- column_index_length = column.column_index_length
- column_index = None
- if column_index_offset and column_index_length:
- column_index = read_serialized_object(ColumnIndex, file_handle,
- column_index_offset, column_index_length)
- column_meta_data = column.meta_data
- stats = None
- if column_meta_data:
- stats = column_meta_data.statistics
-
- offset_index_offset = column.offset_index_offset
- offset_index_length = column.offset_index_length
- offset_index = None
- page_headers = []
- if offset_index_offset and offset_index_length:
- offset_index = read_serialized_object(OffsetIndex, file_handle,
- offset_index_offset, offset_index_length)
- for page_loc in offset_index.page_locations:
- page_header = read_serialized_object(PageHeader, file_handle, page_loc.offset,
- page_loc.compressed_page_size)
- page_headers.append(page_header)
-
- column_info = ColumnInfo(schema, stats, offset_index, column_index, page_headers)
- row_group_index.append(column_info)
- return row_group_index
-
- def _get_row_groups_from_hdfs_folder(self, hdfs_path, tmpdir):
- """Returns a list of column infos (containing the schema, stats, offset_index,
- column_index, and page_headers) for the first row group in all parquet files in
- 'hdfs_path'.
- """
- row_group_indexes = []
- check_call(['hdfs', 'dfs', '-get', hdfs_path, tmpdir.strpath])
- for root, subdirs, files in os.walk(tmpdir.strpath):
- for f in files:
- parquet_file = os.path.join(root, str(f))
- row_group_indexes.append(self._get_row_group_from_file(parquet_file))
- return row_group_indexes
-
- def _validate_page_locations(self, page_locations):
- """Validate that the page locations are in order."""
- for previous_loc, current_loc in zip(page_locations[:-1], page_locations[1:]):
- assert previous_loc.offset < current_loc.offset
- assert previous_loc.first_row_index < current_loc.first_row_index
-
- def _validate_null_stats(self, index_size, column_info):
- """Validates the statistics stored in null_pages and null_counts."""
- column_index = column_info.column_index
- column_stats = column_info.stats
- assert column_index.null_pages is not None
- assert len(column_index.null_pages) == index_size
- assert column_index.null_counts is not None
- assert len(column_index.null_counts) == index_size
-
- for page_is_null, null_count, page_header in zip(column_index.null_pages,
- column_index.null_counts, column_info.page_headers):
- assert page_header.type == PageType.DATA_PAGE
- num_values = page_header.data_page_header.num_values
- assert not page_is_null or null_count == num_values
-
- if column_stats:
- assert column_stats.null_count == sum(column_index.null_counts)
-
- def _validate_min_max_values(self, index_size, column_info):
- """Validate min/max values of the pages in a column chunk."""
- column_index = column_info.column_index
- min_values = column_info.column_index.min_values
- assert len(min_values) == index_size
- max_values = column_info.column_index.max_values
- assert len(max_values) == index_size
-
- if not column_info.stats:
- return
-
- column_min_value_str = column_info.stats.min_value
- column_max_value_str = column_info.stats.max_value
- if column_min_value_str is None or column_max_value_str is None:
- # If either is None, then both need to be None.
- assert column_min_value_str is None and column_max_value_str is None
- # No min and max values, so all pages need to be null.
- for idx, null_page in enumerate(column_index.null_pages):
- assert null_page, "Page {} of column {} is not null, \
- but doesn't have min and max values!".format(idx, column_info.schema.name)
- # Everything is None, no further checks needed.
- return
-
- column_min_value = decode_stats_value(column_info.schema, column_min_value_str)
- for null_page, page_min_str in zip(column_index.null_pages, min_values):
- if not null_page:
- page_min_value = decode_stats_value(column_info.schema, page_min_str)
- # If type is str, page_min_value might have been truncated.
- if isinstance(page_min_value, basestring):
- assert page_min_value >= column_min_value[:len(page_min_value)]
- else:
- assert page_min_value >= column_min_value
-
- column_max_value = decode_stats_value(column_info.schema, column_max_value_str)
- for null_page, page_max_str in zip(column_index.null_pages, max_values):
- if not null_page:
- page_max_value = decode_stats_value(column_info.schema, page_max_str)
- # If type is str, page_max_value might have been truncated and incremented.
- if (isinstance(page_max_value, basestring) and
- len(page_max_value) == PAGE_INDEX_MAX_STRING_LENGTH):
- max_val_prefix = page_max_value.rstrip('\0')
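- # Everything before the incremented last byte is an unmodified prefix of the
- # real page max, so it must not exceed the column-level max value.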
- assert max_val_prefix[:-1] <= column_max_value
- else:
- assert page_max_value <= column_max_value
-
- def _validate_ordering(self, ordering, schema, null_pages, min_values, max_values):
- """Check if the ordering of the values reflects the value of 'ordering'."""
-
- def is_sorted(l, reverse=False):
- if not reverse:
- return all(a <= b for a, b in zip(l, l[1:]))
- else:
- return all(a >= b for a, b in zip(l, l[1:]))
-
- # Filter out null pages and decode the actual min/max values.
- actual_min_values = [decode_stats_value(schema, min_val)
- for min_val, is_null in zip(min_values, null_pages)
- if not is_null]
- actual_max_values = [decode_stats_value(schema, max_val)
- for max_val, is_null in zip(max_values, null_pages)
- if not is_null]
-
- # For ASCENDING and DESCENDING, both min and max values need to be sorted.
- if ordering == BoundaryOrder.ASCENDING:
- assert is_sorted(actual_min_values)
- assert is_sorted(actual_max_values)
- elif ordering == BoundaryOrder.DESCENDING:
- assert is_sorted(actual_min_values, reverse=True)
- assert is_sorted(actual_max_values, reverse=True)
- else:
- assert ordering == BoundaryOrder.UNORDERED
- # For UNORDERED, the min and max values must not both be sorted (in either direction).
- assert not is_sorted(actual_min_values) or not is_sorted(actual_max_values)
- assert (not is_sorted(actual_min_values, reverse=True) or
- not is_sorted(actual_max_values, reverse=True))
-
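As a concrete illustration of the BoundaryOrder cases validated above (the values are
made up for this example, not taken from any test table):

    # ASCENDING:  min_values = [1, 3, 7]   max_values = [2, 5, 9]   -> both lists sorted
    # DESCENDING: min_values = [7, 3, 1]   max_values = [9, 5, 2]   -> both sorted in reverse
    # UNORDERED:  min_values = [1, 7, 3]   max_values = [2, 9, 5]   -> neither direction holds for both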
- def _validate_boundary_order(self, column_info):
- """Validate that min/max values are really in the order specified by
- boundary order.
- """
- column_index = column_info.column_index
- self._validate_ordering(column_index.boundary_order, column_info.schema,
- column_index.null_pages, column_index.min_values, column_index.max_values)
-
- def _validate_parquet_page_index(self, hdfs_path, tmpdir):
- """Validates that 'hdfs_path' contains exactly one parquet file and that the rowgroup
- index in that file is in the valid format.
- """
- row_group_indexes = self._get_row_groups_from_hdfs_folder(hdfs_path, tmpdir)
- for columns in row_group_indexes:
- for column_info in columns:
- try:
- index_size = len(column_info.offset_index.page_locations)
- assert index_size > 0
- self._validate_page_locations(column_info.offset_index.page_locations)
- # IMPALA-7304: Impala doesn't write column index for floating-point columns
- # until PARQUET-1222 is resolved.
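- # (In the Parquet Type enum, 4 is FLOAT and 5 is DOUBLE.)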
- if column_info.schema.type in [4, 5]:
- assert column_info.column_index is None
- continue
- self._validate_null_stats(index_size, column_info)
- self._validate_min_max_values(index_size, column_info)
- self._validate_boundary_order(column_info)
- except AssertionError as e:
- e.args += ("Validation failed on column {}.".format(column_info.schema.name),)
- raise
-
- def _ctas_table_and_verify_index(self, vector, unique_database, source_table,
- tmpdir, sorting_column=None):
- """Copies 'source_table' into a parquet table and makes sure that the index
- in the resulting parquet file is valid.
- """
- table_name = "test_hdfs_parquet_table_writer"
- qualified_table_name = "{0}.{1}".format(unique_database, table_name)
- hdfs_path = get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
- table_name))
- # Setting num_nodes = 1 ensures that the query is executed on the coordinator,
- # resulting in a single parquet file being written.
- vector.get_value('exec_option')['num_nodes'] = 1
- self.execute_query("drop table if exists {0}".format(qualified_table_name))
- if sorting_column is None:
- query = ("create table {0} stored as parquet as select * from {1}").format(
- qualified_table_name, source_table)
- else:
- query = ("create table {0} sort by({1}) stored as parquet as select * from {2}"
- ).format(qualified_table_name, sorting_column, source_table)
- self.execute_query(query, vector.get_value('exec_option'))
- self._validate_parquet_page_index(hdfs_path, tmpdir.join(source_table))
-
- def _create_string_table_with_values(self, vector, unique_database, table_name,
- values_sql):
- """Creates a parquet table that has a single string column, then invokes an insert
- statement on it with the 'values_sql' parameter. E.g. 'values_sql' is "('asdf')".
- It returns the HDFS path for the table.
- """
- qualified_table_name = "{0}.{1}".format(unique_database, table_name)
- self.execute_query("drop table if exists {0}".format(qualified_table_name))
- vector.get_value('exec_option')['num_nodes'] = 1
- query = ("create table {0} (str string) stored as parquet").format(qualified_table_name)
- self.execute_query(query, vector.get_value('exec_option'))
- self.execute_query("insert into {0} values {1}".format(qualified_table_name,
- values_sql), vector.get_value('exec_option'))
- return get_fs_path('/test-warehouse/{0}.db/{1}/'.format(unique_database,
- table_name))
-
- def test_write_index_alltypes(self, vector, unique_database, tmpdir):
- """Test that writing a parquet file populates the rowgroup indexes with the correct
- values.
- """
- self._ctas_table_and_verify_index(vector, unique_database, "functional.alltypes",
- tmpdir)
-
- def test_write_index_decimals(self, vector, unique_database, tmpdir):
- """Test that writing a parquet file populates the rowgroup indexes with the correct
- values, using decimal types.
- """
- self._ctas_table_and_verify_index(vector, unique_database, "functional.decimal_tbl",
- tmpdir)
-
- def test_write_index_chars(self, vector, unique_database, tmpdir):
- """Test that writing a parquet file populates the rowgroup indexes with the correct
- values, using char types.
- """
- self._ctas_table_and_verify_index(vector, unique_database, "functional.chars_formats",
- tmpdir)
-
- def test_write_index_null(self, vector, unique_database, tmpdir):
- """Test that we don't write min/max values in the index for null columns.
- Ensure null_count is set for columns with null values.
- """
- self._ctas_table_and_verify_index(vector, unique_database, "functional.nulltable",
- tmpdir)
-
- def test_write_index_multi_page(self, vector, unique_database, tmpdir):
- """Test that when a ColumnChunk is written across multiple pages, the index is
- valid.
- """
- self._ctas_table_and_verify_index(vector, unique_database, "tpch.customer",
- tmpdir)
- self._ctas_table_and_verify_index(vector, unique_database, "tpch.orders",
- tmpdir)
-
- def test_write_index_sorting_column(self, vector, unique_database, tmpdir):
- """Test that when the schema has a sorting column, the index is valid."""
- self._ctas_table_and_verify_index(vector, unique_database,
- "functional_parquet.zipcode_incomes", tmpdir, "id")
-
- def test_write_index_wide_table(self, vector, unique_database, tmpdir):
- """Test table with wide row."""
- self._ctas_table_and_verify_index(vector, unique_database,
- "functional_parquet.widerow", tmpdir)
-
- def test_write_index_many_columns_tables(self, vector, unique_database, tmpdir):
- """Test tables with wide rows and many columns."""
- self._ctas_table_and_verify_index(vector, unique_database,
- "functional_parquet.widetable_250_cols", tmpdir)
- self._ctas_table_and_verify_index(vector, unique_database,
- "functional_parquet.widetable_500_cols", tmpdir)
- self._ctas_table_and_verify_index(vector, unique_database,
- "functional_parquet.widetable_1000_cols", tmpdir)
-
- def test_max_string_values(self, vector, unique_database, tmpdir):
- """Test string values that are all 0xFFs or end with 0xFFs."""
-
- # The string value is all 0xFF bytes, but its length is less than PAGE_INDEX_TRUNCATE_LENGTH.
- short_tbl = "short_tbl"
- short_hdfs_path = self._create_string_table_with_values(vector, unique_database,
- short_tbl, "(rpad('', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH - 1))
- self._validate_parquet_page_index(short_hdfs_path, tmpdir.join(short_tbl))
-
- # The string value is all 0xFF bytes and its length is exactly PAGE_INDEX_TRUNCATE_LENGTH.
- fit_tbl = "fit_tbl"
- fit_hdfs_path = self._create_string_table_with_values(vector, unique_database,
- fit_tbl, "(rpad('', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH))
- self._validate_parquet_page_index(fit_hdfs_path, tmpdir.join(fit_tbl))
-
- # All bytes are 0xFF and the string is longer than PAGE_INDEX_TRUNCATE_LENGTH, so we
- # should not write page statistics.
- too_long_tbl = "too_long_tbl"
- too_long_hdfs_path = self._create_string_table_with_values(vector, unique_database,
- too_long_tbl, "(rpad('', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH + 1))
- row_group_indexes = self._get_row_groups_from_hdfs_folder(too_long_hdfs_path,
- tmpdir.join(too_long_tbl))
- column = row_group_indexes[0][0]
- assert column.column_index is None
- # We always write the offset index
- assert column.offset_index is not None
-
- # Test a string value that starts with 'aaa' followed by 0xFF bytes, with a length
- # greater than PAGE_INDEX_TRUNCATE_LENGTH. The max value should be 'aab'.
- aaa_tbl = "aaa_tbl"
- aaa_hdfs_path = self._create_string_table_with_values(vector, unique_database,
- aaa_tbl, "(rpad('aaa', {0}, chr(255)))".format(PAGE_INDEX_MAX_STRING_LENGTH + 1))
- row_group_indexes = self._get_row_groups_from_hdfs_folder(aaa_hdfs_path,
- tmpdir.join(aaa_tbl))
- column = row_group_indexes[0][0]
- assert len(column.column_index.max_values) == 1
- max_value = column.column_index.max_values[0]
- assert max_value == 'aab'
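For context on what the validated indexes enable, the sketch below shows how a reader
could combine ColumnIndex min/max values with OffsetIndex page locations to skip pages
that cannot satisfy a predicate. It is a simplified illustration that assumes the
min/max values have already been decoded (e.g. via decode_stats_value); pages_to_read()
is not part of Impala or of this test.

    def pages_to_read(column_index, offset_index, lo, hi):
        # Keep only pages whose [min, max] range can overlap [lo, hi].
        # Null-only pages carry no min/max and are skipped here for simplicity.
        selected = []
        for i, page_loc in enumerate(offset_index.page_locations):
            if column_index.null_pages[i]:
                continue
            if column_index.min_values[i] <= hi and column_index.max_values[i] >= lo:
                # page_loc.offset and page_loc.compressed_page_size say where to read.
                selected.append(page_loc)
        return selected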