Posted to issues@arrow.apache.org by "Nicola Crane (Jira)" <ji...@apache.org> on 2022/09/13 09:48:00 UTC

[jira] [Created] (ARROW-17700) [R] Can't open CSV dataset with partitioning and a schema

Nicola Crane created ARROW-17700:
------------------------------------

             Summary: [R] Can't open CSV dataset with partitioning and a schema
                 Key: ARROW-17700
                 URL: https://issues.apache.org/jira/browse/ARROW-17700
             Project: Apache Arrow
          Issue Type: Bug
          Components: R
            Reporter: Nicola Crane


I feel like this might be a duplicate of a previous ticket, but I can't find it.


{code:r}
library(dplyr)
#> 
#> Attaching package: 'dplyr'
#> The following objects are masked from 'package:stats':
#> 
#>     filter, lag
#> The following objects are masked from 'package:base':
#> 
#>     intersect, setdiff, setequal, union
library(arrow)
#> Some features are not enabled in this build of Arrow. Run `arrow_info()` for more information.
#> 
#> Attaching package: 'arrow'
#> The following object is masked from 'package:utils':
#> 
#>     timestamp

# all good!
tf <- tempfile()
dir.create(tf)
write_dataset(mtcars, tf, format = "csv")
open_dataset(tf, format = "csv") %>% collect()
#> # A tibble: 32 × 11
#>      mpg   cyl  disp    hp  drat    wt  qsec    vs    am  gear  carb
#>    <dbl> <int> <dbl> <int> <dbl> <dbl> <dbl> <int> <int> <int> <int>
#>  1  21       6  160    110  3.9   2.62  16.5     0     1     4     4
#>  2  21       6  160    110  3.9   2.88  17.0     0     1     4     4
#>  3  22.8     4  108     93  3.85  2.32  18.6     1     1     4     1
#>  4  21.4     6  258    110  3.08  3.22  19.4     1     0     3     1
#>  5  18.7     8  360    175  3.15  3.44  17.0     0     0     3     2
#>  6  18.1     6  225    105  2.76  3.46  20.2     1     0     3     1
#>  7  14.3     8  360    245  3.21  3.57  15.8     0     0     3     4
#>  8  24.4     4  147.    62  3.69  3.19  20       1     0     4     2
#>  9  22.8     4  141.    95  3.92  3.15  22.9     1     0     4     2
#> 10  19.2     6  168.   123  3.92  3.44  18.3     1     0     4     4
#> # … with 22 more rows

# all good
tf <- tempfile()
dir.create(tf)
write_dataset(group_by(mtcars, cyl), tf, format = "csv")
open_dataset(tf, format = "csv") %>% collect()
#> # A tibble: 32 × 11
#>      mpg  disp    hp  drat    wt  qsec    vs    am  gear  carb   cyl
#>    <dbl> <dbl> <int> <dbl> <dbl> <dbl> <int> <int> <int> <int> <int>
#>  1  22.8 108      93  3.85  2.32  18.6     1     1     4     1     4
#>  2  24.4 147.     62  3.69  3.19  20       1     0     4     2     4
#>  3  22.8 141.     95  3.92  3.15  22.9     1     0     4     2     4
#>  4  32.4  78.7    66  4.08  2.2   19.5     1     1     4     1     4
#>  5  30.4  75.7    52  4.93  1.62  18.5     1     1     4     2     4
#>  6  33.9  71.1    65  4.22  1.84  19.9     1     1     4     1     4
#>  7  21.5 120.     97  3.7   2.46  20.0     1     0     3     1     4
#>  8  27.3  79      66  4.08  1.94  18.9     1     1     4     1     4
#>  9  26   120.     91  4.43  2.14  16.7     0     1     5     2     4
#> 10  30.4  95.1   113  3.77  1.51  16.9     1     1     5     2     4
#> # … with 22 more rows
list.files(tf)
#> [1] "cyl=4" "cyl=6" "cyl=8"

# hive_style = FALSE leads to no `cyl` column, which, sure, makes sense
tf <- tempfile()
dir.create(tf)
write_dataset(group_by(mtcars, cyl), tf, format = "csv", hive_style = FALSE)
open_dataset(tf, format = "csv") %>% collect()
#> # A tibble: 32 × 10
#>      mpg  disp    hp  drat    wt  qsec    vs    am  gear  carb
#>    <dbl> <dbl> <int> <dbl> <dbl> <dbl> <int> <int> <int> <int>
#>  1  22.8 108      93  3.85  2.32  18.6     1     1     4     1
#>  2  24.4 147.     62  3.69  3.19  20       1     0     4     2
#>  3  22.8 141.     95  3.92  3.15  22.9     1     0     4     2
#>  4  32.4  78.7    66  4.08  2.2   19.5     1     1     4     1
#>  5  30.4  75.7    52  4.93  1.62  18.5     1     1     4     2
#>  6  33.9  71.1    65  4.22  1.84  19.9     1     1     4     1
#>  7  21.5 120.     97  3.7   2.46  20.0     1     0     3     1
#>  8  27.3  79      66  4.08  1.94  18.9     1     1     4     1
#>  9  26   120.     91  4.43  2.14  16.7     0     1     5     2
#> 10  30.4  95.1   113  3.77  1.51  16.9     1     1     5     2
#> # … with 22 more rows
list.files(tf)
#> [1] "4" "6" "8"


# *but* if we try to add the partition column back in via a schema, it doesn't work

desired_schema <- schema(mpg = float64(), disp = float64(), hp = int64(), drat = float64(), 
    wt = float64(), qsec = float64(), vs = int64(), am = int64(), 
    gear = int64(), carb = int64(), cyl = int64())

tf <- tempfile()
dir.create(tf)
write_dataset(group_by(mtcars, cyl), tf, format = "csv", hive_style = FALSE)
open_dataset(tf, format = "csv", schema = desired_schema) %>% collect()
#> Error in `dplyr::collect()`:
#> ! Invalid: Could not open CSV input source '/tmp/RtmpnInOwc/file13f0d38c5b994/4/part-0.csv': Invalid: CSV parse error: Row #1: Expected 11 columns, got 10: "mpg","disp","hp","drat","wt","qsec","vs","am","gear","carb"
#> /home/nic2/arrow/cpp/src/arrow/csv/parser.cc:477  (ParseLine<SpecializedOptions, false>(values_writer, parsed_writer, data, data_end, is_final, &line_end, bulk_filter))
#> /home/nic2/arrow/cpp/src/arrow/csv/parser.cc:566  ParseChunk<SpecializedOptions>( &values_writer, &parsed_writer, data, data_end, is_final, rows_in_chunk, &data, &finished_parsing, bulk_filter)
#> /home/nic2/arrow/cpp/src/arrow/csv/reader.cc:426  parser->ParseFinal(views, &parsed_size)
#> /home/nic2/arrow/cpp/src/arrow/compute/exec/exec_plan.cc:573  iterator_.Next()
#> /home/nic2/arrow/cpp/src/arrow/record_batch.cc:337  ReadNext(&batch)
#> /home/nic2/arrow/cpp/src/arrow/record_batch.cc:351  ToRecordBatches()
list.files(tf)
#> [1] "4" "6" "8"

Created on 2022-09-13 by the reprex package (https://reprex.tidyverse.org) (v2.0.1)

{code}
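
From the parse error it looks like the supplied schema (which includes the partition field {{cyl}}) is being applied when reading the individual CSV files, which only contain 10 columns. As a possible workaround (untested, just a sketch), it might work to list only the columns that are physically present in the files in {{schema}} and declare the partition field separately via the {{partitioning}} argument:

{code:r}
# Untested sketch: describe only the columns actually present in the CSV files,
# and declare the partition field separately via `partitioning`.
file_schema <- schema(
  mpg = float64(), disp = float64(), hp = int64(), drat = float64(),
  wt = float64(), qsec = float64(), vs = int64(), am = int64(),
  gear = int64(), carb = int64()
)

open_dataset(
  tf,
  format = "csv",
  schema = file_schema,
  partitioning = schema(cyl = int64())
) %>%
  collect()
{code}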



