Posted to commits@beam.apache.org by da...@apache.org on 2017/01/13 18:28:49 UTC

[1/2] beam git commit: Removing some of the dataflow references.

Repository: beam
Updated Branches:
  refs/heads/python-sdk 8c2251305 -> f25c0e434


Removing some of the dataflow references.


Project: http://git-wip-us.apache.org/repos/asf/beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/beam/commit/c94946a7
Tree: http://git-wip-us.apache.org/repos/asf/beam/tree/c94946a7
Diff: http://git-wip-us.apache.org/repos/asf/beam/diff/c94946a7

Branch: refs/heads/python-sdk
Commit: c94946a76fa96265094604279e0411f61fa7adb6
Parents: 8c22513
Author: Ahmet Altay <al...@google.com>
Authored: Thu Jan 12 16:57:52 2017 -0800
Committer: Davor Bonaci <da...@google.com>
Committed: Fri Jan 13 10:28:26 2017 -0800

----------------------------------------------------------------------
 sdks/python/README.md                           |   8 +-
 .../examples/cookbook/bigshuffle_test.py        |   1 -
 .../apache_beam/examples/snippets/snippets.py   | 121 +++++--------------
 .../examples/snippets/snippets_test.py          |   4 +-
 .../apache_beam/examples/wordcount_debugging.py |   2 +-
 sdks/python/apache_beam/internal/auth.py        |   2 +-
 sdks/python/apache_beam/internal/util.py        |   2 +-
 sdks/python/apache_beam/io/bigquery.py          |  17 ++-
 .../apache_beam/io/filebasedsource_test.py      |  18 +--
 sdks/python/apache_beam/io/fileio_test.py       |   6 +-
 10 files changed, 58 insertions(+), 123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/README.md
----------------------------------------------------------------------
diff --git a/sdks/python/README.md b/sdks/python/README.md
index 0bee769..8a5e408 100644
--- a/sdks/python/README.md
+++ b/sdks/python/README.md
@@ -145,13 +145,13 @@ http://localhost:8888/apache_beam.html
 Some interesting classes to navigate to:
 
 * `PCollection`, in file
-[`google/cloud/dataflow/pvalue.py`](http://localhost:8888/google.cloud.dataflow.pvalue.html)
+[`apache_beam/pvalue.py`](http://localhost:8888/apache_beam.pvalue.html)
 * `PTransform`, in file
-[`google/cloud/dataflow/transforms/ptransform.py`](http://localhost:8888/google.cloud.dataflow.transforms.ptransform.html)
+[`apache_beam/transforms/ptransform.py`](http://localhost:8888/apache_beam.transforms.ptransform.html)
 * `FlatMap`, `GroupByKey`, and `Map`, in file
-[`google/cloud/dataflow/transforms/core.py`](http://localhost:8888/google.cloud.dataflow.transforms.core.html)
+[`apache_beam/transforms/core.py`](http://localhost:8888/apache_beam.transforms.core.html)
 * combiners, in file
-[`google/cloud/dataflow/transforms/combiners.py`](http://localhost:8888/google.cloud.dataflow.transforms.combiners.html)
+[`apache_beam/transforms/combiners.py`](http://localhost:8888/apache_beam.transforms.combiners.html)
 
 Make sure you installed the package first. If not, run `python setup.py install`, then run pydoc with `pydoc -p 8888`.
 

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/examples/cookbook/bigshuffle_test.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/examples/cookbook/bigshuffle_test.py b/sdks/python/apache_beam/examples/cookbook/bigshuffle_test.py
index fed5664..d73c976 100644
--- a/sdks/python/apache_beam/examples/cookbook/bigshuffle_test.py
+++ b/sdks/python/apache_beam/examples/cookbook/bigshuffle_test.py
@@ -24,7 +24,6 @@ import unittest
 from apache_beam.examples.cookbook import bigshuffle
 
 
-# TODO(dataflow-python): use gensort to generate input files.
 class BigShuffleTest(unittest.TestCase):
 
   SAMPLE_TEXT = 'a b c a b a\naa bb cc aa bb aa'

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/examples/snippets/snippets.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/examples/snippets/snippets.py b/sdks/python/apache_beam/examples/snippets/snippets.py
index e467353..2eadc44 100644
--- a/sdks/python/apache_beam/examples/snippets/snippets.py
+++ b/sdks/python/apache_beam/examples/snippets/snippets.py
@@ -15,21 +15,19 @@
 # limitations under the License.
 #
 
-"""Code snippets used in Cloud Dataflow webdocs.
+"""Code snippets used in webdocs.
 
 The examples here are written specifically to read well with the accompanying
-web docs from https://cloud.google.com/dataflow. Do not rewrite them until you
-make sure the webdocs still read well and the rewritten code supports the
-concept being described. For example, there are snippets that could be shorter
-but they are written like this to make a specific point in the docs.
+web docs. Do not rewrite them until you make sure the webdocs still read well
+and the rewritten code supports the concept being described. For example, there
+are snippets that could be shorter but they are written like this to make a
+specific point in the docs.
 
 The code snippets are all organized as self contained functions. Parts of the
 function body delimited by [START tag] and [END tag] will be included
 automatically in the web docs. The naming convention for the tags is to have as
 prefix the PATH_TO_HTML where they are included followed by a descriptive
-string. For instance a code snippet that will be used as a code example
-at https://cloud.google.com/dataflow/model/pipelines will have the tag
-model_pipelines_DESCRIPTION. The tags can contain only letters, digits and _.
+string. The tags can contain only letters, digits and _.
 """
 
 import apache_beam as beam
@@ -71,10 +69,7 @@ class SnippetUtils(object):
 
 
 def construct_pipeline(renames):
-  """A reverse words snippet as an example for constructing a pipeline.
-
-  URL: https://cloud.google.com/dataflow/pipelines/constructing-your-pipeline
-  """
+  """A reverse words snippet as an example for constructing a pipeline."""
   import re
 
   class ReverseWords(beam.PTransform):
@@ -116,10 +111,7 @@ def construct_pipeline(renames):
 
 
 def model_pipelines(argv):
-  """A wordcount snippet as a simple pipeline example.
-
-  URL: https://cloud.google.com/dataflow/model/pipelines
-  """
+  """A wordcount snippet as a simple pipeline example."""
   # [START model_pipelines]
   import re
 
@@ -157,10 +149,7 @@ def model_pipelines(argv):
 
 
 def model_pcollection(argv):
-  """Creating a PCollection from data in local memory.
-
-  URL: https://cloud.google.com/dataflow/model/pcollection
-  """
+  """Creating a PCollection from data in local memory."""
   from apache_beam.utils.pipeline_options import PipelineOptions
 
   class MyOptions(PipelineOptions):
@@ -191,10 +180,7 @@ def model_pcollection(argv):
 
 
 def pipeline_options_remote(argv):
-  """"Creating a Pipeline using a PipelineOptions object for remote execution.
-
-  URL: https://cloud.google.com/dataflow/pipelines/specifying-exec-params
-  """
+  """Creating a Pipeline using a PipelineOptions object for remote execution."""
 
   from apache_beam import Pipeline
   from apache_beam.utils.pipeline_options import PipelineOptions
@@ -248,10 +234,7 @@ def pipeline_options_remote(argv):
 
 
 def pipeline_options_local(argv):
-  """"Creating a Pipeline using a PipelineOptions object for local execution.
-
-  URL: https://cloud.google.com/dataflow/pipelines/specifying-exec-params
-  """
+  """Creating a Pipeline using a PipelineOptions object for local execution."""
 
   from apache_beam import Pipeline
   from apache_beam.utils.pipeline_options import PipelineOptions
@@ -264,10 +247,10 @@ def pipeline_options_local(argv):
     @classmethod
     def _add_argparse_args(cls, parser):
       parser.add_argument('--input',
-                          help='Input for the dataflow pipeline',
+                          help='Input for the pipeline',
                           default='gs://my-bucket/input')
       parser.add_argument('--output',
-                          help='Output for the dataflow pipeline',
+                          help='Output for the pipeline',
                           default='gs://my-bucket/output')
   # [END pipeline_options_define_custom_with_help_and_default]
 
@@ -288,10 +271,7 @@ def pipeline_options_local(argv):
 
 
 def pipeline_options_command_line(argv):
-  """Creating a Pipeline by passing a list of arguments.
-
-  URL: https://cloud.google.com/dataflow/pipelines/specifying-exec-params
-  """
+  """Creating a Pipeline by passing a list of arguments."""
 
   # [START pipeline_options_command_line]
   # Use Python argparse module to parse custom arguments
@@ -312,10 +292,7 @@ def pipeline_options_command_line(argv):
 
 
 def pipeline_logging(lines, output):
-  """Logging Pipeline Messages.
-
-  URL: https://cloud.google.com/dataflow/pipelines/logging
-  """
+  """Logging Pipeline Messages."""
 
   import re
   import apache_beam as beam
@@ -349,10 +326,7 @@ def pipeline_logging(lines, output):
 
 
 def pipeline_monitoring(renames):
-  """Using monitoring interface snippets.
-
-  URL: https://cloud.google.com/dataflow/pipelines/dataflow-monitoring-intf
-  """
+  """Using monitoring interface snippets."""
 
   import re
   import apache_beam as beam
@@ -363,10 +337,10 @@ def pipeline_monitoring(renames):
     @classmethod
     def _add_argparse_args(cls, parser):
       parser.add_argument('--input',
-                          help='Input for the dataflow pipeline',
+                          help='Input for the pipeline',
                           default='gs://my-bucket/input')
       parser.add_argument('--output',
-                          help='output for the dataflow pipeline',
+                          help='output for the pipeline',
                           default='gs://my-bucket/output')
 
   class ExtractWordsFn(beam.DoFn):
@@ -415,11 +389,7 @@ def pipeline_monitoring(renames):
 
 
 def examples_wordcount_minimal(renames):
-  """MinimalWordCount example snippets.
-
-  URL:
-  https://cloud.google.com/dataflow/examples/wordcount-example#MinimalWordCount
-  """
+  """MinimalWordCount example snippets."""
   import re
 
   import apache_beam as beam
@@ -476,11 +446,7 @@ def examples_wordcount_minimal(renames):
 
 
 def examples_wordcount_wordcount(renames):
-  """WordCount example snippets.
-
-  URL:
-  https://cloud.google.com/dataflow/examples/wordcount-example#WordCount
-  """
+  """WordCount example snippets."""
   import re
 
   import apache_beam as beam
@@ -494,7 +460,7 @@ def examples_wordcount_wordcount(renames):
     @classmethod
     def _add_argparse_args(cls, parser):
       parser.add_argument('--input',
-                          help='Input for the dataflow pipeline',
+                          help='Input for the pipeline',
                           default='gs://my-bucket/input')
 
   options = PipelineOptions(argv)
@@ -535,11 +501,7 @@ def examples_wordcount_wordcount(renames):
 
 
 def examples_wordcount_debugging(renames):
-  """DebuggingWordCount example snippets.
-
-  URL:
-  https://cloud.google.com/dataflow/examples/wordcount-example#DebuggingWordCount
-  """
+  """DebuggingWordCount example snippets."""
   import re
 
   import apache_beam as beam
@@ -854,12 +816,7 @@ def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform,
 
 
 def model_textio(renames):
-  """Using a Read and Write transform to read/write text files.
-
-  URLs:
-    https://cloud.google.com/dataflow/model/pipeline-io
-    https://cloud.google.com/dataflow/model/text-io
-  """
+  """Using a Read and Write transform to read/write text files."""
   def filter_words(x):
     import re
     return re.findall(r'[A-Za-z\']+', x)
@@ -888,10 +845,7 @@ def model_textio(renames):
 
 
 def model_datastoreio():
-  """Using a Read and Write transform to read/write to Cloud Datastore.
-
-  URL: https://cloud.google.com/dataflow/model/datastoreio
-  """
+  """Using a Read and Write transform to read/write to Cloud Datastore."""
 
   import uuid
   from google.datastore.v1 import entity_pb2
@@ -929,10 +883,7 @@ def model_datastoreio():
 
 
 def model_bigqueryio():
-  """Using a Read and Write transform to read/write to BigQuery.
-
-  URL: https://cloud.google.com/dataflow/model/bigquery-io
-  """
+  """Using a Read and Write transform to read/write to BigQuery."""
   import apache_beam as beam
   from apache_beam.utils.pipeline_options import PipelineOptions
 
@@ -984,8 +935,6 @@ def model_composite_transform_example(contents, output_path):
 
   To override the apply method, define a method "apply" that
   takes a PCollection as its only parameter and returns a PCollection.
-
-  URL: https://cloud.google.com/dataflow/model/composite-transforms
   """
   import re
 
@@ -1015,10 +964,7 @@ def model_composite_transform_example(contents, output_path):
 
 
 def model_multiple_pcollections_flatten(contents, output_path):
-  """Merging a PCollection with Flatten.
-
-  URL: https://cloud.google.com/dataflow/model/multiple-pcollections
-  """
+  """Merging a PCollection with Flatten."""
   some_hash_fn = lambda s: ord(s[0])
   import apache_beam as beam
   from apache_beam.utils.pipeline_options import PipelineOptions
@@ -1052,10 +998,7 @@ def model_multiple_pcollections_flatten(contents, output_path):
 
 
 def model_multiple_pcollections_partition(contents, output_path):
-  """Splitting a PCollection with Partition.
-
-  URL: https://cloud.google.com/dataflow/model/multiple-pcollections
-  """
+  """Splitting a PCollection with Partition."""
   some_hash_fn = lambda s: ord(s[0])
 
   def get_percentile(i):
@@ -1085,10 +1028,7 @@ def model_multiple_pcollections_partition(contents, output_path):
 
 
 def model_group_by_key(contents, output_path):
-  """Applying a GroupByKey Transform.
-
-  URL: https://cloud.google.com/dataflow/model/group-by-key
-  """
+  """Applying a GroupByKey Transform."""
   import re
 
   import apache_beam as beam
@@ -1114,10 +1054,7 @@ def model_group_by_key(contents, output_path):
 
 
 def model_co_group_by_key_tuple(email_list, phone_list, output_path):
-  """Applying a CoGroupByKey Transform to a tuple.
-
-  URL: https://cloud.google.com/dataflow/model/group-by-key
-  """
+  """Applying a CoGroupByKey Transform to a tuple."""
   import apache_beam as beam
   from apache_beam.utils.pipeline_options import PipelineOptions
   p = beam.Pipeline(options=PipelineOptions())
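
The updated snippets.py docstring above describes the [START tag]/[END tag] convention, but the hunks shown here only reference it in passing. As a purely illustrative sketch (not part of this commit; the function name and tag are hypothetical, and the imports follow the SDK as it appears elsewhere in this diff), a snippet function following that convention would look roughly like:

  def model_example_snippet(argv=None):
    """A hypothetical snippet; only the tagged region lands in the web docs."""
    import apache_beam as beam
    from apache_beam.utils.pipeline_options import PipelineOptions

    # [START model_example_snippet]
    p = beam.Pipeline(options=PipelineOptions(argv))
    lines = p | 'create' >> beam.Create(['to be or not to be'])
    words = lines | 'split' >> beam.FlatMap(lambda line: line.split())
    # [END model_example_snippet]
    p.run()

Per the docstring's naming convention, the tag prefix is expected to match the PATH_TO_HTML where the snippet is embedded, followed by a descriptive string.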

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/examples/snippets/snippets_test.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/examples/snippets/snippets_test.py b/sdks/python/apache_beam/examples/snippets/snippets_test.py
index a43e1e0..ffe0f58 100644
--- a/sdks/python/apache_beam/examples/snippets/snippets_test.py
+++ b/sdks/python/apache_beam/examples/snippets/snippets_test.py
@@ -38,7 +38,7 @@ from apache_beam.examples.snippets import snippets
 
 
 class ParDoTest(unittest.TestCase):
-  """Tests for dataflow/model/par-do."""
+  """Tests for model/par-do."""
 
   def test_pardo(self):
     # Note: "words" and "ComputeWordLengthFn" are referenced by name in
@@ -658,7 +658,7 @@ class SnippetsTest(unittest.TestCase):
 
 
 class CombineTest(unittest.TestCase):
-  """Tests for dataflow/model/combine."""
+  """Tests for model/combine."""
 
   def test_global_sum(self):
     pc = [1, 2, 3]

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/examples/wordcount_debugging.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/examples/wordcount_debugging.py b/sdks/python/apache_beam/examples/wordcount_debugging.py
index 14f379d..20d1c2f 100644
--- a/sdks/python/apache_beam/examples/wordcount_debugging.py
+++ b/sdks/python/apache_beam/examples/wordcount_debugging.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-"""An example that verifies the counts and includes Dataflow best practices.
+"""An example that verifies the counts and includes best practices.
 
 On top of the basic concepts in the wordcount example, this workflow introduces
 logging to Cloud Logging, and using assertions in a Dataflow pipeline.

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/internal/auth.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/internal/auth.py b/sdks/python/apache_beam/internal/auth.py
index c645f24..ccc67c6 100644
--- a/sdks/python/apache_beam/internal/auth.py
+++ b/sdks/python/apache_beam/internal/auth.py
@@ -116,7 +116,7 @@ class _GCloudWrapperCredentials(OAuth2Credentials):
 
 def get_service_credentials():
   """Get credentials to access Google services."""
-  user_agent = 'dataflow-python-sdk/1.0'
+  user_agent = 'beam-python-sdk/1.0'
   if is_running_in_gce:
     # We are currently running as a GCE taskrunner worker.
     #

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/internal/util.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/internal/util.py b/sdks/python/apache_beam/internal/util.py
index ad60ba6..2d12d49 100644
--- a/sdks/python/apache_beam/internal/util.py
+++ b/sdks/python/apache_beam/internal/util.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-"""Utility functions used throughout the dataflow package."""
+"""Utility functions used throughout the package."""
 
 
 class ArgumentPlaceholder(object):

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/io/bigquery.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/io/bigquery.py b/sdks/python/apache_beam/io/bigquery.py
index ea0a281..9877ebf 100644
--- a/sdks/python/apache_beam/io/bigquery.py
+++ b/sdks/python/apache_beam/io/bigquery.py
@@ -37,13 +37,13 @@ The syntax supported is described here:
 https://cloud.google.com/bigquery/bq-command-line-tool-quickstart
 
 BigQuery sources can be used as main inputs or side inputs. A main input
-(common case) is expected to be massive and the Dataflow service will make sure
-it is split into manageable chunks and processed in parallel. Side inputs are
-expected to be small and will be read completely every time a ParDo DoFn gets
-executed. In the example below the lambda function implementing the DoFn for the
-Map transform will get on each call *one* row of the main table and *all* rows
-of the side table. The execution framework may use some caching techniques to
-share the side inputs between calls in order to avoid excessive reading::
+(common case) is expected to be massive and will be split into manageable chunks
+and processed in parallel. Side inputs are expected to be small and will be read
+completely every time a ParDo DoFn gets executed. In the example below the
+lambda function implementing the DoFn for the Map transform will get on each
+call *one* row of the main table and *all* rows of the side table. The runner
+may use some caching techniques to share the side inputs between calls in order
+to avoid excessive reading::
 
   main_table = pipeline | 'very_big' >> beam.io.Read(beam.io.BigQuerySource()
   side_table = pipeline | 'not_big' >> beam.io.Read(beam.io.BigQuerySource()
@@ -1035,8 +1035,7 @@ class BigQueryWrapper(object):
     elif field.type == 'TIMESTAMP':
       # The UTC should come from the timezone library but this is a known
       # issue in python 2.7 so we'll just hardcode it as we're reading using
-      # utcfromtimestamp. This is just to match the output from the dataflow
-      # runner with the local runner.
+      # utcfromtimestamp.
       # Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
       dt = datetime.datetime.utcfromtimestamp(float(value))
       return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
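
For context on the rewritten bigquery.py docstring above: the main-input/side-input paragraph refers to an example that is only partially visible in this hunk. A hedged, self-contained sketch of that pattern (the table names and the processing lambda are hypothetical; the calls mirror usage seen elsewhere in this diff) might read:

  import apache_beam as beam
  from apache_beam.pvalue import AsList
  from apache_beam.utils.pipeline_options import PipelineOptions

  pipeline = beam.Pipeline(options=PipelineOptions())
  # Main input: massive, split into manageable chunks and processed in parallel.
  main_table = pipeline | 'very_big' >> beam.io.Read(
      beam.io.BigQuerySource(table='my-project:my_dataset.big_table'))
  # Side input: small, read completely every time the DoFn executes.
  side_table = pipeline | 'not_big' >> beam.io.Read(
      beam.io.BigQuerySource(table='my-project:my_dataset.small_table'))
  results = main_table | 'process' >> beam.Map(
      # Each call sees one main-table row and all side-table rows.
      lambda row, side_rows: (row, len(side_rows)),
      AsList(side_table))

This mirrors the "one row of the main table and all rows of the side table" behavior the docstring describes; as noted there, the runner may cache the side input between calls.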

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/io/filebasedsource_test.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/io/filebasedsource_test.py b/sdks/python/apache_beam/io/filebasedsource_test.py
index f6fab4a..8f12627 100644
--- a/sdks/python/apache_beam/io/filebasedsource_test.py
+++ b/sdks/python/apache_beam/io/filebasedsource_test.py
@@ -363,22 +363,22 @@ class TestFileBasedSource(unittest.TestCase):
 
     self.assertItemsEqual(expected_data, read_data)
 
-  def _run_dataflow_test(self, pattern, expected_data, splittable=True):
+  def _run_source_test(self, pattern, expected_data, splittable=True):
     pipeline = beam.Pipeline('DirectRunner')
     pcoll = pipeline | 'Read' >> beam.Read(LineSource(
         pattern, splittable=splittable))
     assert_that(pcoll, equal_to(expected_data))
     pipeline.run()
 
-  def test_dataflow_file(self):
+  def test_source_file(self):
     file_name, expected_data = write_data(100)
     assert len(expected_data) == 100
-    self._run_dataflow_test(file_name, expected_data)
+    self._run_source_test(file_name, expected_data)
 
-  def test_dataflow_pattern(self):
+  def test_source_pattern(self):
     pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
     assert len(expected_data) == 200
-    self._run_dataflow_test(pattern, expected_data)
+    self._run_source_test(pattern, expected_data)
 
   def test_unsplittable_does_not_split(self):
     pattern, expected_data = write_pattern([5, 9, 6])
@@ -387,15 +387,15 @@ class TestFileBasedSource(unittest.TestCase):
     splits = [split for split in fbs.split(desired_bundle_size=15)]
     self.assertEquals(3, len(splits))
 
-  def test_dataflow_file_unsplittable(self):
+  def test_source_file_unsplittable(self):
     file_name, expected_data = write_data(100)
     assert len(expected_data) == 100
-    self._run_dataflow_test(file_name, expected_data, False)
+    self._run_source_test(file_name, expected_data, False)
 
-  def test_dataflow_pattern_unsplittable(self):
+  def test_source_pattern_unsplittable(self):
     pattern, expected_data = write_pattern([34, 66, 40, 24, 24, 12])
     assert len(expected_data) == 200
-    self._run_dataflow_test(pattern, expected_data, False)
+    self._run_source_test(pattern, expected_data, False)
 
   def test_read_file_bzip2(self):
     _, lines = write_data(10)

http://git-wip-us.apache.org/repos/asf/beam/blob/c94946a7/sdks/python/apache_beam/io/fileio_test.py
----------------------------------------------------------------------
diff --git a/sdks/python/apache_beam/io/fileio_test.py b/sdks/python/apache_beam/io/fileio_test.py
index 35581eb..68e2bce 100644
--- a/sdks/python/apache_beam/io/fileio_test.py
+++ b/sdks/python/apache_beam/io/fileio_test.py
@@ -759,7 +759,7 @@ class TestNativeTextFileSink(unittest.TestCase):
     with bz2.BZ2File(self.path, 'r') as f:
       self.assertEqual(f.read().splitlines(), [])
 
-  def test_write_dataflow(self):
+  def test_write_native(self):
     pipeline = beam.Pipeline('DirectRunner')
     pcoll = pipeline | beam.core.Create('Create', self.lines)
     pcoll | 'Write' >> beam.Write(fileio.NativeTextFileSink(self.path))  # pylint: disable=expression-not-assigned
@@ -772,7 +772,7 @@ class TestNativeTextFileSink(unittest.TestCase):
 
     self.assertEqual(read_result, self.lines)
 
-  def test_write_dataflow_auto_compression(self):
+  def test_write_native_auto_compression(self):
     pipeline = beam.Pipeline('DirectRunner')
     pcoll = pipeline | beam.core.Create('Create', self.lines)
     pcoll | 'Write' >> beam.Write(  # pylint: disable=expression-not-assigned
@@ -787,7 +787,7 @@ class TestNativeTextFileSink(unittest.TestCase):
 
     self.assertEqual(read_result, self.lines)
 
-  def test_write_dataflow_auto_compression_unsharded(self):
+  def test_write_native_auto_compression_unsharded(self):
     pipeline = beam.Pipeline('DirectRunner')
     pcoll = pipeline | beam.core.Create('Create', self.lines)
     pcoll | 'Write' >> beam.Write(  # pylint: disable=expression-not-assigned


[2/2] beam git commit: This closes #1774

Posted by da...@apache.org.
This closes #1774


Project: http://git-wip-us.apache.org/repos/asf/beam/repo
Commit: http://git-wip-us.apache.org/repos/asf/beam/commit/f25c0e43
Tree: http://git-wip-us.apache.org/repos/asf/beam/tree/f25c0e43
Diff: http://git-wip-us.apache.org/repos/asf/beam/diff/f25c0e43

Branch: refs/heads/python-sdk
Commit: f25c0e434e9abcc94c9377c412e403ac92444013
Parents: 8c22513 c94946a
Author: Davor Bonaci <da...@google.com>
Authored: Fri Jan 13 10:28:39 2017 -0800
Committer: Davor Bonaci <da...@google.com>
Committed: Fri Jan 13 10:28:39 2017 -0800

----------------------------------------------------------------------
 sdks/python/README.md                           |   8 +-
 .../examples/cookbook/bigshuffle_test.py        |   1 -
 .../apache_beam/examples/snippets/snippets.py   | 121 +++++--------------
 .../examples/snippets/snippets_test.py          |   4 +-
 .../apache_beam/examples/wordcount_debugging.py |   2 +-
 sdks/python/apache_beam/internal/auth.py        |   2 +-
 sdks/python/apache_beam/internal/util.py        |   2 +-
 sdks/python/apache_beam/io/bigquery.py          |  17 ++-
 .../apache_beam/io/filebasedsource_test.py      |  18 +--
 sdks/python/apache_beam/io/fileio_test.py       |   6 +-
 10 files changed, 58 insertions(+), 123 deletions(-)
----------------------------------------------------------------------