You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@beam.apache.org by da...@apache.org on 2023/02/09 16:37:11 UTC

[beam] branch users/damccorm/metadata-typo created (now bf709ea4624)

This is an automated email from the ASF dual-hosted git repository.

damccorm pushed a change to branch users/damccorm/metadata-typo
in repository https://gitbox.apache.org/repos/asf/beam.git


      at bf709ea4624 Fix typo - metdata -> metadata

This branch includes the following new commits:

     new bf709ea4624 Fix typo - metdata -> metadata

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[beam] 01/01: Fix typo - metdata -> metadata

Posted by da...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

damccorm pushed a commit to branch users/damccorm/metadata-typo
in repository https://gitbox.apache.org/repos/asf/beam.git

commit bf709ea4624d04f6b7294d045bb89b2f4922fc9a
Author: Danny McCormick <da...@google.com>
AuthorDate: Thu Feb 9 11:36:57 2023 -0500

    Fix typo - metdata -> metadata
---
 .../examples/inference/run_inference_side_inputs.py            |  6 +++---
 sdks/python/apache_beam/ml/inference/base.py                   | 10 +++++-----
 sdks/python/apache_beam/ml/inference/base_test.py              |  6 +++---
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/sdks/python/apache_beam/examples/inference/run_inference_side_inputs.py b/sdks/python/apache_beam/examples/inference/run_inference_side_inputs.py
index b89c9cc0e03..a6e4dc2bdb0 100644
--- a/sdks/python/apache_beam/examples/inference/run_inference_side_inputs.py
+++ b/sdks/python/apache_beam/examples/inference/run_inference_side_inputs.py
@@ -96,12 +96,12 @@ def run(argv=None, save_main_session=True):
   options.view_as(SetupOptions).save_main_session = save_main_session
 
   class GetModel(beam.DoFn):
-    def process(self, element) -> Iterable[base.ModelMetdata]:
+    def process(self, element) -> Iterable[base.ModelMetadata]:
       if time.time() > mid_ts:
-        yield base.ModelMetdata(
+        yield base.ModelMetadata(
             model_id='model_add.pkl', model_name='model_add')
       else:
-        yield base.ModelMetdata(
+        yield base.ModelMetadata(
             model_id='model_sub.pkl', model_name='model_sub')
 
   class _EmitSingletonSideInput(beam.DoFn):
diff --git a/sdks/python/apache_beam/ml/inference/base.py b/sdks/python/apache_beam/ml/inference/base.py
index 842607f36ff..50056107702 100644
--- a/sdks/python/apache_beam/ml/inference/base.py
+++ b/sdks/python/apache_beam/ml/inference/base.py
@@ -86,15 +86,15 @@ PredictionResult.inference.__doc__ = """Results for the inference on the model
 PredictionResult.model_id.__doc__ = """Model ID used to run the prediction."""
 
 
-class ModelMetdata(NamedTuple):
+class ModelMetadata(NamedTuple):
   model_id: str
   model_name: str
 
 
-ModelMetdata.model_id.__doc__ = """Unique identifier for the model. This can be
+ModelMetadata.model_id.__doc__ = """Unique identifier for the model. This can be
     a file path or a URL where the model can be accessed. It is used to load
     the model for inference."""
-ModelMetdata.model_name.__doc__ = """Human-readable name for the model. This
+ModelMetadata.model_name.__doc__ = """Human-readable name for the model. This
     can be used to identify the model in the metrics generated by the
     RunInference transform."""
 
@@ -310,7 +310,7 @@ class RunInference(beam.PTransform[beam.PCollection[ExampleT],
       inference_args: Optional[Dict[str, Any]] = None,
       metrics_namespace: Optional[str] = None,
       *,
-      model_metadata_pcoll: beam.PCollection[ModelMetdata] = None):
+      model_metadata_pcoll: beam.PCollection[ModelMetadata] = None):
     """
     A transform that takes a PCollection of examples (or features) for use
     on an ML model. The transform then outputs inferences (or predictions) for
@@ -530,7 +530,7 @@ class _RunInferenceDoFn(beam.DoFn, Generic[ExampleT, PredictionT]):
     return predictions
 
   def process(
-      self, batch, inference_args, si_model_metadata: Optional[ModelMetdata]):
+      self, batch, inference_args, si_model_metadata: Optional[ModelMetadata]):
     """
     When side input is enabled:
       The method checks if the side input model has been updated, and if so,
diff --git a/sdks/python/apache_beam/ml/inference/base_test.py b/sdks/python/apache_beam/ml/inference/base_test.py
index c427e40841a..319735da236 100644
--- a/sdks/python/apache_beam/ml/inference/base_test.py
+++ b/sdks/python/apache_beam/ml/inference/base_test.py
@@ -392,7 +392,7 @@ class RunInferenceBaseTest(unittest.TestCase):
     test_pipeline = TestPipeline()
     side_input = (
         test_pipeline | "CreateDummySideInput" >> beam.Create(
-            [base.ModelMetdata(1, 1), base.ModelMetdata(2, 2)])
+            [base.ModelMetadata(1, 1), base.ModelMetadata(2, 2)])
         | "ApplySideInputWindow" >> beam.WindowInto(
             window.GlobalWindows(),
             trigger=trigger.Repeatedly(trigger.AfterProcessingTime(1)),
@@ -442,11 +442,11 @@ class RunInferenceBaseTest(unittest.TestCase):
 
     sample_side_input_elements = [(
         first_ts + 8,
-        base.ModelMetdata(
+        base.ModelMetadata(
             model_id='fake_model_id_1', model_name='fake_model_id_1')),
                                   (
                                       first_ts + 15,
-                                      base.ModelMetdata(
+                                      base.ModelMetadata(
                                           model_id='fake_model_id_2',
                                           model_name='fake_model_id_2'))]