Posted to commits@singa.apache.org by zh...@apache.org on 2023/09/08 12:00:13 UTC

[singa] branch dev-postgresql updated: Add training process for the dynamic model

This is an automated email from the ASF dual-hosted git repository.

zhaojing pushed a commit to branch dev-postgresql
in repository https://gitbox.apache.org/repos/asf/singa.git


The following commit(s) were added to refs/heads/dev-postgresql by this push:
     new f84ebe58 Add training process for the dynamic model
     new 5df0cec7 Merge pull request #1093 from NLGithubWP/update_model
f84ebe58 is described below

commit f84ebe58e9a68012d5286c8c2e6cda445dde55b7
Author: working <57...@users.noreply.github.com>
AuthorDate: Fri Sep 8 18:26:25 2023 +0800

    Add training process for the dynamic model
---
 .../model_selection_psql/ms_model_mlp/model.py     | 33 ++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/examples/model_selection_psql/ms_model_mlp/model.py b/examples/model_selection_psql/ms_model_mlp/model.py
index 1e2b8191..70d1a174 100644
--- a/examples/model_selection_psql/ms_model_mlp/model.py
+++ b/examples/model_selection_psql/ms_model_mlp/model.py
@@ -94,7 +94,7 @@ class MSMLP(model.Model):
         self.linear5 = layer.Linear(num_classes)
         self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
         self.sum_error = SumErrorLayer()
-    
+
     def forward(self, inputs):
         y = self.linear1(inputs)
         y = self.relu(y)
@@ -187,9 +187,38 @@ if __name__ == "__main__":
                         dest='max_epoch')
     args = parser.parse_args()
 
+    # generate the boundary
+    f = lambda x: (5 * x + 1)
+    bd_x = np.linspace(-1.0, 1, 200)
+    bd_y = f(bd_x)
+
+    # generate the training data
+    x = np.random.uniform(-1, 1, 400)
+    y = f(x) + 2 * np.random.randn(len(x))
+
+    # choose one precision
+    precision = singa_dtype[args.precision]
+    np_precision = np_dtype[args.precision]
+
+    # convert training data to 2d space
+    label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)
+    data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np_precision)
+
+    dev = device.create_cuda_gpu_on(0)
+    sgd = opt.SGD(0.1, 0.9, 1e-5, dtype=singa_dtype[args.precision])
+    tx = tensor.Tensor((400, 2), dev, precision)
+    ty = tensor.Tensor((400,), dev, tensor.int32)
     model = MLP(data_size=2, perceptron_size=3, num_classes=2)
 
     # attach model to graph
     model.set_optimizer(sgd)
     model.compile([tx], is_train=True, use_graph=args.graph, sequential=True)
-    model.train()
\ No newline at end of file
+    model.train()
+
+    for i in range(args.max_epoch):
+        tx.copy_from_numpy(data)
+        ty.copy_from_numpy(label)
+        out, loss = model(tx, ty, 'fp32', spars=None)
+
+        if i % 100 == 0:
+            print("training loss = ", tensor.to_numpy(loss)[0])
\ No newline at end of file
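
For reference, the patched __main__ block, read end to end, amounts to the sketch below. This is not the committed code: the np_dtype/singa_dtype tables and the argparse values (precision, graph, max_epoch) are inlined so the snippet can be read on its own; MSMLP is assumed to be the class defined earlier in this same file (the committed block still instantiates MLP with the same constructor arguments); and the comment on the 'fp32' argument follows the train_one_batch convention used in SINGA's other MLP examples. create_cuda_gpu_on(0) requires a CUDA-enabled build of SINGA.

    import numpy as np
    from singa import device, opt, tensor

    # Precision lookup tables as used across the SINGA examples; the
    # patch resolves these from args.precision instead of hardcoding.
    np_dtype = {"float16": np.float16, "float32": np.float32}
    singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}

    # Decision boundary y = 5x + 1 in the 2-D plane (only needed if one
    # wants to plot the boundary alongside the samples).
    f = lambda x: 5 * x + 1
    bd_x = np.linspace(-1.0, 1, 200)
    bd_y = f(bd_x)

    # 400 samples scattered around the boundary with Gaussian noise.
    x = np.random.uniform(-1, 1, 400)
    y = f(x) + 2 * np.random.randn(len(x))

    precision = singa_dtype["float32"]      # args.precision in the patch
    np_precision = np_dtype["float32"]

    # Each sample is the pair (a, b); its label records on which side of
    # the boundary the point falls.
    label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)
    data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np_precision)

    dev = device.create_cuda_gpu_on(0)      # needs a CUDA-enabled build
    sgd = opt.SGD(0.1, 0.9, 1e-5, dtype=precision)
    tx = tensor.Tensor((400, 2), dev, precision)
    ty = tensor.Tensor((400,), dev, tensor.int32)
    model = MSMLP(data_size=2, perceptron_size=3, num_classes=2)

    # Attach the optimizer and build the computational graph.
    model.set_optimizer(sgd)
    model.compile([tx], is_train=True, use_graph=True,  # args.graph in the patch
                  sequential=True)
    model.train()

    for i in range(1000):                   # args.max_epoch in the patch
        tx.copy_from_numpy(data)
        ty.copy_from_numpy(label)
        # 'fp32' selects the plain single-process update path inside
        # train_one_batch; it is not the tensor precision chosen above.
        out, loss = model(tx, ty, 'fp32', spars=None)
        if i % 100 == 0:
            print("training loss =", tensor.to_numpy(loss)[0])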