Posted to commits@buildstream.apache.org by tv...@apache.org on 2021/02/04 07:18:59 UTC

[buildstream] branch aevri/win32 created (now df73ac7)

This is an automated email from the ASF dual-hosted git repository.

tvb pushed a change to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git.


      at df73ac7  WIP: win32: WINDOWS.md: repro instructions

This branch includes the following new commits:

     new 042a4b2  jobs/job: send ChildJob the context, not scheduler
     new 05893e1  WIP: pickle things
     new 8452664  TEMP: disable status rendering
     new dcb6373  TEMP: MultiprocessingPdb, traceback in Job._child_action
     new 6144a40  TEMP: testpickle
     new 51083d5  TEMP: breakpoint on recursion error
     new a232a8f  TEMP: time and size of pickling and spawning
     new 3940d11  WIP: spawn instead of fork
     new db07192  _scheduler: don't pass whole queue to child job
     new 63dc3ed  WIP: sandboxnone: use initial SandboxNone
     new 276b47d  WIP: bst-job-replay
     new 461c2c5  WIP: win32: _platform/win32: add support for win32
     new abb0dac  WIP: win32: platform: os.uname and resource
     new 61c228d  WIP: win32: job: replace add_child_handler with thread
     new f784260  WIP: win32: job: signals / win32
     new ee5c3ff  WIP: win32: ImportElement - fix separators
     new 482d025  WIP: win32: use cmd.exe instead of sh
     new 90a6685  WIP: win32: don't set owner of files
     new b9a93e4  WIP: win32: sandboxnone: pass-through env for now
     new 68f9c1a  WIP: win32: adjust examples/running-commands
     new df73ac7  WIP: win32: WINDOWS.md: repro instructions

The 21 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[buildstream] 02/21: WIP: pickle things

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 05893e1dad07927302974112ec904805029b50cb
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Jun 18 15:42:59 2019 +0100

    WIP: pickle things
---
 src/buildstream/_artifactcache.py      |  6 +++
 src/buildstream/_context.py            |  5 +++
 src/buildstream/_elementfactory.py     |  7 ++-
 src/buildstream/_options/optionpool.py | 12 ++++++
 src/buildstream/_plugincontext.py      | 26 +++++++++--
 src/buildstream/_project.py            | 18 +++++++-
 src/buildstream/_scheduler/jobs/job.py | 79 +++++++++++++++++++++++++++++++++-
 src/buildstream/_sourcecache.py        |  6 +++
 src/buildstream/_sourcefactory.py      |  5 ++-
 src/buildstream/element.py             |  1 +
 src/buildstream/source.py              |  2 +
 11 files changed, 156 insertions(+), 11 deletions(-)

diff --git a/src/buildstream/_artifactcache.py b/src/buildstream/_artifactcache.py
index de17ea7..a4497ba 100644
--- a/src/buildstream/_artifactcache.py
+++ b/src/buildstream/_artifactcache.py
@@ -103,6 +103,12 @@ class ArtifactCache(BaseCache):
         self.cas.add_reachable_directories_callback(self._reachable_directories)
         self.cas.add_reachable_digests_callback(self._reachable_digests)
 
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        # TODO: actually pickle the elements, resolving to the same objects.
+        state['_required_elements'] = set()
+        return state
+
     # mark_required_elements():
     #
     # Mark elements whose artifacts are required for the current run.
diff --git a/src/buildstream/_context.py b/src/buildstream/_context.py
index 3f6e6ac..d9ef1f1 100644
--- a/src/buildstream/_context.py
+++ b/src/buildstream/_context.py
@@ -168,6 +168,11 @@ class Context():
         self._casquota = None
         self._directory = directory
 
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state['_message_handler']
+        return state
+
     # load()
     #
     # Loads the configuration files
diff --git a/src/buildstream/_elementfactory.py b/src/buildstream/_elementfactory.py
index d6591bf..b2a7f73 100644
--- a/src/buildstream/_elementfactory.py
+++ b/src/buildstream/_elementfactory.py
@@ -33,9 +33,12 @@ class ElementFactory(PluginContext):
 
     def __init__(self, plugin_base, *,
                  format_versions={},
-                 plugin_origins=None):
+                 plugin_origins=None,
+                 pass_=None):
 
-        super().__init__(plugin_base, Element, [_site.element_plugins],
+        assert pass_ is not None
+
+        super().__init__(plugin_base, Element, [_site.element_plugins], 'element' + str(pass_),
                          plugin_origins=plugin_origins,
                          format_versions=format_versions)
 
diff --git a/src/buildstream/_options/optionpool.py b/src/buildstream/_options/optionpool.py
index de3af3e..d5dea39 100644
--- a/src/buildstream/_options/optionpool.py
+++ b/src/buildstream/_options/optionpool.py
@@ -56,6 +56,18 @@ class OptionPool():
         self._environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
         self._environment.globals = []
 
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state['_environment']
+        return state
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+
+        # jinja2 environment, with default globals cleared out of the way
+        self._environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
+        self._environment.globals = []
+
     # load()
     #
     # Loads the options described in the project.conf
diff --git a/src/buildstream/_plugincontext.py b/src/buildstream/_plugincontext.py
index 7a5407c..0d322c1 100644
--- a/src/buildstream/_plugincontext.py
+++ b/src/buildstream/_plugincontext.py
@@ -42,10 +42,12 @@ from . import _yaml
 #
 class PluginContext():
 
-    def __init__(self, plugin_base, base_type, site_plugin_path, *,
+    def __init__(self, plugin_base, base_type, site_plugin_path, identifier, *,
                  plugin_origins=None, dependencies=None,
                  format_versions={}):
 
+        self._identifier = identifier
+
         # The plugin kinds which were loaded
         self.loaded_dependencies = []
 
@@ -59,10 +61,26 @@ class PluginContext():
 
         # The PluginSource object
         self._plugin_base = plugin_base
-        self._site_source = plugin_base.make_plugin_source(searchpath=site_plugin_path)
+        self._site_plugin_path = site_plugin_path
+        self._site_source = plugin_base.make_plugin_source(
+            searchpath=self._site_plugin_path,
+            identifier='site_plugin-' + self._identifier)
         self._alternate_sources = {}
         self._format_versions = format_versions
 
+    def __getstate__(self):
+        import copy
+        state = copy.copy(self.__dict__)
+        del state['_site_source']
+        state['_types'] = {}
+        return state
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+        self._site_source = self._plugin_base.make_plugin_source(
+            searchpath=self._site_plugin_path,
+            identifier='site_plugin-' + self._identifier)
+
     # lookup():
     #
     # Fetches a type loaded from a plugin in this plugin context
@@ -80,7 +98,7 @@ class PluginContext():
     def _get_local_plugin_source(self, path):
         if ('local', path) not in self._alternate_sources:
             # key by a tuple to avoid collision
-            source = self._plugin_base.make_plugin_source(searchpath=[path])
+            source = self._plugin_base.make_plugin_source(searchpath=[path], identifier='local_plugin-' + path + '-' + self._identifier)
             # Ensure that sources never get garbage collected,
             # as they'll take the plugins with them.
             self._alternate_sources[('local', path)] = source
@@ -121,7 +139,7 @@ class PluginContext():
                 # The plugin didn't have an accompanying YAML file
                 defaults = None
 
-            source = self._plugin_base.make_plugin_source(searchpath=[os.path.dirname(location)])
+            source = self._plugin_base.make_plugin_source(searchpath=[os.path.dirname(location)], identifier='pip_plugin-' + self._identifier)
             self._alternate_sources[('pip', package_name)] = source
 
         else:
diff --git a/src/buildstream/_project.py b/src/buildstream/_project.py
index 1fdc84a..a2df5bce 100644
--- a/src/buildstream/_project.py
+++ b/src/buildstream/_project.py
@@ -86,6 +86,12 @@ class ProjectConfig:
         self.default_mirror = None               # The name of the preferred mirror.
         self._aliases = {}                       # Aliases dictionary
 
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state["element_factory"]
+        del state["source_factory"]
+        return state
+
 
 # Project()
 #
@@ -97,6 +103,8 @@ class Project():
                  default_mirror=None, parent_loader=None,
                  search_for_project=True):
 
+        self._pass = None
+
         # The project name
         self.name = None
 
@@ -622,6 +630,8 @@ class Project():
         config_no_include = _yaml.node_copy(self._default_config_node)
         _yaml.composite(config_no_include, project_conf_first_pass)
 
+        assert self._pass is None
+        self._pass = 1
         self._load_pass(config_no_include, self.first_pass_config,
                         ignore_unknown=True)
 
@@ -646,6 +656,8 @@ class Project():
         config = _yaml.node_copy(self._default_config_node)
         _yaml.composite(config, project_conf_second_pass)
 
+        assert self._pass == 1
+        self._pass = 2
         self._load_pass(config, self.config)
 
         self._validate_node(config)
@@ -919,10 +931,12 @@ class Project():
         pluginbase = PluginBase(package='buildstream.plugins')
         output.element_factory = ElementFactory(pluginbase,
                                                 plugin_origins=plugin_element_origins,
-                                                format_versions=element_format_versions)
+                                                format_versions=element_format_versions,
+                                                pass_=self._pass)
         output.source_factory = SourceFactory(pluginbase,
                                               plugin_origins=plugin_source_origins,
-                                              format_versions=source_format_versions)
+                                              format_versions=source_format_versions,
+                                              pass_=self._pass)
 
     # _store_origin()
     #
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index e11a9f9..4ca2ee4 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -21,7 +21,10 @@
 
 # System imports
 import enum
+import copyreg
+import io
 import os
+import pickle
 import sys
 import signal
 import datetime
@@ -32,7 +35,7 @@ import multiprocessing
 # BuildStream toplevel imports
 from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
 from ..._message import Message, MessageType, unconditional_messages
-from ... import _signals, utils
+from ... import _signals, utils, Plugin, Element, Source
 
 
 # Return code values shutdown of job handling child processes
@@ -87,6 +90,80 @@ class _MessageType(enum.Enum):
     SUBCLASS_CUSTOM_MESSAGE = 5
 
 
+def _reduce_element(element):
+    assert isinstance(element, Element)
+    meta_kind = element._meta_kind
+    project = element._get_project()
+    factory = project.config.element_factory
+    args = (factory, meta_kind)
+    state = element.__dict__.copy()
+    del state["_Element__reverse_dependencies"]
+    return (_unreduce_plugin, args, state)
+
+
+def _reduce_source(source):
+    assert isinstance(source, Source)
+    meta_kind = source._meta_kind
+    project = source._get_project()
+    factory = project.config.source_factory
+    args = (factory, meta_kind)
+    return (_unreduce_plugin, args, source.__dict__.copy())
+
+
+def _unreduce_plugin(factory, meta_kind):
+    cls, _ = factory.lookup(meta_kind)
+    plugin = cls.__new__(cls)
+
+    # TODO: find a better way of persisting this factory, otherwise the plugin
+    # will become invalid.
+    plugin.factory = factory
+
+    return plugin
+
+
+def _pickle_child_job(child_job, context):
+
+    # Note: Another way of doing this would be to let PluginBase do its
+    # import-magic. We would achieve this by first pickling the factories, and
+    # the string names of their plugins. Unpickling the plugins in the child
+    # process would then "just work". There would be an additional cost of
+    # having to load every plugin kind, regardless of which ones are used.
+
+    projects = context.get_projects()
+    element_classes = [
+        cls
+        for p in projects
+        for cls, _ in p.config.element_factory._types.values()
+    ]
+    source_classes = [
+        cls
+        for p in projects
+        for cls, _ in p.config.source_factory._types.values()
+    ]
+
+    data = io.BytesIO()
+    pickler = pickle.Pickler(data)
+    pickler.dispatch_table = copyreg.dispatch_table.copy()
+    for cls in element_classes:
+        pickler.dispatch_table[cls] = _reduce_element
+    for cls in source_classes:
+        pickler.dispatch_table[cls] = _reduce_source
+    pickler.dump(child_job)
+    data.seek(0)
+
+    return data
+
+
+def _unpickle_child_job(pickled):
+    child_job = pickle.load(pickled)
+    return child_job
+
+
+def _do_pickled_child_job(pickled, *child_args):
+    child_job = _unpickle_child_job(pickled)
+    return child_job.child_action(*child_args)
+
+
 # Job()
 #
 # The Job object represents a task that will run in parallel to the main
diff --git a/src/buildstream/_sourcecache.py b/src/buildstream/_sourcecache.py
index ce0694e..96d874b 100644
--- a/src/buildstream/_sourcecache.py
+++ b/src/buildstream/_sourcecache.py
@@ -58,6 +58,12 @@ class SourceCache(BaseCache):
         self.casquota.add_remove_callbacks(self.unrequired_sources, self.cas.remove)
         self.casquota.add_list_refs_callback(self.list_sources)
 
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        # TODO: actually pickle the sources, resolving to the same objects.
+        state['_required_sources'] = set()
+        return state
+
     # mark_required_sources()
     #
     # Mark sources that are required by the current run.
diff --git a/src/buildstream/_sourcefactory.py b/src/buildstream/_sourcefactory.py
index 1d959a1..eca4b50 100644
--- a/src/buildstream/_sourcefactory.py
+++ b/src/buildstream/_sourcefactory.py
@@ -33,9 +33,10 @@ class SourceFactory(PluginContext):
 
     def __init__(self, plugin_base, *,
                  format_versions={},
-                 plugin_origins=None):
+                 plugin_origins=None,
+                 pass_=None):
 
-        super().__init__(plugin_base, Source, [_site.source_plugins],
+        super().__init__(plugin_base, Source, [_site.source_plugins], 'source' + str(pass_),
                          format_versions=format_versions,
                          plugin_origins=plugin_origins)
 
diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index a605460..359e183 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -186,6 +186,7 @@ class Element(Plugin):
     """
 
     def __init__(self, context, project, meta, plugin_conf):
+        self._meta_kind = meta.kind
 
         self.__cache_key_dict = None            # Dict for cache key calculation
         self.__cache_key = None                 # Our cached cache key
diff --git a/src/buildstream/source.py b/src/buildstream/source.py
index 9fc9cf1..90db130 100644
--- a/src/buildstream/source.py
+++ b/src/buildstream/source.py
@@ -312,6 +312,8 @@ class Source(Plugin):
         super().__init__("{}-{}".format(meta.element_name, meta.element_index),
                          context, project, provenance, "source", unique_id=unique_id)
 
+        self._meta_kind = meta.kind
+
         self.__source_cache = context.sourcecache
 
         self.__element_name = meta.element_name         # The name of the element owning this source

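The commit above leans on two standard pickle extension points: per-class
__getstate__/__setstate__ hooks that drop unpicklable attributes and rebuild
them on load, and a per-Pickler dispatch_table mapping classes to reducer
functions, so classes like Element can be given custom pickling without
changing their global behaviour. A minimal, self-contained sketch of both
(class and function names here are illustrative, not BuildStream API):

    import copyreg
    import io
    import os
    import pickle

    class Context:
        def __init__(self):
            self.config = {"strict": True}
            self.log = open(os.devnull, "w")      # not picklable

        def __getstate__(self):
            # Drop the unpicklable attribute before pickling...
            state = self.__dict__.copy()
            del state["log"]
            return state

        def __setstate__(self, state):
            # ...and recreate it on the other side.
            self.__dict__.update(state)
            self.log = open(os.devnull, "w")

    class Element:
        def __init__(self, kind):
            self.kind = kind

    def _make_element(kind):
        return Element(kind)

    def _reduce_element(element):
        # pickle stores (_make_element, ("import",)) instead of the object.
        return (_make_element, (element.kind,))

    data = io.BytesIO()
    pickler = pickle.Pickler(data)
    pickler.dispatch_table = copyreg.dispatch_table.copy()
    pickler.dispatch_table[Element] = _reduce_element
    pickler.dump([Context(), Element("import")])

    restored = pickle.loads(data.getvalue())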

[buildstream] 12/21: WIP: win32: _platform/win32: add support for win32

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 461c2c54637e515bff9c2ccbeffe6b5283d04ccb
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:14:49 2019 +0100

    WIP: win32: _platform/win32: add support for win32
    
    Copy the approach of 'Darwin' and provide a SandboxDummy.
---
 src/buildstream/_platform/platform.py |  4 ++++
 src/buildstream/_platform/win32.py    | 28 ++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/src/buildstream/_platform/platform.py b/src/buildstream/_platform/platform.py
index dba60dd..4c147b5 100644
--- a/src/buildstream/_platform/platform.py
+++ b/src/buildstream/_platform/platform.py
@@ -47,6 +47,8 @@ class Platform():
             backend = 'linux'
         elif sys.platform.startswith('darwin'):
             backend = 'darwin'
+        elif sys.platform == 'win32':
+            backend = 'win32'
         else:
             backend = 'unix'
 
@@ -54,6 +56,8 @@ class Platform():
             from .linux import Linux as PlatformImpl  # pylint: disable=cyclic-import
         elif backend == 'darwin':
             from .darwin import Darwin as PlatformImpl  # pylint: disable=cyclic-import
+        elif backend == 'win32':
+            from .win32 import Win32 as PlatformImpl  # pylint: disable=cyclic-import
         elif backend == 'unix':
             from .unix import Unix as PlatformImpl  # pylint: disable=cyclic-import
         else:
diff --git a/src/buildstream/_platform/win32.py b/src/buildstream/_platform/win32.py
new file mode 100644
index 0000000..8bf639d
--- /dev/null
+++ b/src/buildstream/_platform/win32.py
@@ -0,0 +1,28 @@
+import os
+
+from .._exceptions import PlatformError
+from ..sandbox import SandboxNone
+
+from . import Platform
+
+
+class Win32(Platform):
+
+    def __init__(self):
+
+        super().__init__()
+
+    def create_sandbox(self, *args, **kwargs):
+        kwargs['dummy_reason'] = \
+            "There are no supported sandbox " + \
+            "technologies for Win32 at this time"
+        return SandboxNone(*args, **kwargs)
+
+    def check_sandbox_config(self, config):
+        # Check host os and architecture match
+        if config.build_os != self.get_host_os():
+            raise PlatformError("Configured and host OS don't match.")
+        elif config.build_arch != self.get_host_arch():
+            raise PlatformError("Configured and host architecture don't match.")
+
+        return True


[buildstream] 13/21: WIP: win32: platform: os.uname and resource

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit abb0dac8a8bf4281fee48e91924998b1076b9fbc
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:16:19 2019 +0100

    WIP: win32: platform: os.uname and resource
---
 src/buildstream/_platform/platform.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_platform/platform.py b/src/buildstream/_platform/platform.py
index 4c147b5..0d513fb 100644
--- a/src/buildstream/_platform/platform.py
+++ b/src/buildstream/_platform/platform.py
@@ -20,7 +20,9 @@
 import os
 import platform
 import sys
-import resource
+#import resource
+
+import psutil
 
 from .._exceptions import PlatformError, ImplError
 
@@ -72,7 +74,7 @@ class Platform():
         return cls._instance
 
     def get_cpu_count(self, cap=None):
-        cpu_count = len(os.sched_getaffinity(0))
+        cpu_count = len(psutil.Process().cpu_affinity())
         if cap is None:
             return cpu_count
         else:
@@ -159,6 +161,8 @@ class Platform():
         # Need to set resources for _frontend/app.py as this is dependent on the platform
         # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
         # Avoid hitting the limit too quickly.
+        return
+        import resource
         limits = resource.getrlimit(resource.RLIMIT_NOFILE)
         if limits[0] != limits[1]:
             if soft_limit is None:

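Two portability facts drive this commit: os.uname() and the resource module do
not exist on Windows, and os.sched_getaffinity() is Linux-only. A hedged
sketch of portable stand-ins (psutil is the third-party dependency the branch
leans on; this helper is not part of the diff):

    import os
    import platform

    import psutil

    def host_os():
        # platform.uname() exists everywhere; os.uname() is POSIX-only.
        return platform.uname().system

    def cpu_count():
        # os.sched_getaffinity() is Linux-only; psutil's cpu_affinity()
        # also covers Windows (though not macOS).
        try:
            return len(os.sched_getaffinity(0))
        except AttributeError:
            return len(psutil.Process().cpu_affinity())

    print(host_os(), cpu_count())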

[buildstream] 19/21: WIP: win32: sandboxnone: pass-through env for now

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit b9a93e48be367f274cf2c093d7ebfe5fdd7a6cf0
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 17 16:34:39 2019 +0100

    WIP: win32: sandboxnone: pass-through env for now
    
    Rely on the user running "Developer Command Prompt for VS2017", so that
    we get the correct environment variables for building.
---
 src/buildstream/sandbox/_sandboxnone.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/buildstream/sandbox/_sandboxnone.py b/src/buildstream/sandbox/_sandboxnone.py
index e95a7b9..f8a2bda 100644
--- a/src/buildstream/sandbox/_sandboxnone.py
+++ b/src/buildstream/sandbox/_sandboxnone.py
@@ -59,7 +59,8 @@ class SandboxNone(Sandbox):
 
         path = pathlib.Path(self.get_directory()) / cwd
         print('run', command, 'in', path)
-        result = subprocess.run(command, cwd=path, env=env)
+        #result = subprocess.run(command, cwd=path, env=env)
+        result = subprocess.run(command, cwd=path)
 
         # out = pathlib.Path(self.get_directory()) / 'buildstream-install'
         # out.mkdir(exist_ok=True)

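The mechanism being relied on here is subprocess's default behaviour: when
env= is omitted, the child inherits the parent's environment wholesale. An
illustrative, Windows-only snippet (not part of the commit):

    import subprocess

    # With env= omitted, the child inherits the parent's entire
    # environment (os.environ), which inside a "Developer Command
    # Prompt for VS2017" includes the INCLUDE/LIB/PATH entries that
    # cl.exe needs.
    subprocess.run(["cmd.exe", "/c", "echo %INCLUDE%"])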

[buildstream] 07/21: TEMP: time and size of pickling and spawning

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit a232a8fa7288a33016afef9b08c2f933cf99958a
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 10 12:59:21 2019 +0100

    TEMP: time and size of pickling and spawning
---
 src/buildstream/_scheduler/jobs/job.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index e844c80..440a85b 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -261,12 +261,30 @@ class Job():
 
         self._process = Process(target=child_job.child_action, args=[self._queue])
 
+        import contextlib
+        import time
+        @contextlib.contextmanager
+        def timer(message):
+            then = time.time()
+            yield
+            now = time.time()
+            print(f"({now - then:,.2}s):", message)
+
+        import buildstream.testpickle
+        with timer(f"Pickle {self._child_action}"):
+            pickled_process = buildstream.testpickle.test_pickle_direct(self._child_action)
+        print(f"Size of pickled data: {len(pickled_process.getbuffer()):,}")
+        import pickle
+        pickled_process.seek(0)
+        # unpickled_process = pickle.load(pickled_process)
+
         # Block signals which are handled in the main process such that
         # the child process does not inherit the parent's state, but the main
         # process will be notified of any signal after we launch the child.
         #
-        with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
-            self._process.start()
+        with timer(f"process.start {self}"):
+            with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
+                self._process.start()
 
         # Wait for the child task to complete.
         #

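The throwaway timer in this diff is a generally useful pattern; a standalone
version for reference (illustrative only):

    import contextlib
    import time

    @contextlib.contextmanager
    def timer(message):
        then = time.time()
        yield
        print(f"({time.time() - then:,.2f}s):", message)

    with timer("some expensive work"):
        sum(range(10_000_000))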

[buildstream] 20/21: WIP: win32: adjust examples/running-commands

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 68f9c1ace24f385886fa9b98a8725a55f1caa1da
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Thu Apr 18 09:53:07 2019 +0100

    WIP: win32: adjust examples/running-commands
    
    Illustrate some of the changes necessary to run on win32 without a
    sandbox.
---
 doc/examples/running-commands/elements/base.bst        |  5 -----
 doc/examples/running-commands/elements/base/alpine.bst | 13 -------------
 doc/examples/running-commands/elements/hello.bst       | 11 +++--------
 doc/examples/running-commands/files/src/Makefile       | 12 ------------
 doc/examples/running-commands/project.conf             | 11 ++++++++---
 5 files changed, 11 insertions(+), 41 deletions(-)

diff --git a/doc/examples/running-commands/elements/base.bst b/doc/examples/running-commands/elements/base.bst
deleted file mode 100644
index 1b85a9e..0000000
--- a/doc/examples/running-commands/elements/base.bst
+++ /dev/null
@@ -1,5 +0,0 @@
-kind: stack
-description: Base stack
-
-depends:
-- base/alpine.bst
diff --git a/doc/examples/running-commands/elements/base/alpine.bst b/doc/examples/running-commands/elements/base/alpine.bst
deleted file mode 100644
index cf85df5..0000000
--- a/doc/examples/running-commands/elements/base/alpine.bst
+++ /dev/null
@@ -1,13 +0,0 @@
-kind: import
-description: |
-
-    Alpine Linux base runtime
-
-sources:
-- kind: tar
-
-  # This is a post doctored, trimmed down system image
-  # of the Alpine linux distribution.
-  #
-  url: alpine:integration-tests-base.v1.x86_64.tar.xz
-  ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639
diff --git a/doc/examples/running-commands/elements/hello.bst b/doc/examples/running-commands/elements/hello.bst
index f7f21e0..184d353 100644
--- a/doc/examples/running-commands/elements/hello.bst
+++ b/doc/examples/running-commands/elements/hello.bst
@@ -3,10 +3,6 @@ description: |
 
   Building manually
 
-# Depend on the base system
-depends:
-- base.bst
-
 # Stage the files/src directory for building
 sources:
   - kind: local
@@ -16,7 +12,6 @@ sources:
 config:
 
   build-commands:
-  - make PREFIX="%{prefix}"
-
-  install-commands:
-  - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install
+  - cl hello.c
+  - mkdir ..\..\..\buildstream-install
+  - copy hello.exe ..\..\..\buildstream-install
diff --git a/doc/examples/running-commands/files/src/Makefile b/doc/examples/running-commands/files/src/Makefile
deleted file mode 100644
index 8c84251..0000000
--- a/doc/examples/running-commands/files/src/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Sample makefile for hello.c
-#
-.PHONY: all install
-
-all: hello
-
-install:
-	install -d ${DESTDIR}${PREFIX}/bin
-	install -m 755 hello ${DESTDIR}${PREFIX}/bin
-
-hello: hello.c
-	$(CC) -Wall -o $@ $<
diff --git a/doc/examples/running-commands/project.conf b/doc/examples/running-commands/project.conf
index 7127b0d..45ac172 100644
--- a/doc/examples/running-commands/project.conf
+++ b/doc/examples/running-commands/project.conf
@@ -7,6 +7,11 @@ format-version: 9
 # Subdirectory where elements are stored
 element-path: elements
 
-# Define an alias for our alpine tarball
-aliases:
-  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
+variables:
+  build-root:
+    buildstream\%{project-name}\%{element-name}
+  install-root:
+    buildstream-install
+
+environment:
+  SystemRoot: 'C:\Windows'


[buildstream] 08/21: WIP: spawn instead of fork

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 3940d1193179e0416b5e61f74bac4d4489c0d846
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Jun 18 15:51:13 2019 +0100

    WIP: spawn instead of fork
---
 src/buildstream/_context.py                     | 12 +++---
 src/buildstream/_scheduler/jobs/job.py          | 49 ++++++++++---------------
 src/buildstream/_scheduler/queues/buildqueue.py |  5 ++-
 src/buildstream/_scheduler/scheduler.py         |  4 ++
 4 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/src/buildstream/_context.py b/src/buildstream/_context.py
index d9ef1f1..2e1d0ee 100644
--- a/src/buildstream/_context.py
+++ b/src/buildstream/_context.py
@@ -584,7 +584,9 @@ class Context():
         # we also do not allow it in the main process.
         assert self._log_handle is None
         assert self._log_filename is None
-        assert not utils._is_main_process()
+
+        # Need to deal with global _main_pid var.
+        # assert not utils._is_main_process()
 
         # Create the fully qualified logfile in the log directory,
         # appending the pid and .log extension at the end.
@@ -679,10 +681,10 @@ class Context():
         # If this message is associated with a plugin, print what
         # we know about the plugin.
         plugin_name = ""
-        if message.unique_id:
-            template += " {plugin}"
-            plugin = Plugin._lookup(message.unique_id)
-            plugin_name = plugin.name
+        # if message.unique_id:
+        #     template += " {plugin}"
+        #     plugin = Plugin._lookup(message.unique_id)
+        #     plugin_name = plugin.name
 
         template += ": {message}"
 
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index 440a85b..ebbbfa6 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -244,7 +244,7 @@ class Job():
     #
     def start(self):
 
-        self._queue = multiprocessing.Queue()
+        self._queue = self._scheduler.manager.Queue()
 
         self._tries += 1
         self._parent_start_listening()
@@ -259,32 +259,18 @@ class Job():
             self._task_id,
         )
 
-        self._process = Process(target=child_job.child_action, args=[self._queue])
-
-        import contextlib
-        import time
-        @contextlib.contextmanager
-        def timer(message):
-            then = time.time()
-            yield
-            now = time.time()
-            print(f"({now - then:,.2}s):", message)
-
-        import buildstream.testpickle
-        with timer(f"Pickle {self._child_action}"):
-            pickled_process = buildstream.testpickle.test_pickle_direct(self._child_action)
-        print(f"Size of pickled data: {len(pickled_process.getbuffer()):,}")
-        import pickle
-        pickled_process.seek(0)
-        # unpickled_process = pickle.load(pickled_process)
+        pickled = _pickle_child_job(child_job, self._scheduler.context)
+        self._process = Process(
+            target=_do_pickled_child_job,
+            args=[pickled, self._queue],
+        )
 
         # Block signals which are handled in the main process such that
         # the child process does not inherit the parent's state, but the main
         # process will be notified of any signal after we launch the child.
         #
-        with timer(f"process.start {self}"):
-            with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
-                self._process.start()
+        with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
+            self._process.start()
 
         # Wait for the child task to complete.
         #
@@ -629,19 +615,22 @@ class Job():
         #
         #      http://bugs.python.org/issue3831
         #
-        if not self._listening:
-            self._scheduler.loop.add_reader(
-                self._queue._reader.fileno(), self._parent_recv)
-            self._listening = True
+
+        # if not self._listening:
+        #     self._scheduler.loop.add_reader(
+        #         self._queue._reader.fileno(), self._parent_recv)
+        #     self._listening = True
+        pass
 
     # _parent_stop_listening()
     #
     # Stops listening on the message queue
     #
     def _parent_stop_listening(self):
-        if self._listening:
-            self._scheduler.loop.remove_reader(self._queue._reader.fileno())
-            self._listening = False
+        # if self._listening:
+        #     self._scheduler.loop.remove_reader(self._queue._reader.fileno())
+        #     self._listening = False
+        pass
 
 
 # ChildJob()
@@ -922,7 +911,7 @@ class ChildJob():
     #    exit_code (_ReturnCode): The exit code to exit with
     #
     def _child_shutdown(self, exit_code):
-        self._queue.close()
+        # self._queue.close()
         assert isinstance(exit_code, _ReturnCode)
         sys.exit(int(exit_code))
 
diff --git a/src/buildstream/_scheduler/queues/buildqueue.py b/src/buildstream/_scheduler/queues/buildqueue.py
index b280661..ff65158 100644
--- a/src/buildstream/_scheduler/queues/buildqueue.py
+++ b/src/buildstream/_scheduler/queues/buildqueue.py
@@ -108,8 +108,9 @@ class BuildQueue(Queue):
         #        artifact cache size for a successful build even though we know a
         #        failed build also grows the artifact cache size.
         #
-        if status is JobStatus.OK:
-            self._check_cache_size(job, element, result)
+
+        # if status is JobStatus.OK:
+        #     self._check_cache_size(job, element, result)
 
     def register_pending_element(self, element):
         # Set a "buildable" callback for an element not yet ready
diff --git a/src/buildstream/_scheduler/scheduler.py b/src/buildstream/_scheduler/scheduler.py
index 0ee6293..8a14391 100644
--- a/src/buildstream/_scheduler/scheduler.py
+++ b/src/buildstream/_scheduler/scheduler.py
@@ -75,6 +75,10 @@ class Scheduler():
                  job_start_callback=None,
                  job_complete_callback=None):
 
+        import multiprocessing
+        multiprocessing.set_start_method('spawn')
+        self.manager = multiprocessing.Manager()
+
         #
         # Public members
         #

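The key contrast behind this commit: 'fork' (the POSIX default at the time)
hands the child a copy of the parent's memory, while 'spawn' (the only start
method on Windows) launches a fresh interpreter, so the job and its queue must
survive pickling. A minimal sketch of the spawn-plus-Manager pattern
(illustrative, not BuildStream code):

    import multiprocessing

    def child(queue):
        # Under 'spawn' this runs in a fresh interpreter: the target
        # and its arguments arrive via pickle, not via copied memory.
        queue.put("done")

    if __name__ == "__main__":
        multiprocessing.set_start_method("spawn")
        # A Manager queue is proxied over a connection rather than
        # relying on file descriptors inherited across fork().
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        process = multiprocessing.Process(target=child, args=(queue,))
        process.start()
        print(queue.get())
        process.join()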

[buildstream] 18/21: WIP: win32: don't set owner of files

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 90a6685c8f025400b3dd6450900c7ef657be14ad
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 17 16:33:44 2019 +0100

    WIP: win32: don't set owner of files
---
 src/buildstream/element.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/element.py b/src/buildstream/element.py
index 359e183..867b6fd 100644
--- a/src/buildstream/element.py
+++ b/src/buildstream/element.py
@@ -1504,7 +1504,7 @@ class Element(Plugin):
         # Ensure deterministic mtime of sources at build time
         vdirectory.set_deterministic_mtime()
         # Ensure deterministic owners of sources at build time
-        vdirectory.set_deterministic_user()
+        #vdirectory.set_deterministic_user()
 
     # _set_required():
     #


[buildstream] 03/21: TEMP: disable status rendering

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 84526643b4dde01a9ca199d7cf30887065a8ee1b
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:12:07 2019 +0100

    TEMP: disable status rendering
    
    This interferes with pdb debugging in subprocesses.
---
 src/buildstream/_frontend/app.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/_frontend/app.py b/src/buildstream/_frontend/app.py
index a9dd46b..6cc2d7e 100644
--- a/src/buildstream/_frontend/app.py
+++ b/src/buildstream/_frontend/app.py
@@ -462,7 +462,7 @@ class App():
     # Render the status area, conditional on some internal state
     #
     def _maybe_render_status(self):
-
+        return
         # If we're suspended or terminating, then dont render the status area
         if self._status and self.stream and \
            not (self.stream.suspended or self.stream.terminated):


[buildstream] 11/21: WIP: bst-job-replay

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 276b47de424cf35b0df80d48604b323df06e0a9e
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Thu Apr 11 10:49:32 2019 +0100

    WIP: bst-job-replay
---
 setup.py                               |  3 ++-
 src/buildstream/_jobreplay.py          | 15 +++++++++++++++
 src/buildstream/_scheduler/jobs/job.py |  5 +++++
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index ab3c6f3..af81886 100755
--- a/setup.py
+++ b/setup.py
@@ -152,7 +152,8 @@ bst_install_entry_points = {
 if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
     check_for_bwrap()
     bst_install_entry_points['console_scripts'] += [
-        'bst = buildstream._frontend:cli'
+        'bst = buildstream._frontend:cli',
+        'bst-job-replay = buildstream._jobreplay:cli',
     ]
 
 #####################################################
diff --git a/src/buildstream/_jobreplay.py b/src/buildstream/_jobreplay.py
new file mode 100644
index 0000000..1c05324
--- /dev/null
+++ b/src/buildstream/_jobreplay.py
@@ -0,0 +1,15 @@
+import multiprocessing
+
+import click
+
+from ._scheduler.jobs.job import _unpickle_child_job
+
+
+@click.command(name='bst-job-replay', short_help="Replay a bst job")
+@click.argument('replayfile', type=click.File("rb"))
+def cli(replayfile):
+    job = _unpickle_child_job(replayfile)
+    queue = multiprocessing.Queue()
+    job._queue = queue
+    job._scheduler_context.set_message_handler(job._child_message_handler)
+    job.child_process()
diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index ebbbfa6..3b228bc 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -24,6 +24,7 @@ import enum
 import copyreg
 import io
 import os
+import pathlib
 import pickle
 import sys
 import signal
@@ -162,6 +163,10 @@ def _pickle_child_job(child_job, context):
     pickler.dump(child_job)
     data.seek(0)
 
+    path = f"{child_job.action_name}_{child_job._task_id}"
+    with open(path, "wb") as f:
+        f.write(data.getvalue())
+
     return data
 
 


[buildstream] 21/21: WIP: win32: WINDOWS.md: repro instructions

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit df73ac7ca93b14e066848ee39d9b27fee26c7c29
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Thu Apr 18 11:16:51 2019 +0100

    WIP: win32: WINDOWS.md: repro instructions
---
 WINDOWS.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/WINDOWS.md b/WINDOWS.md
new file mode 100644
index 0000000..46e3655
--- /dev/null
+++ b/WINDOWS.md
@@ -0,0 +1,59 @@
+Running on Windows
+==================
+
+This is a temporary doc tied to the lifetime of the `aevri/win32` branch,
+intended to help you repro the results.
+
+Installation
+------------
+
+First, make sure you have Python 3 installed.
+
+Next, you probably want to create a venv to install BuildStream into, for
+experimentation.
+
+Then, clone and install BuildStream:
+
+    git clone  --branch aevri/win32 https://gitlab.com/buildstream/buildstream.git
+    pip install -e ./buildstream
+
+Next, install some additional dependencies for proper display:
+
+    pip install colorama windows-curses
+
+Finally, make sure you have the build tools installed:
+
+- Download the installer from: https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2017
+- Run the installer.
+- Select to install "Visual C++ build tools". Possibly need to include these
+  optional items:
+    - Windows 10 SDK
+    - Visual C++ tools for CMake
+    - Testing tools core feature - Build Tools
+
+Hello World
+-----------
+
+Here is how to build the "Hello World" example.
+
+First, launch a "Developer Command Prompt for VS 2017". This ensures that you
+have the correct environment variables for building. The next instructions
+assume you are running inside this prompt.
+
+Next, make sure you have activated any virtual environment for BuildStream.
+
+Then, change directory to the buildstream git repository.
+
+Finally, build and run like so:
+
+    bst --help
+
+    cd doc\examples\running-commands
+
+    bst show hello.bst
+
+    bst build hello.bst
+
+    bst artifact checkout hello.bst --directory checkout
+    cd checkout
+    hello.exe


[buildstream] 01/21: jobs/job: send ChildJob the context, not scheduler

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 042a4b2dceffbe21b5bf796d72f7ac28db24662a
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Jun 5 16:50:57 2019 +0100

    jobs/job: send ChildJob the context, not scheduler
    
    Instead of passing the whole scheduler to the ChildJob, only pass the
    part that is used - the context. Reducing the amount of shared state
    makes it easier to follow what's going on, and will make it more
    economical to move away from the 'fork' model later.
---
 src/buildstream/_scheduler/jobs/job.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index ed90bb3..e11a9f9 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -162,7 +162,7 @@ class Job():
         self._parent_start_listening()
 
         child_job = self.create_child_job(  # pylint: disable=assignment-from-no-return
-            self._scheduler,
+            self._scheduler.context,
             self.action_name,
             self._logfile,
             self._max_retries,
@@ -562,11 +562,11 @@ class Job():
 class ChildJob():
 
     def __init__(
-            self, scheduler, action_name, logfile, max_retries, tries, message_unique_id, task_id):
+            self, scheduler_context, action_name, logfile, max_retries, tries, message_unique_id, task_id):
 
         self.action_name = action_name
 
-        self._scheduler = scheduler
+        self._scheduler_context = scheduler_context
         self._logfile = logfile
         self._max_retries = max_retries
         self._tries = tries
@@ -592,7 +592,7 @@ class ChildJob():
         if "unique_id" in kwargs:
             unique_id = kwargs["unique_id"]
             del kwargs["unique_id"]
-        self._scheduler.context.message(
+        self._scheduler_context.message(
             Message(unique_id, message_type, message, **kwargs))
 
     # send_message()
@@ -673,7 +673,7 @@ class ChildJob():
         # Set the global message handler in this child
         # process to forward messages to the parent process
         self._queue = queue
-        self._scheduler.context.set_message_handler(self._child_message_handler)
+        self._scheduler_context.set_message_handler(self._child_message_handler)
 
         starttime = datetime.datetime.now()
         stopped_time = None
@@ -690,7 +690,7 @@ class ChildJob():
         # Time, log and and run the action function
         #
         with _signals.suspendable(stop_time, resume_time), \
-                self._scheduler.context.recorded_messages(self._logfile) as filename:
+                self._scheduler_context.recorded_messages(self._logfile) as filename:
 
             self.message(MessageType.START, self.action_name, logfile=filename)
 


[buildstream] 04/21: TEMP: MultiprocessingPdb, traceback in Job._child_action

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit dcb63739058caa27f18cec1da27973833bb5b88b
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:22:51 2019 +0100

    TEMP: MultiprocessingPdb, traceback in Job._child_action
---
 src/buildstream/_scheduler/jobs/elementjob.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/src/buildstream/_scheduler/jobs/elementjob.py b/src/buildstream/_scheduler/jobs/elementjob.py
index a535f55..f51707f 100644
--- a/src/buildstream/_scheduler/jobs/elementjob.py
+++ b/src/buildstream/_scheduler/jobs/elementjob.py
@@ -23,6 +23,18 @@ from ..._message import MessageType
 from .job import Job, ChildJob
 
 
+import sys
+import pdb
+
+class MultiprocessingPdb(pdb.Pdb):
+    def interaction(self, *args, **kwargs):
+        _stdin = sys.stdin
+        try:
+            sys.stdin = open('/dev/stdin')
+            pdb.Pdb.interaction(self, *args, **kwargs)
+        finally:
+            sys.stdin = _stdin
+
 # ElementJob()
 #
 # A job to run an element's commands. When this job is started
@@ -105,7 +117,15 @@ class ChildElementJob(ChildJob):
                      detail=env_dump)
 
         # Run the action
-        return self._action_cb(self._element)
+        # MultiprocessingPdb().set_trace()
+        try:
+            result = self._action_cb(self._element)
+        except Exception as e:
+            print(e)
+            import traceback
+            traceback.print_exc()
+            raise
+        return result
 
     def child_process_data(self):
         data = {}


[buildstream] 06/21: TEMP: breakpoint on recursion error

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 51083d517d2679765f78514295b857cb18e31f8d
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 10 12:50:20 2019 +0100

    TEMP: breakpoint on recursion error
---
 src/buildstream/_frontend/app.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/buildstream/_frontend/app.py b/src/buildstream/_frontend/app.py
index 6cc2d7e..f4e32ea 100644
--- a/src/buildstream/_frontend/app.py
+++ b/src/buildstream/_frontend/app.py
@@ -281,7 +281,8 @@ class App():
 
             # Exit with the error
             self._error_exit(e)
-        except RecursionError:
+        except RecursionError as e:
+            breakpoint()
             click.echo("RecursionError: Dependency depth is too large. Maximum recursion depth exceeded.",
                        err=True)
             sys.exit(-1)


[buildstream] 09/21: _scheduler: don't pass whole queue to child job

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit db07192c18067f9f84cc686e975a4909c166a9c9
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 9 13:31:39 2019 +0100

    _scheduler: don't pass whole queue to child job
    
    Stop passing the scheduler's job queues across to child jobs via the
    'action_cb' parameter. Instead pass a module-level function, which will
    pickle nicely.
    
    This isn't much of a problem while we are in the 'fork' multiprocessing
    model. As we move towards supporting the 'spawn' model for win32, we
    need to consider what we will be pickling and unpickling to cross the
    process boundary.
---
 .../_scheduler/queues/artifactpushqueue.py         | 11 ++++++----
 src/buildstream/_scheduler/queues/buildqueue.py    | 10 ++++++---
 src/buildstream/_scheduler/queues/fetchqueue.py    | 15 +++++++++++--
 src/buildstream/_scheduler/queues/pullqueue.py     | 11 ++++++----
 src/buildstream/_scheduler/queues/queue.py         | 25 +++++++++++++---------
 .../_scheduler/queues/sourcepushqueue.py           | 11 ++++++----
 src/buildstream/_scheduler/queues/trackqueue.py    |  8 +++++--
 7 files changed, 62 insertions(+), 29 deletions(-)

diff --git a/src/buildstream/_scheduler/queues/artifactpushqueue.py b/src/buildstream/_scheduler/queues/artifactpushqueue.py
index b861d4f..0b6fa13 100644
--- a/src/buildstream/_scheduler/queues/artifactpushqueue.py
+++ b/src/buildstream/_scheduler/queues/artifactpushqueue.py
@@ -32,13 +32,16 @@ class ArtifactPushQueue(Queue):
     complete_name = "Pushed"
     resources = [ResourceType.UPLOAD]
 
-    def process(self, element):
-        # returns whether an artifact was uploaded or not
-        if not element._push():
-            raise SkipJob(self.action_name)
+    def get_process_func(self):
+        return _raise_skip_if_not_pushed
 
     def status(self, element):
         if element._skip_push():
             return QueueStatus.SKIP
 
         return QueueStatus.READY
+
+
+def _raise_skip_if_not_pushed(element):
+    if not element._push():
+        raise SkipJob(ArtifactPushQueue.action_name)
diff --git a/src/buildstream/_scheduler/queues/buildqueue.py b/src/buildstream/_scheduler/queues/buildqueue.py
index ff65158..d0796f9 100644
--- a/src/buildstream/_scheduler/queues/buildqueue.py
+++ b/src/buildstream/_scheduler/queues/buildqueue.py
@@ -57,7 +57,7 @@ class BuildQueue(Queue):
                           logfile=logfile)
             job = ElementJob(self._scheduler, self.action_name,
                              logfile, element=element, queue=self,
-                             action_cb=self.process,
+                             action_cb=self.get_process_func(),
                              complete_cb=self._job_done,
                              max_retries=self._max_retries)
             self._done_queue.append(element)
@@ -66,8 +66,8 @@ class BuildQueue(Queue):
 
         return super().enqueue(to_queue)
 
-    def process(self, element):
-        return element._assemble()
+    def get_process_func(self):
+        return _assemble_element
 
     def status(self, element):
         if element._cached_success():
@@ -116,3 +116,7 @@ class BuildQueue(Queue):
         # Set a "buildable" callback for an element not yet ready
         # to be processed in the build queue.
         element._set_buildable_callback(self._enqueue_element)
+
+
+def _assemble_element(element):
+    return element._assemble()
diff --git a/src/buildstream/_scheduler/queues/fetchqueue.py b/src/buildstream/_scheduler/queues/fetchqueue.py
index bbb3b3d..9b619e7 100644
--- a/src/buildstream/_scheduler/queues/fetchqueue.py
+++ b/src/buildstream/_scheduler/queues/fetchqueue.py
@@ -41,8 +41,11 @@ class FetchQueue(Queue):
         self._skip_cached = skip_cached
         self._fetch_original = fetch_original
 
-    def process(self, element):
-        element._fetch(fetch_original=self._fetch_original)
+    def get_process_func(self):
+        if self._fetch_original:
+            return _fetch_original
+        else:
+            return _fetch_no_original
 
     def status(self, element):
         # Optionally skip elements that are already in the artifact cache
@@ -78,3 +81,11 @@ class FetchQueue(Queue):
         # Set a "can_query_cache" callback for an element not yet ready
         # to be processed in the fetch queue.
         element._set_can_query_cache_callback(self._enqueue_element)
+
+
+def _fetch_no_original(element):
+    element._fetch(fetch_original=False)
+
+
+def _fetch_original(element):
+    element._fetch(fetch_original=True)
diff --git a/src/buildstream/_scheduler/queues/pullqueue.py b/src/buildstream/_scheduler/queues/pullqueue.py
index 2452933..dfb7cbc 100644
--- a/src/buildstream/_scheduler/queues/pullqueue.py
+++ b/src/buildstream/_scheduler/queues/pullqueue.py
@@ -33,10 +33,8 @@ class PullQueue(Queue):
     complete_name = "Pulled"
     resources = [ResourceType.DOWNLOAD, ResourceType.CACHE]
 
-    def process(self, element):
-        # returns whether an artifact was downloaded or not
-        if not element._pull():
-            raise SkipJob(self.action_name)
+    def get_process_func(self):
+        return _raise_skip_if_not_pulled
 
     def status(self, element):
         if not element._can_query_cache():
@@ -65,3 +63,8 @@ class PullQueue(Queue):
         # immediately ready to query the artifact cache so that it
         # may be pulled.
         element._set_can_query_cache_callback(self._enqueue_element)
+
+
+def _raise_skip_if_not_pulled(element):
+    if not element._pull():
+        raise SkipJob(PullQueue.action_name)
diff --git a/src/buildstream/_scheduler/queues/queue.py b/src/buildstream/_scheduler/queues/queue.py
index 9a07f63..be76e9e 100644
--- a/src/buildstream/_scheduler/queues/queue.py
+++ b/src/buildstream/_scheduler/queues/queue.py
@@ -91,20 +91,25 @@ class Queue():
     #     Abstract Methods for Queue implementations    #
     #####################################################
 
-    # process()
+    # get_process_func()
     #
-    # Abstract method for processing an element
+    # Abstract method, returns a callable for processing an element.
     #
-    # Args:
-    #    element (Element): An element to process
+    # The callable should fit the signature `process(element: Element) -> any`.
     #
-    # Returns:
-    #    (any): An optional something to be returned
-    #           for every element successfully processed
+    # Note that the callable may be executed in a child process, so the return
+    # value should be a simple object (must be pickle-able, e.g. strings,
+    # lists, dicts, numbers, but not Element instances). This is sent back
+    # to the main process.
     #
+    # This method is the only way for a queue to affect elements, and so is
+    # not optional to implement.
     #
-    def process(self, element):
-        pass
+    # Returns:
+    #    (Callable[[Element], Any]): The callable for processing elements.
+    #
+    def get_process_func(self):
+        raise NotImplementedError()
 
     # status()
     #
@@ -218,7 +223,7 @@ class Queue():
             ElementJob(self._scheduler, self.action_name,
                        self._element_log_path(element),
                        element=element, queue=self,
-                       action_cb=self.process,
+                       action_cb=self.get_process_func(),
                        complete_cb=self._job_done,
                        max_retries=self._max_retries)
             for element in ready
diff --git a/src/buildstream/_scheduler/queues/sourcepushqueue.py b/src/buildstream/_scheduler/queues/sourcepushqueue.py
index c38460e..92587d6 100644
--- a/src/buildstream/_scheduler/queues/sourcepushqueue.py
+++ b/src/buildstream/_scheduler/queues/sourcepushqueue.py
@@ -30,13 +30,16 @@ class SourcePushQueue(Queue):
     complete_name = "Sources pushed"
     resources = [ResourceType.UPLOAD]
 
-    def process(self, element):
-        # Returns whether a source was pushed or not
-        if not element._source_push():
-            raise SkipJob(self.action_name)
+    def get_process_func(self):
+        return _raise_skip_if_not_pushed
 
     def status(self, element):
         if element._skip_source_push():
             return QueueStatus.SKIP
 
         return QueueStatus.READY
+
+
+def _raise_skip_if_not_pushed(element):
+    if not element._source_push():
+        raise SkipJob(SourcePushQueue.action_name)
diff --git a/src/buildstream/_scheduler/queues/trackqueue.py b/src/buildstream/_scheduler/queues/trackqueue.py
index 194bb7e..56fa9c5 100644
--- a/src/buildstream/_scheduler/queues/trackqueue.py
+++ b/src/buildstream/_scheduler/queues/trackqueue.py
@@ -35,8 +35,8 @@ class TrackQueue(Queue):
     complete_name = "Tracked"
     resources = [ResourceType.DOWNLOAD]
 
-    def process(self, element):
-        return element._track()
+    def get_process_func(self):
+        return _track_element
 
     def status(self, element):
         # We can skip elements entirely if they have no sources.
@@ -60,3 +60,7 @@ class TrackQueue(Queue):
             source._set_ref(new_ref, save=True)
 
         element._tracking_done()
+
+
+def _track_element(element):
+    return element._track()
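
For context on why these process callables moved to module level: under the
multiprocessing 'spawn' start method, the callable handed to the child job
must survive pickling, and a bound method pickles its instance along with
it, failing as soon as that instance holds an unpicklable member. A minimal
sketch of the difference, using multiprocessing's own pickling entry point
as testpickle.py does later in this branch (the class names here are
illustrative, not the real BuildStream ones):

    import io
    import multiprocessing.reduction
    import threading


    def process_element(element):
        # A module-level function pickles by qualified name alone.
        return element


    class Queue:
        def __init__(self):
            self._lock = threading.Lock()  # stand-in for an unpicklable member

        def process(self, element):
            return element

        def get_process_func(self):
            return process_element


    def try_dump(obj):
        # Same entry point the multiprocessing 'spawn' machinery uses.
        try:
            multiprocessing.reduction.dump(obj, io.BytesIO())
            return "ok"
        except Exception as err:
            return "fails: {}".format(err)


    print(try_dump(Queue().get_process_func()))  # ok: pickled by reference
    print(try_dump(Queue().process))             # fails: drags the Queue, lock and all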


[buildstream] 17/21: WIP: win32: use cmd.exe instead of sh

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 482d0256d9e34ae1d3faa88b98f6521ea214ea06
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 17 16:32:50 2019 +0100

    WIP: win32: use cmd.exe instead of sh
---
 src/buildstream/buildelement.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/buildstream/buildelement.py b/src/buildstream/buildelement.py
index 158f5fc..4bb6793 100644
--- a/src/buildstream/buildelement.py
+++ b/src/buildstream/buildelement.py
@@ -294,6 +294,6 @@ class BuildElement(Element):
         # Note the -e switch to 'sh' means to exit with an error
         # if any untested command fails.
         #
-        sandbox.run(['sh', '-c', '-e', cmd + '\n'],
+        sandbox.run(['cmd.exe', '/c', cmd],
                     SandboxFlags.ROOT_READ_ONLY,
                     label=cmd)
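
One behavioural difference worth noting here: the old 'sh -c -e' stops at
the first failing command in a multi-statement string, while 'cmd.exe /c'
has no direct equivalent of the '-e' switch (the stale sh comment in the
context above would also want updating). A hedged sketch of how
exit-on-error could be approximated by chaining commands with '&&', which
only runs the next command when the previous one exits with 0; this is an
assumption about a possible approach, not what the commit does:

    import subprocess


    def run_commands_win32(commands, cwd, env):
        # cmd.exe's '&&' short-circuits on a non-zero exit status, roughly
        # matching sh's exit-on-error behaviour for a chain of commands.
        joined = " && ".join(commands)
        result = subprocess.run(["cmd.exe", "/c", joined], cwd=cwd, env=env)
        return result.returncode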


[buildstream] 16/21: WIP: win32: ImportElement - fix separators

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit ee5c3ffa88cefc0d1cd08908cdb1bdbeaf05f3bd
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:36:29 2019 +0100

    WIP: win32: ImportElement - fix separators
---
 src/buildstream/plugins/elements/import.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/buildstream/plugins/elements/import.py b/src/buildstream/plugins/elements/import.py
index 61e353d..2861674 100644
--- a/src/buildstream/plugins/elements/import.py
+++ b/src/buildstream/plugins/elements/import.py
@@ -82,10 +82,10 @@ class ImportElement(Element):
         outputdir = rootdir.descend('output', create=True)
 
         # The directory to grab
-        inputdir = inputdir.descend(*self.source.strip(os.sep).split(os.sep))
+        inputdir = inputdir.descend(*self.source.strip('/').split('/'))
 
         # The output target directory
-        outputdir = outputdir.descend(*self.target.strip(os.sep).split(os.sep), create=True)
+        outputdir = outputdir.descend(*self.target.strip('/').split('/'), create=True)
 
         if inputdir.is_empty():
             raise ElementError("{}: No files were found inside directory '{}'"
@@ -95,7 +95,7 @@ class ImportElement(Element):
         outputdir.import_files(inputdir)
 
         # And we're done
-        return '/output'
+        return os.sep + 'output'
 
     def generate_script(self):
         build_root = self.get_variable('build-root')
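
The distinction behind this fix: paths in an element's configuration (the
'source' and 'target' options here) are always written with '/' whatever
the host platform, while the value returned from assemble is treated as a
host path and so uses os.sep. A small sketch of the two cases, assuming a
Windows host:

    import os

    # Element config paths are POSIX-style, so split on '/' explicitly
    # rather than on the host separator.
    config_path = "usr/share/doc"
    parts = config_path.strip("/").split("/")   # ['usr', 'share', 'doc']

    # Host-facing paths use os.sep: '\\' on Windows, '/' elsewhere.
    host_path = os.sep + "output"               # '\output' on a Windows host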


[buildstream] 14/21: WIP: win32: job: replace add_child_handler with thread

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 61c228d5ead35a9c6318f4e4e9e190d3d8f4365e
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:26:08 2019 +0100

    WIP: win32: job: replace add_child_handler with thread
---
 src/buildstream/_scheduler/jobs/job.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index 3b228bc..debc470 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -49,6 +49,21 @@ class _ReturnCode(enum.IntEnum):
     SKIPPED = 3
 
 
+def _call_on_waitpid_threadfun(running_loop, process, callback):
+    process.join()
+    running_loop.call_soon_threadsafe(callback, process.pid, process.exitcode)
+
+
+def call_on_waitpid(running_loop, process, callback):
+    import threading
+    t = threading.Thread(
+        target=_call_on_waitpid_threadfun,
+        args=(running_loop, process, callback)
+    )
+    )
+    t.start()
+    return t
+
+
 # JobStatus:
 #
 # The job completion status, passed back through the
@@ -299,8 +314,7 @@ class Job():
         # an event loop callback. Otherwise, if the job completes too fast, then
         # the callback is called immediately.
         #
-        self._watcher = asyncio.get_child_watcher()
-        self._watcher.add_child_handler(self._process.pid, self._parent_child_completed)
+        self._watcher = call_on_waitpid(self._scheduler.loop, self._process, self._parent_child_completed)
 
     # terminate()
     #
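
Some context on the thread-based watcher above: asyncio's child-watcher
machinery (add_child_handler) is POSIX-only, so the commit instead joins
the multiprocessing.Process on a helper thread and posts the exit code back
with call_soon_threadsafe, the one event-loop method documented as safe to
call from another thread. A self-contained sketch of the same pattern, with
illustrative names rather than the BuildStream API:

    import asyncio
    import multiprocessing
    import threading


    def _waiter(loop, process, callback):
        # Blocks this helper thread, never the event loop.
        process.join()
        loop.call_soon_threadsafe(callback, process.pid, process.exitcode)


    def watch_process(loop, process, callback):
        thread = threading.Thread(target=_waiter, args=(loop, process, callback))
        thread.start()
        return thread


    def _child():
        pass


    async def main():
        loop = asyncio.get_running_loop()
        done = asyncio.Event()

        def on_exit(pid, exitcode):
            print("child", pid, "exited with", exitcode)
            done.set()

        process = multiprocessing.Process(target=_child)
        process.start()
        watch_process(loop, process, on_exit)
        await done.wait()


    if __name__ == "__main__":
        asyncio.run(main())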


[buildstream] 10/21: WIP: sandboxnone: use initial SandboxNone

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 63dc3edf411996e812363f507b4f81b7e9f2ff90
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Apr 10 18:06:06 2019 +0100

    WIP: sandboxnone: use initial SandboxNone
---
 src/buildstream/_platform/darwin.py     |  4 +-
 src/buildstream/sandbox/__init__.py     |  1 +
 src/buildstream/sandbox/_sandboxnone.py | 67 +++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/src/buildstream/_platform/darwin.py b/src/buildstream/_platform/darwin.py
index 8e08685..2bbd205 100644
--- a/src/buildstream/_platform/darwin.py
+++ b/src/buildstream/_platform/darwin.py
@@ -17,7 +17,7 @@
 
 import os
 
-from ..sandbox import SandboxDummy
+from ..sandbox import SandboxNone
 
 from .platform import Platform
 
@@ -31,7 +31,7 @@ class Darwin(Platform):
         kwargs['dummy_reason'] = \
             "OSXFUSE is not supported and there are no supported sandbox " + \
             "technologies for MacOS at this time"
-        return SandboxDummy(*args, **kwargs)
+        return SandboxNone(*args, **kwargs)
 
     def check_sandbox_config(self, config):
         # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
diff --git a/src/buildstream/sandbox/__init__.py b/src/buildstream/sandbox/__init__.py
index 5966d19..6544348 100644
--- a/src/buildstream/sandbox/__init__.py
+++ b/src/buildstream/sandbox/__init__.py
@@ -20,3 +20,4 @@
 from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
 from ._sandboxremote import SandboxRemote
 from ._sandboxdummy import SandboxDummy
+from ._sandboxnone import SandboxNone
diff --git a/src/buildstream/sandbox/_sandboxnone.py b/src/buildstream/sandbox/_sandboxnone.py
new file mode 100644
index 0000000..e95a7b9
--- /dev/null
+++ b/src/buildstream/sandbox/_sandboxnone.py
@@ -0,0 +1,67 @@
+#
+#  Copyright (C) 2019 Bloomberg Finance LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Angelos Evripiotis <je...@bloomberg.net>
+
+import pathlib
+import pprint
+import subprocess
+
+from .._exceptions import SandboxError
+from .sandbox import Sandbox
+
+
+class SandboxNone(Sandbox):
+
+    def __init__(self, *args, **kwargs):
+        # TODO: don't require a dict copy.
+        kwargs = kwargs.copy()
+        kwargs['allow_real_directory'] = True
+
+        super().__init__(*args, **kwargs)
+
+        uid = self._get_config().build_uid
+        gid = self._get_config().build_gid
+        if uid != 0 or gid != 0:
+            raise SandboxError("The 'none' sandbox cannot specify a non-root uid/gid "
+                               "({},{} were supplied via config)".format(uid, gid))
+
+        self.mount_map = None
+
+    def _run(self, command, flags, *, cwd, env):
+
+        install_path = pathlib.Path(self.get_directory()) / 'buildstream-install'
+
+        env = env.copy()
+        env['BST_INSTALLPATH'] = str(install_path)
+
+        # TODO: figure out what to do with 'flags'.
+
+        # TODO: do this in a robust way.
+        if cwd.startswith("/"):
+            cwd = cwd[1:]
+
+        # pprint.pprint(env)
+
+        path = pathlib.Path(self.get_directory()) / cwd
+        print('run', command, 'in', path)
+        result = subprocess.run(command, cwd=path, env=env)
+
+        # out = pathlib.Path(self.get_directory()) / 'buildstream-install'
+        # out.mkdir(exist_ok=True)
+
+        return result.returncode
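
On the "do this in a robust way" TODO above: one plausible replacement for
the leading-slash slicing is pathlib, which keeps the POSIX-to-host
translation in one place. A hedged sketch under that assumption, not part
of the commit:

    import pathlib


    def host_path_for(sandbox_root, sandbox_cwd):
        # Sandbox paths are POSIX-style and rooted at '/': drop the anchor
        # and re-root the remainder under the sandbox directory on the host.
        relative = pathlib.PurePosixPath(sandbox_cwd).relative_to("/")
        return pathlib.Path(sandbox_root).joinpath(*relative.parts)


    print(host_path_for("/tmp/sandbox", "/buildstream/build"))
    # -> /tmp/sandbox/buildstream/build, with host separators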


[buildstream] 05/21: TEMP: testpickle

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit 6144a407d91c58791ce800060786e9ee46ebafee
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Tue Apr 2 13:24:55 2019 +0100

    TEMP: testpickle
---
 src/buildstream/_scheduler/jobs/job.py |  11 ++
 src/buildstream/testpickle.py          | 196 +++++++++++++++++++++++++++++++++
 2 files changed, 207 insertions(+)

diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index 4ca2ee4..e844c80 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -77,6 +77,17 @@ class _Envelope():
 class Process(multiprocessing.Process):
     # pylint: disable=attribute-defined-outside-init
     def start(self):
+
+        e = self
+        print(e)
+        import buildstream.testpickle
+
+        buildstream.testpickle.test_pickle(e, 1)
+
+        for _ in range(10):
+            print('done test pickle', flush=True)
+        # raise Exception("We made it!")
+
         self._popen = self._Popen(self)
         self._sentinel = self._popen.sentinel
 
diff --git a/src/buildstream/testpickle.py b/src/buildstream/testpickle.py
new file mode 100644
index 0000000..f84b808
--- /dev/null
+++ b/src/buildstream/testpickle.py
@@ -0,0 +1,196 @@
+import multiprocessing.reduction
+
+
+class _C:
+    def f(self):
+        pass
+
+
+def test_pickle(*args, **kwargs):
+    import bdb
+    try:
+        _test_pickle(*args, **kwargs)
+    except bdb.BdbQuit:
+        raise
+    except Exception as e:
+        breakpoint()
+        raise
+
+
+def _test_pickle(x, indent=0, visited=None):
+
+    def prefix_print(*messages):
+        print(".   " * indent + f"({type(x).__name__}):", *messages)
+
+    if visited is None:
+        visited = set()
+
+    if id(x) in visited:
+        prefix_print(".. skipping already visited")
+        return
+
+    visited.add(id(x))
+
+    import bdb
+
+    try:
+        test_pickle_direct(x)
+    except bdb.BdbQuit:
+        raise
+    except Exception as e:
+        prefix_print(f'({x}): does not pickle, recursing.', str(e), repr(e), ':.:')
+    else:
+        prefix_print(f'({x}): does pickle, skipping.')
+        return
+
+    if type(x) == type(_C().f):
+        prefix_print(f'method {x.__func__.__name__}')
+        try:
+            if x.__self__ is None:
+                value = x.__class__
+            else:
+                value = x.__self__
+            _test_pickle(value, indent + 1, visited)
+        except:
+            prefix_print(f"while pickling item method {x.__func__.__name__}: '{x}'.")
+            raise
+
+    if type(x).__name__ in ['method', 'instancemethod']:
+        prefix_print(".. skipping method")
+        return
+
+    if type(x).__name__ in ['list', 'tuple', 'set']:
+        prefix_print('... len', len(x))
+        for key, value in enumerate(x):
+            prefix_print(f'[{key}]')
+            try:
+                _test_pickle(value, indent + 1, visited)
+            except:
+                prefix_print(f"while pickling item {key}: {type(x).__name__}: '{x}'.")
+                raise
+        return
+
+    # if type(x).__name__ == 'function':
+    #     prefix_print("function?")
+    #     raise Exception()
+
+    # if type(x).__name__ == 'module':
+    #     prefix_print(".. module")
+    #     test_pickle_direct(x)
+    #     return
+
+    # TODO: make these work properly.
+    # if type(x).__name__ in ['SourceFactory', 'ElementFactory', 'Environment']:
+    #     prefix_print(".. skipping")
+    #     return
+    if type(x).__name__ in ['_UnixSelectorEventLoop', 'AuthenticationString', 'SyncManager']:
+        prefix_print(".. skipping")
+        return
+
+    if type(x).__name__ == 'dict':
+        prefix_print("...", x.keys())
+        for key, value in x.items():
+            prefix_print(f'[{key}]')
+            try:
+                _test_pickle(value, indent + 1, visited)
+            except:
+                prefix_print(f"while pickling ['{key}'].")
+                raise
+        return
+
+    # TODO: we need to make the generators work too, or ideally replace them.
+    # if type(x).__name__ == 'generator':
+    #     prefix_print(".. skipping generator")
+    #     return
+
+    # TODO: we need to make the weakrefs work properly.
+    if type(x).__name__ == 'weakref':
+        prefix_print(".. dereferencing weakref")
+        try:
+            _test_pickle(x(), indent, visited)
+        except:
+            prefix_print(f"while pickling weakref {x}.")
+            raise
+        return
+
+    try:
+        value = x.__getstate__()
+    except AttributeError:
+        pass
+    else:
+        prefix_print("... __getstate__")
+        try:
+            _test_pickle(value, indent + 1, visited)
+        except:
+            prefix_print(f"while pickling a __getstate__.")
+            raise
+        return
+
+    try:
+        x.__dict__
+    except AttributeError:
+        pass
+    else:
+        prefix_print("...", x.__dict__.keys())
+        for key, value in x.__dict__.items():
+            prefix_print(f'__dict__["{key}"]')
+            try:
+                _test_pickle(value, indent + 1, visited)
+            except:
+                prefix_print(f"while pickling member ['{key}'].")
+                raise
+        return
+
+    try:
+        x.__slots__
+    except AttributeError:
+        pass
+    else:
+        prefix_print("...", x.__slots__)
+        for key in x.__slots__:
+            value = getattr(x, key)
+            prefix_print(f'__slots__["{key}"]')
+            try:
+                _test_pickle(value, indent + 1, visited)
+            except:
+                prefix_print(f"while pickling member '{key}'.")
+                raise
+        return
+
+    prefix_print(x)
+    test_pickle_direct(x)
+
+
+def test_pickle_direct(x):
+    import io
+    import pickle
+    import multiprocessing.reduction
+
+    # Note that we should expect to see this complaint if we are not in a
+    # multiprocessing spawning_popen context; this will be fine when we're
+    # actually spawning:
+    #
+    #     Pickling an AuthenticationString object is disallowed for
+    #     security reasons.
+    #
+    # https://github.com/python/cpython/blob/master/Lib/multiprocessing/process.py#L335
+    #
+
+    # Suppress the complaint by pretending we're in a spawning context.
+    # https://github.com/python/cpython/blob/a8474d025cab794257d2fd0bea67840779b9351f/Lib/multiprocessing/popen_spawn_win32.py#L91
+    import multiprocessing.context
+    multiprocessing.context.set_spawning_popen("PPPPPopen")
+
+    data = io.BytesIO()
+
+    # Try to simulate what multiprocessing will do.
+    # https://github.com/python/cpython/blob/master/Lib/multiprocessing/reduction.py
+    try:
+        multiprocessing.reduction.dump(x, data)
+    except:
+        # breakpoint()
+        raise
+    finally:
+        multiprocessing.context.set_spawning_popen(None)
+
+    return data
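
As a usage note for the helper above: test_pickle prints one line per
visited attribute, recurses only into objects that fail a direct pickle,
and drops into the debugger at the failing leaf, so the deepest "while
pickling member" messages point at the offending object. A hedged example
of driving it on a small object graph (illustrative classes, not from the
commit):

    import threading

    from buildstream.testpickle import test_pickle


    class Inner:
        def __init__(self):
            self.lock = threading.Lock()   # unpicklable leaf


    class Outer:
        def __init__(self):
            self.name = "outer"            # pickles fine, so it is skipped
            self.inner = Inner()           # fails a direct pickle, recursed into


    # Walks Outer -> __dict__ -> Inner -> __dict__ -> lock, printing the
    # chain of "while pickling member" messages, then stops in the debugger
    # at the lock since no fallback can pickle it.
    test_pickle(Outer())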


[buildstream] 15/21: WIP: win32: job: signals / win32

Posted by tv...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

tvb pushed a commit to branch aevri/win32
in repository https://gitbox.apache.org/repos/asf/buildstream.git

commit f7842602fdfdd6d9ca20ddc44fcc300c2b6af2f0
Author: Angelos Evripiotis <je...@bloomberg.net>
AuthorDate: Wed Jun 12 16:21:57 2019 +0100

    WIP: win32: job: signals / win32
---
 src/buildstream/_scheduler/jobs/job.py  | 12 ++++++------
 src/buildstream/_scheduler/scheduler.py |  4 +++-
 src/buildstream/_signals.py             |  4 ++++
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/buildstream/_scheduler/jobs/job.py b/src/buildstream/_scheduler/jobs/job.py
index debc470..2a25f40 100644
--- a/src/buildstream/_scheduler/jobs/job.py
+++ b/src/buildstream/_scheduler/jobs/job.py
@@ -289,7 +289,7 @@ class Job():
         # the child process does not inherit the parent's state, but the main
         # process will be notified of any signal after we launch the child.
         #
-        with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
+        with _signals.blocked([signal.SIGINT, signal.SIGTERM], ignore=False):
             self._process.start()
 
         # Wait for the child task to complete.
@@ -772,15 +772,15 @@ class ChildJob():
 
         # This avoids some SIGTSTP signals from grandchildren
         # getting propagated up to the master process
-        os.setsid()
+        #os.setsid()
 
         # First set back to the default signal handlers for the signals
         # we handle, and then clear their blocked state.
         #
-        signal_list = [signal.SIGTSTP, signal.SIGTERM]
-        for sig in signal_list:
-            signal.signal(sig, signal.SIG_DFL)
-        signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list)
+        #signal_list = [signal.SIGTSTP, signal.SIGTERM]
+        #for sig in signal_list:
+        #    signal.signal(sig, signal.SIG_DFL)
+        #signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list)
 
         # Assign the queue we passed across the process boundaries
         #
diff --git a/src/buildstream/_scheduler/scheduler.py b/src/buildstream/_scheduler/scheduler.py
index 8a14391..73157b0 100644
--- a/src/buildstream/_scheduler/scheduler.py
+++ b/src/buildstream/_scheduler/scheduler.py
@@ -201,7 +201,7 @@ class Scheduler():
 
         # Block this until we're finished terminating jobs,
         # this will remain blocked forever.
-        signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
+        #signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
 
     # jobs_suspended()
     #
@@ -571,11 +571,13 @@ class Scheduler():
     # Connects our signal handler event callbacks to the mainloop
     #
     def _connect_signals(self):
+        return
         self.loop.add_signal_handler(signal.SIGINT, self._interrupt_event)
         self.loop.add_signal_handler(signal.SIGTERM, self._terminate_event)
         self.loop.add_signal_handler(signal.SIGTSTP, self._suspend_event)
 
     def _disconnect_signals(self):
+        return
         self.loop.remove_signal_handler(signal.SIGINT)
         self.loop.remove_signal_handler(signal.SIGTSTP)
         self.loop.remove_signal_handler(signal.SIGTERM)
diff --git a/src/buildstream/_signals.py b/src/buildstream/_signals.py
index 2df2c79..733f28e 100644
--- a/src/buildstream/_signals.py
+++ b/src/buildstream/_signals.py
@@ -135,6 +135,8 @@ def suspend_handler(sig, frame):
 #
 @contextmanager
 def suspendable(suspend_callback, resume_callback):
+    yield
+    return
     global suspendable_stack                  # pylint: disable=global-statement
 
     outermost = bool(not suspendable_stack)
@@ -164,6 +166,8 @@ def suspendable(suspend_callback, resume_callback):
 #
 @contextmanager
 def blocked(signal_list, ignore=True):
+    yield
+    return
 
     with ExitStack() as stack: