Posted to commits@mynewt.apache.org by cc...@apache.org on 2015/11/21 01:42:49 UTC

[24/42] incubator-mynewt-newt git commit: Move newt source into a "newt" subdirectory.

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
----------------------------------------------------------------------
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
deleted file mode 100644
index af4df8a..0000000
--- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-//   https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-)
-
-// MapSlice encodes and decodes as a YAML map.
-// The order of keys is preserved when encoding and decoding.
-type MapSlice []MapItem
-
-// MapItem is an item in a MapSlice.
-type MapItem struct {
-	Key, Value interface{}
-}
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
-// method receives a function that may be called to unmarshal the original
-// YAML value into a field or variable. It is safe to call the unmarshal
-// function parameter more than once if necessary.
-type Unmarshaler interface {
-	UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
-	MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     var t T
-//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
-	defer handleErr(&err)
-	d := newDecoder()
-	p := newParser(in)
-	defer p.destroy()
-	node := p.parse()
-	if node != nil {
-		v := reflect.ValueOf(out)
-		if v.Kind() == reflect.Ptr && !v.IsNil() {
-			v = v.Elem()
-		}
-		d.unmarshal(node, v)
-	}
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-//     omitempty    Only include the field if it's not set to the zero
-//                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
-//
-//     flow         Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-//     inline       Inline the field, which must be a struct or a map,
-//                  causing all of its fields or keys to be processed as if
-//                  they were part of the outer struct. For maps, keys must
-//                  not conflict with the yaml keys of other struct fields.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-//     type T struct {
-//         F int "a,omitempty"
-//         B int
-//     }
-//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
-	defer handleErr(&err)
-	e := newEncoder()
-	defer e.destroy()
-	e.marshal("", reflect.ValueOf(in))
-	e.finish()
-	out = e.out
-	return
-}
-
-func handleErr(err *error) {
-	if v := recover(); v != nil {
-		if e, ok := v.(yamlError); ok {
-			*err = e.err
-		} else {
-			panic(v)
-		}
-	}
-}
-
-type yamlError struct {
-	err error
-}
-
-func fail(err error) {
-	panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
-	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
-	Errors []string
-}
-
-func (e *TypeError) Error() string {
-	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
-	FieldsMap  map[string]fieldInfo
-	FieldsList []fieldInfo
-
-	// InlineMap is the number of the field in the struct that
-	// contains an ,inline map, or -1 if there's none.
-	InlineMap int
-}
-
-type fieldInfo struct {
-	Key       string
-	Num       int
-	OmitEmpty bool
-	Flow      bool
-
-	// Inline holds the field index if the field is part of an inlined struct.
-	Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
-	fieldMapMutex.RLock()
-	sinfo, found := structMap[st]
-	fieldMapMutex.RUnlock()
-	if found {
-		return sinfo, nil
-	}
-
-	n := st.NumField()
-	fieldsMap := make(map[string]fieldInfo)
-	fieldsList := make([]fieldInfo, 0, n)
-	inlineMap := -1
-	for i := 0; i != n; i++ {
-		field := st.Field(i)
-		if field.PkgPath != "" {
-			continue // Private field
-		}
-
-		info := fieldInfo{Num: i}
-
-		tag := field.Tag.Get("yaml")
-		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
-			tag = string(field.Tag)
-		}
-		if tag == "-" {
-			continue
-		}
-
-		inline := false
-		fields := strings.Split(tag, ",")
-		if len(fields) > 1 {
-			for _, flag := range fields[1:] {
-				switch flag {
-				case "omitempty":
-					info.OmitEmpty = true
-				case "flow":
-					info.Flow = true
-				case "inline":
-					inline = true
-				default:
-					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
-				}
-			}
-			tag = fields[0]
-		}
-
-		if inline {
-			switch field.Type.Kind() {
-			case reflect.Map:
-				if inlineMap >= 0 {
-					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
-				}
-				if field.Type.Key() != reflect.TypeOf("") {
-					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
-				}
-				inlineMap = info.Num
-			case reflect.Struct:
-				sinfo, err := getStructInfo(field.Type)
-				if err != nil {
-					return nil, err
-				}
-				for _, finfo := range sinfo.FieldsList {
-					if _, found := fieldsMap[finfo.Key]; found {
-						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
-						return nil, errors.New(msg)
-					}
-					if finfo.Inline == nil {
-						finfo.Inline = []int{i, finfo.Num}
-					} else {
-						finfo.Inline = append([]int{i}, finfo.Inline...)
-					}
-					fieldsMap[finfo.Key] = finfo
-					fieldsList = append(fieldsList, finfo)
-				}
-			default:
-				//return nil, errors.New("Option ,inline needs a struct value or map field")
-				return nil, errors.New("Option ,inline needs a struct value field")
-			}
-			continue
-		}
-
-		if tag != "" {
-			info.Key = tag
-		} else {
-			info.Key = strings.ToLower(field.Name)
-		}
-
-		if _, found = fieldsMap[info.Key]; found {
-			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
-			return nil, errors.New(msg)
-		}
-
-		fieldsList = append(fieldsList, info)
-		fieldsMap[info.Key] = info
-	}
-
-	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
-
-	fieldMapMutex.Lock()
-	structMap[st] = sinfo
-	fieldMapMutex.Unlock()
-	return sinfo, nil
-}
-
-func isZero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.String:
-		return len(v.String()) == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflect.Slice:
-		return v.Len() == 0
-	case reflect.Map:
-		return v.Len() == 0
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Struct:
-		vt := v.Type()
-		for i := v.NumField()-1; i >= 0; i-- {
-			if vt.Field(i).PkgPath != "" {
-				continue // Private field
-			}
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
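
The doc comments above fully describe the package's exported surface (Marshal, Unmarshal, Marshaler, Unmarshaler, MapSlice and the "yaml" struct tags), so a short usage sketch may help tie them together. This is illustrative only: the Config type and its fields are invented for the example, and it assumes the library is imported by its upstream path gopkg.in/yaml.v2 rather than through the Godeps workspace shown in this commit.

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Config is an invented type showing the "yaml" field tags documented above:
// a custom key, omitempty, flow style, and an inline string-keyed map.
type Config struct {
	Name   string            `yaml:"name"`
	Port   int               `yaml:"port,omitempty"`
	Labels []string          `yaml:"labels,flow"`
	Extra  map[string]string `yaml:",inline"`
}

func main() {
	src := Config{
		Name:   "newt",
		Labels: []string{"build", "tool"},
		Extra:  map[string]string{"arch": "cortex_m4"},
	}

	// Marshal serializes the value into a YAML document. Port is dropped
	// because it holds its zero value and is tagged omitempty.
	out, err := yaml.Marshal(&src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
	// Typically prints:
	//   name: newt
	//   labels: [build, tool]
	//   arch: cortex_m4

	// Unmarshal decodes the first document in the slice back into a struct,
	// matching keys against the "yaml" tags or lowercased field names.
	var dst Config
	if err := yaml.Unmarshal(out, &dst); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", dst)

	// MapSlice preserves key order across decode and encode, unlike a
	// plain map.
	var ordered yaml.MapSlice
	if err := yaml.Unmarshal([]byte("b: 2\na: 1\n"), &ordered); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ordered[0].Key, ordered[1].Key) // b a
}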

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
----------------------------------------------------------------------
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
deleted file mode 100644
index d60a6b6..0000000
--- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
+++ /dev/null
@@ -1,716 +0,0 @@
-package yaml
-
-import (
-	"io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
-	major int8 // The major version number.
-	minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
-	handle []byte // The tag handle.
-	prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
-	// Let the parser choose the encoding.
-	yaml_ANY_ENCODING yaml_encoding_t = iota
-
-	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
-	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
-	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
-	// Let the parser choose the break type.
-	yaml_ANY_BREAK yaml_break_t = iota
-
-	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
-	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
-	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
-	// No error is produced.
-	yaml_NO_ERROR yaml_error_type_t = iota
-
-	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
-	yaml_READER_ERROR   // Cannot read or decode the input stream.
-	yaml_SCANNER_ERROR  // Cannot scan the input stream.
-	yaml_PARSER_ERROR   // Cannot parse the input stream.
-	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
-	yaml_WRITER_ERROR   // Cannot write to the output stream.
-	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
-	index  int // The position index.
-	line   int // The position line.
-	column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
-	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
-	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
-	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
-	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
-	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
-	// An empty token.
-	yaml_NO_TOKEN yaml_token_type_t = iota
-
-	yaml_STREAM_START_TOKEN // A STREAM-START token.
-	yaml_STREAM_END_TOKEN   // A STREAM-END token.
-
-	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
-	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
-	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
-	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
-
-	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
-	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
-
-	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
-	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
-	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
-	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
-
-	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
-	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
-	yaml_KEY_TOKEN         // A KEY token.
-	yaml_VALUE_TOKEN       // A VALUE token.
-
-	yaml_ALIAS_TOKEN  // An ALIAS token.
-	yaml_ANCHOR_TOKEN // An ANCHOR token.
-	yaml_TAG_TOKEN    // A TAG token.
-	yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
-	switch tt {
-	case yaml_NO_TOKEN:
-		return "yaml_NO_TOKEN"
-	case yaml_STREAM_START_TOKEN:
-		return "yaml_STREAM_START_TOKEN"
-	case yaml_STREAM_END_TOKEN:
-		return "yaml_STREAM_END_TOKEN"
-	case yaml_VERSION_DIRECTIVE_TOKEN:
-		return "yaml_VERSION_DIRECTIVE_TOKEN"
-	case yaml_TAG_DIRECTIVE_TOKEN:
-		return "yaml_TAG_DIRECTIVE_TOKEN"
-	case yaml_DOCUMENT_START_TOKEN:
-		return "yaml_DOCUMENT_START_TOKEN"
-	case yaml_DOCUMENT_END_TOKEN:
-		return "yaml_DOCUMENT_END_TOKEN"
-	case yaml_BLOCK_SEQUENCE_START_TOKEN:
-		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
-	case yaml_BLOCK_MAPPING_START_TOKEN:
-		return "yaml_BLOCK_MAPPING_START_TOKEN"
-	case yaml_BLOCK_END_TOKEN:
-		return "yaml_BLOCK_END_TOKEN"
-	case yaml_FLOW_SEQUENCE_START_TOKEN:
-		return "yaml_FLOW_SEQUENCE_START_TOKEN"
-	case yaml_FLOW_SEQUENCE_END_TOKEN:
-		return "yaml_FLOW_SEQUENCE_END_TOKEN"
-	case yaml_FLOW_MAPPING_START_TOKEN:
-		return "yaml_FLOW_MAPPING_START_TOKEN"
-	case yaml_FLOW_MAPPING_END_TOKEN:
-		return "yaml_FLOW_MAPPING_END_TOKEN"
-	case yaml_BLOCK_ENTRY_TOKEN:
-		return "yaml_BLOCK_ENTRY_TOKEN"
-	case yaml_FLOW_ENTRY_TOKEN:
-		return "yaml_FLOW_ENTRY_TOKEN"
-	case yaml_KEY_TOKEN:
-		return "yaml_KEY_TOKEN"
-	case yaml_VALUE_TOKEN:
-		return "yaml_VALUE_TOKEN"
-	case yaml_ALIAS_TOKEN:
-		return "yaml_ALIAS_TOKEN"
-	case yaml_ANCHOR_TOKEN:
-		return "yaml_ANCHOR_TOKEN"
-	case yaml_TAG_TOKEN:
-		return "yaml_TAG_TOKEN"
-	case yaml_SCALAR_TOKEN:
-		return "yaml_SCALAR_TOKEN"
-	}
-	return "<unknown token>"
-}
-
-// The token structure.
-type yaml_token_t struct {
-	// The token type.
-	typ yaml_token_type_t
-
-	// The start/end of the token.
-	start_mark, end_mark yaml_mark_t
-
-	// The stream encoding (for yaml_STREAM_START_TOKEN).
-	encoding yaml_encoding_t
-
-	// The alias/anchor/scalar value or tag/tag directive handle
-	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
-	value []byte
-
-	// The tag suffix (for yaml_TAG_TOKEN).
-	suffix []byte
-
-	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
-	prefix []byte
-
-	// The scalar style (for yaml_SCALAR_TOKEN).
-	style yaml_scalar_style_t
-
-	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
-	major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
-	// An empty event.
-	yaml_NO_EVENT yaml_event_type_t = iota
-
-	yaml_STREAM_START_EVENT   // A STREAM-START event.
-	yaml_STREAM_END_EVENT     // A STREAM-END event.
-	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
-	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
-	yaml_ALIAS_EVENT          // An ALIAS event.
-	yaml_SCALAR_EVENT         // A SCALAR event.
-	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
-	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
-	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
-	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
-)
-
-// The event structure.
-type yaml_event_t struct {
-
-	// The event type.
-	typ yaml_event_type_t
-
-	// The start and end of the event.
-	start_mark, end_mark yaml_mark_t
-
-	// The document encoding (for yaml_STREAM_START_EVENT).
-	encoding yaml_encoding_t
-
-	// The version directive (for yaml_DOCUMENT_START_EVENT).
-	version_directive *yaml_version_directive_t
-
-	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
-	tag_directives []yaml_tag_directive_t
-
-	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
-	anchor []byte
-
-	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
-	tag []byte
-
-	// The scalar value (for yaml_SCALAR_EVENT).
-	value []byte
-
-	// Is the document start/end indicator implicit, or the tag optional?
-	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
-	implicit bool
-
-	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
-	quoted_implicit bool
-
-	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
-	style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
-	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
-	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
-	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
-	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
-	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
-	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
-	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
-	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
-
-	// Not in original libyaml.
-	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
-	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
-
-	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
-	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
-	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
-	// An empty node.
-	yaml_NO_NODE yaml_node_type_t = iota
-
-	yaml_SCALAR_NODE   // A scalar node.
-	yaml_SEQUENCE_NODE // A sequence node.
-	yaml_MAPPING_NODE  // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
-	key   int // The key of the element.
-	value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
-	typ yaml_node_type_t // The node type.
-	tag []byte           // The node tag.
-
-	// The node data.
-
-	// The scalar parameters (for yaml_SCALAR_NODE).
-	scalar struct {
-		value  []byte              // The scalar value.
-		length int                 // The length of the scalar value.
-		style  yaml_scalar_style_t // The scalar style.
-	}
-
-	// The sequence parameters (for YAML_SEQUENCE_NODE).
-	sequence struct {
-		items_data []yaml_node_item_t    // The stack of sequence items.
-		style      yaml_sequence_style_t // The sequence style.
-	}
-
-	// The mapping parameters (for yaml_MAPPING_NODE).
-	mapping struct {
-		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
-		pairs_start *yaml_node_pair_t    // The beginning of the stack.
-		pairs_end   *yaml_node_pair_t    // The end of the stack.
-		pairs_top   *yaml_node_pair_t    // The top of the stack.
-		style       yaml_mapping_style_t // The mapping style.
-	}
-
-	start_mark yaml_mark_t // The beginning of the node.
-	end_mark   yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
-	// The document nodes.
-	nodes []yaml_node_t
-
-	// The version directive.
-	version_directive *yaml_version_directive_t
-
-	// The list of tag directives.
-	tag_directives_data  []yaml_tag_directive_t
-	tag_directives_start int // The beginning of the tag directives list.
-	tag_directives_end   int // The end of the tag directives list.
-
-	start_implicit int // Is the document start indicator implicit?
-	end_implicit   int // Is the document end indicator implicit?
-
-	// The start/end of the document.
-	start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write not more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out]   data        A pointer to an application data specified by
-//                        yaml_parser_set_input().
-// [out]      buffer      The buffer to write the data from the source.
-// [in]       size        The size of the buffer.
-// [out]      size_read   The actual number of bytes read from the source.
-//
-// On success, the handler should return 1.  If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
-	possible     bool        // Is a simple key possible?
-	required     bool        // Is a simple key required?
-	token_number int         // The number of the token.
-	mark         yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
-	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
-	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
-	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
-	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
-	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
-	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
-	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
-	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
-	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
-	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
-	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
-	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
-	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
-	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
-	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
-	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
-	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
-	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
-	yaml_PARSE_END_STATE                               // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
-	switch ps {
-	case yaml_PARSE_STREAM_START_STATE:
-		return "yaml_PARSE_STREAM_START_STATE"
-	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
-		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_START_STATE:
-		return "yaml_PARSE_DOCUMENT_START_STATE"
-	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
-		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
-	case yaml_PARSE_DOCUMENT_END_STATE:
-		return "yaml_PARSE_DOCUMENT_END_STATE"
-	case yaml_PARSE_BLOCK_NODE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_STATE"
-	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
-		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
-	case yaml_PARSE_FLOW_NODE_STATE:
-		return "yaml_PARSE_FLOW_NODE_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
-	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
-		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
-	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
-	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
-	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
-		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
-	case yaml_PARSE_END_STATE:
-		return "yaml_PARSE_END_STATE"
-	}
-	return "<unknown parser state>"
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
-	anchor []byte      // The anchor.
-	index  int         // The node id.
-	mark   yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
-	// Error handling
-
-	error yaml_error_type_t // Error type.
-
-	problem string // Error description.
-
-	// The byte about which the problem occurred.
-	problem_offset int
-	problem_value  int
-	problem_mark   yaml_mark_t
-
-	// The error context.
-	context      string
-	context_mark yaml_mark_t
-
-	// Reader stuff
-
-	read_handler yaml_read_handler_t // Read handler.
-
-	input_file io.Reader // File input data.
-	input      []byte    // String input data.
-	input_pos  int
-
-	eof bool // EOF flag
-
-	buffer     []byte // The working buffer.
-	buffer_pos int    // The current position of the buffer.
-
-	unread int // The number of unread characters in the buffer.
-
-	raw_buffer     []byte // The raw buffer.
-	raw_buffer_pos int    // The current position of the buffer.
-
-	encoding yaml_encoding_t // The input encoding.
-
-	offset int         // The offset of the current position (in bytes).
-	mark   yaml_mark_t // The mark of the current position.
-
-	// Scanner stuff
-
-	stream_start_produced bool // Have we started to scan the input stream?
-	stream_end_produced   bool // Have we reached the end of the input stream?
-
-	flow_level int // The number of unclosed '[' and '{' indicators.
-
-	tokens          []yaml_token_t // The tokens queue.
-	tokens_head     int            // The head of the tokens queue.
-	tokens_parsed   int            // The number of tokens fetched from the queue.
-	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
-
-	indent  int   // The current indentation level.
-	indents []int // The indentation levels stack.
-
-	simple_key_allowed bool                // May a simple key occur at the current position?
-	simple_keys        []yaml_simple_key_t // The stack of simple keys.
-
-	// Parser stuff
-
-	state          yaml_parser_state_t    // The current parser state.
-	states         []yaml_parser_state_t  // The parser states stack.
-	marks          []yaml_mark_t          // The stack of marks.
-	tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
-	// Dumper stuff
-
-	aliases []yaml_alias_data_t // The alias data.
-
-	document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output.  The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out]   data        A pointer to an application data specified by
-//                              yaml_emitter_set_output().
-// @param[in]       buffer      The buffer with bytes to be written.
-// @param[in]       size        The size of the buffer.
-//
-// @returns On success, the handler should return @c 1.  If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
-	// Expect STREAM-START.
-	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
-	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
-	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
-	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
-	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
-	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
-	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
-	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
-	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
-	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
-	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
-	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
-	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
-	yaml_EMIT_END_STATE                        // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal.  Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
-	// Error handling
-
-	error   yaml_error_type_t // Error type.
-	problem string            // Error description.
-
-	// Writer stuff
-
-	write_handler yaml_write_handler_t // Write handler.
-
-	output_buffer *[]byte   // String output data.
-	output_file   io.Writer // File output data.
-
-	buffer     []byte // The working buffer.
-	buffer_pos int    // The current position of the buffer.
-
-	raw_buffer     []byte // The raw buffer.
-	raw_buffer_pos int    // The current position of the buffer.
-
-	encoding yaml_encoding_t // The stream encoding.
-
-	// Emitter stuff
-
-	canonical   bool         // Is the output in the canonical style?
-	best_indent int          // The number of indentation spaces.
-	best_width  int          // The preferred width of the output lines.
-	unicode     bool         // Allow unescaped non-ASCII characters?
-	line_break  yaml_break_t // The preferred line break.
-
-	state  yaml_emitter_state_t   // The current emitter state.
-	states []yaml_emitter_state_t // The stack of states.
-
-	events      []yaml_event_t // The event queue.
-	events_head int            // The head of the event queue.
-
-	indents []int // The stack of indentation levels.
-
-	tag_directives []yaml_tag_directive_t // The list of tag directives.
-
-	indent int // The current indentation level.
-
-	flow_level int // The current flow level.
-
-	root_context       bool // Is it the document root context?
-	sequence_context   bool // Is it a sequence context?
-	mapping_context    bool // Is it a mapping context?
-	simple_key_context bool // Is it a simple mapping key context?
-
-	line       int  // The current line.
-	column     int  // The current column.
-	whitespace bool // Was the last character a whitespace?
-	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
-	open_ended bool // Is an explicit document end required?
-
-	// Anchor analysis.
-	anchor_data struct {
-		anchor []byte // The anchor value.
-		alias  bool   // Is it an alias?
-	}
-
-	// Tag analysis.
-	tag_data struct {
-		handle []byte // The tag handle.
-		suffix []byte // The tag suffix.
-	}
-
-	// Scalar analysis.
-	scalar_data struct {
-		value                 []byte              // The scalar value.
-		multiline             bool                // Does the scalar contain line breaks?
-		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
-		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
-		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
-		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
-		style                 yaml_scalar_style_t // The output style.
-	}
-
-	// Dumper stuff
-
-	opened bool // Has the stream already been opened?
-	closed bool // Has the stream already been closed?
-
-	// The information associated with the document nodes.
-	anchors *struct {
-		references int  // The number of references.
-		anchor     int  // The anchor id.
-		serialized bool // Has the node been emitted?
-	}
-
-	last_anchor_id int // The last assigned anchor id.
-
-	document *yaml_document_t // The currently emitted document.
-}
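
yamlh.go is essentially libyaml's header translated to Go: the token, event and node structures that the vendored scanner, parser and emitter pass between themselves. None of these types are exported, so they are only visible through their effect on the public API. As a rough illustration (document contents invented, event names abbreviated), a plain Unmarshal of a small document rides on top of an event stream like the one sketched in the comments below.

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("colors:\n  - red\n  - green\n")

	// Internally the parser produces an event stream built from the types
	// defined above, roughly:
	//   STREAM-START, DOCUMENT-START,
	//     MAPPING-START, SCALAR("colors"),
	//       SEQUENCE-START, SCALAR("red"), SCALAR("green"), SEQUENCE-END,
	//     MAPPING-END,
	//   DOCUMENT-END, STREAM-END
	// which is composed into a node tree and then decoded into Go values.
	var v interface{}
	if err := yaml.Unmarshal(doc, &v); err != nil {
		log.Fatal(err)
	}

	// In yaml.v2, untyped mappings decode to map[interface{}]interface{}
	// and sequences to []interface{}.
	m := v.(map[interface{}]interface{})
	fmt.Println(m["colors"]) // [red green]
}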

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
----------------------------------------------------------------------
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
deleted file mode 100644
index 8110ce3..0000000
--- a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
-	// The size of the input raw buffer.
-	input_raw_buffer_size = 512
-
-	// The size of the input buffer.
-	// It should be possible to decode the whole raw buffer.
-	input_buffer_size = input_raw_buffer_size * 3
-
-	// The size of the output buffer.
-	output_buffer_size = 128
-
-	// The size of the output raw buffer.
-	// It should be possible to encode the whole output buffer.
-	output_raw_buffer_size = (output_buffer_size*2 + 2)
-
-	// The size of other stacks and queues.
-	initial_stack_size  = 16
-	initial_queue_size  = 16
-	initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
-	return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
-	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
-	bi := b[i]
-	if bi >= 'A' && bi <= 'F' {
-		return int(bi) - 'A' + 10
-	}
-	if bi >= 'a' && bi <= 'f' {
-		return int(bi) - 'a' + 10
-	}
-	return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
-	return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
-	return ((b[i] == 0x0A) || // . == #x0A
-		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
-		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
-		(b[i] > 0xC2 && b[i] < 0xED) ||
-		(b[i] == 0xED && b[i+1] < 0xA0) ||
-		(b[i] == 0xEE) ||
-		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
-			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
-			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
-	return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
-	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
-	return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
-	return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
-	//return is_space(b, i) || is_tab(b, i)
-	return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
-	return (b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
-	return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
-	//return is_break(b, i) || is_z(b, i)
-	return (        // is_break:
-	b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		// is_z:
-		b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
-	//return is_space(b, i) || is_breakz(b, i)
-	return ( // is_space:
-	b[i] == ' ' ||
-		// is_breakz:
-		b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
-	//return is_blank(b, i) || is_breakz(b, i)
-	return ( // is_blank:
-	b[i] == ' ' || b[i] == '\t' ||
-		// is_breakz:
-		b[i] == '\r' || // CR (#xD)
-		b[i] == '\n' || // LF (#xA)
-		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
-		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
-		b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
-	// Don't replace these by a switch without first
-	// confirming that it is being inlined.
-	if b&0x80 == 0x00 {
-		return 1
-	}
-	if b&0xE0 == 0xC0 {
-		return 2
-	}
-	if b&0xF0 == 0xE0 {
-		return 3
-	}
-	if b&0xF8 == 0xF0 {
-		return 4
-	}
-	return 0
-
-}
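
yamlprivateh.go collects the byte-level predicates the scanner uses so it never has to decode full runes, and width() reads a UTF-8 sequence length directly from the lead byte's bit pattern. The standalone sketch below restates that rule with a hypothetical helper name; it uses a switch for readability, which the original deliberately avoids for inlining reasons, so it is not a drop-in replacement.

package main

import "fmt"

// utf8Width mirrors the lead-byte test used by width() above:
// 0xxxxxxx -> 1 byte, 110xxxxx -> 2, 1110xxxx -> 3, 11110xxx -> 4,
// anything else (a continuation byte 10xxxxxx or an invalid lead) -> 0.
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	default:
		return 0
	}
}

func main() {
	for _, b := range []byte{'a', 0xC3, 0xE2, 0xF0, 0xBF} {
		fmt.Printf("0x%02X -> %d\n", b, utf8Width(b))
	}
	// Output:
	// 0x61 -> 1
	// 0xC3 -> 2
	// 0xE2 -> 3
	// 0xF0 -> 4
	// 0xBF -> 0
}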

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/LICENSE
----------------------------------------------------------------------
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 8f71f43..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
deleted file mode 100644
index 2e46929..0000000
--- a/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Newt OS 
-
-

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/cli/build.go
----------------------------------------------------------------------
diff --git a/cli/build.go b/cli/build.go
deleted file mode 100644
index 9e52ddf..0000000
--- a/cli/build.go
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- Copyright 2015 Runtime Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cli
-
-import (
-	"os"
-)
-
-// Recursively iterates through an egg's dependencies, adding each egg
-// encountered to the supplied set.
-func collectDepsAux(clutch *Clutch, egg *Egg, set *map[*Egg]bool) error {
-	if (*set)[egg] {
-		return nil
-	}
-
-	(*set)[egg] = true
-
-	for _, dep := range egg.Deps {
-		if dep.Name == "" {
-			break
-		}
-
-		// Get egg structure
-		degg, err := clutch.ResolveEggName(dep.Name)
-		if err != nil {
-			return err
-		}
-
-		collectDepsAux(clutch, degg, set)
-	}
-
-	return nil
-}
-
-// Recursively iterates through an egg's dependencies.  The resulting array
-// contains a pointer to each encountered egg.
-func collectDeps(clutch *Clutch, egg *Egg) ([]*Egg, error) {
-	set := map[*Egg]bool{}
-
-	err := collectDepsAux(clutch, egg, &set)
-	if err != nil {
-		return nil, err
-	}
-
-	arr := []*Egg{}
-	for p, _ := range set {
-		arr = append(arr, p)
-	}
-
-	return arr, nil
-}
-
-// Calculates the include paths exported by the specified egg and all of
-// its recursive dependencies.
-func recursiveIncludePaths(clutch *Clutch, egg *Egg,
-	t *Target) ([]string, error) {
-
-	deps, err := collectDeps(clutch, egg)
-	if err != nil {
-		return nil, err
-	}
-
-	incls := []string{}
-	for _, p := range deps {
-		eggIncls, err := p.GetIncludes(t)
-		if err != nil {
-			return nil, err
-		}
-		incls = append(incls, eggIncls...)
-	}
-
-	return incls, nil
-}
-
-// Calculates the include paths exported by the specified target's BSP and all
-// of its recursive dependencies.
-func BspIncludePaths(clutch *Clutch, t *Target) ([]string, error) {
-	if t.Bsp == "" {
-		return nil, NewNewtError("Expected a BSP")
-	}
-
-	bspEgg, err := clutch.ResolveEggName(t.Bsp)
-	if err != nil {
-		return nil, NewNewtError("No BSP egg for " + t.Bsp + " exists")
-	}
-
-	return recursiveIncludePaths(clutch, bspEgg, t)
-}
-
-func buildBsp(t *Target, clutch *Clutch, incls *[]string,
-	libs *[]string, capEggs map[string]string) (string, error) {
-
-	if t.Bsp == "" {
-		return "", NewNewtError("Expected a BSP")
-	}
-
-	bspEgg, err := clutch.ResolveEggName(t.Bsp)
-	if err != nil {
-		return "", NewNewtError("No BSP egg for " + t.Bsp + " exists")
-	}
-
-	if err = clutch.Build(t, t.Bsp, *incls, libs); err != nil {
-		return "", err
-	}
-
-	// A BSP doesn't have to contain source; don't fail if no library was
-	// built.
-	if lib := clutch.GetEggLib(t, bspEgg); NodeExist(lib) {
-		*libs = append(*libs, lib)
-	}
-
-	var linkerScript string
-	if bspEgg.LinkerScript != "" {
-		linkerScript = bspEgg.BasePath + "/" + bspEgg.LinkerScript
-	} else {
-		linkerScript = ""
-	}
-
-	return linkerScript, nil
-}
-
-// Creates the set of compiler flags that should be specified when building a
-// particular target-entity pair.  The "entity" is what is being built; either
-// an egg or a project.
-func CreateCflags(clutch *Clutch, c *Compiler, t *Target,
-	entityCflags string) string {
-
-	cflags := c.Cflags + " " + entityCflags + " " + t.Cflags
-
-	// The 'test' identity causes the TEST symbol to be defined.  This allows
-	// egg code to behave differently in test builds.
-	if t.HasIdentity("test") {
-		cflags += " -DTEST"
-	}
-
-	cflags += " -DARCH_" + t.Arch
-
-	// If a non-BSP egg is being built, add the BSP's C flags to the list.
-	// The BSP's compiler flags get exported to all eggs.
-	bspEgg, err := clutch.ResolveEggName(t.Bsp)
-	if err == nil && bspEgg.Cflags != entityCflags {
-		cflags += " " + bspEgg.Cflags
-	}
-
-	return cflags
-}
-
-func EggIncludeDirs(egg *Egg, t *Target) []string {
-	srcDir := egg.BasePath + "/src/"
-
-	incls := egg.Includes
-	incls = append(incls, srcDir)
-	incls = append(incls, srcDir+"/arch/"+t.Arch)
-
-	if t.HasIdentity("test") {
-		testSrcDir := srcDir + "/test"
-		incls = append(incls, testSrcDir)
-		incls = append(incls, testSrcDir+"/arch/"+t.Arch)
-	}
-
-	return incls
-}
-
-// Recursively compiles all the .c and .s files in the specified directory.
-// Architecture-specific files are also compiled.
-func BuildDir(srcDir string, c *Compiler, t *Target, ignDirs []string) error {
-	var err error
-
-	StatusMessage(VERBOSITY_VERBOSE, "compiling src in base directory: %s\n",
-		srcDir)
-
-	// First change into the egg src directory, and build all the objects
-	// there
-	os.Chdir(srcDir)
-
-	// Don't recurse into destination directories.
-	ignDirs = append(ignDirs, "obj")
-	ignDirs = append(ignDirs, "bin")
-
-	// Ignore architecture-specific source files for now.  Use a temporary
-	// string array here so that the "arch" directory is not ignored in the
-	// subsequent architecture-specific compile phase.
-	baseIgnDirs := append(ignDirs, "arch")
-
-	if err = c.RecursiveCompile("*.c", 0, baseIgnDirs); err != nil {
-		return err
-	}
-
-	archDir := srcDir + "/arch/" + t.Arch + "/"
-	StatusMessage(VERBOSITY_VERBOSE,
-		"compiling architecture specific src eggs in directory: %s\n",
-		archDir)
-
-	if NodeExist(archDir) {
-		if err := os.Chdir(archDir); err != nil {
-			return NewNewtError(err.Error())
-		}
-		if err := c.RecursiveCompile("*.c", 0, ignDirs); err != nil {
-			return err
-		}
-
-		// compile assembly sources in recursive compile as well
-		if err = c.RecursiveCompile("*.s", 1, ignDirs); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
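
The removed cli/build.go collects an egg's transitive dependencies
(collectDeps/collectDepsAux) by recursing through each egg's Deps list and
de-duplicating with a set keyed on the egg pointer.  A minimal, standalone
sketch of that pattern (simplified stand-in types, not the real Egg and
Clutch structs) might look like this:

    package main

    import "fmt"

    // egg is a simplified stand-in for the Egg type in cli/build.go.
    type egg struct {
        name string
        deps []*egg
    }

    // collect walks the dependency graph depth-first; the set records eggs
    // that were already visited, so shared dependencies are only added once.
    func collect(e *egg, set map[*egg]bool) {
        if set[e] {
            return
        }
        set[e] = true
        for _, d := range e.deps {
            collect(d, set)
        }
    }

    func main() {
        libA := &egg{name: "libs/a"}
        libB := &egg{name: "libs/b", deps: []*egg{libA}}
        app := &egg{name: "project/app", deps: []*egg{libA, libB}}

        set := map[*egg]bool{}
        collect(app, set)
        for e := range set {
            fmt.Println(e.name) // prints all three eggs, in map order
        }
    }

The real code additionally resolves each dependency name through the Clutch
and propagates lookup errors rather than holding egg pointers directly.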

http://git-wip-us.apache.org/repos/asf/incubator-mynewt-newt/blob/e95057f4/cli/clutch.go
----------------------------------------------------------------------
diff --git a/cli/clutch.go b/cli/clutch.go
deleted file mode 100644
index 3d59fc3..0000000
--- a/cli/clutch.go
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
- Copyright 2015 Runtime Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package cli
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/spf13/viper"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strings"
-)
-
-type Clutch struct {
-	// Nest (repository) associated with the Eggs
-	Nest *Nest
-
-	// List of packages for Nest
-	Eggs map[string]*Egg
-
-	EggShells map[string]*EggShell
-
-	Name string
-
-	LarvaFile string
-
-	RemoteUrl string
-
-	Branch string
-}
-
-// Allocate a new package manager structure, and initialize it.
-func NewClutch(nest *Nest) (*Clutch, error) {
-	clutch := &Clutch{
-		Nest: nest,
-	}
-	err := clutch.Init()
-
-	return clutch, err
-}
-
-func (clutch *Clutch) LoadConfigs(t *Target, force bool) error {
-	for _, egg := range clutch.Eggs {
-		if err := egg.LoadConfig(t, force); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (clutch *Clutch) CheckEggDeps(egg *Egg,
-	deps map[string]*DependencyRequirement,
-	reqcap map[string]*DependencyRequirement,
-	caps map[string]*DependencyRequirement,
-	capEggs map[string]string) error {
-
-	for _, depReq := range egg.Deps {
-		// don't process this package if we've already processed it
-		if _, ok := deps[depReq.String()]; ok {
-			continue
-		}
-
-		eggName := egg.Name
-		StatusMessage(VERBOSITY_VERBOSE,
-			"Checking dependency %s for package %s\n", depReq.Name, eggName)
-		egg, ok := clutch.Eggs[depReq.Name]
-		if !ok {
-			return NewNewtError(
-				fmt.Sprintf("No package dependency %s found for %s",
-					depReq.Name, eggName))
-		}
-
-		if ok := depReq.SatisfiesDependency(egg); !ok {
-			return NewNewtError(fmt.Sprintf("Egg %s doesn't satisfy dependency %s",
-				egg.Name, depReq))
-		}
-
-		// We've checked this dependency requirement; all is good.
-		deps[depReq.String()] = depReq
-	}
-
-	for _, reqCap := range egg.ReqCapabilities {
-		reqcap[reqCap.String()] = reqCap
-	}
-
-	for _, cap := range egg.Capabilities {
-		if caps[cap.String()] != nil && capEggs[cap.String()] != egg.FullName {
-			return NewNewtError(fmt.Sprintf("Multiple eggs with capability %s",
-				cap.String()))
-		}
-		caps[cap.String()] = cap
-		if capEggs != nil {
-			capEggs[cap.String()] = egg.FullName
-		}
-	}
-
-	// Now go through and recurse through the sub-package dependencies
-	for _, depReq := range egg.Deps {
-		if _, ok := deps[depReq.String()]; ok {
-			continue
-		}
-
-		if err := clutch.CheckEggDeps(clutch.Eggs[depReq.Name], deps,
-			reqcap, caps, capEggs); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (clutch *Clutch) VerifyCaps(reqcaps map[string]*DependencyRequirement,
-	caps map[string]*DependencyRequirement) error {
-
-	for name, rcap := range reqcaps {
-		capability, ok := caps[name]
-		if !ok {
-			return NewNewtError(fmt.Sprintf("Required capability %s not found",
-				name))
-		}
-
-		if err := rcap.SatisfiesCapability(capability); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (clutch *Clutch) CheckDeps() error {
-	// Go through all the packages and check that their dependencies are satisfied
-	for _, egg := range clutch.Eggs {
-		deps := map[string]*DependencyRequirement{}
-		reqcap := map[string]*DependencyRequirement{}
-		caps := map[string]*DependencyRequirement{}
-
-		if err := clutch.CheckEggDeps(egg, deps, reqcap, caps, nil); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Load an individual package specified by eggName into the package list for
-// this repository
-func (clutch *Clutch) loadEgg(eggDir string, eggPrefix string,
-	eggName string) error {
-	StatusMessage(VERBOSITY_VERBOSE, "Loading Egg "+eggDir+"...\n")
-
-	if clutch.Eggs == nil {
-		clutch.Eggs = make(map[string]*Egg)
-	}
-
-	egg, err := NewEgg(clutch.Nest, eggDir)
-	if err != nil {
-		return nil
-	}
-
-	clutch.Eggs[eggPrefix+eggName] = egg
-
-	return nil
-}
-
-func (clutch *Clutch) String() string {
-	str := ""
-	for eggName := range clutch.Eggs {
-		str += eggName + " "
-	}
-	return str
-}
-
-// Recursively load a package, given the baseDir of the packages (e.g. egg/ or
-// hw/bsp) and the base package name.
-func (clutch *Clutch) loadEggDir(baseDir string, eggPrefix string,
-	eggName string) error {
-	log.Printf("[DEBUG] Loading eggs in %s, starting with egg %s",
-		baseDir, eggName)
-
-	// first recurse and load subpackages
-	list, err := ioutil.ReadDir(baseDir + "/" + eggName)
-	if err != nil {
-		return NewNewtError(err.Error())
-	}
-
-	for _, ent := range list {
-		if !ent.IsDir() {
-			continue
-		}
-
-		name := ent.Name()
-
-		if name == "src" || name == "include" || strings.HasPrefix(name, ".") ||
-			name == "bin" {
-			continue
-		} else {
-			if err := clutch.loadEggDir(baseDir, eggPrefix,
-				eggName+"/"+name); err != nil {
-				return err
-			}
-		}
-	}
-
-	if NodeNotExist(baseDir + "/" + eggName + "/egg.yml") {
-		return nil
-	}
-
-	return clutch.loadEgg(baseDir+"/"+eggName, eggPrefix, eggName)
-}
-
-// Load all the packages in the repository into the package structure
-func (clutch *Clutch) loadEggs() error {
-	nest := clutch.Nest
-
-	// Multiple package directories to be searched
-	searchDirs := []string{
-		"compiler/",
-		"libs/",
-		"net/",
-		"hw/bsp/",
-		"hw/mcu/",
-		"hw/mcu/stm",
-		"hw/drivers/",
-		"hw/",
-		"project/",
-	}
-
-	for _, eggDir := range searchDirs {
-		eggBaseDir := nest.BasePath + "/" + eggDir
-
-		if NodeNotExist(eggBaseDir) {
-			continue
-		}
-
-		eggList, err := ioutil.ReadDir(eggBaseDir)
-		if err != nil {
-			return NewNewtError(err.Error())
-		}
-
-		for _, subEggDir := range eggList {
-			name := subEggDir.Name()
-			if filepath.HasPrefix(name, ".") || filepath.HasPrefix(name, "..") {
-				continue
-			}
-
-			if !subEggDir.IsDir() {
-				continue
-			}
-
-			if err = clutch.loadEggDir(eggBaseDir, eggDir, name); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// Initialize the package manager
-func (clutch *Clutch) Init() error {
-	if err := clutch.loadEggs(); err != nil {
-		return err
-	}
-
-	clutch.EggShells = map[string]*EggShell{}
-
-	return nil
-}
-
-// Resolve the package specified by eggName into a package structure.
-func (clutch *Clutch) ResolveEggName(eggName string) (*Egg, error) {
-	egg, ok := clutch.Eggs[eggName]
-	if !ok {
-		return nil, NewNewtError(fmt.Sprintf("Invalid egg '%s' specified "+
-			"(eggs = %s)", eggName, clutch))
-	}
-	return egg, nil
-}
-
-func (clutch *Clutch) ResolveEggShellName(eggName string) (*EggShell, error) {
-	eggShell, ok := clutch.EggShells[eggName]
-	if !ok {
-		return nil, NewNewtError(fmt.Sprintf("Invalid egg '%s' specified "+
-			"(eggs = %s)", eggName, clutch))
-	}
-	return eggShell, nil
-}
-
-func (clutch *Clutch) ResolveEggDir(eggDir string) (*Egg, error) {
-	eggDir = filepath.Clean(eggDir)
-	for name, egg := range clutch.Eggs {
-		if filepath.Clean(egg.BasePath) == eggDir {
-			return clutch.Eggs[name], nil
-		}
-	}
-	return nil, NewNewtError(fmt.Sprintf("Cannot resolve package dir %s in "+
-		"package manager", eggDir))
-}
-
-// Clean the build for the package specified by eggName.  If cleanAll is
-// specified, all architectures are cleaned.
-func (clutch *Clutch) BuildClean(t *Target, eggName string, cleanAll bool) error {
-	egg, err := clutch.ResolveEggName(eggName)
-	if err != nil {
-		return err
-	}
-
-	if err := egg.LoadConfig(t, false); err != nil {
-		return err
-	}
-
-	tName := t.Name + "/"
-	if cleanAll {
-		tName = ""
-	}
-
-	if egg.Clean {
-		return nil
-	}
-	egg.Clean = true
-
-	for _, dep := range egg.Deps {
-		if err := clutch.BuildClean(t, dep.Name, cleanAll); err != nil {
-			return err
-		}
-	}
-
-	c, err := NewCompiler(t.GetCompiler(), t.Cdef, t.Name, []string{})
-	if err != nil {
-		return err
-	}
-
-	if NodeExist(egg.BasePath + "/src/") {
-		if err := c.RecursiveClean(egg.BasePath+"/src/", tName); err != nil {
-			return err
-		}
-
-		if err := os.RemoveAll(egg.BasePath + "/bin/" + tName); err != nil {
-			return NewNewtError(err.Error())
-		}
-	}
-
-	egg.Clean = true
-
-	return nil
-}
-
-func (clutch *Clutch) GetEggLib(t *Target, egg *Egg) string {
-	libDir := egg.BasePath + "/bin/" + t.Name + "/" +
-		"lib" + filepath.Base(egg.Name) + ".a"
-	return libDir
-}
-
-// @param incls                 Extra include paths that get specified during
-//                                  build; not modified by this function.
-// @param libs                  List of libraries that have been built so far;
-//                                  This function appends entries to this list.
-func (clutch *Clutch) buildDeps(egg *Egg, t *Target, incls *[]string,
-	libs *[]string) error {
-
-	StatusMessage(VERBOSITY_VERBOSE,
-		"Building egg dependencies for %s, target %s\n", egg.Name, t.Name)
-
-	var err error
-
-	if egg.Includes, err = egg.GetIncludes(t); err != nil {
-		return err
-	}
-
-	if incls == nil {
-		incls = &[]string{}
-	}
-	if libs == nil {
-		libs = &[]string{}
-	}
-
-	for _, dep := range egg.Deps {
-		if dep.Name == "" {
-			break
-		}
-
-		log.Printf("[DEBUG] Loading package dependency: %s", dep.Name)
-		// Get package structure
-		degg, err := clutch.ResolveEggName(dep.Name)
-		if err != nil {
-			return err
-		}
-
-		// Build the package
-		if err = clutch.Build(t, dep.Name, *incls, libs); err != nil {
-			return err
-		}
-
-		// After build, get dependency package includes.  Build function
-		// generates all the package includes
-		egg.Includes = append(egg.Includes, degg.Includes...)
-		if lib := clutch.GetEggLib(t, degg); NodeExist(lib) {
-			*libs = append(*libs, lib)
-		}
-	}
-
-	// Add on dependency includes to package includes
-	log.Printf("[DEBUG] Egg dependencies for %s built, incls = %s",
-		egg.Name, egg.Includes)
-
-	return nil
-}
-
-// Build the package specified by eggName
-//
-// @param incls            Extra include paths that get specified during
-//                             build.  Note: passed by value.
-// @param libs             List of libraries that have been built so far;
-//                             This function appends entries to this list.
-func (clutch *Clutch) Build(t *Target, eggName string, incls []string,
-	libs *[]string) error {
-
-	// Look up package structure
-	egg, err := clutch.ResolveEggName(eggName)
-	if err != nil {
-		return err
-	}
-
-	if err := egg.LoadConfig(t, false); err != nil {
-		return err
-	}
-
-	// already built the package, no need to rebuild.  This is to handle
-	// recursive calls to Build()
-	if egg.Built {
-		return nil
-	}
-	egg.Built = true
-
-	if err := clutch.buildDeps(egg, t, &incls, libs); err != nil {
-		return err
-	}
-
-	StatusMessage(VERBOSITY_VERBOSE, "Building egg %s for arch %s\n",
-		eggName, t.Arch)
-
-	// NOTE: this assignment must happen after the call to buildDeps(), as
-	// buildDeps() fills in the package includes.
-	incls = append(incls, EggIncludeDirs(egg, t)...)
-	log.Printf("[DEBUG] Egg includes for %s are %s", eggName, incls)
-
-	srcDir := egg.BasePath + "/src/"
-	if NodeNotExist(srcDir) {
-		// Nothing to compile; the egg has no source directory.
-		return nil
-	}
-
-	// Build the package designated by eggName
-	// Initialize a compiler
-	c, err := NewCompiler(t.GetCompiler(), t.Cdef, t.Name, incls)
-	if err != nil {
-		return err
-	}
-	// setup Cflags, Lflags and Aflags
-	c.Cflags = CreateCflags(clutch, c, t, egg.Cflags)
-	c.Lflags += " " + egg.Lflags + " " + t.Lflags
-	c.Aflags += " " + egg.Aflags + " " + t.Aflags
-
-	log.Printf("[DEBUG] compiling src eggs in base egg directory: %s", srcDir)
-
-	// For now, ignore test code.  Tests get built later if the test identity
-	// is in effect.
-	ignDirs := []string{"test"}
-
-	if err = BuildDir(srcDir, c, t, ignDirs); err != nil {
-		return err
-	}
-
-	// Now build the test code if requested.
-	if t.HasIdentity("test") {
-		testSrcDir := srcDir + "/test"
-		if err = BuildDir(testSrcDir, c, t, ignDirs); err != nil {
-			return err
-		}
-	}
-
-	// Archive everything into a static library, which can be linked with a
-	// main program
-	if err := os.Chdir(egg.BasePath + "/"); err != nil {
-		return NewNewtError(err.Error())
-	}
-
-	binDir := egg.BasePath + "/bin/" + t.Name + "/"
-
-	if NodeNotExist(binDir) {
-		if err := os.MkdirAll(binDir, 0755); err != nil {
-			return NewNewtError(err.Error())
-		}
-	}
-
-	if err = c.CompileArchive(clutch.GetEggLib(t, egg), []string{}); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Check the include directories for the package, to make sure there are
-// no conflicts in include paths for source code
-func (clutch *Clutch) checkIncludes(egg *Egg) error {
-	incls, err := filepath.Glob(egg.BasePath + "/include/*")
-	if err != nil {
-		return NewNewtError(err.Error())
-	}
-
-	// Append all the architecture specific directories
-	archDir := egg.BasePath + "/include/" + egg.Name + "/arch/"
-	dirs, err := ioutil.ReadDir(archDir)
-	if err != nil {
-		return NewNewtError(err.Error())
-	}
-
-	for _, dir := range dirs {
-		if !dir.IsDir() {
-			return NewNewtError(fmt.Sprintf(
-				"Only directories are allowed in architecture dir: %s",
-				archDir+dir.Name()))
-		}
-
-		incls2, err := filepath.Glob(archDir + dir.Name() + "/*")
-		if err != nil {
-			return NewNewtError(err.Error())
-		}
-
-		incls = append(incls, incls2...)
-	}
-
-	for _, incl := range incls {
-		finfo, err := os.Stat(incl)
-		if err != nil {
-			return NewNewtError(err.Error())
-		}
-
-		bad := false
-		if !finfo.IsDir() {
-			bad = true
-		}
-
-		if filepath.Base(incl) != egg.Name {
-			if egg.IsBsp && filepath.Base(incl) != "bsp" {
-				bad = true
-			}
-		}
-
-		if bad {
-			return NewNewtError(fmt.Sprintf("File %s should not exist"+
-				"in include directory, only file allowed in include "+
-				"directory is a directory with the package name %s",
-				incl, egg.Name))
-		}
-	}
-
-	return nil
-}
-
-// Clean the test build artifacts for the package identified by eggName.
-// If cleanAll is set to true, all architectures will be removed.
-func (clutch *Clutch) TestClean(t *Target, eggName string,
-	cleanAll bool) error {
-	egg, err := clutch.ResolveEggName(eggName)
-	if err != nil {
-		return err
-	}
-
-	if err := egg.LoadConfig(t, false); err != nil {
-		return err
-	}
-
-	tName := t.Name + "/"
-	if cleanAll {
-		tName = ""
-	}
-
-	if err := os.RemoveAll(egg.BasePath + "/src/test/bin/" + tName); err != nil {
-		return NewNewtError(err.Error())
-	}
-	if err := os.RemoveAll(egg.BasePath + "/src/test/obj/" + tName); err != nil {
-		return NewNewtError(err.Error())
-	}
-
-	return nil
-}
-
-// Link the test executable for the package specified by the egg parameter,
-// against the libraries that have been built so far.
-func (clutch *Clutch) linkTests(t *Target, egg *Egg,
-	incls []string, libs *[]string) error {
-
-	c, err := NewCompiler(t.GetCompiler(), t.Cdef, t.Name, incls)
-	if err != nil {
-		return err
-	}
-
-	// Configure Lflags.  Since we are only linking, Cflags and Aflags are
-	// unnecessary.
-	c.Lflags += " " + egg.Lflags + " " + t.Lflags
-
-	testBinDir := egg.BasePath + "/src/test/bin/" + t.Name + "/"
-	binFile := testBinDir + egg.TestBinName()
-	options := map[string]bool{}
-
-	// Determine if the test executable is already up to date.
-	linkRequired, err := c.depTracker.LinkRequired(binFile, options, *libs)
-	if err != nil {
-		return err
-	}
-
-	// Build the test executable if necessary.
-	if linkRequired {
-		if NodeNotExist(testBinDir) {
-			if err := os.MkdirAll(testBinDir, 0755); err != nil {
-				return NewNewtError(err.Error())
-			}
-		}
-
-		err = c.CompileBinary(binFile, options, *libs)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Run the test executable for the specified egg.  Since only a single test
-// binary is run, a failure always terminates execution regardless of
-// exitOnFailure.
-func (clutch *Clutch) runTests(t *Target, egg *Egg, exitOnFailure bool) error {
-	StatusMessage(VERBOSITY_DEFAULT, "Testing egg %s for arch %s\n",
-		egg.Name, t.Arch)
-
-	if err := os.Chdir(egg.BasePath + "/src/test/bin/" + t.Name +
-		"/"); err != nil {
-		return err
-	}
-
-	o, err := ShellCommand("./" + egg.TestBinName())
-	if err != nil {
-		StatusMessage(VERBOSITY_DEFAULT, "%s", string(o))
-
-		// Always terminate on test failure since only one test is being run.
-		return NewtErrorNoTrace(fmt.Sprintf("Test %s failed",
-			egg.TestBinName()))
-	} else {
-		StatusMessage(VERBOSITY_VERBOSE, "%s", string(o))
-		StatusMessage(VERBOSITY_DEFAULT, "Test %s ok!\n", egg.TestBinName())
-		return nil
-	}
-}
-
-// Check that the specified egg has a test directory (src/test); return an
-// error if it does not.
-func (clutch *Clutch) testsExist(egg *Egg) error {
-	dirName := egg.BasePath + "/src/test/"
-	if NodeNotExist(dirName) {
-		return NewNewtError("No test exists for package " + egg.Name)
-	}
-
-	return nil
-}
-
-// Test the package identified by eggName by building and running its test
-// binary.  exitOnFailure signifies whether to stop as soon as a test fails.
-func (clutch *Clutch) Test(t *Target, eggName string,
-	exitOnFailure bool) error {
-
-	// A few identities are implicitly exported when the test command is used:
-	// *    test:       ensures that the test code gets compiled.
-	// *    selftest:   indicates that there is no project
-	t.Identities["test"] = "test"
-	t.Identities["selftest"] = "selftest"
-
-	egg, err := clutch.ResolveEggName(eggName)
-	if err != nil {
-		return err
-	}
-
-	if err := egg.LoadConfig(t, false); err != nil {
-		return err
-	}
-
-	// Make sure the test directories exist
-	if err := clutch.testsExist(egg); err != nil {
-		return err
-	}
-
-	// The egg under test must be compiled with the PKG_TEST symbol defined so
-	// that the appropriate main function gets built.
-	egg.Cflags += " -DPKG_TEST"
-
-	incls := []string{}
-	libs := []string{}
-
-	// If there is a BSP:
-	//     1. Calculate the include paths that it and its dependencies export.
-	//        This set of include paths is accessible during all subsequent
-	//        builds.
-	//     2. Build the BSP package.
-	if t.Bsp != "" {
-		incls, err = BspIncludePaths(clutch, t)
-		if err != nil {
-			return err
-		}
-		_, err = buildBsp(t, clutch, &incls, &libs, nil)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Build the package under test.
-	if err := clutch.Build(t, eggName, incls, &libs); err != nil {
-		return err
-	}
-	lib := clutch.GetEggLib(t, egg)
-	if !NodeExist(lib) {
-		return NewNewtError("Egg " + eggName + " did not produce binary")
-	}
-	libs = append(libs, lib)
-
-	// Compile the package's test code.
-	if err := clutch.linkTests(t, egg, incls, &libs); err != nil {
-		return err
-	}
-
-	// Run the tests.
-	if err := clutch.runTests(t, egg, exitOnFailure); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (cl *Clutch) LoadFromClutch(local *Clutch) error {
-	var err error
-	for _, egg := range local.Eggs {
-		if err := egg.LoadConfig(nil, false); err != nil {
-			return err
-		}
-
-		log.Printf("[DEBUG] Egg %s loaded, putting it into clutch %s",
-			egg.FullName, local.Name)
-
-		eggShell := &EggShell{}
-		eggShell.FullName = egg.FullName
-		eggShell.Deps = egg.Deps
-		eggShell.Caps = egg.Capabilities
-		eggShell.ReqCaps = egg.ReqCapabilities
-		eggShell.Version = egg.Version
-		eggShell.Hash, err = egg.GetHash()
-		if err != nil {
-			return err
-		}
-
-		cl.EggShells[eggShell.FullName] = eggShell
-	}
-
-	return nil
-}
-
-func (cl *Clutch) Serialize() (string, error) {
-	clStr := "name: " + cl.Name + "\n"
-	clStr = clStr + "url: " + cl.RemoteUrl + "\n"
-	clStr = clStr + "eggs:\n"
-
-	buf := bytes.Buffer{}
-
-	indent := "    "
-	for _, eggShell := range cl.EggShells {
-		buf.WriteString(eggShell.Serialize(indent))
-	}
-
-	return clStr + buf.String(), nil
-}
-
-func (cl *Clutch) strSliceToDr(list []string) ([]*DependencyRequirement, error) {
-	drList := []*DependencyRequirement{}
-
-	for _, name := range list {
-		req, err := NewDependencyRequirementParseString(name)
-		if err != nil {
-			return nil, err
-		}
-		drList = append(drList, req)
-	}
-
-	if len(drList) == 0 {
-		return nil, nil
-	} else {
-		return drList, nil
-	}
-}
-
-func (cl *Clutch) fileToEggList(cfg *viper.Viper) (map[string]*EggShell,
-	error) {
-	eggMap := cfg.GetStringMap("eggs")
-
-	eggList := map[string]*EggShell{}
-
-	for name := range eggMap {
-		eggShell, err := NewEggShell(cl)
-		if err != nil {
-			return nil, err
-		}
-		eggShell.FullName = name
-
-		eggDef := cfg.GetStringMap("eggs." + name)
-		eggShell.Version, err = NewVersParseString(eggDef["vers"].(string))
-		if err != nil {
-			return nil, err
-		}
-
-		eggShell.Deps, err = cl.strSliceToDr(
-			cfg.GetStringSlice("eggs." + name + ".deps"))
-		if err != nil {
-			return nil, err
-		}
-
-		eggShell.Caps, err = cl.strSliceToDr(
-			cfg.GetStringSlice("eggs." + name + ".caps"))
-		if err != nil {
-			return nil, err
-		}
-
-		eggShell.ReqCaps, err = cl.strSliceToDr(
-			cfg.GetStringSlice("eggs." + name + ".req_caps"))
-		if err != nil {
-			return nil, err
-		}
-
-		eggList[name] = eggShell
-	}
-
-	return eggList, nil
-}
-
-// Create the clutch file name: the clutch name plus "@" plus the branch.
-// GetClutchFullFile prepends the clutch directory and appends a .yml extension.
-func (clutch *Clutch) GetClutchFile(name string, branch string) string {
-	return name + "@" + branch
-}
-
-func (clutch *Clutch) GetClutchFullFile(name string, branch string) string {
-	return clutch.Nest.ClutchPath + clutch.GetClutchFile(name, branch) + ".yml"
-}
-
-func (clutch *Clutch) Load(name string) error {
-	cfg, err := ReadConfig(clutch.Nest.ClutchPath, name)
-	if err != nil {
-		return err
-	}
-
-	clutchName := name
-	branchName := "master"
-
-	parts := strings.Split(name, "@")
-	if len(parts) == 2 {
-		clutchName = parts[0]
-		branchName = parts[1]
-	}
-
-	if cfg.GetString("name") != clutchName {
-		return NewNewtError(
-			fmt.Sprintf("Wrong name %s in remote larva file (expected %s)",
-				cfg.GetString("name"), clutchName))
-	}
-
-	clutch.Name = cfg.GetString("name")
-	clutch.Branch = branchName
-	clutch.RemoteUrl = cfg.GetString("url")
-
-	clutch.EggShells, err = clutch.fileToEggList(cfg)
-	if err != nil {
-		return err
-	}
-
-	clutch.Nest.Clutches[name] = clutch
-
-	return nil
-}
-
-func (cl *Clutch) Install(name string, url string, branch string) error {
-	clutchFile := cl.GetClutchFullFile(name, branch)
-
-	// XXX: Should warn if file already exists, and require force option
-	os.Remove(clutchFile)
-
-	// Download the manifest
-	dl, err := NewDownloader()
-	if err != nil {
-		return err
-	}
-
-	StatusMessage(VERBOSITY_DEFAULT, "Downloading clutch.yml from %s/"+
-		"%s...", url, branch)
-
-	if err := dl.DownloadFile(url, branch, "clutch.yml",
-		clutchFile); err != nil {
-		return err
-	}
-
-	StatusMessage(VERBOSITY_DEFAULT, OK_STRING)
-
-	// Load the manifest, and ensure that it is in the correct format
-	StatusMessage(VERBOSITY_DEFAULT, "Verifying clutch.yml format...\n")
-	if err := cl.Load(cl.GetClutchFile(name, branch)); err != nil {
-		os.Remove(clutchFile)
-		return err
-	}
-	StatusMessage(VERBOSITY_DEFAULT, OK_STRING)
-
-	return nil
-}
-
-func (clutch *Clutch) InstallEgg(eggName string, branch string,
-	downloaded []*RemoteNest) ([]*RemoteNest, error) {
-	log.Print("[VERBOSE] Looking for ", eggName)
-	egg, err := clutch.ResolveEggName(eggName)
-	if err == nil {
-		log.Printf("[VERBOSE] ", eggName, " installed already")
-		return downloaded, nil
-	}
-	nest := clutch.Nest
-	for _, remoteNest := range downloaded {
-		egg, err = remoteNest.ResolveEggName(eggName)
-		if err == nil {
-			log.Print("[VERBOSE] ", eggName, " present in downloaded clutch ",
-				remoteNest.Name)
-
-			err = remoteNest.fetchEgg(eggName, nest.BasePath)
-			if err != nil {
-				return downloaded, err
-			}
-
-			// update local clutch
-			err = clutch.loadEggDir(nest.BasePath, "", eggName)
-			if err != nil {
-				return downloaded, err
-			}
-
-			deps, err := egg.GetDependencies()
-			if err != nil {
-				return downloaded, err
-			}
-			for _, dep := range deps {
-				log.Print("[VERBOSE] ", eggName, " checking dependency ",
-					dep.Name)
-				depBranch := dep.BranchName()
-				downloaded, err = clutch.InstallEgg(dep.Name, depBranch,
-					downloaded)
-				if err != nil {
-					return downloaded, err
-				}
-			}
-			return downloaded, nil
-		}
-	}
-
-	// Not in downloaded clutches
-	clutches, err := nest.GetClutches()
-	if err != nil {
-		return downloaded, err
-	}
-	for _, remoteClutch := range clutches {
-		eggShell, err := remoteClutch.ResolveEggShellName(eggName)
-		if err == nil {
-			log.Print("[VERBOSE] ", eggName, " present in remote clutch ",
-				remoteClutch.Name, remoteClutch.Branch)
-			if branch == "" {
-				branch = remoteClutch.Branch
-			}
-			remoteNest, err := NewRemoteNest(remoteClutch, branch)
-			if err != nil {
-				return downloaded, err
-			}
-			downloaded = append(downloaded, remoteNest)
-			return clutch.InstallEgg(eggShell.FullName, branch, downloaded)
-		}
-	}
-
-	return downloaded, NewNewtError(fmt.Sprintf("No package %s found\n", eggName))
-}
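
The removed cli/clutch.go's Load() accepts either a bare clutch name or a
"name@branch" string, defaulting the branch to "master".  A tiny standalone
illustration of that naming convention (a hypothetical helper, not part of
the newt API) might be:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitClutchName mirrors the name@branch convention used by Clutch.Load:
    // a bare name implies the "master" branch.
    func splitClutchName(s string) (name, branch string) {
        name, branch = s, "master"
        if parts := strings.Split(s, "@"); len(parts) == 2 {
            name, branch = parts[0], parts[1]
        }
        return name, branch
    }

    func main() {
        for _, s := range []string{"myclutch", "myclutch@develop"} {
            n, b := splitClutchName(s)
            fmt.Printf("%s -> name=%s branch=%s\n", s, n, b)
        }
    }

GetClutchFullFile() then turns the name@branch pair into the on-disk path by
prepending the clutch directory and appending a .yml extension.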