jcapik pushed to kubernetes (f21). "update tests for etcd-2.0"

notifications at fedoraproject.org
Wed Jun 10 14:48:26 UTC 2015


From 187bdc7c7b77faa5b8217b632e1b0c4bae4d6d62 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka <jchaloup at redhat.com>
Date: Wed, 28 Jan 2015 18:43:21 +0100
Subject: update tests for etcd-2.0


diff --git a/0001-patch.patch b/0001-patch.patch
deleted file mode 100644
index f205138..0000000
--- a/0001-patch.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 3eedc089ba9be8ac3c11bae084b95b8c34728a48 Mon Sep 17 00:00:00 2001
-From: rpm-build <rpm-build>
-Date: Fri, 23 Jan 2015 15:05:29 +0100
-Subject: [PATCH] patch
-
----
- pkg/cloudprovider/gce/gce.go | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/pkg/cloudprovider/gce/gce.go b/pkg/cloudprovider/gce/gce.go
-index 65e62f6..b240ac6 100644
---- a/pkg/cloudprovider/gce/gce.go
-+++ b/pkg/cloudprovider/gce/gce.go
-@@ -32,8 +32,8 @@ import (
- 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
- 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
- 
--	compute "code.google.com/p/google-api-go-client/compute/v1"
--	container "code.google.com/p/google-api-go-client/container/v1beta1"
-+	compute "google.golang.org/api/compute/v1"
-+	container "google.golang.org/api/container/v1beta1"
- 	"github.com/golang/glog"
- 	"golang.org/x/oauth2"
- 	"golang.org/x/oauth2/google"
--- 
-1.9.3
-
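(Note, not part of the patch itself: the deleted 0001-patch.patch only rewrote the GCE cloud provider's Google API client imports from the retired code.google.com/p/google-api-go-client paths to their google.golang.org/api equivalents. A minimal, hypothetical Go sketch of consuming the new path is below; the http.DefaultClient wiring is illustrative only and is not how pkg/cloudprovider/gce/gce.go builds its authenticated client.)

    package main

    import (
        "fmt"
        "net/http"

        // New canonical import path, as introduced by the removed patch
        // in place of code.google.com/p/google-api-go-client/compute/v1.
        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        // Illustrative only: gce.go authenticates via oauth2; a plain
        // http.DefaultClient is used here just to show the constructor.
        svc, err := compute.New(http.DefaultClient)
        if err != nil {
            fmt.Println("compute client:", err)
            return
        }
        fmt.Println("compute API base path:", svc.BasePath)
    }
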
diff --git a/0001-remove-all-third-party-software.patch b/0001-remove-all-third-party-software.patch
deleted file mode 100644
index 5bd73ad..0000000
--- a/0001-remove-all-third-party-software.patch
+++ /dev/null
@@ -1,206406 +0,0 @@
-From 8ffddead8da8731a3c8c7e89946702c6902f7689 Mon Sep 17 00:00:00 2001
-From: Eric Paris <eparis at redhat.com>
-Date: Fri, 9 Jan 2015 12:09:22 -0500
-Subject: [PATCH] remove all third party software
-
----
- Godeps/Godeps.json                                 |   217 -
- Godeps/Readme                                      |     5 -
- Godeps/_workspace/.gitignore                       |     2 -
- .../_workspace/src/code.google.com/p/gcfg/LICENSE  |    57 -
- .../_workspace/src/code.google.com/p/gcfg/README   |     7 -
- .../_workspace/src/code.google.com/p/gcfg/doc.go   |   118 -
- .../src/code.google.com/p/gcfg/example_test.go     |   132 -
- .../_workspace/src/code.google.com/p/gcfg/go1_0.go |     7 -
- .../_workspace/src/code.google.com/p/gcfg/go1_2.go |     9 -
- .../src/code.google.com/p/gcfg/issues_test.go      |    63 -
- .../_workspace/src/code.google.com/p/gcfg/read.go  |   181 -
- .../src/code.google.com/p/gcfg/read_test.go        |   333 -
- .../src/code.google.com/p/gcfg/scanner/errors.go   |   121 -
- .../code.google.com/p/gcfg/scanner/example_test.go |    46 -
- .../src/code.google.com/p/gcfg/scanner/scanner.go  |   342 -
- .../code.google.com/p/gcfg/scanner/scanner_test.go |   417 -
- .../_workspace/src/code.google.com/p/gcfg/set.go   |   281 -
- .../code.google.com/p/gcfg/testdata/gcfg_test.gcfg |     3 -
- .../p/gcfg/testdata/gcfg_unicode_test.gcfg         |     3 -
- .../src/code.google.com/p/gcfg/token/position.go   |   435 -
- .../code.google.com/p/gcfg/token/position_test.go  |   181 -
- .../src/code.google.com/p/gcfg/token/serialize.go  |    56 -
- .../code.google.com/p/gcfg/token/serialize_test.go |   111 -
- .../src/code.google.com/p/gcfg/token/token.go      |    83 -
- .../src/code.google.com/p/gcfg/types/bool.go       |    23 -
- .../src/code.google.com/p/gcfg/types/doc.go        |     4 -
- .../src/code.google.com/p/gcfg/types/enum.go       |    44 -
- .../src/code.google.com/p/gcfg/types/enum_test.go  |    29 -
- .../src/code.google.com/p/gcfg/types/int.go        |    86 -
- .../src/code.google.com/p/gcfg/types/int_test.go   |    67 -
- .../src/code.google.com/p/gcfg/types/scan.go       |    23 -
- .../src/code.google.com/p/gcfg/types/scan_test.go  |    36 -
- .../src/code.google.com/p/go-uuid/uuid/LICENSE     |    27 -
- .../src/code.google.com/p/go-uuid/uuid/dce.go      |    84 -
- .../src/code.google.com/p/go-uuid/uuid/doc.go      |     8 -
- .../src/code.google.com/p/go-uuid/uuid/hash.go     |    53 -
- .../src/code.google.com/p/go-uuid/uuid/node.go     |   101 -
- .../src/code.google.com/p/go-uuid/uuid/time.go     |   132 -
- .../src/code.google.com/p/go-uuid/uuid/util.go     |    43 -
- .../src/code.google.com/p/go-uuid/uuid/uuid.go     |   163 -
- .../code.google.com/p/go-uuid/uuid/uuid_test.go    |   390 -
- .../src/code.google.com/p/go-uuid/uuid/version1.go |    41 -
- .../src/code.google.com/p/go-uuid/uuid/version4.go |    25 -
- .../compute/serviceaccount/serviceaccount.go       |   172 -
- .../p/goauth2/oauth/example/oauthreq.go            |   100 -
- .../oauth/jwt/example/example.client_secrets.json  |     1 -
- .../p/goauth2/oauth/jwt/example/example.pem        |    20 -
- .../p/goauth2/oauth/jwt/example/main.go            |   114 -
- .../src/code.google.com/p/goauth2/oauth/jwt/jwt.go |   511 -
- .../p/goauth2/oauth/jwt/jwt_test.go                |   486 -
- .../src/code.google.com/p/goauth2/oauth/oauth.go   |   405 -
- .../code.google.com/p/goauth2/oauth/oauth_test.go  |   214 -
- .../code.google.com/p/google-api-go-client/AUTHORS |    10 -
- .../p/google-api-go-client/CONTRIBUTORS            |    46 -
- .../code.google.com/p/google-api-go-client/LICENSE |    27 -
- .../p/google-api-go-client/Makefile                |     9 -
- .../code.google.com/p/google-api-go-client/NOTES   |    13 -
- .../code.google.com/p/google-api-go-client/README  |    10 -
- .../code.google.com/p/google-api-go-client/TODO    |     2 -
- .../compute/v1/compute-api.json                    |  9526 -----------
- .../google-api-go-client/compute/v1/compute-gen.go | 16952 -------------------
- .../container/v1beta1/container-api.json           |   579 -
- .../container/v1beta1/container-gen.go             |  1007 --
- .../p/google-api-go-client/googleapi/googleapi.go  |   401 -
- .../googleapi/googleapi_test.go                    |   361 -
- .../googleapi/internal/uritemplates/LICENSE        |    18 -
- .../internal/uritemplates/uritemplates.go          |   359 -
- .../googleapi/internal/uritemplates/utils.go       |    13 -
- .../googleapi/transport/apikey.go                  |    38 -
- .../p/google-api-go-client/googleapi/types.go      |   150 -
- .../p/google-api-go-client/googleapi/types_test.go |    44 -
- .../src/github.com/Sirupsen/logrus/.gitignore      |     1 -
- .../src/github.com/Sirupsen/logrus/.travis.yml     |    10 -
- .../src/github.com/Sirupsen/logrus/LICENSE         |    21 -
- .../src/github.com/Sirupsen/logrus/README.md       |   352 -
- .../src/github.com/Sirupsen/logrus/entry.go        |   248 -
- .../src/github.com/Sirupsen/logrus/entry_test.go   |    53 -
- .../Sirupsen/logrus/examples/basic/basic.go        |    40 -
- .../Sirupsen/logrus/examples/hook/hook.go          |    35 -
- .../src/github.com/Sirupsen/logrus/exported.go     |   182 -
- .../src/github.com/Sirupsen/logrus/formatter.go    |    44 -
- .../Sirupsen/logrus/formatter_bench_test.go        |    88 -
- .../src/github.com/Sirupsen/logrus/hook_test.go    |   122 -
- .../src/github.com/Sirupsen/logrus/hooks.go        |    34 -
- .../Sirupsen/logrus/hooks/airbrake/airbrake.go     |    54 -
- .../Sirupsen/logrus/hooks/papertrail/README.md     |    28 -
- .../Sirupsen/logrus/hooks/papertrail/papertrail.go |    54 -
- .../logrus/hooks/papertrail/papertrail_test.go     |    26 -
- .../Sirupsen/logrus/hooks/sentry/README.md         |    61 -
- .../Sirupsen/logrus/hooks/sentry/sentry.go         |   100 -
- .../Sirupsen/logrus/hooks/sentry/sentry_test.go    |    97 -
- .../Sirupsen/logrus/hooks/syslog/README.md         |    20 -
- .../Sirupsen/logrus/hooks/syslog/syslog.go         |    59 -
- .../Sirupsen/logrus/hooks/syslog/syslog_test.go    |    26 -
- .../github.com/Sirupsen/logrus/json_formatter.go   |    26 -
- .../src/github.com/Sirupsen/logrus/logger.go       |   161 -
- .../src/github.com/Sirupsen/logrus/logrus.go       |    94 -
- .../src/github.com/Sirupsen/logrus/logrus_test.go  |   283 -
- .../github.com/Sirupsen/logrus/terminal_darwin.go  |    12 -
- .../github.com/Sirupsen/logrus/terminal_freebsd.go |    20 -
- .../github.com/Sirupsen/logrus/terminal_linux.go   |    12 -
- .../Sirupsen/logrus/terminal_notwindows.go         |    21 -
- .../github.com/Sirupsen/logrus/terminal_windows.go |    27 -
- .../github.com/Sirupsen/logrus/text_formatter.go   |   124 -
- .../Sirupsen/logrus/text_formatter_test.go         |    33 -
- .../github.com/coreos/go-etcd/etcd/add_child.go    |    23 -
- .../coreos/go-etcd/etcd/add_child_test.go          |    73 -
- .../src/github.com/coreos/go-etcd/etcd/client.go   |   435 -
- .../github.com/coreos/go-etcd/etcd/client_test.go  |    96 -
- .../src/github.com/coreos/go-etcd/etcd/cluster.go  |    51 -
- .../coreos/go-etcd/etcd/compare_and_delete.go      |    34 -
- .../coreos/go-etcd/etcd/compare_and_delete_test.go |    46 -
- .../coreos/go-etcd/etcd/compare_and_swap.go        |    36 -
- .../coreos/go-etcd/etcd/compare_and_swap_test.go   |    57 -
- .../src/github.com/coreos/go-etcd/etcd/debug.go    |    55 -
- .../github.com/coreos/go-etcd/etcd/debug_test.go   |    28 -
- .../src/github.com/coreos/go-etcd/etcd/delete.go   |    40 -
- .../github.com/coreos/go-etcd/etcd/delete_test.go  |    81 -
- .../src/github.com/coreos/go-etcd/etcd/error.go    |    48 -
- .../src/github.com/coreos/go-etcd/etcd/get.go      |    27 -
- .../src/github.com/coreos/go-etcd/etcd/get_test.go |   131 -
- .../src/github.com/coreos/go-etcd/etcd/options.go  |    72 -
- .../src/github.com/coreos/go-etcd/etcd/requests.go |   377 -
- .../src/github.com/coreos/go-etcd/etcd/response.go |    89 -
- .../coreos/go-etcd/etcd/set_curl_chan_test.go      |    42 -
- .../coreos/go-etcd/etcd/set_update_create.go       |   137 -
- .../coreos/go-etcd/etcd/set_update_create_test.go  |   241 -
- .../src/github.com/coreos/go-etcd/etcd/version.go  |     3 -
- .../src/github.com/coreos/go-etcd/etcd/watch.go    |   103 -
- .../github.com/coreos/go-etcd/etcd/watch_test.go   |   119 -
- .../src/github.com/davecgh/go-spew/spew/common.go  |   371 -
- .../github.com/davecgh/go-spew/spew/common_test.go |   192 -
- .../src/github.com/davecgh/go-spew/spew/config.go  |   288 -
- .../src/github.com/davecgh/go-spew/spew/doc.go     |   196 -
- .../src/github.com/davecgh/go-spew/spew/dump.go    |   500 -
- .../github.com/davecgh/go-spew/spew/dump_test.go   |   978 --
- .../davecgh/go-spew/spew/dumpcgo_test.go           |    97 -
- .../davecgh/go-spew/spew/dumpnocgo_test.go         |    26 -
- .../davecgh/go-spew/spew/example_test.go           |   230 -
- .../src/github.com/davecgh/go-spew/spew/format.go  |   413 -
- .../github.com/davecgh/go-spew/spew/format_test.go |  1483 --
- .../davecgh/go-spew/spew/internal_test.go          |   156 -
- .../src/github.com/davecgh/go-spew/spew/spew.go    |   148 -
- .../github.com/davecgh/go-spew/spew/spew_test.go   |   308 -
- .../davecgh/go-spew/spew/testdata/dumpcgo.go       |    82 -
- .../docker/docker/pkg/archive/MAINTAINERS          |     2 -
- .../github.com/docker/docker/pkg/archive/README.md |     1 -
- .../docker/docker/pkg/archive/archive.go           |   802 -
- .../docker/docker/pkg/archive/archive_test.go      |   625 -
- .../docker/docker/pkg/archive/archive_unix.go      |    39 -
- .../docker/docker/pkg/archive/archive_windows.go   |    12 -
- .../docker/docker/pkg/archive/changes.go           |   413 -
- .../docker/docker/pkg/archive/changes_test.go      |   301 -
- .../github.com/docker/docker/pkg/archive/diff.go   |   165 -
- .../docker/docker/pkg/archive/diff_test.go         |   191 -
- .../docker/docker/pkg/archive/example_changes.go   |    97 -
- .../docker/docker/pkg/archive/testdata/broken.tar  |   Bin 13824 -> 0 bytes
- .../docker/docker/pkg/archive/time_linux.go        |    16 -
- .../docker/docker/pkg/archive/time_unsupported.go  |    16 -
- .../docker/docker/pkg/archive/utils_test.go        |   166 -
- .../github.com/docker/docker/pkg/archive/wrap.go   |    59 -
- .../docker/docker/pkg/fileutils/fileutils.go       |    26 -
- .../docker/docker/pkg/ioutils/readers.go           |   114 -
- .../docker/docker/pkg/ioutils/readers_test.go      |    34 -
- .../docker/docker/pkg/ioutils/writers.go           |    39 -
- .../github.com/docker/docker/pkg/pools/pools.go    |   111 -
- .../docker/docker/pkg/pools/pools_nopool.go        |    73 -
- .../docker/docker/pkg/promise/promise.go           |    11 -
- .../docker/docker/pkg/system/MAINTAINERS           |     2 -
- .../github.com/docker/docker/pkg/system/errors.go  |     9 -
- .../github.com/docker/docker/pkg/system/lstat.go   |    16 -
- .../docker/docker/pkg/system/lstat_test.go         |    27 -
- .../docker/docker/pkg/system/lstat_windows.go      |     8 -
- .../github.com/docker/docker/pkg/system/meminfo.go |    17 -
- .../docker/docker/pkg/system/meminfo_linux.go      |    67 -
- .../docker/docker/pkg/system/meminfo_linux_test.go |    37 -
- .../docker/pkg/system/meminfo_unsupported.go       |     7 -
- .../github.com/docker/docker/pkg/system/mknod.go   |    18 -
- .../docker/docker/pkg/system/mknod_windows.go      |    12 -
- .../github.com/docker/docker/pkg/system/stat.go    |    42 -
- .../docker/docker/pkg/system/stat_linux.go         |    14 -
- .../docker/docker/pkg/system/stat_test.go          |    36 -
- .../docker/docker/pkg/system/stat_unsupported.go   |    16 -
- .../docker/docker/pkg/system/stat_windows.go       |    12 -
- .../github.com/docker/docker/pkg/system/umask.go   |    11 -
- .../docker/docker/pkg/system/umask_windows.go      |     8 -
- .../docker/docker/pkg/system/utimes_darwin.go      |    11 -
- .../docker/docker/pkg/system/utimes_freebsd.go     |    24 -
- .../docker/docker/pkg/system/utimes_linux.go       |    28 -
- .../docker/docker/pkg/system/utimes_test.go        |    65 -
- .../docker/docker/pkg/system/utimes_unsupported.go |    13 -
- .../docker/docker/pkg/system/xattrs_linux.go       |    59 -
- .../docker/docker/pkg/system/xattrs_unsupported.go |    11 -
- .../github.com/docker/docker/pkg/units/MAINTAINERS |     2 -
- .../github.com/docker/docker/pkg/units/duration.go |    31 -
- .../docker/docker/pkg/units/duration_test.go       |    46 -
- .../src/github.com/docker/docker/pkg/units/size.go |    91 -
- .../docker/docker/pkg/units/size_test.go           |   108 -
- .../p/go/src/pkg/archive/tar/common.go             |   305 -
- .../p/go/src/pkg/archive/tar/example_test.go       |    79 -
- .../p/go/src/pkg/archive/tar/reader.go             |   820 -
- .../p/go/src/pkg/archive/tar/reader_test.go        |   743 -
- .../p/go/src/pkg/archive/tar/stat_atim.go          |    20 -
- .../p/go/src/pkg/archive/tar/stat_atimespec.go     |    20 -
- .../p/go/src/pkg/archive/tar/stat_unix.go          |    32 -
- .../p/go/src/pkg/archive/tar/tar_test.go           |   284 -
- .../p/go/src/pkg/archive/tar/testdata/gnu.tar      |   Bin 3072 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/nil-uid.tar  |   Bin 1024 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/pax.tar      |   Bin 10240 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/small.txt    |     1 -
- .../p/go/src/pkg/archive/tar/testdata/small2.txt   |     1 -
- .../pkg/archive/tar/testdata/sparse-formats.tar    |   Bin 17920 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/star.tar     |   Bin 3072 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/ustar.tar    |   Bin 2048 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/v7.tar       |   Bin 3584 -> 0 bytes
- .../pkg/archive/tar/testdata/writer-big-long.tar   |   Bin 4096 -> 0 bytes
- .../go/src/pkg/archive/tar/testdata/writer-big.tar |   Bin 4096 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/writer.tar   |   Bin 3584 -> 0 bytes
- .../p/go/src/pkg/archive/tar/testdata/xattrs.tar   |   Bin 5120 -> 0 bytes
- .../p/go/src/pkg/archive/tar/writer.go             |   396 -
- .../p/go/src/pkg/archive/tar/writer_test.go        |   491 -
- .../github.com/elazarl/go-bindata-assetfs/LICENSE  |    23 -
- .../elazarl/go-bindata-assetfs/README.md           |    18 -
- .../elazarl/go-bindata-assetfs/assetfs.go          |   141 -
- .../github.com/elazarl/go-bindata-assetfs/doc.go   |    13 -
- .../src/github.com/emicklei/go-restful/.gitignore  |    70 -
- .../src/github.com/emicklei/go-restful/CHANGES.md  |   130 -
- .../src/github.com/emicklei/go-restful/LICENSE     |    22 -
- .../src/github.com/emicklei/go-restful/README.md   |    70 -
- .../src/github.com/emicklei/go-restful/Srcfile     |     1 -
- .../emicklei/go-restful/bench_curly_test.go        |    51 -
- .../github.com/emicklei/go-restful/bench_test.go   |    43 -
- .../github.com/emicklei/go-restful/bench_test.sh   |    10 -
- .../src/github.com/emicklei/go-restful/compress.go |    89 -
- .../emicklei/go-restful/compress_test.go           |    53 -
- .../github.com/emicklei/go-restful/constants.go    |    29 -
- .../github.com/emicklei/go-restful/container.go    |   261 -
- .../github.com/emicklei/go-restful/cors_filter.go  |   170 -
- .../emicklei/go-restful/cors_filter_test.go        |   125 -
- .../src/github.com/emicklei/go-restful/coverage.sh |     2 -
- .../src/github.com/emicklei/go-restful/curly.go    |   162 -
- .../github.com/emicklei/go-restful/curly_route.go  |    54 -
- .../github.com/emicklei/go-restful/curly_test.go   |   231 -
- .../src/github.com/emicklei/go-restful/doc.go      |   184 -
- .../emicklei/go-restful/doc_examples_test.go       |    35 -
- .../emicklei/go-restful/examples/.goconvey         |     1 -
- .../examples/google_app_engine/.goconvey           |     1 -
- .../go-restful/examples/google_app_engine/app.yaml |    20 -
- .../examples/google_app_engine/datastore/.goconvey |     1 -
- .../examples/google_app_engine/datastore/app.yaml  |    18 -
- .../examples/google_app_engine/datastore/main.go   |   266 -
- .../restful-appstats-integration.go                |    13 -
- .../google_app_engine/restful-user-service.go      |   161 -
- .../emicklei/go-restful/examples/home.html         |     7 -
- .../go-restful/examples/restful-CORS-filter.go     |    67 -
- .../go-restful/examples/restful-NCSA-logging.go    |    54 -
- .../examples/restful-basic-authentication.go       |    35 -
- .../examples/restful-cpuprofiler-service.go        |    65 -
- .../go-restful/examples/restful-curly-router.go    |   107 -
- .../go-restful/examples/restful-encoding-filter.go |    61 -
- .../go-restful/examples/restful-filters.go         |   114 -
- .../go-restful/examples/restful-form-handling.go   |    62 -
- .../go-restful/examples/restful-hello-world.go     |    22 -
- .../go-restful/examples/restful-html-template.go   |    35 -
- .../examples/restful-multi-containers.go           |    43 -
- .../go-restful/examples/restful-options-filter.go  |    51 -
- .../go-restful/examples/restful-path-tail.go       |    26 -
- .../examples/restful-pre-post-filters.go           |    98 -
- .../examples/restful-resource-functions.go         |    63 -
- .../go-restful/examples/restful-route_test.go      |    39 -
- .../examples/restful-routefunction_test.go         |    29 -
- .../go-restful/examples/restful-serve-static.go    |    47 -
- .../go-restful/examples/restful-user-resource.go   |   153 -
- .../go-restful/examples/restful-user-service.go    |   138 -
- .../src/github.com/emicklei/go-restful/filter.go   |    26 -
- .../github.com/emicklei/go-restful/filter_test.go  |   141 -
- .../src/github.com/emicklei/go-restful/install.sh  |     9 -
- .../src/github.com/emicklei/go-restful/jsr311.go   |   247 -
- .../github.com/emicklei/go-restful/jsr311_test.go  |   236 -
- .../src/github.com/emicklei/go-restful/logger.go   |    16 -
- .../emicklei/go-restful/options_filter.go          |    24 -
- .../emicklei/go-restful/options_filter_test.go     |    34 -
- .../github.com/emicklei/go-restful/parameter.go    |    95 -
- .../emicklei/go-restful/path_expression.go         |    56 -
- .../src/github.com/emicklei/go-restful/request.go  |   135 -
- .../github.com/emicklei/go-restful/request_test.go |   204 -
- .../src/github.com/emicklei/go-restful/response.go |   241 -
- .../emicklei/go-restful/response_test.go           |   157 -
- .../src/github.com/emicklei/go-restful/route.go    |   166 -
- .../emicklei/go-restful/route_builder.go           |   208 -
- .../emicklei/go-restful/route_builder_test.go      |    55 -
- .../github.com/emicklei/go-restful/route_test.go   |   108 -
- .../src/github.com/emicklei/go-restful/router.go   |    18 -
- .../emicklei/go-restful/service_error.go           |    23 -
- .../emicklei/go-restful/swagger/CHANGES.md         |    27 -
- .../emicklei/go-restful/swagger/README.md          |    28 -
- .../emicklei/go-restful/swagger/config.go          |    25 -
- .../emicklei/go-restful/swagger/model_builder.go   |   277 -
- .../go-restful/swagger/model_builder_test.go       |   729 -
- .../emicklei/go-restful/swagger/param_sorter.go    |    29 -
- .../go-restful/swagger/param_sorter_test.go        |    52 -
- .../emicklei/go-restful/swagger/swagger.go         |   184 -
- .../emicklei/go-restful/swagger/swagger_test.go    |   116 -
- .../go-restful/swagger/swagger_webservice.go       |   353 -
- .../emicklei/go-restful/swagger/utils_test.go      |    70 -
- .../github.com/emicklei/go-restful/web_service.go  |   190 -
- .../emicklei/go-restful/web_service_container.go   |    39 -
- .../emicklei/go-restful/web_service_test.go        |   115 -
- .../github.com/fsouza/go-dockerclient/.travis.yml  |    13 -
- .../src/github.com/fsouza/go-dockerclient/AUTHORS  |    50 -
- .../fsouza/go-dockerclient/DOCKER-LICENSE          |     6 -
- .../src/github.com/fsouza/go-dockerclient/LICENSE  |    22 -
- .../fsouza/go-dockerclient/README.markdown         |    42 -
- .../fsouza/go-dockerclient/build_test.go           |   144 -
- .../github.com/fsouza/go-dockerclient/change.go    |    43 -
- .../fsouza/go-dockerclient/change_test.go          |    26 -
- .../github.com/fsouza/go-dockerclient/client.go    |   636 -
- .../fsouza/go-dockerclient/client_test.go          |   368 -
- .../github.com/fsouza/go-dockerclient/container.go |   760 -
- .../fsouza/go-dockerclient/container_test.go       |  1524 --
- .../src/github.com/fsouza/go-dockerclient/env.go   |   168 -
- .../github.com/fsouza/go-dockerclient/env_test.go  |   349 -
- .../src/github.com/fsouza/go-dockerclient/event.go |   309 -
- .../fsouza/go-dockerclient/event_test.go           |   129 -
- .../fsouza/go-dockerclient/example_test.go         |   168 -
- .../src/github.com/fsouza/go-dockerclient/exec.go  |   129 -
- .../github.com/fsouza/go-dockerclient/exec_test.go |   128 -
- .../src/github.com/fsouza/go-dockerclient/image.go |   458 -
- .../fsouza/go-dockerclient/image_test.go           |   878 -
- .../src/github.com/fsouza/go-dockerclient/misc.go  |    59 -
- .../github.com/fsouza/go-dockerclient/misc_test.go |   159 -
- .../github.com/fsouza/go-dockerclient/signal.go    |    49 -
- .../github.com/fsouza/go-dockerclient/stdcopy.go   |    91 -
- .../fsouza/go-dockerclient/stdcopy_test.go         |   255 -
- .../src/github.com/fsouza/go-dockerclient/tar.go   |    99 -
- .../fsouza/go-dockerclient/testing/bin/fmtpolice   |    38 -
- .../go-dockerclient/testing/data/.dockerignore     |     3 -
- .../fsouza/go-dockerclient/testing/data/Dockerfile |    15 -
- .../fsouza/go-dockerclient/testing/data/barfile    |     0
- .../fsouza/go-dockerclient/testing/data/ca.pem     |    18 -
- .../fsouza/go-dockerclient/testing/data/cert.pem   |    18 -
- .../go-dockerclient/testing/data/container.tar     |   Bin 2048 -> 0 bytes
- .../go-dockerclient/testing/data/dockerfile.tar    |   Bin 2560 -> 0 bytes
- .../fsouza/go-dockerclient/testing/data/foofile    |     0
- .../fsouza/go-dockerclient/testing/data/key.pem    |    27 -
- .../fsouza/go-dockerclient/testing/data/server.pem |    18 -
- .../go-dockerclient/testing/data/serverkey.pem     |    27 -
- .../fsouza/go-dockerclient/testing/data/symlink    |     1 -
- .../fsouza/go-dockerclient/testing/server.go       |   744 -
- .../fsouza/go-dockerclient/testing/server_test.go  |  1091 --
- .../fsouza/go-dockerclient/testing/writer.go       |    43 -
- .../src/github.com/ghodss/yaml/.gitignore          |    20 -
- .../_workspace/src/github.com/ghodss/yaml/LICENSE  |    50 -
- .../src/github.com/ghodss/yaml/README.md           |   114 -
- .../src/github.com/ghodss/yaml/fields.go           |   497 -
- .../_workspace/src/github.com/ghodss/yaml/yaml.go  |   250 -
- .../src/github.com/ghodss/yaml/yaml_test.go        |   243 -
- .../_workspace/src/github.com/golang/glog/LICENSE  |   191 -
- .../_workspace/src/github.com/golang/glog/README   |    44 -
- .../_workspace/src/github.com/golang/glog/glog.go  |  1177 --
- .../src/github.com/golang/glog/glog_file.go        |   124 -
- .../src/github.com/golang/glog/glog_test.go        |   415 -
- .../github.com/google/cadvisor/client/README.md    |    54 -
- .../github.com/google/cadvisor/client/client.go    |   161 -
- .../google/cadvisor/client/client_test.go          |   154 -
- .../github.com/google/cadvisor/info/container.go   |   379 -
- .../google/cadvisor/info/container_test.go         |    79 -
- .../src/github.com/google/cadvisor/info/machine.go |    53 -
- .../google/cadvisor/info/test/datagen.go           |    76 -
- .../src/github.com/google/cadvisor/info/version.go |    18 -
- .../src/github.com/google/gofuzz/.travis.yml       |    12 -
- .../src/github.com/google/gofuzz/CONTRIBUTING.md   |    67 -
- .../src/github.com/google/gofuzz/LICENSE           |   202 -
- .../src/github.com/google/gofuzz/README.md         |    71 -
- .../_workspace/src/github.com/google/gofuzz/doc.go |    18 -
- .../src/github.com/google/gofuzz/example_test.go   |   225 -
- .../src/github.com/google/gofuzz/fuzz.go           |   366 -
- .../src/github.com/google/gofuzz/fuzz_test.go      |   258 -
- .../src/github.com/imdario/mergo/.travis.yml       |     2 -
- .../src/github.com/imdario/mergo/LICENSE           |    28 -
- .../src/github.com/imdario/mergo/README.md         |    68 -
- .../_workspace/src/github.com/imdario/mergo/doc.go |    44 -
- .../_workspace/src/github.com/imdario/mergo/map.go |   146 -
- .../src/github.com/imdario/mergo/merge.go          |    99 -
- .../src/github.com/imdario/mergo/mergo.go          |    90 -
- .../src/github.com/imdario/mergo/mergo_test.go     |   288 -
- .../github.com/imdario/mergo/testdata/license.yml  |     3 -
- .../github.com/imdario/mergo/testdata/thing.yml    |     5 -
- .../_workspace/src/github.com/miekg/dns/.gitignore |     4 -
- .../src/github.com/miekg/dns/.travis.yml           |    21 -
- Godeps/_workspace/src/github.com/miekg/dns/AUTHORS |     1 -
- .../src/github.com/miekg/dns/CONTRIBUTORS          |     9 -
- .../_workspace/src/github.com/miekg/dns/COPYRIGHT  |     9 -
- Godeps/_workspace/src/github.com/miekg/dns/LICENSE |    32 -
- .../_workspace/src/github.com/miekg/dns/README.md  |   140 -
- .../_workspace/src/github.com/miekg/dns/client.go  |   319 -
- .../src/github.com/miekg/dns/client_test.go        |   195 -
- .../src/github.com/miekg/dns/clientconfig.go       |    94 -
- .../src/github.com/miekg/dns/defaults.go           |   242 -
- Godeps/_workspace/src/github.com/miekg/dns/dns.go  |   193 -
- .../src/github.com/miekg/dns/dns_test.go           |   511 -
- .../_workspace/src/github.com/miekg/dns/dnssec.go  |   756 -
- .../src/github.com/miekg/dns/dnssec_test.go        |   672 -
- .../src/github.com/miekg/dns/dyn_test.go           |     3 -
- Godeps/_workspace/src/github.com/miekg/dns/edns.go |   501 -
- .../src/github.com/miekg/dns/edns_test.go          |    48 -
- .../src/github.com/miekg/dns/example_test.go       |   147 -
- .../src/github.com/miekg/dns/idn/example_test.go   |    18 -
- .../src/github.com/miekg/dns/idn/punycode.go       |   268 -
- .../src/github.com/miekg/dns/idn/punycode_test.go  |    94 -
- .../_workspace/src/github.com/miekg/dns/keygen.go  |   157 -
- .../_workspace/src/github.com/miekg/dns/kscan.go   |   244 -
- .../_workspace/src/github.com/miekg/dns/labels.go  |   162 -
- .../src/github.com/miekg/dns/labels_test.go        |   214 -
- Godeps/_workspace/src/github.com/miekg/dns/msg.go  |  1899 ---
- .../_workspace/src/github.com/miekg/dns/nsecx.go   |   110 -
- .../src/github.com/miekg/dns/nsecx_test.go         |    33 -
- .../src/github.com/miekg/dns/parse_test.go         |  1276 --
- .../src/github.com/miekg/dns/privaterr.go          |   122 -
- .../src/github.com/miekg/dns/privaterr_test.go     |   169 -
- .../_workspace/src/github.com/miekg/dns/rawmsg.go  |    95 -
- .../_workspace/src/github.com/miekg/dns/scanner.go |    43 -
- .../_workspace/src/github.com/miekg/dns/server.go  |   626 -
- .../src/github.com/miekg/dns/server_test.go        |   401 -
- Godeps/_workspace/src/github.com/miekg/dns/sig0.go |   262 -
- .../src/github.com/miekg/dns/sig0_test.go          |    96 -
- .../src/github.com/miekg/dns/singleinflight.go     |    57 -
- Godeps/_workspace/src/github.com/miekg/dns/tlsa.go |    84 -
- Godeps/_workspace/src/github.com/miekg/dns/tsig.go |   378 -
- .../_workspace/src/github.com/miekg/dns/types.go   |  1697 --
- .../src/github.com/miekg/dns/types_test.go         |    42 -
- Godeps/_workspace/src/github.com/miekg/dns/udp.go  |    55 -
- .../src/github.com/miekg/dns/udp_linux.go          |    63 -
- .../src/github.com/miekg/dns/udp_other.go          |    17 -
- .../src/github.com/miekg/dns/udp_windows.go        |    34 -
- .../_workspace/src/github.com/miekg/dns/update.go  |   138 -
- .../src/github.com/miekg/dns/update_test.go        |   105 -
- Godeps/_workspace/src/github.com/miekg/dns/xfr.go  |   236 -
- .../src/github.com/miekg/dns/zgenerate.go          |   157 -
- .../_workspace/src/github.com/miekg/dns/zscan.go   |   956 --
- .../src/github.com/miekg/dns/zscan_rr.go           |  2155 ---
- .../src/github.com/mitchellh/goamz/aws/attempt.go  |    74 -
- .../github.com/mitchellh/goamz/aws/attempt_test.go |    57 -
- .../src/github.com/mitchellh/goamz/aws/aws.go      |   423 -
- .../src/github.com/mitchellh/goamz/aws/aws_test.go |   203 -
- .../src/github.com/mitchellh/goamz/aws/client.go   |   125 -
- .../github.com/mitchellh/goamz/aws/client_test.go  |   121 -
- .../src/github.com/mitchellh/goamz/ec2/ec2.go      |  2599 ---
- .../src/github.com/mitchellh/goamz/ec2/ec2_test.go |  1243 --
- .../github.com/mitchellh/goamz/ec2/ec2i_test.go    |   203 -
- .../github.com/mitchellh/goamz/ec2/ec2t_test.go    |   580 -
- .../mitchellh/goamz/ec2/ec2test/filter.go          |    84 -
- .../mitchellh/goamz/ec2/ec2test/server.go          |   993 --
- .../github.com/mitchellh/goamz/ec2/export_test.go  |    22 -
- .../mitchellh/goamz/ec2/responses_test.go          |   854 -
- .../src/github.com/mitchellh/goamz/ec2/sign.go     |    45 -
- .../github.com/mitchellh/goamz/ec2/sign_test.go    |    68 -
- .../src/github.com/mitchellh/mapstructure/LICENSE  |    21 -
- .../github.com/mitchellh/mapstructure/README.md    |    46 -
- .../mitchellh/mapstructure/decode_hooks.go         |    84 -
- .../mitchellh/mapstructure/decode_hooks_test.go    |   191 -
- .../src/github.com/mitchellh/mapstructure/error.go |    32 -
- .../mitchellh/mapstructure/mapstructure.go         |   704 -
- .../mapstructure/mapstructure_benchmark_test.go    |   243 -
- .../mapstructure/mapstructure_bugs_test.go         |    47 -
- .../mapstructure/mapstructure_examples_test.go     |   169 -
- .../mitchellh/mapstructure/mapstructure_test.go    |   828 -
- .../src/github.com/racker/perigee/.gitignore       |     2 -
- .../src/github.com/racker/perigee/LICENSE          |   202 -
- .../src/github.com/racker/perigee/README.md        |   120 -
- .../src/github.com/racker/perigee/api.go           |   269 -
- .../src/github.com/racker/perigee/api_test.go      |   226 -
- .../github.com/rackspace/gophercloud/.travis.yml   |    14 -
- .../rackspace/gophercloud/CONTRIBUTING.md          |   275 -
- .../rackspace/gophercloud/CONTRIBUTORS.md          |    12 -
- .../src/github.com/rackspace/gophercloud/LICENSE   |   191 -
- .../src/github.com/rackspace/gophercloud/README.md |   161 -
- .../github.com/rackspace/gophercloud/UPGRADING.md  |   338 -
- .../rackspace/gophercloud/acceptance/README.md     |    57 -
- .../openstack/blockstorage/v1/snapshots_test.go    |    70 -
- .../openstack/blockstorage/v1/volumes_test.go      |    63 -
- .../openstack/blockstorage/v1/volumetypes_test.go  |    49 -
- .../acceptance/openstack/client_test.go            |    40 -
- .../openstack/compute/v2/bootfromvolume_test.go    |    50 -
- .../openstack/compute/v2/compute_test.go           |    97 -
- .../openstack/compute/v2/extension_test.go         |    47 -
- .../openstack/compute/v2/flavors_test.go           |    57 -
- .../acceptance/openstack/compute/v2/images_test.go |    37 -
- .../acceptance/openstack/compute/v2/pkg.go         |     3 -
- .../openstack/compute/v2/servers_test.go           |   393 -
- .../openstack/identity/v2/extension_test.go        |    46 -
- .../openstack/identity/v2/identity_test.go         |    47 -
- .../acceptance/openstack/identity/v2/pkg.go        |     1 -
- .../openstack/identity/v2/tenant_test.go           |    32 -
- .../acceptance/openstack/identity/v2/token_test.go |    38 -
- .../openstack/identity/v3/endpoint_test.go         |   111 -
- .../openstack/identity/v3/identity_test.go         |    39 -
- .../acceptance/openstack/identity/v3/pkg.go        |     1 -
- .../openstack/identity/v3/service_test.go          |    36 -
- .../acceptance/openstack/identity/v3/token_test.go |    42 -
- .../openstack/networking/v2/apiversion_test.go     |    51 -
- .../acceptance/openstack/networking/v2/common.go   |    39 -
- .../openstack/networking/v2/extension_test.go      |    45 -
- .../networking/v2/extensions/layer3_test.go        |   300 -
- .../networking/v2/extensions/lbaas/common.go       |    78 -
- .../networking/v2/extensions/lbaas/member_test.go  |    95 -
- .../networking/v2/extensions/lbaas/monitor_test.go |    77 -
- .../networking/v2/extensions/lbaas/pkg.go          |     1 -
- .../networking/v2/extensions/lbaas/pool_test.go    |    98 -
- .../networking/v2/extensions/lbaas/vip_test.go     |   101 -
- .../openstack/networking/v2/extensions/pkg.go      |     1 -
- .../networking/v2/extensions/provider_test.go      |    68 -
- .../networking/v2/extensions/security_test.go      |   171 -
- .../openstack/networking/v2/network_test.go        |    68 -
- .../acceptance/openstack/networking/v2/pkg.go      |     1 -
- .../openstack/networking/v2/port_test.go           |   117 -
- .../openstack/networking/v2/subnet_test.go         |    86 -
- .../openstack/objectstorage/v1/accounts_test.go    |    44 -
- .../openstack/objectstorage/v1/common.go           |    28 -
- .../openstack/objectstorage/v1/containers_test.go  |    89 -
- .../openstack/objectstorage/v1/objects_test.go     |   119 -
- .../gophercloud/acceptance/openstack/pkg.go        |     4 -
- .../acceptance/rackspace/blockstorage/v1/common.go |    38 -
- .../rackspace/blockstorage/v1/snapshot_test.go     |    82 -
- .../rackspace/blockstorage/v1/volume_test.go       |    71 -
- .../rackspace/blockstorage/v1/volume_type_test.go  |    46 -
- .../acceptance/rackspace/client_test.go            |    28 -
- .../rackspace/compute/v2/bootfromvolume_test.go    |    46 -
- .../rackspace/compute/v2/compute_test.go           |    60 -
- .../rackspace/compute/v2/flavors_test.go           |    61 -
- .../acceptance/rackspace/compute/v2/images_test.go |    63 -
- .../rackspace/compute/v2/keypairs_test.go          |    87 -
- .../rackspace/compute/v2/networks_test.go          |    53 -
- .../acceptance/rackspace/compute/v2/pkg.go         |     1 -
- .../rackspace/compute/v2/servers_test.go           |   199 -
- .../rackspace/compute/v2/virtualinterfaces_test.go |    53 -
- .../rackspace/identity/v2/extension_test.go        |    54 -
- .../rackspace/identity/v2/identity_test.go         |    50 -
- .../rackspace/identity/v2/tenant_test.go           |    37 -
- .../rackspace/objectstorage/v1/accounts_test.go    |    33 -
- .../rackspace/objectstorage/v1/bulk_test.go        |    23 -
- .../objectstorage/v1/cdncontainers_test.go         |    61 -
- .../rackspace/objectstorage/v1/cdnobjects_test.go  |    46 -
- .../rackspace/objectstorage/v1/common.go           |    54 -
- .../rackspace/objectstorage/v1/containers_test.go  |    85 -
- .../rackspace/objectstorage/v1/objects_test.go     |   112 -
- .../gophercloud/acceptance/rackspace/pkg.go        |     1 -
- .../rackspace/gophercloud/acceptance/tools/pkg.go  |     1 -
- .../gophercloud/acceptance/tools/tools.go          |    82 -
- .../rackspace/gophercloud/auth_options.go          |    38 -
- .../rackspace/gophercloud/auth_results.go          |    15 -
- .../rackspace/gophercloud/endpoint_search.go       |    65 -
- .../rackspace/gophercloud/endpoint_search_test.go  |    19 -
- .../rackspace/gophercloud/openstack/auth_env.go    |    58 -
- .../openstack/blockstorage/v1/apiversions/doc.go   |     3 -
- .../blockstorage/v1/apiversions/requests.go        |    28 -
- .../blockstorage/v1/apiversions/requests_test.go   |   145 -
- .../blockstorage/v1/apiversions/results.go         |    58 -
- .../openstack/blockstorage/v1/apiversions/urls.go  |    15 -
- .../blockstorage/v1/apiversions/urls_test.go       |    26 -
- .../openstack/blockstorage/v1/snapshots/doc.go     |     5 -
- .../blockstorage/v1/snapshots/fixtures.go          |   114 -
- .../blockstorage/v1/snapshots/requests.go          |   188 -
- .../blockstorage/v1/snapshots/requests_test.go     |   104 -
- .../openstack/blockstorage/v1/snapshots/results.go |   123 -
- .../openstack/blockstorage/v1/snapshots/urls.go    |    27 -
- .../blockstorage/v1/snapshots/urls_test.go         |    50 -
- .../openstack/blockstorage/v1/snapshots/util.go    |    22 -
- .../blockstorage/v1/snapshots/util_test.go         |    38 -
- .../openstack/blockstorage/v1/volumes/doc.go       |     5 -
- .../openstack/blockstorage/v1/volumes/fixtures.go  |   105 -
- .../openstack/blockstorage/v1/volumes/requests.go  |   217 -
- .../blockstorage/v1/volumes/requests_test.go       |    95 -
- .../openstack/blockstorage/v1/volumes/results.go   |   113 -
- .../openstack/blockstorage/v1/volumes/urls.go      |    23 -
- .../openstack/blockstorage/v1/volumes/urls_test.go |    44 -
- .../openstack/blockstorage/v1/volumes/util.go      |    22 -
- .../openstack/blockstorage/v1/volumes/util_test.go |    38 -
- .../openstack/blockstorage/v1/volumetypes/doc.go   |     9 -
- .../blockstorage/v1/volumetypes/fixtures.go        |    60 -
- .../blockstorage/v1/volumetypes/requests.go        |    87 -
- .../blockstorage/v1/volumetypes/requests_test.go   |   118 -
- .../blockstorage/v1/volumetypes/results.go         |    72 -
- .../openstack/blockstorage/v1/volumetypes/urls.go  |    19 -
- .../blockstorage/v1/volumetypes/urls_test.go       |    38 -
- .../rackspace/gophercloud/openstack/client.go      |   205 -
- .../rackspace/gophercloud/openstack/client_test.go |   161 -
- .../gophercloud/openstack/common/README.md         |     3 -
- .../gophercloud/openstack/common/extensions/doc.go |    15 -
- .../openstack/common/extensions/errors.go          |     1 -
- .../openstack/common/extensions/fixtures.go        |    91 -
- .../openstack/common/extensions/requests.go        |    26 -
- .../openstack/common/extensions/requests_test.go   |    38 -
- .../openstack/common/extensions/results.go         |    65 -
- .../openstack/common/extensions/urls.go            |    13 -
- .../openstack/common/extensions/urls_test.go       |    26 -
- .../v2/extensions/bootfromvolume/requests.go       |   111 -
- .../v2/extensions/bootfromvolume/requests_test.go  |    51 -
- .../v2/extensions/bootfromvolume/results.go        |    10 -
- .../compute/v2/extensions/bootfromvolume/urls.go   |     7 -
- .../v2/extensions/bootfromvolume/urls_test.go      |    16 -
- .../openstack/compute/v2/extensions/delegate.go    |    23 -
- .../compute/v2/extensions/delegate_test.go         |    96 -
- .../compute/v2/extensions/diskconfig/doc.go        |     3 -
- .../compute/v2/extensions/diskconfig/requests.go   |   114 -
- .../v2/extensions/diskconfig/requests_test.go      |    87 -
- .../compute/v2/extensions/diskconfig/results.go    |    60 -
- .../v2/extensions/diskconfig/results_test.go       |    68 -
- .../openstack/compute/v2/extensions/doc.go         |     3 -
- .../compute/v2/extensions/keypairs/doc.go          |     3 -
- .../compute/v2/extensions/keypairs/fixtures.go     |   171 -
- .../compute/v2/extensions/keypairs/requests.go     |    88 -
- .../v2/extensions/keypairs/requests_test.go        |    71 -
- .../compute/v2/extensions/keypairs/results.go      |    94 -
- .../compute/v2/extensions/keypairs/urls.go         |    25 -
- .../compute/v2/extensions/keypairs/urls_test.go    |    40 -
- .../openstack/compute/v2/flavors/doc.go            |     7 -
- .../openstack/compute/v2/flavors/requests.go       |    72 -
- .../openstack/compute/v2/flavors/requests_test.go  |   129 -
- .../openstack/compute/v2/flavors/results.go        |   122 -
- .../openstack/compute/v2/flavors/urls.go           |    13 -
- .../openstack/compute/v2/flavors/urls_test.go      |    26 -
- .../gophercloud/openstack/compute/v2/images/doc.go |     7 -
- .../openstack/compute/v2/images/requests.go        |    71 -
- .../openstack/compute/v2/images/requests_test.go   |   175 -
- .../openstack/compute/v2/images/results.go         |    90 -
- .../openstack/compute/v2/images/urls.go            |    11 -
- .../openstack/compute/v2/images/urls_test.go       |    26 -
- .../openstack/compute/v2/servers/doc.go            |     6 -
- .../openstack/compute/v2/servers/fixtures.go       |   459 -
- .../openstack/compute/v2/servers/requests.go       |   538 -
- .../openstack/compute/v2/servers/requests_test.go  |   176 -
- .../openstack/compute/v2/servers/results.go        |   150 -
- .../openstack/compute/v2/servers/urls.go           |    31 -
- .../openstack/compute/v2/servers/urls_test.go      |    56 -
- .../openstack/compute/v2/servers/util.go           |    20 -
- .../openstack/compute/v2/servers/util_test.go      |    38 -
- .../gophercloud/openstack/endpoint_location.go     |   124 -
- .../openstack/endpoint_location_test.go            |   225 -
- .../openstack/identity/v2/extensions/delegate.go   |    52 -
- .../identity/v2/extensions/delegate_test.go        |    38 -
- .../openstack/identity/v2/extensions/doc.go        |     3 -
- .../openstack/identity/v2/extensions/fixtures.go   |    60 -
- .../openstack/identity/v2/tenants/doc.go           |     7 -
- .../openstack/identity/v2/tenants/fixtures.go      |    65 -
- .../openstack/identity/v2/tenants/requests.go      |    33 -
- .../openstack/identity/v2/tenants/requests_test.go |    29 -
- .../openstack/identity/v2/tenants/results.go       |    62 -
- .../openstack/identity/v2/tenants/urls.go          |     7 -
- .../openstack/identity/v2/tokens/doc.go            |     5 -
- .../openstack/identity/v2/tokens/errors.go         |    30 -
- .../openstack/identity/v2/tokens/fixtures.go       |   128 -
- .../openstack/identity/v2/tokens/requests.go       |    87 -
- .../openstack/identity/v2/tokens/requests_test.go  |   140 -
- .../openstack/identity/v2/tokens/results.go        |   133 -
- .../openstack/identity/v2/tokens/urls.go           |     8 -
- .../openstack/identity/v3/endpoints/doc.go         |     6 -
- .../openstack/identity/v3/endpoints/errors.go      |    21 -
- .../openstack/identity/v3/endpoints/requests.go    |   133 -
- .../identity/v3/endpoints/requests_test.go         |   226 -
- .../openstack/identity/v3/endpoints/results.go     |    82 -
- .../openstack/identity/v3/endpoints/urls.go        |    11 -
- .../openstack/identity/v3/endpoints/urls_test.go   |    23 -
- .../openstack/identity/v3/services/doc.go          |     3 -
- .../openstack/identity/v3/services/requests.go     |    91 -
- .../identity/v3/services/requests_test.go          |   209 -
- .../openstack/identity/v3/services/results.go      |    80 -
- .../openstack/identity/v3/services/urls.go         |    11 -
- .../openstack/identity/v3/services/urls_test.go    |    23 -
- .../openstack/identity/v3/tokens/doc.go            |     6 -
- .../openstack/identity/v3/tokens/errors.go         |    72 -
- .../openstack/identity/v3/tokens/requests.go       |   286 -
- .../openstack/identity/v3/tokens/requests_test.go  |   514 -
- .../openstack/identity/v3/tokens/results.go        |    73 -
- .../openstack/identity/v3/tokens/urls.go           |     7 -
- .../openstack/identity/v3/tokens/urls_test.go      |    21 -
- .../openstack/networking/v2/apiversions/doc.go     |     4 -
- .../openstack/networking/v2/apiversions/errors.go  |     1 -
- .../networking/v2/apiversions/requests.go          |    21 -
- .../networking/v2/apiversions/requests_test.go     |   182 -
- .../openstack/networking/v2/apiversions/results.go |    77 -
- .../openstack/networking/v2/apiversions/urls.go    |    15 -
- .../networking/v2/apiversions/urls_test.go         |    26 -
- .../openstack/networking/v2/common/common_tests.go |    14 -
- .../openstack/networking/v2/extensions/delegate.go |    41 -
- .../networking/v2/extensions/delegate_test.go      |   105 -
- .../networking/v2/extensions/external/doc.go       |     3 -
- .../networking/v2/extensions/external/requests.go  |    56 -
- .../networking/v2/extensions/external/results.go   |    81 -
- .../v2/extensions/external/results_test.go         |   254 -
- .../networking/v2/extensions/layer3/doc.go         |     5 -
- .../v2/extensions/layer3/floatingips/requests.go   |   190 -
- .../extensions/layer3/floatingips/requests_test.go |   306 -
- .../v2/extensions/layer3/floatingips/results.go    |   127 -
- .../v2/extensions/layer3/floatingips/urls.go       |    13 -
- .../v2/extensions/layer3/routers/requests.go       |   246 -
- .../v2/extensions/layer3/routers/requests_test.go  |   338 -
- .../v2/extensions/layer3/routers/results.go        |   161 -
- .../v2/extensions/layer3/routers/urls.go           |    21 -
- .../networking/v2/extensions/lbaas/doc.go          |     3 -
- .../v2/extensions/lbaas/members/requests.go        |   139 -
- .../v2/extensions/lbaas/members/requests_test.go   |   243 -
- .../v2/extensions/lbaas/members/results.go         |   122 -
- .../networking/v2/extensions/lbaas/members/urls.go |    16 -
- .../v2/extensions/lbaas/monitors/requests.go       |   282 -
- .../v2/extensions/lbaas/monitors/requests_test.go  |   312 -
- .../v2/extensions/lbaas/monitors/results.go        |   147 -
- .../v2/extensions/lbaas/monitors/urls.go           |    16 -
- .../v2/extensions/lbaas/pools/requests.go          |   205 -
- .../v2/extensions/lbaas/pools/requests_test.go     |   317 -
- .../v2/extensions/lbaas/pools/results.go           |   146 -
- .../networking/v2/extensions/lbaas/pools/urls.go   |    25 -
- .../v2/extensions/lbaas/vips/requests.go           |   273 -
- .../v2/extensions/lbaas/vips/requests_test.go      |   336 -
- .../networking/v2/extensions/lbaas/vips/results.go |   166 -
- .../networking/v2/extensions/lbaas/vips/urls.go    |    16 -
- .../networking/v2/extensions/provider/doc.go       |    21 -
- .../networking/v2/extensions/provider/results.go   |   124 -
- .../v2/extensions/provider/results_test.go         |   253 -
- .../networking/v2/extensions/security/doc.go       |    32 -
- .../v2/extensions/security/groups/requests.go      |   107 -
- .../v2/extensions/security/groups/requests_test.go |   213 -
- .../v2/extensions/security/groups/results.go       |   108 -
- .../v2/extensions/security/groups/urls.go          |    13 -
- .../v2/extensions/security/rules/requests.go       |   183 -
- .../v2/extensions/security/rules/requests_test.go  |   243 -
- .../v2/extensions/security/rules/results.go        |   133 -
- .../v2/extensions/security/rules/urls.go           |    13 -
- .../openstack/networking/v2/networks/doc.go        |     9 -
- .../openstack/networking/v2/networks/errors.go     |     1 -
- .../openstack/networking/v2/networks/requests.go   |   209 -
- .../networking/v2/networks/requests_test.go        |   275 -
- .../openstack/networking/v2/networks/results.go    |   116 -
- .../openstack/networking/v2/networks/urls.go       |    27 -
- .../openstack/networking/v2/networks/urls_test.go  |    38 -
- .../openstack/networking/v2/ports/doc.go           |     8 -
- .../openstack/networking/v2/ports/errors.go        |    11 -
- .../openstack/networking/v2/ports/requests.go      |   245 -
- .../openstack/networking/v2/ports/requests_test.go |   321 -
- .../openstack/networking/v2/ports/results.go       |   126 -
- .../openstack/networking/v2/ports/urls.go          |    31 -
- .../openstack/networking/v2/ports/urls_test.go     |    44 -
- .../openstack/networking/v2/subnets/doc.go         |    10 -
- .../openstack/networking/v2/subnets/errors.go      |    13 -
- .../openstack/networking/v2/subnets/requests.go    |   254 -
- .../networking/v2/subnets/requests_test.go         |   362 -
- .../openstack/networking/v2/subnets/results.go     |   132 -
- .../openstack/networking/v2/subnets/urls.go        |    31 -
- .../openstack/networking/v2/subnets/urls_test.go   |    44 -
- .../openstack/objectstorage/v1/accounts/doc.go     |     8 -
- .../objectstorage/v1/accounts/fixtures.go          |    38 -
- .../objectstorage/v1/accounts/requests.go          |   106 -
- .../objectstorage/v1/accounts/requests_test.go     |    33 -
- .../openstack/objectstorage/v1/accounts/results.go |    34 -
- .../openstack/objectstorage/v1/accounts/urls.go    |    11 -
- .../objectstorage/v1/accounts/urls_test.go         |    26 -
- .../openstack/objectstorage/v1/containers/doc.go   |     8 -
- .../objectstorage/v1/containers/fixtures.go        |   132 -
- .../objectstorage/v1/containers/requests.go        |   204 -
- .../objectstorage/v1/containers/requests_test.go   |    91 -
- .../objectstorage/v1/containers/results.go         |   139 -
- .../openstack/objectstorage/v1/containers/urls.go  |    23 -
- .../objectstorage/v1/containers/urls_test.go       |    43 -
- .../openstack/objectstorage/v1/objects/doc.go      |     5 -
- .../openstack/objectstorage/v1/objects/fixtures.go |   164 -
- .../openstack/objectstorage/v1/objects/requests.go |   416 -
- .../objectstorage/v1/objects/requests_test.go      |   132 -
- .../openstack/objectstorage/v1/objects/results.go  |   162 -
- .../openstack/objectstorage/v1/objects/urls.go     |    33 -
- .../objectstorage/v1/objects/urls_test.go          |    56 -
- .../gophercloud/openstack/utils/choose_version.go  |   114 -
- .../openstack/utils/choose_version_test.go         |   105 -
- .../github.com/rackspace/gophercloud/package.go    |    38 -
- .../rackspace/gophercloud/pagination/http.go       |    64 -
- .../rackspace/gophercloud/pagination/linked.go     |    61 -
- .../gophercloud/pagination/linked_test.go          |   107 -
- .../rackspace/gophercloud/pagination/marker.go     |    34 -
- .../gophercloud/pagination/marker_test.go          |   113 -
- .../rackspace/gophercloud/pagination/null.go       |    20 -
- .../rackspace/gophercloud/pagination/pager.go      |   115 -
- .../gophercloud/pagination/pagination_test.go      |    13 -
- .../rackspace/gophercloud/pagination/pkg.go        |     4 -
- .../rackspace/gophercloud/pagination/single.go     |     9 -
- .../gophercloud/pagination/single_test.go          |    71 -
- .../src/github.com/rackspace/gophercloud/params.go |   184 -
- .../rackspace/gophercloud/params_test.go           |   142 -
- .../rackspace/gophercloud/provider_client.go       |    33 -
- .../rackspace/gophercloud/provider_client_test.go  |    16 -
- .../rackspace/gophercloud/rackspace/auth_env.go    |    57 -
- .../blockstorage/v1/snapshots/delegate.go          |   134 -
- .../blockstorage/v1/snapshots/delegate_test.go     |    97 -
- .../rackspace/blockstorage/v1/snapshots/doc.go     |     3 -
- .../rackspace/blockstorage/v1/snapshots/results.go |   149 -
- .../rackspace/blockstorage/v1/volumes/delegate.go  |    75 -
- .../blockstorage/v1/volumes/delegate_test.go       |   106 -
- .../rackspace/blockstorage/v1/volumes/doc.go       |     3 -
- .../rackspace/blockstorage/v1/volumes/results.go   |    66 -
- .../blockstorage/v1/volumetypes/delegate.go        |    18 -
- .../blockstorage/v1/volumetypes/delegate_test.go   |    64 -
- .../rackspace/blockstorage/v1/volumetypes/doc.go   |     3 -
- .../blockstorage/v1/volumetypes/results.go         |    37 -
- .../rackspace/gophercloud/rackspace/client.go      |   156 -
- .../rackspace/gophercloud/rackspace/client_test.go |    38 -
- .../compute/v2/bootfromvolume/delegate.go          |    12 -
- .../compute/v2/bootfromvolume/delegate_test.go     |    52 -
- .../rackspace/compute/v2/flavors/delegate.go       |    46 -
- .../rackspace/compute/v2/flavors/delegate_test.go  |    62 -
- .../rackspace/compute/v2/flavors/doc.go            |     3 -
- .../rackspace/compute/v2/flavors/fixtures.go       |   128 -
- .../rackspace/compute/v2/images/delegate.go        |    22 -
- .../rackspace/compute/v2/images/delegate_test.go   |    62 -
- .../gophercloud/rackspace/compute/v2/images/doc.go |     3 -
- .../rackspace/compute/v2/images/fixtures.go        |   199 -
- .../rackspace/compute/v2/keypairs/delegate.go      |    33 -
- .../rackspace/compute/v2/keypairs/delegate_test.go |    72 -
- .../rackspace/compute/v2/keypairs/doc.go           |     3 -
- .../rackspace/compute/v2/networks/doc.go           |     3 -
- .../rackspace/compute/v2/networks/requests.go      |   101 -
- .../rackspace/compute/v2/networks/requests_test.go |   156 -
- .../rackspace/compute/v2/networks/results.go       |    81 -
- .../rackspace/compute/v2/networks/urls.go          |    27 -
- .../rackspace/compute/v2/networks/urls_test.go     |    38 -
- .../rackspace/compute/v2/servers/delegate.go       |    61 -
- .../rackspace/compute/v2/servers/delegate_test.go  |   112 -
- .../rackspace/compute/v2/servers/doc.go            |     3 -
- .../rackspace/compute/v2/servers/fixtures.go       |   439 -
- .../rackspace/compute/v2/servers/requests.go       |   158 -
- .../rackspace/compute/v2/servers/requests_test.go  |    57 -
- .../compute/v2/virtualinterfaces/requests.go       |    51 -
- .../compute/v2/virtualinterfaces/requests_test.go  |   165 -
- .../compute/v2/virtualinterfaces/results.go        |    81 -
- .../rackspace/compute/v2/virtualinterfaces/urls.go |    15 -
- .../compute/v2/virtualinterfaces/urls_test.go      |    32 -
- .../rackspace/identity/v2/extensions/delegate.go   |    24 -
- .../identity/v2/extensions/delegate_test.go        |    39 -
- .../rackspace/identity/v2/extensions/doc.go        |     3 -
- .../rackspace/identity/v2/tenants/delegate.go      |    17 -
- .../rackspace/identity/v2/tenants/delegate_test.go |    28 -
- .../rackspace/identity/v2/tenants/doc.go           |     3 -
- .../rackspace/identity/v2/tokens/delegate.go       |    60 -
- .../rackspace/identity/v2/tokens/delegate_test.go  |    36 -
- .../rackspace/identity/v2/tokens/doc.go            |     3 -
- .../objectstorage/v1/accounts/delegate.go          |    39 -
- .../objectstorage/v1/accounts/delegate_test.go     |    30 -
- .../rackspace/objectstorage/v1/accounts/doc.go     |     3 -
- .../rackspace/objectstorage/v1/bulk/doc.go         |     3 -
- .../rackspace/objectstorage/v1/bulk/requests.go    |    51 -
- .../objectstorage/v1/bulk/requests_test.go         |    36 -
- .../rackspace/objectstorage/v1/bulk/results.go     |    28 -
- .../rackspace/objectstorage/v1/bulk/urls.go        |    11 -
- .../rackspace/objectstorage/v1/bulk/urls_test.go   |    26 -
- .../objectstorage/v1/cdncontainers/delegate.go     |    71 -
- .../v1/cdncontainers/delegate_test.go              |    50 -
- .../objectstorage/v1/cdncontainers/doc.go          |     3 -
- .../objectstorage/v1/cdncontainers/requests.go     |    58 -
- .../v1/cdncontainers/requests_test.go              |    29 -
- .../objectstorage/v1/cdncontainers/results.go      |     8 -
- .../objectstorage/v1/cdncontainers/urls.go         |     7 -
- .../objectstorage/v1/cdncontainers/urls_test.go    |    20 -
- .../objectstorage/v1/cdnobjects/delegate.go        |    11 -
- .../objectstorage/v1/cdnobjects/delegate_test.go   |    19 -
- .../rackspace/objectstorage/v1/cdnobjects/doc.go   |     3 -
- .../objectstorage/v1/containers/delegate.go        |    93 -
- .../objectstorage/v1/containers/delegate_test.go   |    91 -
- .../rackspace/objectstorage/v1/containers/doc.go   |     3 -
- .../rackspace/objectstorage/v1/objects/delegate.go |    90 -
- .../objectstorage/v1/objects/delegate_test.go      |   115 -
- .../rackspace/objectstorage/v1/objects/doc.go      |     3 -
- .../github.com/rackspace/gophercloud/results.go    |    83 -
- .../rackspace/gophercloud/script/acceptancetest    |     5 -
- .../rackspace/gophercloud/script/bootstrap         |    26 -
- .../rackspace/gophercloud/script/cibuild           |     5 -
- .../github.com/rackspace/gophercloud/script/test   |     5 -
- .../rackspace/gophercloud/script/unittest          |     5 -
- .../rackspace/gophercloud/service_client.go        |    32 -
- .../rackspace/gophercloud/service_client_test.go   |    14 -
- .../gophercloud/testhelper/client/fake.go          |    17 -
- .../gophercloud/testhelper/convenience.go          |   329 -
- .../rackspace/gophercloud/testhelper/doc.go        |     4 -
- .../gophercloud/testhelper/http_responses.go       |    91 -
- .../src/github.com/rackspace/gophercloud/util.go   |    39 -
- .../github.com/rackspace/gophercloud/util_test.go  |    14 -
- .../src/github.com/skratchdot/LICENSE-MIT          |    22 -
- .../github.com/skratchdot/open-golang/open/exec.go |    15 -
- .../skratchdot/open-golang/open/exec_darwin.go     |    15 -
- .../skratchdot/open-golang/open/exec_windows.go    |    21 -
- .../github.com/skratchdot/open-golang/open/open.go |    50 -
- .../skratchdot/open-golang/open/open_test.go       |    70 -
- .../skynetservices/skydns/msg/service.go           |    95 -
- .../src/github.com/spf13/cobra/.gitignore          |    24 -
- .../src/github.com/spf13/cobra/.travis.yml         |     6 -
- .../src/github.com/spf13/cobra/LICENSE.txt         |   174 -
- .../src/github.com/spf13/cobra/README.md           |   399 -
- .../_workspace/src/github.com/spf13/cobra/cobra.go |   104 -
- .../src/github.com/spf13/cobra/cobra_test.go       |   553 -
- .../src/github.com/spf13/cobra/command.go          |   790 -
- .../_workspace/src/github.com/spf13/pflag/LICENSE  |    28 -
- .../src/github.com/spf13/pflag/README.md           |   155 -
- .../_workspace/src/github.com/spf13/pflag/bool.go  |    74 -
- .../src/github.com/spf13/pflag/duration.go         |    71 -
- .../src/github.com/spf13/pflag/example_test.go     |    73 -
- .../src/github.com/spf13/pflag/export_test.go      |    29 -
- .../_workspace/src/github.com/spf13/pflag/flag.go  |   621 -
- .../src/github.com/spf13/pflag/flag_test.go        |   354 -
- .../src/github.com/spf13/pflag/float32.go          |    74 -
- .../src/github.com/spf13/pflag/float64.go          |    74 -
- .../_workspace/src/github.com/spf13/pflag/int.go   |    74 -
- .../_workspace/src/github.com/spf13/pflag/int32.go |    74 -
- .../_workspace/src/github.com/spf13/pflag/int64.go |    74 -
- .../_workspace/src/github.com/spf13/pflag/int8.go  |    74 -
- Godeps/_workspace/src/github.com/spf13/pflag/ip.go |    79 -
- .../src/github.com/spf13/pflag/ipmask.go           |    89 -
- .../src/github.com/spf13/pflag/string.go           |    69 -
- .../_workspace/src/github.com/spf13/pflag/uint.go  |    74 -
- .../src/github.com/spf13/pflag/uint16.go           |    76 -
- .../src/github.com/spf13/pflag/uint32.go           |    75 -
- .../src/github.com/spf13/pflag/uint64.go           |    74 -
- .../_workspace/src/github.com/spf13/pflag/uint8.go |    74 -
- .../src/github.com/stretchr/objx/.gitignore        |    22 -
- .../src/github.com/stretchr/objx/README.md         |     3 -
- .../src/github.com/stretchr/objx/accessors.go      |   179 -
- .../src/github.com/stretchr/objx/accessors_test.go |   145 -
- .../stretchr/objx/codegen/array-access.txt         |    14 -
- .../github.com/stretchr/objx/codegen/index.html    |    86 -
- .../github.com/stretchr/objx/codegen/template.txt  |   286 -
- .../stretchr/objx/codegen/types_list.txt           |    20 -
- .../src/github.com/stretchr/objx/constants.go      |    13 -
- .../src/github.com/stretchr/objx/conversions.go    |   117 -
- .../github.com/stretchr/objx/conversions_test.go   |    94 -
- .../_workspace/src/github.com/stretchr/objx/doc.go |    72 -
- .../src/github.com/stretchr/objx/fixture_test.go   |    98 -
- .../_workspace/src/github.com/stretchr/objx/map.go |   222 -
- .../src/github.com/stretchr/objx/map_for_test.go   |    10 -
- .../src/github.com/stretchr/objx/map_test.go       |   147 -
- .../src/github.com/stretchr/objx/mutations.go      |    81 -
- .../src/github.com/stretchr/objx/mutations_test.go |    77 -
- .../src/github.com/stretchr/objx/security.go       |    14 -
- .../src/github.com/stretchr/objx/security_test.go  |    12 -
- .../stretchr/objx/simple_example_test.go           |    41 -
- .../src/github.com/stretchr/objx/tests.go          |    17 -
- .../src/github.com/stretchr/objx/tests_test.go     |    24 -
- .../stretchr/objx/type_specific_codegen.go         |  2881 ----
- .../stretchr/objx/type_specific_codegen_test.go    |  2867 ----
- .../src/github.com/stretchr/objx/value.go          |    13 -
- .../src/github.com/stretchr/objx/value_test.go     |     1 -
- .../stretchr/testify/assert/assertions.go          |   490 -
- .../stretchr/testify/assert/assertions_test.go     |   401 -
- .../src/github.com/stretchr/testify/assert/doc.go  |    74 -
- .../github.com/stretchr/testify/assert/errors.go   |    10 -
- .../src/github.com/stretchr/testify/mock/doc.go    |    43 -
- .../src/github.com/stretchr/testify/mock/mock.go   |   505 -
- .../github.com/stretchr/testify/mock/mock_test.go  |   657 -
- .../src/github.com/vaughan0/go-ini/LICENSE         |    14 -
- .../src/github.com/vaughan0/go-ini/README.md       |    70 -
- .../src/github.com/vaughan0/go-ini/ini.go          |   123 -
- .../github.com/vaughan0/go-ini/ini_linux_test.go   |    43 -
- .../src/github.com/vaughan0/go-ini/ini_test.go     |    89 -
- .../src/github.com/vaughan0/go-ini/test.ini        |     2 -
- .../src/golang.org/x/net/context/context.go        |   432 -
- .../src/golang.org/x/net/context/context_test.go   |   553 -
- .../golang.org/x/net/context/withtimeout_test.go   |    26 -
- .../src/golang.org/x/net/html/atom/atom.go         |    78 -
- .../src/golang.org/x/net/html/atom/atom_test.go    |   109 -
- .../src/golang.org/x/net/html/atom/gen.go          |   636 -
- .../src/golang.org/x/net/html/atom/table.go        |   694 -
- .../src/golang.org/x/net/html/atom/table_test.go   |   341 -
- .../src/golang.org/x/net/html/charset/charset.go   |   231 -
- .../golang.org/x/net/html/charset/charset_test.go  |   215 -
- .../src/golang.org/x/net/html/charset/gen.go       |   111 -
- .../src/golang.org/x/net/html/charset/table.go     |   235 -
- .../x/net/html/charset/testdata/HTTP-charset.html  |    48 -
- .../html/charset/testdata/HTTP-vs-UTF-8-BOM.html   |    48 -
- .../charset/testdata/HTTP-vs-meta-charset.html     |    49 -
- .../charset/testdata/HTTP-vs-meta-content.html     |    49 -
- .../charset/testdata/No-encoding-declaration.html  |    47 -
- .../golang.org/x/net/html/charset/testdata/README  |     1 -
- .../x/net/html/charset/testdata/UTF-16BE-BOM.html  |   Bin 2670 -> 0 bytes
- .../x/net/html/charset/testdata/UTF-16LE-BOM.html  |   Bin 2682 -> 0 bytes
- .../testdata/UTF-8-BOM-vs-meta-charset.html        |    49 -
- .../testdata/UTF-8-BOM-vs-meta-content.html        |    48 -
- .../charset/testdata/meta-charset-attribute.html   |    48 -
- .../charset/testdata/meta-content-attribute.html   |    48 -
- .../_workspace/src/golang.org/x/net/html/const.go  |   100 -
- Godeps/_workspace/src/golang.org/x/net/html/doc.go |   106 -
- .../src/golang.org/x/net/html/doctype.go           |   156 -
- .../_workspace/src/golang.org/x/net/html/entity.go |  2253 ---
- .../src/golang.org/x/net/html/entity_test.go       |    29 -
- .../_workspace/src/golang.org/x/net/html/escape.go |   258 -
- .../src/golang.org/x/net/html/escape_test.go       |    97 -
- .../src/golang.org/x/net/html/example_test.go      |    40 -
- .../src/golang.org/x/net/html/foreign.go           |   226 -
- .../_workspace/src/golang.org/x/net/html/node.go   |   193 -
- .../src/golang.org/x/net/html/node_test.go         |   146 -
- .../_workspace/src/golang.org/x/net/html/parse.go  |  2092 ---
- .../src/golang.org/x/net/html/parse_test.go        |   388 -
- .../_workspace/src/golang.org/x/net/html/render.go |   271 -
- .../src/golang.org/x/net/html/render_test.go       |   156 -
- .../src/golang.org/x/net/html/testdata/go1.html    |  2237 ---
- .../golang.org/x/net/html/testdata/webkit/README   |    28 -
- .../x/net/html/testdata/webkit/adoption01.dat      |   194 -
- .../x/net/html/testdata/webkit/adoption02.dat      |    31 -
- .../x/net/html/testdata/webkit/comments01.dat      |   135 -
- .../x/net/html/testdata/webkit/doctype01.dat       |   370 -
- .../x/net/html/testdata/webkit/entities01.dat      |   603 -
- .../x/net/html/testdata/webkit/entities02.dat      |   249 -
- .../x/net/html/testdata/webkit/html5test-com.dat   |   246 -
- .../x/net/html/testdata/webkit/inbody01.dat        |    43 -
- .../x/net/html/testdata/webkit/isindex.dat         |    40 -
- .../pending-spec-changes-plain-text-unsafe.dat     |   Bin 115 -> 0 bytes
- .../html/testdata/webkit/pending-spec-changes.dat  |    52 -
- .../net/html/testdata/webkit/plain-text-unsafe.dat |   Bin 4166 -> 0 bytes
- .../x/net/html/testdata/webkit/scriptdata01.dat    |   308 -
- .../html/testdata/webkit/scripted/adoption01.dat   |    15 -
- .../net/html/testdata/webkit/scripted/webkit01.dat |    28 -
- .../x/net/html/testdata/webkit/tables01.dat        |   212 -
- .../x/net/html/testdata/webkit/tests1.dat          |  1952 ---
- .../x/net/html/testdata/webkit/tests10.dat         |   799 -
- .../x/net/html/testdata/webkit/tests11.dat         |   482 -
- .../x/net/html/testdata/webkit/tests12.dat         |    62 -
- .../x/net/html/testdata/webkit/tests14.dat         |    74 -
- .../x/net/html/testdata/webkit/tests15.dat         |   208 -
- .../x/net/html/testdata/webkit/tests16.dat         |  2299 ---
- .../x/net/html/testdata/webkit/tests17.dat         |   153 -
- .../x/net/html/testdata/webkit/tests18.dat         |   269 -
- .../x/net/html/testdata/webkit/tests19.dat         |  1237 --
- .../x/net/html/testdata/webkit/tests2.dat          |   763 -
- .../x/net/html/testdata/webkit/tests20.dat         |   455 -
- .../x/net/html/testdata/webkit/tests21.dat         |   221 -
- .../x/net/html/testdata/webkit/tests22.dat         |   157 -
- .../x/net/html/testdata/webkit/tests23.dat         |   155 -
- .../x/net/html/testdata/webkit/tests24.dat         |    79 -
- .../x/net/html/testdata/webkit/tests25.dat         |   219 -
- .../x/net/html/testdata/webkit/tests26.dat         |   313 -
- .../x/net/html/testdata/webkit/tests3.dat          |   305 -
- .../x/net/html/testdata/webkit/tests4.dat          |    59 -
- .../x/net/html/testdata/webkit/tests5.dat          |   191 -
- .../x/net/html/testdata/webkit/tests6.dat          |   663 -
- .../x/net/html/testdata/webkit/tests7.dat          |   390 -
- .../x/net/html/testdata/webkit/tests8.dat          |   148 -
- .../x/net/html/testdata/webkit/tests9.dat          |   457 -
- .../net/html/testdata/webkit/tests_innerHTML_1.dat |   741 -
- .../x/net/html/testdata/webkit/tricky01.dat        |   261 -
- .../x/net/html/testdata/webkit/webkit01.dat        |   610 -
- .../x/net/html/testdata/webkit/webkit02.dat        |   159 -
- .../_workspace/src/golang.org/x/net/html/token.go  |  1219 --
- .../src/golang.org/x/net/html/token_test.go        |   748 -
- .../src/golang.org/x/net/websocket/client.go       |    98 -
- .../golang.org/x/net/websocket/exampledial_test.go |    31 -
- .../x/net/websocket/examplehandler_test.go         |    26 -
- .../src/golang.org/x/net/websocket/hybi.go         |   564 -
- .../src/golang.org/x/net/websocket/hybi_test.go    |   590 -
- .../src/golang.org/x/net/websocket/server.go       |   114 -
- .../src/golang.org/x/net/websocket/websocket.go    |   411 -
- .../golang.org/x/net/websocket/websocket_test.go   |   341 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE     |   188 -
- .../src/gopkg.in/v2/yaml/LICENSE.libyaml           |    31 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/README.md   |   128 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/apic.go     |   742 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/decode.go   |   665 -
- .../_workspace/src/gopkg.in/v2/yaml/decode_test.go |   902 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/emitterc.go |  1685 --
- Godeps/_workspace/src/gopkg.in/v2/yaml/encode.go   |   290 -
- .../_workspace/src/gopkg.in/v2/yaml/encode_test.go |   434 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/parserc.go  |  1096 --
- Godeps/_workspace/src/gopkg.in/v2/yaml/readerc.go  |   391 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/resolve.go  |   203 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/scannerc.go |  2710 ---
- Godeps/_workspace/src/gopkg.in/v2/yaml/sorter.go   |   104 -
- .../_workspace/src/gopkg.in/v2/yaml/suite_test.go  |    12 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/writerc.go  |    89 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/yaml.go     |   334 -
- Godeps/_workspace/src/gopkg.in/v2/yaml/yamlh.go    |   716 -
- .../src/gopkg.in/v2/yaml/yamlprivateh.go           |   173 -
- .../src/speter.net/go/exp/math/dec/inf/LICENSE     |    57 -
- .../go/exp/math/dec/inf/benchmark_test.go          |   210 -
- .../src/speter.net/go/exp/math/dec/inf/dec.go      |   615 -
- .../go/exp/math/dec/inf/dec_go1_2_test.go          |    33 -
- .../go/exp/math/dec/inf/dec_internal_test.go       |    40 -
- .../src/speter.net/go/exp/math/dec/inf/dec_test.go |   379 -
- .../speter.net/go/exp/math/dec/inf/example_test.go |    62 -
- .../src/speter.net/go/exp/math/dec/inf/rounder.go  |   145 -
- .../go/exp/math/dec/inf/rounder_example_test.go    |    72 -
- .../speter.net/go/exp/math/dec/inf/rounder_test.go |   109 -
- 1083 files changed, 197448 deletions(-)
- delete mode 100644 Godeps/Godeps.json
- delete mode 100644 Godeps/Readme
- delete mode 100644 Godeps/_workspace/.gitignore
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/LICENSE
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/README
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/doc.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/example_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/go1_0.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/go1_2.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/issues_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/read.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/read_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/scanner/errors.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/scanner/example_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/set.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_test.gcfg
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_unicode_test.gcfg
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/token/position.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/token/position_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/token/token.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/bool.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/doc.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/enum.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/enum_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/int.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/int_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/scan.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/gcfg/types/scan_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/example/oauthreq.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.client_secrets.json
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.pem
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/main.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/AUTHORS
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/CONTRIBUTORS
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/LICENSE
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/Makefile
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/NOTES
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/README
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/TODO
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-api.json
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-gen.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-api.json
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-gen.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi_test.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/LICENSE
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/uritemplates.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/utils.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/transport/apikey.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types.go
- delete mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
- delete mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go
- delete mode 100644 Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpcgo_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/internal_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
- delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go
- delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/Srcfile
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/bench_curly_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.sh
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/constants.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/container.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/coverage.sh
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/curly.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/curly_route.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/curly_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/doc_examples_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/.goconvey
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/app.yaml
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/home.html
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-basic-authentication.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-curly-router.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-filters.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-form-handling.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-hello-world.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-html-template.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-multi-containers.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-options-filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-path-tail.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-resource-functions.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-route_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-routefunction_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-serve-static.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-resource.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-service.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/filter_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/install.sh
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/logger.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/path_expression.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/request.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/request_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/response.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/route.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/router.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/service_error.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/web_service.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_container.go
- delete mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
- delete mode 120000 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/writer.go
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/fields.go
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/yaml.go
- delete mode 100644 Godeps/_workspace/src/github.com/ghodss/yaml/yaml_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/golang/glog/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/golang/glog/README
- delete mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog.go
- delete mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog_file.go
- delete mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/client/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/client/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/client/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/info/container.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/info/container_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/info/machine.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/info/test/datagen.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/cadvisor/info/version.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/CONTRIBUTING.md
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/fuzz.go
- delete mode 100644 Godeps/_workspace/src/github.com/google/gofuzz/fuzz_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/map.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/merge.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/mergo.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/mergo_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/testdata/license.yml
- delete mode 100644 Godeps/_workspace/src/github.com/imdario/mergo/testdata/thing.yml
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/AUTHORS
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/defaults.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dns.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dns_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dnssec.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/edns.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/edns_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/keygen.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/kscan.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/labels.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/labels_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/msg.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/nsecx.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/privaterr.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/scanner.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/server.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/server_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/sig0.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/tlsa.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/tsig.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/types.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/types_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/udp.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/udp_other.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/update.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/update_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/xfr.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/zscan.go
- delete mode 100644 Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2i_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2t_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/server.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/export_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/responses_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/racker/perigee/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/racker/perigee/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/racker/perigee/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/racker/perigee/api.go
- delete mode 100644 Godeps/_workspace/src/github.com/racker/perigee/api_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTORS.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/tools.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_options.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/auth_env.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/package.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/http.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/null.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pager.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pagination_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pkg.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/params.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/params_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/auth_env.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/results.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/client/fake.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/convenience.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/http_responses.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/util.go
- delete mode 100644 Godeps/_workspace/src/github.com/rackspace/gophercloud/util_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/LICENSE-MIT
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec.go
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_darwin.go
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_windows.go
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open.go
- delete mode 100644 Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/skynetservices/skydns/msg/service.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/cobra.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/command.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/bool.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/duration.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/export_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/flag.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/float32.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/float64.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/int.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/int32.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/int64.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/int8.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/ip.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/string.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/uint.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/uint16.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/uint32.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/uint64.go
- delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/uint8.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/.gitignore
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/constants.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/map_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/security_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/objx/value_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go
- delete mode 100644 Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
- delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/context/context.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/context/context_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/atom/atom.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/atom/atom_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/atom/gen.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/atom/table.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/atom/table_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/charset.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/charset_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/gen.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/table.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-charset.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/README
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/const.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/doc.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/doctype.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/entity.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/entity_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/escape.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/escape_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/example_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/foreign.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/node.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/node_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/parse.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/parse_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/render.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/render_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/go1.html
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/README
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption02.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/comments01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/doctype01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities02.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/html5test-com.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/inbody01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/isindex.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tables01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests1.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests10.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests11.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests12.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests14.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests15.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests16.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests17.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests18.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests19.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests2.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests20.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests21.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests22.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests23.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests24.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests25.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests26.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests3.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests4.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests5.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests6.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests7.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests8.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests9.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tricky01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit01.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit02.dat
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/token.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/html/token_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/client.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/exampledial_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/examplehandler_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/hybi.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/hybi_test.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/server.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/websocket.go
- delete mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/websocket_test.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE.libyaml
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/README.md
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/apic.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/decode.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/decode_test.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/emitterc.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/encode.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/encode_test.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/parserc.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/readerc.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/resolve.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/scannerc.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/sorter.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/suite_test.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/writerc.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/yaml.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/yamlh.go
- delete mode 100644 Godeps/_workspace/src/gopkg.in/v2/yaml/yamlprivateh.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/LICENSE
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/benchmark_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_go1_2_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_internal_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/example_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_example_test.go
- delete mode 100644 Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_test.go
-
-diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
-deleted file mode 100644
-index 48b7bba..0000000
---- a/Godeps/Godeps.json
-+++ /dev/null
-@@ -1,217 +0,0 @@
--{
--	"ImportPath": "github.com/GoogleCloudPlatform/kubernetes",
--	"GoVersion": "go1.3",
--	"Packages": [
--		"./..."
--	],
--	"Deps": [
--		{
--			"ImportPath": "code.google.com/p/gcfg",
--			"Rev": "c2d3050044d05357eaf6c3547249ba57c5e235cb"
--		},
--		{
--			"ImportPath": "code.google.com/p/go-uuid/uuid",
--			"Comment": "null-12",
--			"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
--		},
--		{
--			"ImportPath": "code.google.com/p/goauth2/compute/serviceaccount",
--			"Comment": "weekly-50",
--			"Rev": "7fc9d958c83464bd7650240569bf93a102266e6a"
--		},
--		{
--			"ImportPath": "code.google.com/p/goauth2/oauth",
--			"Comment": "weekly-50",
--			"Rev": "7fc9d958c83464bd7650240569bf93a102266e6a"
--		},
--		{
--			"ImportPath": "code.google.com/p/google-api-go-client/compute/v1",
--			"Comment": "release-96",
--			"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
--		},
--		{
--			"ImportPath": "code.google.com/p/google-api-go-client/container/v1beta1",
--			"Comment": "release-105",
--			"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
--		},
--		{
--			"ImportPath": "code.google.com/p/google-api-go-client/googleapi",
--			"Comment": "release-96",
--			"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
--		},
--		{
--			"ImportPath": "github.com/Sirupsen/logrus",
--			"Comment": "v0.6.2-10-g51fe59a",
--			"Rev": "51fe59aca108dc5680109e7b2051cbdcfa5a253c"
--		},
--		{
--			"ImportPath": "github.com/coreos/go-etcd/etcd",
--			"Comment": "v0.2.0-rc1-120-g23142f6",
--			"Rev": "23142f6773a676cc2cae8dd0cb90b2ea761c853f"
--		},
--		{
--			"ImportPath": "github.com/davecgh/go-spew/spew",
--			"Rev": "83f84dc933714d51504ceed59f43ead21d096fe7"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/archive",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/fileutils",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/ioutils",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/pools",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/promise",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/system",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/pkg/units",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
--			"Comment": "v1.4.1-108-g364720b",
--			"Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
--		},
--		{
--			"ImportPath": "github.com/elazarl/go-bindata-assetfs",
--			"Rev": "ae4665cf2d188c65764c73fe4af5378acc549510"
--		},
--		{
--			"ImportPath": "github.com/emicklei/go-restful",
--			"Comment": "v1.1.2-50-g692a500",
--			"Rev": "692a50017a7049b26cf7ea4ccfc0d8c77369a793"
--		},
--		{
--			"ImportPath": "github.com/fsouza/go-dockerclient",
--			"Comment": "0.2.1-334-g9c377ff",
--			"Rev": "9c377ffd9aed48a012adf1c3fd517fe98394120b"
--		},
--		{
--			"ImportPath": "github.com/ghodss/yaml",
--			"Rev": "4fb5c728a37b361a1e971a3bb3d785fcc96b6ef5"
--		},
--		{
--			"ImportPath": "github.com/golang/glog",
--			"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
--		},
--		{
--			"ImportPath": "github.com/google/cadvisor/client",
--			"Comment": "0.6.2",
--			"Rev": "89088df70eca64cf9d6b9a23a3d2bc21a30916d6"
--		},
--		{
--			"ImportPath": "github.com/google/cadvisor/info",
--			"Comment": "0.6.2",
--			"Rev": "89088df70eca64cf9d6b9a23a3d2bc21a30916d6"
--		},
--		{
--			"ImportPath": "github.com/google/gofuzz",
--			"Rev": "aef70dacbc78771e35beb261bb3a72986adf7906"
--		},
--		{
--			"ImportPath": "github.com/imdario/mergo",
--			"Comment": "0.1.3-8-g6633656",
--			"Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc"
--		},
--		{
--			"ImportPath": "github.com/miekg/dns",
--			"Rev": "3f504e8dabd5d562e997d19ce0200aa41973e1b2"
--		},
--		{
--			"ImportPath": "github.com/mitchellh/goamz/aws",
--			"Rev": "9cad7da945e699385c1a3e115aa255211921c9bb"
--		},
--		{
--			"ImportPath": "github.com/mitchellh/goamz/ec2",
--			"Rev": "9cad7da945e699385c1a3e115aa255211921c9bb"
--		},
--		{
--			"ImportPath": "github.com/mitchellh/mapstructure",
--			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
--		},
--		{
--			"ImportPath": "github.com/racker/perigee",
--			"Comment": "v0.0.0-18-g0c00cb0",
--			"Rev": "0c00cb0a026b71034ebc8205263c77dad3577db5"
--		},
--		{
--			"ImportPath": "github.com/rackspace/gophercloud",
--			"Comment": "v1.0.0",
--			"Rev": "da56de6a59e53fdd61be1b5d9b87df34c47ac420"
--		},
--		{
--			"ImportPath": "github.com/skratchdot/open-golang/open",
--			"Rev": "ba570a111973b539baf23c918213059543b5bb6e"
--		},
--		{
--			"ImportPath": "github.com/skynetservices/skydns/msg",
--			"Comment": "2.0.1d-2-g245a121",
--			"Rev": "245a1216be2a7f5377ea56e957fdfa0de6ecd067"
--		},
--		{
--			"ImportPath": "github.com/spf13/cobra",
--			"Rev": "e1e66f7b4e667751cf530ddb6e72b79d6eeb0235"
--		},
--		{
--			"ImportPath": "github.com/spf13/pflag",
--			"Rev": "463bdc838f2b35e9307e91d480878bda5fff7232"
--		},
--		{
--			"ImportPath": "github.com/stretchr/objx",
--			"Rev": "d40df0cc104c06eae2dfe03d7dddb83802d52f9a"
--		},
--		{
--			"ImportPath": "github.com/stretchr/testify/assert",
--			"Rev": "37614ac27794505bf7867ca93aac883cadb6a5f7"
--		},
--		{
--			"ImportPath": "github.com/stretchr/testify/mock",
--			"Rev": "37614ac27794505bf7867ca93aac883cadb6a5f7"
--		},
--		{
--			"ImportPath": "github.com/vaughan0/go-ini",
--			"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
--		},
--		{
--			"ImportPath": "golang.org/x/net/context",
--			"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
--		},
--		{
--			"ImportPath": "golang.org/x/net/html",
--			"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
--		},
--		{
--			"ImportPath": "golang.org/x/net/websocket",
--			"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
--		},
--		{
--			"ImportPath": "gopkg.in/v2/yaml",
--			"Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"
--		},
--		{
--			"ImportPath": "speter.net/go/exp/math/dec/inf",
--			"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"
--		}
--	]
--}
-diff --git a/Godeps/Readme b/Godeps/Readme
-deleted file mode 100644
-index 4cdaa53..0000000
---- a/Godeps/Readme
-+++ /dev/null
-@@ -1,5 +0,0 @@
--This directory tree is generated automatically by godep.
--
--Please do not edit.
--
--See https://github.com/tools/godep for more information.
-diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
-deleted file mode 100644
-index f037d68..0000000
---- a/Godeps/_workspace/.gitignore
-+++ /dev/null
-@@ -1,2 +0,0 @@
--/pkg
--/bin
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/LICENSE b/Godeps/_workspace/src/code.google.com/p/gcfg/LICENSE
-deleted file mode 100644
-index b0a9e76..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/LICENSE
-+++ /dev/null
-@@ -1,57 +0,0 @@
--Copyright (c) 2012 Péter Surányi. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
------------------------------------------------------------------------
--Portions of gcfg's source code have been derived from Go, and are
--covered by the following license:
------------------------------------------------------------------------
--
--Copyright (c) 2009 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/README b/Godeps/_workspace/src/code.google.com/p/gcfg/README
-deleted file mode 100644
-index 8f621c3..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/README
-+++ /dev/null
-@@ -1,7 +0,0 @@
--Gcfg reads INI-style configuration files into Go structs;
--supports user-defined types and subsections.
--
--Project page: https://code.google.com/p/gcfg
--Package docs: http://godoc.org/code.google.com/p/gcfg
--
--My other projects: https://speter.net
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/doc.go b/Godeps/_workspace/src/code.google.com/p/gcfg/doc.go
-deleted file mode 100644
-index 99687b4..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/doc.go
-+++ /dev/null
-@@ -1,118 +0,0 @@
--// Package gcfg reads "INI-style" text-based configuration files with
--// "name=value" pairs grouped into sections (gcfg files).
--//
--// This package is still a work in progress; see the sections below for planned
--// changes.
--//
--// Syntax
--//
--// The syntax is based on that used by git config:
--// http://git-scm.com/docs/git-config#_syntax .
--// There are some (planned) differences compared to the git config format:
--//  - improve data portability:
--//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
--//    - include and "path" type is not supported
--//      (path type may be implementable as a user-defined type)
--//  - internationalization
--//    - section and variable names can contain unicode letters, unicode digits
--//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
--//      (U+002D), starting with a unicode letter
--//  - disallow potentially ambiguous or misleading definitions:
--//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
--//    - `[sec ""]` is not allowed
--//      - use `[sec]` for section name "sec" and empty subsection name
--//    - (planned) within a single file, definitions must be contiguous for each:
--//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
--//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
--//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
--//
--// Data structure
--//
--// The functions in this package read values into a user-defined struct.
--// Each section corresponds to a struct field in the config struct, and each
--// variable in a section corresponds to a data field in the section struct.
--// The mapping of each section or variable name to fields is done either based
--// on the "gcfg" struct tag or by matching the name of the section or variable,
--// ignoring case. In the latter case, hyphens '-' in section and variable names
--// correspond to underscores '_' in field names.
--// Fields must be exported; to use a section or variable name starting with a
--// letter that is neither upper- or lower-case, prefix the field name with 'X'.
--// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
--//
--// For sections with subsections, the corresponding field in config must be a
--// map, rather than a struct, with string keys and pointer-to-struct values.
--// Values for subsection variables are stored in the map with the subsection
--// name used as the map key.
--// (Note that unlike section and variable names, subsection names are case
--// sensitive.)
--// When using a map, and there is a section with the same section name but
--// without a subsection name, its values are stored with the empty string used
--// as the key.
--//
--// The functions in this package panic if config is not a pointer to a struct,
--// or when a field is not of a suitable type (either a struct or a map with
--// string keys and pointer-to-struct values).
--//
--// Parsing of values
--//
--// The section structs in the config struct may contain single-valued or
--// multi-valued variables. Variables of unnamed slice type (that is, a type
--// starting with `[]`) are treated as multi-value; all others (including named
--// slice types) are treated as single-valued variables.
--//
--// Single-valued variables are handled based on the type as follows.
--// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
--// and if necessary, a new instance is allocated.
--//
--// For types implementing the encoding.TextUnmarshaler interface, the
--// UnmarshalText method is used to set the value. Implementing this method is
--// the recommended way for parsing user-defined types.
--//
--// For fields of string kind, the value string is assigned to the field, after
--// unquoting and unescaping as needed.
--// For fields of bool kind, the field is set to true if the value is "true",
--// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
--// "0", ignoring case. In addition, single-valued bool fields can be specified
--// with a "blank" value (variable name without equals sign and value); in such
--// case the value is set to true.
--//
--// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
--// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
--// unintuitively handling zero-padded numbers as octal.) Other types having
--// [u]int* as the underlying type, such as os.FileMode and uintptr allow
--// decimal, hexadecimal, or octal values.
--// Parsing mode for integer types can be overridden using the struct tag option
--// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
--// (each standing for decimal, hexadecimal, and octal, respectively.)
--//
--// All other types are parsed using fmt.Sscanf with the "%v" verb.
--//
--// For multi-valued variables, each individual value is parsed as above and
--// appended to the slice. If the first value is specified as a "blank" value
--// (variable name without equals sign and value), a new slice is allocated;
--// that is any values previously set in the slice will be ignored.
--//
--// The types subpackage for provides helpers for parsing "enum-like" and integer
--// types.
--//
--// TODO
--//
--// The following is a list of changes under consideration:
--//  - documentation
--//    - self-contained syntax documentation
--//    - more practical examples
--//    - move TODOs to issue tracker (eventually)
--//  - syntax
--//    - reconsider valid escape sequences
--//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
--//  - reading / parsing gcfg files
--//    - define internal representation structure
--//    - support multiple inputs (readers, strings, files)
--//    - support declaring encoding (?)
--//    - support varying fields sets for subsections (?)
--//  - writing gcfg files
--//  - error handling
--//    - make error context accessible programmatically?
--//    - limit input size?
--//
--package gcfg
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/example_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/example_test.go
-deleted file mode 100644
-index 884f3fb..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/example_test.go
-+++ /dev/null
-@@ -1,132 +0,0 @@
--package gcfg_test
--
--import (
--	"fmt"
--	"log"
--)
--
--import "code.google.com/p/gcfg"
--
--func ExampleReadStringInto() {
--	cfgStr := `; Comment line
--[section]
--name=value # comment`
--	cfg := struct {
--		Section struct {
--			Name string
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.Section.Name)
--	// Output: value
--}
--
--func ExampleReadStringInto_bool() {
--	cfgStr := `; Comment line
--[section]
--switch=on`
--	cfg := struct {
--		Section struct {
--			Switch bool
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.Section.Switch)
--	// Output: true
--}
--
--func ExampleReadStringInto_hyphens() {
--	cfgStr := `; Comment line
--[section-name]
--variable-name=value # comment`
--	cfg := struct {
--		Section_Name struct {
--			Variable_Name string
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.Section_Name.Variable_Name)
--	// Output: value
--}
--
--func ExampleReadStringInto_tags() {
--	cfgStr := `; Comment line
--[section]
--var-name=value # comment`
--	cfg := struct {
--		Section struct {
--			FieldName string `gcfg:"var-name"`
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.Section.FieldName)
--	// Output: value
--}
--
--func ExampleReadStringInto_subsections() {
--	cfgStr := `; Comment line
--[profile "A"]
--color = white
--
--[profile "B"]
--color = black
--`
--	cfg := struct {
--		Profile map[string]*struct {
--			Color string
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Printf("%s %s\n", cfg.Profile["A"].Color, cfg.Profile["B"].Color)
--	// Output: white black
--}
--
--func ExampleReadStringInto_multivalue() {
--	cfgStr := `; Comment line
--[section]
--multi=value1
--multi=value2`
--	cfg := struct {
--		Section struct {
--			Multi []string
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.Section.Multi)
--	// Output: [value1 value2]
--}
--
--func ExampleReadStringInto_unicode() {
--	cfgStr := `; Comment line
--[甲]
--乙=丙 # comment`
--	cfg := struct {
--		X甲 struct {
--			X乙 string
--		}
--	}{}
--	err := gcfg.ReadStringInto(&cfg, cfgStr)
--	if err != nil {
--		log.Fatalf("Failed to parse gcfg data: %s", err)
--	}
--	fmt.Println(cfg.X甲.X乙)
--	// Output: 丙
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/go1_0.go b/Godeps/_workspace/src/code.google.com/p/gcfg/go1_0.go
-deleted file mode 100644
-index 6670210..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/go1_0.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--// +build !go1.2
--
--package gcfg
--
--type textUnmarshaler interface {
--	UnmarshalText(text []byte) error
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/go1_2.go b/Godeps/_workspace/src/code.google.com/p/gcfg/go1_2.go
-deleted file mode 100644
-index 6f5843b..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/go1_2.go
-+++ /dev/null
-@@ -1,9 +0,0 @@
--// +build go1.2
--
--package gcfg
--
--import (
--	"encoding"
--)
--
--type textUnmarshaler encoding.TextUnmarshaler
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/issues_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/issues_test.go
-deleted file mode 100644
-index 796dd10..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/issues_test.go
-+++ /dev/null
-@@ -1,63 +0,0 @@
--package gcfg
--
--import (
--	"fmt"
--	"math/big"
--	"strings"
--	"testing"
--)
--
--type Config1 struct {
--	Section struct {
--		Int    int
--		BigInt big.Int
--	}
--}
--
--var testsIssue1 = []struct {
--	cfg      string
--	typename string
--}{
--	{"[section]\nint=X", "int"},
--	{"[section]\nint=", "int"},
--	{"[section]\nint=1A", "int"},
--	{"[section]\nbigint=X", "big.Int"},
--	{"[section]\nbigint=", "big.Int"},
--	{"[section]\nbigint=1A", "big.Int"},
--}
--
--// Value parse error should:
--//  - include plain type name
--//  - not include reflect internals
--func TestIssue1(t *testing.T) {
--	for i, tt := range testsIssue1 {
--		var c Config1
--		err := ReadStringInto(&c, tt.cfg)
--		switch {
--		case err == nil:
--			t.Errorf("%d fail: got ok; wanted error", i)
--		case !strings.Contains(err.Error(), tt.typename):
--			t.Errorf("%d fail: error message doesn't contain type name %q: %v",
--				i, tt.typename, err)
--		case strings.Contains(err.Error(), "reflect"):
--			t.Errorf("%d fail: error message includes reflect internals: %v",
--				i, err)
--		default:
--			t.Logf("%d pass: %v", i, err)
--		}
--	}
--}
--
--type confIssue2 struct{ Main struct{ Foo string } }
--
--var testsIssue2 = []readtest{
--	{"[main]\n;\nfoo = bar\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
--	{"[main]\r\n;\r\nfoo = bar\r\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
--}
--
--func TestIssue2(t *testing.T) {
--	for i, tt := range testsIssue2 {
--		id := fmt.Sprintf("issue2:%d", i)
--		testRead(t, id, tt)
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/read.go b/Godeps/_workspace/src/code.google.com/p/gcfg/read.go
-deleted file mode 100644
-index 4719c2b..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/read.go
-+++ /dev/null
-@@ -1,181 +0,0 @@
--package gcfg
--
--import (
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"strings"
--)
--
--import (
--	"code.google.com/p/gcfg/scanner"
--	"code.google.com/p/gcfg/token"
--)
--
--var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
--
--// no error: invalid literals should be caught by scanner
--func unquote(s string) string {
--	u, q, esc := make([]rune, 0, len(s)), false, false
--	for _, c := range s {
--		if esc {
--			uc, ok := unescape[c]
--			switch {
--			case ok:
--				u = append(u, uc)
--				fallthrough
--			case !q && c == '\n':
--				esc = false
--				continue
--			}
--			panic("invalid escape sequence")
--		}
--		switch c {
--		case '"':
--			q = !q
--		case '\\':
--			esc = true
--		default:
--			u = append(u, c)
--		}
--	}
--	if q {
--		panic("missing end quote")
--	}
--	if esc {
--		panic("invalid escape sequence")
--	}
--	return string(u)
--}
--
--func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error {
--	var s scanner.Scanner
--	var errs scanner.ErrorList
--	s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
--	sect, sectsub := "", ""
--	pos, tok, lit := s.Scan()
--	errfn := func(msg string) error {
--		return fmt.Errorf("%s: %s", fset.Position(pos), msg)
--	}
--	for {
--		if errs.Len() > 0 {
--			return errs.Err()
--		}
--		switch tok {
--		case token.EOF:
--			return nil
--		case token.EOL, token.COMMENT:
--			pos, tok, lit = s.Scan()
--		case token.LBRACK:
--			pos, tok, lit = s.Scan()
--			if errs.Len() > 0 {
--				return errs.Err()
--			}
--			if tok != token.IDENT {
--				return errfn("expected section name")
--			}
--			sect, sectsub = lit, ""
--			pos, tok, lit = s.Scan()
--			if errs.Len() > 0 {
--				return errs.Err()
--			}
--			if tok == token.STRING {
--				sectsub = unquote(lit)
--				if sectsub == "" {
--					return errfn("empty subsection name")
--				}
--				pos, tok, lit = s.Scan()
--				if errs.Len() > 0 {
--					return errs.Err()
--				}
--			}
--			if tok != token.RBRACK {
--				if sectsub == "" {
--					return errfn("expected subsection name or right bracket")
--				}
--				return errfn("expected right bracket")
--			}
--			pos, tok, lit = s.Scan()
--			if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
--				return errfn("expected EOL, EOF, or comment")
--			}
--		case token.IDENT:
--			if sect == "" {
--				return errfn("expected section header")
--			}
--			n := lit
--			pos, tok, lit = s.Scan()
--			if errs.Len() > 0 {
--				return errs.Err()
--			}
--			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
--			if !blank {
--				if tok != token.ASSIGN {
--					return errfn("expected '='")
--				}
--				pos, tok, lit = s.Scan()
--				if errs.Len() > 0 {
--					return errs.Err()
--				}
--				if tok != token.STRING {
--					return errfn("expected value")
--				}
--				v = unquote(lit)
--				pos, tok, lit = s.Scan()
--				if errs.Len() > 0 {
--					return errs.Err()
--				}
--				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
--					return errfn("expected EOL, EOF, or comment")
--				}
--			}
--			err := set(config, sect, sectsub, n, blank, v)
--			if err != nil {
--				return err
--			}
--		default:
--			if sect == "" {
--				return errfn("expected section header")
--			}
--			return errfn("expected section header or variable declaration")
--		}
--	}
--	panic("never reached")
--}
--
--// ReadInto reads gcfg formatted data from reader and sets the values into the
--// corresponding fields in config.
--func ReadInto(config interface{}, reader io.Reader) error {
--	src, err := ioutil.ReadAll(reader)
--	if err != nil {
--		return err
--	}
--	fset := token.NewFileSet()
--	file := fset.AddFile("", fset.Base(), len(src))
--	return readInto(config, fset, file, src)
--}
--
--// ReadStringInto reads gcfg formatted data from str and sets the values into
--// the corresponding fields in config.
--func ReadStringInto(config interface{}, str string) error {
--	r := strings.NewReader(str)
--	return ReadInto(config, r)
--}
--
--// ReadFileInto reads gcfg formatted data from the file filename and sets the
--// values into the corresponding fields in config.
--func ReadFileInto(config interface{}, filename string) error {
--	f, err := os.Open(filename)
--	if err != nil {
--		return err
--	}
--	defer f.Close()
--	src, err := ioutil.ReadAll(f)
--	if err != nil {
--		return err
--	}
--	fset := token.NewFileSet()
--	file := fset.AddFile(filename, fset.Base(), len(src))
--	return readInto(config, fset, file, src)
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/read_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/read_test.go
-deleted file mode 100644
-index 4a7d8e1..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/read_test.go
-+++ /dev/null
-@@ -1,333 +0,0 @@
--package gcfg
--
--import (
--	"fmt"
--	"math/big"
--	"os"
--	"reflect"
--	"testing"
--)
--
--const (
--	// 64 spaces
--	sp64 = "                                                                "
--	// 512 spaces
--	sp512 = sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64
--	// 4096 spaces
--	sp4096 = sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512
--)
--
--type cBasic struct {
--	Section           cBasicS1
--	Hyphen_In_Section cBasicS2
--	unexported        cBasicS1
--	Exported          cBasicS3
--	TagName           cBasicS1 `gcfg:"tag-name"`
--}
--type cBasicS1 struct {
--	Name  string
--	Int   int
--	PName *string
--}
--type cBasicS2 struct {
--	Hyphen_In_Name string
--}
--type cBasicS3 struct {
--	unexported string
--}
--
--type nonMulti []string
--
--type unmarshalable string
--
--func (u *unmarshalable) UnmarshalText(text []byte) error {
--	s := string(text)
--	if s == "error" {
--		return fmt.Errorf("%s", s)
--	}
--	*u = unmarshalable(s)
--	return nil
--}
--
--var _ textUnmarshaler = new(unmarshalable)
--
--type cUni struct {
--	X甲       cUniS1
--	XSection cUniS2
--}
--type cUniS1 struct {
--	X乙 string
--}
--type cUniS2 struct {
--	XName string
--}
--
--type cMulti struct {
--	M1 cMultiS1
--	M2 cMultiS2
--	M3 cMultiS3
--}
--type cMultiS1 struct{ Multi []string }
--type cMultiS2 struct{ NonMulti nonMulti }
--type cMultiS3 struct{ MultiInt []int }
--
--type cSubs struct{ Sub map[string]*cSubsS1 }
--type cSubsS1 struct{ Name string }
--
--type cBool struct{ Section cBoolS1 }
--type cBoolS1 struct{ Bool bool }
--
--type cTxUnm struct{ Section cTxUnmS1 }
--type cTxUnmS1 struct{ Name unmarshalable }
--
--type cNum struct {
--	N1 cNumS1
--	N2 cNumS2
--	N3 cNumS3
--}
--type cNumS1 struct {
--	Int    int
--	IntDHO int `gcfg:",int=dho"`
--	Big    *big.Int
--}
--type cNumS2 struct {
--	MultiInt []int
--	MultiBig []*big.Int
--}
--type cNumS3 struct{ FileMode os.FileMode }
--type readtest struct {
--	gcfg string
--	exp  interface{}
--	ok   bool
--}
--
--func newString(s string) *string {
--	return &s
--}
--
--var readtests = []struct {
--	group string
--	tests []readtest
--}{{"scanning", []readtest{
--	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	// hyphen in name
--	{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
--	// quoted string value
--	{"[section]\nname=\"\"", &cBasic{Section: cBasicS1{Name: ""}}, true},
--	{"[section]\nname=\" \"", &cBasic{Section: cBasicS1{Name: " "}}, true},
--	{"[section]\nname=\"value\"", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname=\" value \"", &cBasic{Section: cBasicS1{Name: " value "}}, true},
--	{"\n[section]\nname=\"va ; lue\"", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
--	{"[section]\nname=\"val\" \"ue\"", &cBasic{Section: cBasicS1{Name: "val ue"}}, true},
--	{"[section]\nname=\"value", &cBasic{}, false},
--	// escape sequences
--	{"[section]\nname=\"va\\\\lue\"", &cBasic{Section: cBasicS1{Name: "va\\lue"}}, true},
--	{"[section]\nname=\"va\\\"lue\"", &cBasic{Section: cBasicS1{Name: "va\"lue"}}, true},
--	{"[section]\nname=\"va\\nlue\"", &cBasic{Section: cBasicS1{Name: "va\nlue"}}, true},
--	{"[section]\nname=\"va\\tlue\"", &cBasic{Section: cBasicS1{Name: "va\tlue"}}, true},
--	{"\n[section]\nname=\\", &cBasic{}, false},
--	{"\n[section]\nname=\\a", &cBasic{}, false},
--	{"\n[section]\nname=\"val\\a\"", &cBasic{}, false},
--	{"\n[section]\nname=val\\", &cBasic{}, false},
--	{"\n[sub \"A\\\n\"]\nname=value", &cSubs{}, false},
--	{"\n[sub \"A\\\t\"]\nname=value", &cSubs{}, false},
--	// broken line
--	{"[section]\nname=value \\\n value", &cBasic{Section: cBasicS1{Name: "value  value"}}, true},
--	{"[section]\nname=\"value \\\n value\"", &cBasic{}, false},
--}}, {"scanning:whitespace", []readtest{
--	{" \n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{" [section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\t[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[ section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section ]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\n name=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname =value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname= value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname=value ", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\r\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{";cmnt\r\n[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	// long lines
--	{sp4096 + "[section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[" + sp4096 + "section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section" + sp4096 + "]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]" + sp4096 + "\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\n" + sp4096 + "name=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname" + sp4096 + "=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname=" + sp4096 + "value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname=value\n" + sp4096, &cBasic{Section: cBasicS1{Name: "value"}}, true},
--}}, {"scanning:comments", []readtest{
--	{"; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"# cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{" ; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\t; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section] ; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]\nname=value; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]\nname=value ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]\nname=\"value\" ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]\nname=value ; \"cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"\n[section]\nname=\"va ; lue\" ; cmnt", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
--	{"\n[section]\nname=; cmnt", &cBasic{Section: cBasicS1{Name: ""}}, true},
--}}, {"scanning:subsections", []readtest{
--	{"\n[sub \"A\"]\nname=value", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{"value"}}}, true},
--	{"\n[sub \"b\"]\nname=value", &cSubs{map[string]*cSubsS1{"b": &cSubsS1{"value"}}}, true},
--	{"\n[sub \"A\\\\\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\\": &cSubsS1{"value"}}}, true},
--	{"\n[sub \"A\\\"\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\"": &cSubsS1{"value"}}}, true},
--}}, {"syntax", []readtest{
--	// invalid line
--	{"\n[section]\n=", &cBasic{}, false},
--	// no section
--	{"name=value", &cBasic{}, false},
--	// empty section
--	{"\n[]\nname=value", &cBasic{}, false},
--	// empty subsection
--	{"\n[sub \"\"]\nname=value", &cSubs{}, false},
--}}, {"setting", []readtest{
--	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	// pointer
--	{"[section]", &cBasic{Section: cBasicS1{PName: nil}}, true},
--	{"[section]\npname=value", &cBasic{Section: cBasicS1{PName: newString("value")}}, true},
--	// section name not matched
--	{"\n[nonexistent]\nname=value", &cBasic{}, false},
--	// subsection name not matched
--	{"\n[section \"nonexistent\"]\nname=value", &cBasic{}, false},
--	// variable name not matched
--	{"\n[section]\nnonexistent=value", &cBasic{}, false},
--	// hyphen in name
--	{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
--	// ignore unexported fields
--	{"[unexported]\nname=value", &cBasic{}, false},
--	{"[exported]\nunexported=value", &cBasic{}, false},
--	// 'X' prefix for non-upper/lower-case letters
--	{"[甲]\n乙=丙", &cUni{X甲: cUniS1{X乙: "丙"}}, true},
--	//{"[section]\nxname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
--	//{"[xsection]\nname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
--	// name specified as struct tag
--	{"[tag-name]\nname=value", &cBasic{TagName: cBasicS1{Name: "value"}}, true},
--}}, {"multivalue", []readtest{
--	// unnamed slice type: treat as multi-value
--	{"\n[m1]", &cMulti{M1: cMultiS1{}}, true},
--	{"\n[m1]\nmulti=value", &cMulti{M1: cMultiS1{[]string{"value"}}}, true},
--	{"\n[m1]\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
--	// "blank" empties multi-valued slice -- here same result as above
--	{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
--	// named slice type: do not treat as multi-value
--	{"\n[m2]", &cMulti{}, true},
--	{"\n[m2]\nmulti=value", &cMulti{}, false},
--	{"\n[m2]\nmulti=value1\nmulti=value2", &cMulti{}, false},
--}}, {"type:string", []readtest{
--	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
--	{"[section]\nname=", &cBasic{Section: cBasicS1{Name: ""}}, true},
--}}, {"type:bool", []readtest{
--	// explicit values
--	{"[section]\nbool=true", &cBool{cBoolS1{true}}, true},
--	{"[section]\nbool=yes", &cBool{cBoolS1{true}}, true},
--	{"[section]\nbool=on", &cBool{cBoolS1{true}}, true},
--	{"[section]\nbool=1", &cBool{cBoolS1{true}}, true},
--	{"[section]\nbool=tRuE", &cBool{cBoolS1{true}}, true},
--	{"[section]\nbool=false", &cBool{cBoolS1{false}}, true},
--	{"[section]\nbool=no", &cBool{cBoolS1{false}}, true},
--	{"[section]\nbool=off", &cBool{cBoolS1{false}}, true},
--	{"[section]\nbool=0", &cBool{cBoolS1{false}}, true},
--	{"[section]\nbool=NO", &cBool{cBoolS1{false}}, true},
--	// "blank" value handled as true
--	{"[section]\nbool", &cBool{cBoolS1{true}}, true},
--	// bool parse errors
--	{"[section]\nbool=maybe", &cBool{}, false},
--	{"[section]\nbool=t", &cBool{}, false},
--	{"[section]\nbool=truer", &cBool{}, false},
--	{"[section]\nbool=2", &cBool{}, false},
--	{"[section]\nbool=-1", &cBool{}, false},
--}}, {"type:numeric", []readtest{
--	{"[section]\nint=0", &cBasic{Section: cBasicS1{Int: 0}}, true},
--	{"[section]\nint=1", &cBasic{Section: cBasicS1{Int: 1}}, true},
--	{"[section]\nint=-1", &cBasic{Section: cBasicS1{Int: -1}}, true},
--	{"[section]\nint=0.2", &cBasic{}, false},
--	{"[section]\nint=1e3", &cBasic{}, false},
--	// primitive [u]int(|8|16|32|64) and big.Int is parsed as dec or hex (not octal)
--	{"[n1]\nint=010", &cNum{N1: cNumS1{Int: 10}}, true},
--	{"[n1]\nint=0x10", &cNum{N1: cNumS1{Int: 0x10}}, true},
--	{"[n1]\nbig=1", &cNum{N1: cNumS1{Big: big.NewInt(1)}}, true},
--	{"[n1]\nbig=0x10", &cNum{N1: cNumS1{Big: big.NewInt(0x10)}}, true},
--	{"[n1]\nbig=010", &cNum{N1: cNumS1{Big: big.NewInt(10)}}, true},
--	{"[n2]\nmultiint=010", &cNum{N2: cNumS2{MultiInt: []int{10}}}, true},
--	{"[n2]\nmultibig=010", &cNum{N2: cNumS2{MultiBig: []*big.Int{big.NewInt(10)}}}, true},
--	// set parse mode for int types via struct tag
--	{"[n1]\nintdho=010", &cNum{N1: cNumS1{IntDHO: 010}}, true},
--	// octal allowed for named type
--	{"[n3]\nfilemode=0777", &cNum{N3: cNumS3{FileMode: 0777}}, true},
--}}, {"type:textUnmarshaler", []readtest{
--	{"[section]\nname=value", &cTxUnm{Section: cTxUnmS1{Name: "value"}}, true},
--	{"[section]\nname=error", &cTxUnm{}, false},
--}},
--}
--
--func TestReadStringInto(t *testing.T) {
--	for _, tg := range readtests {
--		for i, tt := range tg.tests {
--			id := fmt.Sprintf("%s:%d", tg.group, i)
--			testRead(t, id, tt)
--		}
--	}
--}
--
--func TestReadStringIntoMultiBlankPreset(t *testing.T) {
--	tt := readtest{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true}
--	cfg := &cMulti{M1: cMultiS1{[]string{"preset1", "preset2"}}}
--	testReadInto(t, "multi:blank", tt, cfg)
--}
--
--func testRead(t *testing.T, id string, tt readtest) {
--	// get the type of the expected result
--	restyp := reflect.TypeOf(tt.exp).Elem()
--	// create a new instance to hold the actual result
--	res := reflect.New(restyp).Interface()
--	testReadInto(t, id, tt, res)
--}
--
--func testReadInto(t *testing.T, id string, tt readtest, res interface{}) {
--	err := ReadStringInto(res, tt.gcfg)
--	if tt.ok {
--		if err != nil {
--			t.Errorf("%s fail: got error %v, wanted ok", id, err)
--			return
--		} else if !reflect.DeepEqual(res, tt.exp) {
--			t.Errorf("%s fail: got value %#v, wanted value %#v", id, res, tt.exp)
--			return
--		}
--		if !testing.Short() {
--			t.Logf("%s pass: got value %#v", id, res)
--		}
--	} else { // !tt.ok
--		if err == nil {
--			t.Errorf("%s fail: got value %#v, wanted error", id, res)
--			return
--		}
--		if !testing.Short() {
--			t.Logf("%s pass: got error %v", id, err)
--		}
--	}
--}
--
--func TestReadFileInto(t *testing.T) {
--	res := &struct{ Section struct{ Name string } }{}
--	err := ReadFileInto(res, "testdata/gcfg_test.gcfg")
--	if err != nil {
--		t.Errorf(err.Error())
--	}
--	if "value" != res.Section.Name {
--		t.Errorf("got %q, wanted %q", res.Section.Name, "value")
--	}
--}
--
--func TestReadFileIntoUnicode(t *testing.T) {
--	res := &struct{ X甲 struct{ X乙 string } }{}
--	err := ReadFileInto(res, "testdata/gcfg_unicode_test.gcfg")
--	if err != nil {
--		t.Errorf(err.Error())
--	}
--	if "丙" != res.X甲.X乙 {
--		t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙")
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/errors.go b/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/errors.go
-deleted file mode 100644
-index 4ff920a..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/errors.go
-+++ /dev/null
-@@ -1,121 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package scanner
--
--import (
--	"fmt"
--	"io"
--	"sort"
--)
--
--import (
--	"code.google.com/p/gcfg/token"
--)
--
--// In an ErrorList, an error is represented by an *Error.
--// The position Pos, if valid, points to the beginning of
--// the offending token, and the error condition is described
--// by Msg.
--//
--type Error struct {
--	Pos token.Position
--	Msg string
--}
--
--// Error implements the error interface.
--func (e Error) Error() string {
--	if e.Pos.Filename != "" || e.Pos.IsValid() {
--		// don't print "<unknown position>"
--		// TODO(gri) reconsider the semantics of Position.IsValid
--		return e.Pos.String() + ": " + e.Msg
--	}
--	return e.Msg
--}
--
--// ErrorList is a list of *Errors.
--// The zero value for an ErrorList is an empty ErrorList ready to use.
--//
--type ErrorList []*Error
--
--// Add adds an Error with given position and error message to an ErrorList.
--func (p *ErrorList) Add(pos token.Position, msg string) {
--	*p = append(*p, &Error{pos, msg})
--}
--
--// Reset resets an ErrorList to no errors.
--func (p *ErrorList) Reset() { *p = (*p)[0:0] }
--
--// ErrorList implements the sort Interface.
--func (p ErrorList) Len() int      { return len(p) }
--func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
--
--func (p ErrorList) Less(i, j int) bool {
--	e := &p[i].Pos
--	f := &p[j].Pos
--	if e.Filename < f.Filename {
--		return true
--	}
--	if e.Filename == f.Filename {
--		return e.Offset < f.Offset
--	}
--	return false
--}
--
--// Sort sorts an ErrorList. *Error entries are sorted by position,
--// other errors are sorted by error message, and before any *Error
--// entry.
--//
--func (p ErrorList) Sort() {
--	sort.Sort(p)
--}
--
--// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
--func (p *ErrorList) RemoveMultiples() {
--	sort.Sort(p)
--	var last token.Position // initial last.Line is != any legal error line
--	i := 0
--	for _, e := range *p {
--		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
--			last = e.Pos
--			(*p)[i] = e
--			i++
--		}
--	}
--	(*p) = (*p)[0:i]
--}
--
--// An ErrorList implements the error interface.
--func (p ErrorList) Error() string {
--	switch len(p) {
--	case 0:
--		return "no errors"
--	case 1:
--		return p[0].Error()
--	}
--	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
--}
--
--// Err returns an error equivalent to this error list.
--// If the list is empty, Err returns nil.
--func (p ErrorList) Err() error {
--	if len(p) == 0 {
--		return nil
--	}
--	return p
--}
--
--// PrintError is a utility function that prints a list of errors to w,
--// one error per line, if the err parameter is an ErrorList. Otherwise
--// it prints the err string.
--//
--func PrintError(w io.Writer, err error) {
--	if list, ok := err.(ErrorList); ok {
--		for _, e := range list {
--			fmt.Fprintf(w, "%s\n", e)
--		}
--	} else if err != nil {
--		fmt.Fprintf(w, "%s\n", err)
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/example_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/example_test.go
-deleted file mode 100644
-index 05eadf5..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/example_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package scanner_test
--
--import (
--	"fmt"
--)
--
--import (
--	"code.google.com/p/gcfg/scanner"
--	"code.google.com/p/gcfg/token"
--)
--
--func ExampleScanner_Scan() {
--	// src is the input that we want to tokenize.
--	src := []byte(`[profile "A"]
--color = blue ; Comment`)
--
--	// Initialize the scanner.
--	var s scanner.Scanner
--	fset := token.NewFileSet()                      // positions are relative to fset
--	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
--	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
--
--	// Repeated calls to Scan yield the token sequence found in the input.
--	for {
--		pos, tok, lit := s.Scan()
--		if tok == token.EOF {
--			break
--		}
--		fmt.Printf("%s\t%q\t%q\n", fset.Position(pos), tok, lit)
--	}
--
--	// output:
--	// 1:1	"["	""
--	// 1:2	"IDENT"	"profile"
--	// 1:10	"STRING"	"\"A\""
--	// 1:13	"]"	""
--	// 1:14	"\n"	""
--	// 2:1	"IDENT"	"color"
--	// 2:7	"="	""
--	// 2:9	"STRING"	"blue"
--	// 2:14	"COMMENT"	"; Comment"
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner.go b/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner.go
-deleted file mode 100644
-index f65a4f5..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner.go
-+++ /dev/null
-@@ -1,342 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package scanner implements a scanner for gcfg configuration text.
--// It takes a []byte as source which can then be tokenized
--// through repeated calls to the Scan method.
--//
--// Note that the API for the scanner package may change to accommodate new
--// features or implementation changes in gcfg.
--//
--package scanner
--
--import (
--	"fmt"
--	"path/filepath"
--	"unicode"
--	"unicode/utf8"
--)
--
--import (
--	"code.google.com/p/gcfg/token"
--)
--
--// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
--// encountered and a handler was installed, the handler is called with a
--// position and an error message. The position points to the beginning of
--// the offending token.
--//
--type ErrorHandler func(pos token.Position, msg string)
--
--// A Scanner holds the scanner's internal state while processing
--// a given text.  It can be allocated as part of another data
--// structure but must be initialized via Init before use.
--//
--type Scanner struct {
--	// immutable state
--	file *token.File  // source file handle
--	dir  string       // directory portion of file.Name()
--	src  []byte       // source
--	err  ErrorHandler // error reporting; or nil
--	mode Mode         // scanning mode
--
--	// scanning state
--	ch         rune // current character
--	offset     int  // character offset
--	rdOffset   int  // reading offset (position after current character)
--	lineOffset int  // current line offset
--	nextVal    bool // next token is expected to be a value
--
--	// public state - ok to modify
--	ErrorCount int // number of errors encountered
--}
--
--// Read the next Unicode char into s.ch.
--// s.ch < 0 means end-of-file.
--//
--func (s *Scanner) next() {
--	if s.rdOffset < len(s.src) {
--		s.offset = s.rdOffset
--		if s.ch == '\n' {
--			s.lineOffset = s.offset
--			s.file.AddLine(s.offset)
--		}
--		r, w := rune(s.src[s.rdOffset]), 1
--		switch {
--		case r == 0:
--			s.error(s.offset, "illegal character NUL")
--		case r >= 0x80:
--			// not ASCII
--			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
--			if r == utf8.RuneError && w == 1 {
--				s.error(s.offset, "illegal UTF-8 encoding")
--			}
--		}
--		s.rdOffset += w
--		s.ch = r
--	} else {
--		s.offset = len(s.src)
--		if s.ch == '\n' {
--			s.lineOffset = s.offset
--			s.file.AddLine(s.offset)
--		}
--		s.ch = -1 // eof
--	}
--}
--
--// A mode value is a set of flags (or 0).
--// They control scanner behavior.
--//
--type Mode uint
--
--const (
--	ScanComments Mode = 1 << iota // return comments as COMMENT tokens
--)
--
--// Init prepares the scanner s to tokenize the text src by setting the
--// scanner at the beginning of src. The scanner uses the file set file
--// for position information and it adds line information for each line.
--// It is ok to re-use the same file when re-scanning the same source, as
--// line information which is already present is ignored. Init causes a
--// panic if the file size does not match the src size.
--//
--// Calls to Scan will invoke the error handler err if they encounter a
--// syntax error and err is not nil. Also, for each error encountered,
--// the Scanner field ErrorCount is incremented by one. The mode parameter
--// determines how comments are handled.
--//
--// Note that Init may call err if there is an error in the first character
--// of the file.
--//
--func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
--	// Explicitly initialize all fields since a scanner may be reused.
--	if file.Size() != len(src) {
--		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
--	}
--	s.file = file
--	s.dir, _ = filepath.Split(file.Name())
--	s.src = src
--	s.err = err
--	s.mode = mode
--
--	s.ch = ' '
--	s.offset = 0
--	s.rdOffset = 0
--	s.lineOffset = 0
--	s.ErrorCount = 0
--	s.nextVal = false
--
--	s.next()
--}
--
--func (s *Scanner) error(offs int, msg string) {
--	if s.err != nil {
--		s.err(s.file.Position(s.file.Pos(offs)), msg)
--	}
--	s.ErrorCount++
--}
--
--func (s *Scanner) scanComment() string {
--	// initial [;#] already consumed
--	offs := s.offset - 1 // position of initial [;#]
--
--	for s.ch != '\n' && s.ch >= 0 {
--		s.next()
--	}
--	return string(s.src[offs:s.offset])
--}
--
--func isLetter(ch rune) bool {
--	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
--}
--
--func isDigit(ch rune) bool {
--	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
--}
--
--func (s *Scanner) scanIdentifier() string {
--	offs := s.offset
--	for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
--		s.next()
--	}
--	return string(s.src[offs:s.offset])
--}
--
--func (s *Scanner) scanEscape(val bool) {
--	offs := s.offset
--	ch := s.ch
--	s.next() // always make progress
--	switch ch {
--	case '\\', '"':
--		// ok
--	case 'n', 't':
--		if val {
--			break // ok
--		}
--		fallthrough
--	default:
--		s.error(offs, "unknown escape sequence")
--	}
--}
--
--func (s *Scanner) scanString() string {
--	// '"' opening already consumed
--	offs := s.offset - 1
--
--	for s.ch != '"' {
--		ch := s.ch
--		s.next()
--		if ch == '\n' || ch < 0 {
--			s.error(offs, "string not terminated")
--			break
--		}
--		if ch == '\\' {
--			s.scanEscape(false)
--		}
--	}
--
--	s.next()
--
--	return string(s.src[offs:s.offset])
--}
--
--func stripCR(b []byte) []byte {
--	c := make([]byte, len(b))
--	i := 0
--	for _, ch := range b {
--		if ch != '\r' {
--			c[i] = ch
--			i++
--		}
--	}
--	return c[:i]
--}
--
--func (s *Scanner) scanValString() string {
--	offs := s.offset
--
--	hasCR := false
--	end := offs
--	inQuote := false
--loop:
--	for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
--		ch := s.ch
--		s.next()
--		switch {
--		case inQuote && ch == '\\':
--			s.scanEscape(true)
--		case !inQuote && ch == '\\':
--			if s.ch == '\r' {
--				hasCR = true
--				s.next()
--			}
--			if s.ch != '\n' {
--				s.error(offs, "unquoted '\\' must be followed by new line")
--				break loop
--			}
--			s.next()
--		case ch == '"':
--			inQuote = !inQuote
--		case ch == '\r':
--			hasCR = true
--		case ch < 0 || inQuote && ch == '\n':
--			s.error(offs, "string not terminated")
--			break loop
--		}
--		if inQuote || !isWhiteSpace(ch) {
--			end = s.offset
--		}
--	}
--
--	lit := s.src[offs:end]
--	if hasCR {
--		lit = stripCR(lit)
--	}
--
--	return string(lit)
--}
--
--func isWhiteSpace(ch rune) bool {
--	return ch == ' ' || ch == '\t' || ch == '\r'
--}
--
--func (s *Scanner) skipWhitespace() {
--	for isWhiteSpace(s.ch) {
--		s.next()
--	}
--}
--
--// Scan scans the next token and returns the token position, the token,
--// and its literal string if applicable. The source end is indicated by
--// token.EOF.
--//
--// If the returned token is a literal (token.IDENT, token.STRING) or
--// token.COMMENT, the literal string has the corresponding value.
--//
--// If the returned token is token.ILLEGAL, the literal string is the
--// offending character.
--//
--// In all other cases, Scan returns an empty literal string.
--//
--// For more tolerant parsing, Scan will return a valid token if
--// possible even if a syntax error was encountered. Thus, even
--// if the resulting token sequence contains no illegal tokens,
--// a client may not assume that no error occurred. Instead it
--// must check the scanner's ErrorCount or the number of calls
--// of the error handler, if there was one installed.
--//
--// Scan adds line information to the file added to the file
--// set with Init. Token positions are relative to that file
--// and thus relative to the file set.
--//
--func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
--scanAgain:
--	s.skipWhitespace()
--
--	// current token start
--	pos = s.file.Pos(s.offset)
--
--	// determine token value
--	switch ch := s.ch; {
--	case s.nextVal:
--		lit = s.scanValString()
--		tok = token.STRING
--		s.nextVal = false
--	case isLetter(ch):
--		lit = s.scanIdentifier()
--		tok = token.IDENT
--	default:
--		s.next() // always make progress
--		switch ch {
--		case -1:
--			tok = token.EOF
--		case '\n':
--			tok = token.EOL
--		case '"':
--			tok = token.STRING
--			lit = s.scanString()
--		case '[':
--			tok = token.LBRACK
--		case ']':
--			tok = token.RBRACK
--		case ';', '#':
--			// comment
--			lit = s.scanComment()
--			if s.mode&ScanComments == 0 {
--				// skip comment
--				goto scanAgain
--			}
--			tok = token.COMMENT
--		case '=':
--			tok = token.ASSIGN
--			s.nextVal = true
--		default:
--			s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
--			tok = token.ILLEGAL
--			lit = string(ch)
--		}
--	}
--
--	return
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner_test.go
-deleted file mode 100644
-index 33227c1..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/scanner/scanner_test.go
-+++ /dev/null
-@@ -1,417 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package scanner
--
--import (
--	"os"
--	"strings"
--	"testing"
--)
--
--import (
--	"code.google.com/p/gcfg/token"
--)
--
--var fset = token.NewFileSet()
--
--const /* class */ (
--	special = iota
--	literal
--	operator
--)
--
--func tokenclass(tok token.Token) int {
--	switch {
--	case tok.IsLiteral():
--		return literal
--	case tok.IsOperator():
--		return operator
--	}
--	return special
--}
--
--type elt struct {
--	tok   token.Token
--	lit   string
--	class int
--	pre   string
--	suf   string
--}
--
--var tokens = [...]elt{
--	// Special tokens
--	{token.COMMENT, "; a comment", special, "", "\n"},
--	{token.COMMENT, "# a comment", special, "", "\n"},
--
--	// Operators and delimiters
--	{token.ASSIGN, "=", operator, "", "value"},
--	{token.LBRACK, "[", operator, "", ""},
--	{token.RBRACK, "]", operator, "", ""},
--	{token.EOL, "\n", operator, "", ""},
--
--	// Identifiers
--	{token.IDENT, "foobar", literal, "", ""},
--	{token.IDENT, "a۰۱۸", literal, "", ""},
--	{token.IDENT, "foo६४", literal, "", ""},
--	{token.IDENT, "bar9876", literal, "", ""},
--	{token.IDENT, "foo-bar", literal, "", ""},
--	{token.IDENT, "foo", literal, ";\n", ""},
--	// String literals (subsection names)
--	{token.STRING, `"foobar"`, literal, "", ""},
--	{token.STRING, `"\""`, literal, "", ""},
--	// String literals (values)
--	{token.STRING, `"\n"`, literal, "=", ""},
--	{token.STRING, `"foobar"`, literal, "=", ""},
--	{token.STRING, `"foo\nbar"`, literal, "=", ""},
--	{token.STRING, `"foo\"bar"`, literal, "=", ""},
--	{token.STRING, `"foo\\bar"`, literal, "=", ""},
--	{token.STRING, `"foobar"`, literal, "=", ""},
--	{token.STRING, `"foobar"`, literal, "= ", ""},
--	{token.STRING, `"foobar"`, literal, "=", "\n"},
--	{token.STRING, `"foobar"`, literal, "=", ";"},
--	{token.STRING, `"foobar"`, literal, "=", " ;"},
--	{token.STRING, `"foobar"`, literal, "=", "#"},
--	{token.STRING, `"foobar"`, literal, "=", " #"},
--	{token.STRING, "foobar", literal, "=", ""},
--	{token.STRING, "foobar", literal, "= ", ""},
--	{token.STRING, "foobar", literal, "=", " "},
--	{token.STRING, `"foo" "bar"`, literal, "=", " "},
--	{token.STRING, "foo\\\nbar", literal, "=", ""},
--	{token.STRING, "foo\\\r\nbar", literal, "=", ""},
--}
--
--const whitespace = "  \t  \n\n\n" // to separate tokens
--
--var source = func() []byte {
--	var src []byte
--	for _, t := range tokens {
--		src = append(src, t.pre...)
--		src = append(src, t.lit...)
--		src = append(src, t.suf...)
--		src = append(src, whitespace...)
--	}
--	return src
--}()
--
--func newlineCount(s string) int {
--	n := 0
--	for i := 0; i < len(s); i++ {
--		if s[i] == '\n' {
--			n++
--		}
--	}
--	return n
--}
--
--func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
--	pos := fset.Position(p)
--	if pos.Filename != expected.Filename {
--		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
--	}
--	if pos.Offset != expected.Offset {
--		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
--	}
--	if pos.Line != expected.Line {
--		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
--	}
--	if pos.Column != expected.Column {
--		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
--	}
--}
--
--// Verify that calling Scan() provides the correct results.
--func TestScan(t *testing.T) {
--	// make source
--	src_linecount := newlineCount(string(source))
--	whitespace_linecount := newlineCount(whitespace)
--
--	index := 0
--
--	// error handler
--	eh := func(_ token.Position, msg string) {
--		t.Errorf("%d: error handler called (msg = %s)", index, msg)
--	}
--
--	// verify scan
--	var s Scanner
--	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments)
--	// epos is the expected position
--	epos := token.Position{
--		Filename: "",
--		Offset:   0,
--		Line:     1,
--		Column:   1,
--	}
--	for {
--		pos, tok, lit := s.Scan()
--		if lit == "" {
--			// no literal value for non-literal tokens
--			lit = tok.String()
--		}
--		e := elt{token.EOF, "", special, "", ""}
--		if index < len(tokens) {
--			e = tokens[index]
--		}
--		if tok == token.EOF {
--			lit = "<EOF>"
--			epos.Line = src_linecount
--			epos.Column = 2
--		}
--		if e.pre != "" && strings.ContainsRune("=;#", rune(e.pre[0])) {
--			epos.Column = 1
--			checkPos(t, lit, pos, epos)
--			var etok token.Token
--			if e.pre[0] == '=' {
--				etok = token.ASSIGN
--			} else {
--				etok = token.COMMENT
--			}
--			if tok != etok {
--				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, etok)
--			}
--			pos, tok, lit = s.Scan()
--		}
--		epos.Offset += len(e.pre)
--		if tok != token.EOF {
--			epos.Column = 1 + len(e.pre)
--		}
--		if e.pre != "" && e.pre[len(e.pre)-1] == '\n' {
--			epos.Offset--
--			epos.Column--
--			checkPos(t, lit, pos, epos)
--			if tok != token.EOL {
--				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
--			}
--			epos.Line++
--			epos.Offset++
--			epos.Column = 1
--			pos, tok, lit = s.Scan()
--		}
--		checkPos(t, lit, pos, epos)
--		if tok != e.tok {
--			t.Errorf("bad token for %q: got %q, expected %q", lit, tok, e.tok)
--		}
--		if e.tok.IsLiteral() {
--			// no CRs in value string literals
--			elit := e.lit
--			if strings.ContainsRune(e.pre, '=') {
--				elit = string(stripCR([]byte(elit)))
--				epos.Offset += len(e.lit) - len(lit) // correct position
--			}
--			if lit != elit {
--				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
--			}
--		}
--		if tokenclass(tok) != e.class {
--			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
--		}
--		epos.Offset += len(lit) + len(e.suf) + len(whitespace)
--		epos.Line += newlineCount(lit) + newlineCount(e.suf) + whitespace_linecount
--		index++
--		if tok == token.EOF {
--			break
--		}
--		if e.suf == "value" {
--			pos, tok, lit = s.Scan()
--			if tok != token.STRING {
--				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.STRING)
--			}
--		} else if strings.ContainsRune(e.suf, ';') || strings.ContainsRune(e.suf, '#') {
--			pos, tok, lit = s.Scan()
--			if tok != token.COMMENT {
--				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.COMMENT)
--			}
--		}
--		// skip EOLs
--		for i := 0; i < whitespace_linecount+newlineCount(e.suf); i++ {
--			pos, tok, lit = s.Scan()
--			if tok != token.EOL {
--				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
--			}
--		}
--	}
--	if s.ErrorCount != 0 {
--		t.Errorf("found %d errors", s.ErrorCount)
--	}
--}
--
--func TestScanValStringEOF(t *testing.T) {
--	var s Scanner
--	src := "= value"
--	f := fset.AddFile("src", fset.Base(), len(src))
--	s.Init(f, []byte(src), nil, 0)
--	s.Scan()              // =
--	s.Scan()              // value
--	_, tok, _ := s.Scan() // EOF
--	if tok != token.EOF {
--		t.Errorf("bad token: got %s, expected %s", tok, token.EOF)
--	}
--	if s.ErrorCount > 0 {
--		t.Error("scanning error")
--	}
--}
--
--// Verify that initializing the same scanner more than once works correctly.
--func TestInit(t *testing.T) {
--	var s Scanner
--
--	// 1st init
--	src1 := "\nname = value"
--	f1 := fset.AddFile("src1", fset.Base(), len(src1))
--	s.Init(f1, []byte(src1), nil, 0)
--	if f1.Size() != len(src1) {
--		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
--	}
--	s.Scan()              // \n
--	s.Scan()              // name
--	_, tok, _ := s.Scan() // =
--	if tok != token.ASSIGN {
--		t.Errorf("bad token: got %s, expected %s", tok, token.ASSIGN)
--	}
--
--	// 2nd init
--	src2 := "[section]"
--	f2 := fset.AddFile("src2", fset.Base(), len(src2))
--	s.Init(f2, []byte(src2), nil, 0)
--	if f2.Size() != len(src2) {
--		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
--	}
--	_, tok, _ = s.Scan() // [
--	if tok != token.LBRACK {
--		t.Errorf("bad token: got %s, expected %s", tok, token.LBRACK)
--	}
--
--	if s.ErrorCount != 0 {
--		t.Errorf("found %d errors", s.ErrorCount)
--	}
--}
--
--func TestStdErrorHandler(t *testing.T) {
--	const src = "@\n" + // illegal character, cause an error
--		"@ @\n" // two errors on the same line
--
--	var list ErrorList
--	eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
--
--	var s Scanner
--	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, 0)
--	for {
--		if _, tok, _ := s.Scan(); tok == token.EOF {
--			break
--		}
--	}
--
--	if len(list) != s.ErrorCount {
--		t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
--	}
--
--	if len(list) != 3 {
--		t.Errorf("found %d raw errors, expected 3", len(list))
--		PrintError(os.Stderr, list)
--	}
--
--	list.Sort()
--	if len(list) != 3 {
--		t.Errorf("found %d sorted errors, expected 3", len(list))
--		PrintError(os.Stderr, list)
--	}
--
--	list.RemoveMultiples()
--	if len(list) != 2 {
--		t.Errorf("found %d one-per-line errors, expected 2", len(list))
--		PrintError(os.Stderr, list)
--	}
--}
--
--type errorCollector struct {
--	cnt int            // number of errors encountered
--	msg string         // last error message encountered
--	pos token.Position // last error position encountered
--}
--
--func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
--	var s Scanner
--	var h errorCollector
--	eh := func(pos token.Position, msg string) {
--		h.cnt++
--		h.msg = msg
--		h.pos = pos
--	}
--	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments)
--	if src[0] == '=' {
--		_, _, _ = s.Scan()
--	}
--	_, tok0, _ := s.Scan()
--	_, tok1, _ := s.Scan()
--	if tok0 != tok {
--		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
--	}
--	if tok1 != token.EOF {
--		t.Errorf("%q: got %s, expected EOF", src, tok1)
--	}
--	cnt := 0
--	if err != "" {
--		cnt = 1
--	}
--	if h.cnt != cnt {
--		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
--	}
--	if h.msg != err {
--		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
--	}
--	if h.pos.Offset != pos {
--		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
--	}
--}
--
--var errors = []struct {
--	src string
--	tok token.Token
--	pos int
--	err string
--}{
--	{"\a", token.ILLEGAL, 0, "illegal character U+0007"},
--	{"/", token.ILLEGAL, 0, "illegal character U+002F '/'"},
--	{"_", token.ILLEGAL, 0, "illegal character U+005F '_'"},
--	{`…`, token.ILLEGAL, 0, "illegal character U+2026 '…'"},
--	{`""`, token.STRING, 0, ""},
--	{`"`, token.STRING, 0, "string not terminated"},
--	{"\"\n", token.STRING, 0, "string not terminated"},
--	{`="`, token.STRING, 1, "string not terminated"},
--	{"=\"\n", token.STRING, 1, "string not terminated"},
--	{"=\\", token.STRING, 1, "unquoted '\\' must be followed by new line"},
--	{"=\\\r", token.STRING, 1, "unquoted '\\' must be followed by new line"},
--	{`"\z"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\a"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\b"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\f"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\r"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\t"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\v"`, token.STRING, 2, "unknown escape sequence"},
--	{`"\0"`, token.STRING, 2, "unknown escape sequence"},
--}
--
--func TestScanErrors(t *testing.T) {
--	for _, e := range errors {
--		checkError(t, e.src, e.tok, e.pos, e.err)
--	}
--}
--
--func BenchmarkScan(b *testing.B) {
--	b.StopTimer()
--	fset := token.NewFileSet()
--	file := fset.AddFile("", fset.Base(), len(source))
--	var s Scanner
--	b.StartTimer()
--	for i := b.N - 1; i >= 0; i-- {
--		s.Init(file, source, nil, ScanComments)
--		for {
--			_, tok, _ := s.Scan()
--			if tok == token.EOF {
--				break
--			}
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/set.go b/Godeps/_workspace/src/code.google.com/p/gcfg/set.go
-deleted file mode 100644
-index 4e15604..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/set.go
-+++ /dev/null
-@@ -1,281 +0,0 @@
--package gcfg
--
--import (
--	"fmt"
--	"math/big"
--	"reflect"
--	"strings"
--	"unicode"
--	"unicode/utf8"
--
--	"code.google.com/p/gcfg/types"
--)
--
--type tag struct {
--	ident   string
--	intMode string
--}
--
--func newTag(ts string) tag {
--	t := tag{}
--	s := strings.Split(ts, ",")
--	t.ident = s[0]
--	for _, tse := range s[1:] {
--		if strings.HasPrefix(tse, "int=") {
--			t.intMode = tse[len("int="):]
--		}
--	}
--	return t
--}
--
--func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
--	var n string
--	r0, _ := utf8.DecodeRuneInString(name)
--	if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
--		n = "X"
--	}
--	n += strings.Replace(name, "-", "_", -1)
--	f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
--		if !v.FieldByName(fieldName).CanSet() {
--			return false
--		}
--		f, _ := v.Type().FieldByName(fieldName)
--		t := newTag(f.Tag.Get("gcfg"))
--		if t.ident != "" {
--			return strings.EqualFold(t.ident, name)
--		}
--		return strings.EqualFold(n, fieldName)
--	})
--	if !ok {
--		return reflect.Value{}, tag{}
--	}
--	return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
--}
--
--type setter func(destp interface{}, blank bool, val string, t tag) error
--
--var errUnsupportedType = fmt.Errorf("unsupported type")
--var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
--
--var setters = []setter{
--	typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
--}
--
--func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
--	dtu, ok := d.(textUnmarshaler)
--	if !ok {
--		return errUnsupportedType
--	}
--	if blank {
--		return errBlankUnsupported
--	}
--	return dtu.UnmarshalText([]byte(val))
--}
--
--func boolSetter(d interface{}, blank bool, val string, t tag) error {
--	if blank {
--		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
--		return nil
--	}
--	b, err := types.ParseBool(val)
--	if err == nil {
--		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
--	}
--	return err
--}
--
--func intMode(mode string) types.IntMode {
--	var m types.IntMode
--	if strings.ContainsAny(mode, "dD") {
--		m |= types.Dec
--	}
--	if strings.ContainsAny(mode, "hH") {
--		m |= types.Hex
--	}
--	if strings.ContainsAny(mode, "oO") {
--		m |= types.Oct
--	}
--	return m
--}
--
--var typeModes = map[reflect.Type]types.IntMode{
--	reflect.TypeOf(int(0)):    types.Dec | types.Hex,
--	reflect.TypeOf(int8(0)):   types.Dec | types.Hex,
--	reflect.TypeOf(int16(0)):  types.Dec | types.Hex,
--	reflect.TypeOf(int32(0)):  types.Dec | types.Hex,
--	reflect.TypeOf(int64(0)):  types.Dec | types.Hex,
--	reflect.TypeOf(uint(0)):   types.Dec | types.Hex,
--	reflect.TypeOf(uint8(0)):  types.Dec | types.Hex,
--	reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
--	reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
--	reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
--	// use default mode (allow dec/hex/oct) for uintptr type
--	reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
--}
--
--func intModeDefault(t reflect.Type) types.IntMode {
--	m, ok := typeModes[t]
--	if !ok {
--		m = types.Dec | types.Hex | types.Oct
--	}
--	return m
--}
--
--func intSetter(d interface{}, blank bool, val string, t tag) error {
--	if blank {
--		return errBlankUnsupported
--	}
--	mode := intMode(t.intMode)
--	if mode == 0 {
--		mode = intModeDefault(reflect.TypeOf(d).Elem())
--	}
--	return types.ParseInt(d, val, mode)
--}
--
--func stringSetter(d interface{}, blank bool, val string, t tag) error {
--	if blank {
--		return errBlankUnsupported
--	}
--	dsp, ok := d.(*string)
--	if !ok {
--		return errUnsupportedType
--	}
--	*dsp = val
--	return nil
--}
--
--var kindSetters = map[reflect.Kind]setter{
--	reflect.String:  stringSetter,
--	reflect.Bool:    boolSetter,
--	reflect.Int:     intSetter,
--	reflect.Int8:    intSetter,
--	reflect.Int16:   intSetter,
--	reflect.Int32:   intSetter,
--	reflect.Int64:   intSetter,
--	reflect.Uint:    intSetter,
--	reflect.Uint8:   intSetter,
--	reflect.Uint16:  intSetter,
--	reflect.Uint32:  intSetter,
--	reflect.Uint64:  intSetter,
--	reflect.Uintptr: intSetter,
--}
--
--var typeSetters = map[reflect.Type]setter{
--	reflect.TypeOf(big.Int{}): intSetter,
--}
--
--func typeSetter(d interface{}, blank bool, val string, tt tag) error {
--	t := reflect.ValueOf(d).Type().Elem()
--	setter, ok := typeSetters[t]
--	if !ok {
--		return errUnsupportedType
--	}
--	return setter(d, blank, val, tt)
--}
--
--func kindSetter(d interface{}, blank bool, val string, tt tag) error {
--	k := reflect.ValueOf(d).Type().Elem().Kind()
--	setter, ok := kindSetters[k]
--	if !ok {
--		return errUnsupportedType
--	}
--	return setter(d, blank, val, tt)
--}
--
--func scanSetter(d interface{}, blank bool, val string, tt tag) error {
--	if blank {
--		return errBlankUnsupported
--	}
--	return types.ScanFully(d, val, 'v')
--}
--
--func set(cfg interface{}, sect, sub, name string, blank bool, value string) error {
--	vPCfg := reflect.ValueOf(cfg)
--	if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
--		panic(fmt.Errorf("config must be a pointer to a struct"))
--	}
--	vCfg := vPCfg.Elem()
--	vSect, _ := fieldFold(vCfg, sect)
--	if !vSect.IsValid() {
--		return fmt.Errorf("invalid section: section %q", sect)
--	}
--	if vSect.Kind() == reflect.Map {
--		vst := vSect.Type()
--		if vst.Key().Kind() != reflect.String ||
--			vst.Elem().Kind() != reflect.Ptr ||
--			vst.Elem().Elem().Kind() != reflect.Struct {
--			panic(fmt.Errorf("map field for section must have string keys and "+
--				"pointer-to-struct values: section %q", sect))
--		}
--		if vSect.IsNil() {
--			vSect.Set(reflect.MakeMap(vst))
--		}
--		k := reflect.ValueOf(sub)
--		pv := vSect.MapIndex(k)
--		if !pv.IsValid() {
--			vType := vSect.Type().Elem().Elem()
--			pv = reflect.New(vType)
--			vSect.SetMapIndex(k, pv)
--		}
--		vSect = pv.Elem()
--	} else if vSect.Kind() != reflect.Struct {
--		panic(fmt.Errorf("field for section must be a map or a struct: "+
--			"section %q", sect))
--	} else if sub != "" {
--		return fmt.Errorf("invalid subsection: "+
--			"section %q subsection %q", sect, sub)
--	}
--	vVar, t := fieldFold(vSect, name)
--	if !vVar.IsValid() {
--		return fmt.Errorf("invalid variable: "+
--			"section %q subsection %q variable %q", sect, sub, name)
--	}
--	// vVal is either single-valued var, or newly allocated value within multi-valued var
--	var vVal reflect.Value
--	// multi-value if unnamed slice type
--	isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice
--	if isMulti && blank {
--		vVar.Set(reflect.Zero(vVar.Type()))
--		return nil
--	}
--	if isMulti {
--		vVal = reflect.New(vVar.Type().Elem()).Elem()
--	} else {
--		vVal = vVar
--	}
--	isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
--	isNew := isDeref && vVal.IsNil()
--	// vAddr is address of value to set (dereferenced & allocated as needed)
--	var vAddr reflect.Value
--	switch {
--	case isNew:
--		vAddr = reflect.New(vVal.Type().Elem())
--	case isDeref && !isNew:
--		vAddr = vVal
--	default:
--		vAddr = vVal.Addr()
--	}
--	vAddrI := vAddr.Interface()
--	err, ok := error(nil), false
--	for _, s := range setters {
--		err = s(vAddrI, blank, value, t)
--		if err == nil {
--			ok = true
--			break
--		}
--		if err != errUnsupportedType {
--			return err
--		}
--	}
--	if !ok {
--		// in case all setters returned errUnsupportedType
--		return err
--	}
--	if isNew { // set reference if it was dereferenced and newly allocated
--		vVal.Set(vAddr)
--	}
--	if isMulti { // append if multi-valued
--		vVar.Set(reflect.Append(vVar, vVal))
--	}
--	return nil
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_test.gcfg b/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_test.gcfg
-deleted file mode 100644
-index cddff29..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_test.gcfg
-+++ /dev/null
-@@ -1,3 +0,0 @@
--; Comment line
--[section]
--name=value # comment
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_unicode_test.gcfg b/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_unicode_test.gcfg
-deleted file mode 100644
-index 3762a20..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/testdata/gcfg_unicode_test.gcfg
-+++ /dev/null
-@@ -1,3 +0,0 @@
--; Comment line
--[甲]
--乙=丙 # comment
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/token/position.go b/Godeps/_workspace/src/code.google.com/p/gcfg/token/position.go
-deleted file mode 100644
-index fc45c1e..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/token/position.go
-+++ /dev/null
-@@ -1,435 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// TODO(gri) consider making this a separate package outside the go directory.
--
--package token
--
--import (
--	"fmt"
--	"sort"
--	"sync"
--)
--
--// -----------------------------------------------------------------------------
--// Positions
--
--// Position describes an arbitrary source position
--// including the file, line, and column location.
--// A Position is valid if the line number is > 0.
--//
--type Position struct {
--	Filename string // filename, if any
--	Offset   int    // offset, starting at 0
--	Line     int    // line number, starting at 1
--	Column   int    // column number, starting at 1 (character count)
--}
--
--// IsValid returns true if the position is valid.
--func (pos *Position) IsValid() bool { return pos.Line > 0 }
--
--// String returns a string in one of several forms:
--//
--//	file:line:column    valid position with file name
--//	line:column         valid position without file name
--//	file                invalid position with file name
--//	-                   invalid position without file name
--//
--func (pos Position) String() string {
--	s := pos.Filename
--	if pos.IsValid() {
--		if s != "" {
--			s += ":"
--		}
--		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
--	}
--	if s == "" {
--		s = "-"
--	}
--	return s
--}
--
--// Pos is a compact encoding of a source position within a file set.
--// It can be converted into a Position for a more convenient, but much
--// larger, representation.
--//
--// The Pos value for a given file is a number in the range [base, base+size],
--// where base and size are specified when adding the file to the file set via
--// AddFile.
--//
--// To create the Pos value for a specific source offset, first add
--// the respective file to the current file set (via FileSet.AddFile)
--// and then call File.Pos(offset) for that file. Given a Pos value p
--// for a specific file set fset, the corresponding Position value is
--// obtained by calling fset.Position(p).
--//
--// Pos values can be compared directly with the usual comparison operators:
--// If two Pos values p and q are in the same file, comparing p and q is
--// equivalent to comparing the respective source file offsets. If p and q
--// are in different files, p < q is true if the file implied by p was added
--// to the respective file set before the file implied by q.
--//
--type Pos int
--
--// The zero value for Pos is NoPos; there is no file and line information
--// associated with it, and NoPos.IsValid() is false. NoPos is always
--// smaller than any other Pos value. The corresponding Position value
--// for NoPos is the zero value for Position.
--//
--const NoPos Pos = 0
--
--// IsValid returns true if the position is valid.
--func (p Pos) IsValid() bool {
--	return p != NoPos
--}
--
--// -----------------------------------------------------------------------------
--// File
--
--// A File is a handle for a file belonging to a FileSet.
--// A File has a name, size, and line offset table.
--//
--type File struct {
--	set  *FileSet
--	name string // file name as provided to AddFile
--	base int    // Pos value range for this file is [base...base+size]
--	size int    // file size as provided to AddFile
--
--	// lines and infos are protected by set.mutex
--	lines []int
--	infos []lineInfo
--}
--
--// Name returns the file name of file f as registered with AddFile.
--func (f *File) Name() string {
--	return f.name
--}
--
--// Base returns the base offset of file f as registered with AddFile.
--func (f *File) Base() int {
--	return f.base
--}
--
--// Size returns the size of file f as registered with AddFile.
--func (f *File) Size() int {
--	return f.size
--}
--
--// LineCount returns the number of lines in file f.
--func (f *File) LineCount() int {
--	f.set.mutex.RLock()
--	n := len(f.lines)
--	f.set.mutex.RUnlock()
--	return n
--}
--
--// AddLine adds the line offset for a new line.
--// The line offset must be larger than the offset for the previous line
--// and smaller than the file size; otherwise the line offset is ignored.
--//
--func (f *File) AddLine(offset int) {
--	f.set.mutex.Lock()
--	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
--		f.lines = append(f.lines, offset)
--	}
--	f.set.mutex.Unlock()
--}
--
--// SetLines sets the line offsets for a file and returns true if successful.
--// The line offsets are the offsets of the first character of each line;
--// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
--// An empty file has an empty line offset table.
--// Each line offset must be larger than the offset for the previous line
--// and smaller than the file size; otherwise SetLines fails and returns
--// false.
--//
--func (f *File) SetLines(lines []int) bool {
--	// verify validity of lines table
--	size := f.size
--	for i, offset := range lines {
--		if i > 0 && offset <= lines[i-1] || size <= offset {
--			return false
--		}
--	}
--
--	// set lines table
--	f.set.mutex.Lock()
--	f.lines = lines
--	f.set.mutex.Unlock()
--	return true
--}
--
--// SetLinesForContent sets the line offsets for the given file content.
--func (f *File) SetLinesForContent(content []byte) {
--	var lines []int
--	line := 0
--	for offset, b := range content {
--		if line >= 0 {
--			lines = append(lines, line)
--		}
--		line = -1
--		if b == '\n' {
--			line = offset + 1
--		}
--	}
--
--	// set lines table
--	f.set.mutex.Lock()
--	f.lines = lines
--	f.set.mutex.Unlock()
--}
--
--// A lineInfo object describes alternative file and line number
--// information (such as provided via a //line comment in a .go
--// file) for a given file offset.
--type lineInfo struct {
--	// fields are exported to make them accessible to gob
--	Offset   int
--	Filename string
--	Line     int
--}
--
--// AddLineInfo adds alternative file and line number information for
--// a given file offset. The offset must be larger than the offset for
--// the previously added alternative line info and smaller than the
--// file size; otherwise the information is ignored.
--//
--// AddLineInfo is typically used to register alternative position
--// information for //line filename:line comments in source files.
--//
--func (f *File) AddLineInfo(offset int, filename string, line int) {
--	f.set.mutex.Lock()
--	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
--		f.infos = append(f.infos, lineInfo{offset, filename, line})
--	}
--	f.set.mutex.Unlock()
--}
--
--// Pos returns the Pos value for the given file offset;
--// the offset must be <= f.Size().
--// f.Pos(f.Offset(p)) == p.
--//
--func (f *File) Pos(offset int) Pos {
--	if offset > f.size {
--		panic("illegal file offset")
--	}
--	return Pos(f.base + offset)
--}
--
--// Offset returns the offset for the given file position p;
--// p must be a valid Pos value in that file.
--// f.Offset(f.Pos(offset)) == offset.
--//
--func (f *File) Offset(p Pos) int {
--	if int(p) < f.base || int(p) > f.base+f.size {
--		panic("illegal Pos value")
--	}
--	return int(p) - f.base
--}
--
--// Line returns the line number for the given file position p;
--// p must be a Pos value in that file or NoPos.
--//
--func (f *File) Line(p Pos) int {
--	// TODO(gri) this can be implemented much more efficiently
--	return f.Position(p).Line
--}
--
--func searchLineInfos(a []lineInfo, x int) int {
--	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
--}
--
--// info returns the file name, line, and column number for a file offset.
--func (f *File) info(offset int) (filename string, line, column int) {
--	filename = f.name
--	if i := searchInts(f.lines, offset); i >= 0 {
--		line, column = i+1, offset-f.lines[i]+1
--	}
--	if len(f.infos) > 0 {
--		// almost no files have extra line infos
--		if i := searchLineInfos(f.infos, offset); i >= 0 {
--			alt := &f.infos[i]
--			filename = alt.Filename
--			if i := searchInts(f.lines, alt.Offset); i >= 0 {
--				line += alt.Line - i - 1
--			}
--		}
--	}
--	return
--}
--
--func (f *File) position(p Pos) (pos Position) {
--	offset := int(p) - f.base
--	pos.Offset = offset
--	pos.Filename, pos.Line, pos.Column = f.info(offset)
--	return
--}
--
--// Position returns the Position value for the given file position p;
--// p must be a Pos value in that file or NoPos.
--//
--func (f *File) Position(p Pos) (pos Position) {
--	if p != NoPos {
--		if int(p) < f.base || int(p) > f.base+f.size {
--			panic("illegal Pos value")
--		}
--		pos = f.position(p)
--	}
--	return
--}
--
--// -----------------------------------------------------------------------------
--// FileSet
--
--// A FileSet represents a set of source files.
--// Methods of file sets are synchronized; multiple goroutines
--// may invoke them concurrently.
--//
--type FileSet struct {
--	mutex sync.RWMutex // protects the file set
--	base  int          // base offset for the next file
--	files []*File      // list of files in the order added to the set
--	last  *File        // cache of last file looked up
--}
--
--// NewFileSet creates a new file set.
--func NewFileSet() *FileSet {
--	s := new(FileSet)
--	s.base = 1 // 0 == NoPos
--	return s
--}
--
--// Base returns the minimum base offset that must be provided to
--// AddFile when adding the next file.
--//
--func (s *FileSet) Base() int {
--	s.mutex.RLock()
--	b := s.base
--	s.mutex.RUnlock()
--	return b
--
--}
--
--// AddFile adds a new file with a given filename, base offset, and file size
--// to the file set s and returns the file. Multiple files may have the same
--// name. The base offset must not be smaller than the FileSet's Base(), and
--// size must not be negative.
--//
--// Adding the file will set the file set's Base() value to base + size + 1
--// as the minimum base value for the next file. The following relationship
--// exists between a Pos value p for a given file offset offs:
--//
--//	int(p) = base + offs
--//
--// with offs in the range [0, size] and thus p in the range [base, base+size].
--// For convenience, File.Pos may be used to create file-specific position
--// values from a file offset.
--//
--func (s *FileSet) AddFile(filename string, base, size int) *File {
--	s.mutex.Lock()
--	defer s.mutex.Unlock()
--	if base < s.base || size < 0 {
--		panic("illegal base or size")
--	}
--	// base >= s.base && size >= 0
--	f := &File{s, filename, base, size, []int{0}, nil}
--	base += size + 1 // +1 because EOF also has a position
--	if base < 0 {
--		panic("token.Pos offset overflow (> 2G of source code in file set)")
--	}
--	// add the file to the file set
--	s.base = base
--	s.files = append(s.files, f)
--	s.last = f
--	return f
--}
--
--// Iterate calls f for the files in the file set in the order they were added
--// until f returns false.
--//
--func (s *FileSet) Iterate(f func(*File) bool) {
--	for i := 0; ; i++ {
--		var file *File
--		s.mutex.RLock()
--		if i < len(s.files) {
--			file = s.files[i]
--		}
--		s.mutex.RUnlock()
--		if file == nil || !f(file) {
--			break
--		}
--	}
--}
--
--func searchFiles(a []*File, x int) int {
--	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
--}
--
--func (s *FileSet) file(p Pos) *File {
--	// common case: p is in last file
--	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
--		return f
--	}
--	// p is not in last file - search all files
--	if i := searchFiles(s.files, int(p)); i >= 0 {
--		f := s.files[i]
--		// f.base <= int(p) by definition of searchFiles
--		if int(p) <= f.base+f.size {
--			s.last = f
--			return f
--		}
--	}
--	return nil
--}
--
--// File returns the file that contains the position p.
--// If no such file is found (for instance for p == NoPos),
--// the result is nil.
--//
--func (s *FileSet) File(p Pos) (f *File) {
--	if p != NoPos {
--		s.mutex.RLock()
--		f = s.file(p)
--		s.mutex.RUnlock()
--	}
--	return
--}
--
--// Position converts a Pos in the fileset into a general Position.
--func (s *FileSet) Position(p Pos) (pos Position) {
--	if p != NoPos {
--		s.mutex.RLock()
--		if f := s.file(p); f != nil {
--			pos = f.position(p)
--		}
--		s.mutex.RUnlock()
--	}
--	return
--}
--
--// -----------------------------------------------------------------------------
--// Helper functions
--
--func searchInts(a []int, x int) int {
--	// This function body is a manually inlined version of:
--	//
--	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
--	//
--	// With better compiler optimizations, this may not be needed in the
--	// future, but at the moment this change improves the go/printer
--	// benchmark performance by ~30%. This has a direct impact on the
--	// speed of gofmt and thus seems worthwhile (2011-04-29).
--	// TODO(gri): Remove this when compilers have caught up.
--	i, j := 0, len(a)
--	for i < j {
--		h := i + (j-i)/2 // avoid overflow when computing h
--		// i ≤ h < j
--		if a[h] <= x {
--			i = h + 1
--		} else {
--			j = h
--		}
--	}
--	return i - 1
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/token/position_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/token/position_test.go
-deleted file mode 100644
-index 160107d..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/token/position_test.go
-+++ /dev/null
-@@ -1,181 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package token
--
--import (
--	"fmt"
--	"testing"
--)
--
--func checkPos(t *testing.T, msg string, p, q Position) {
--	if p.Filename != q.Filename {
--		t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename)
--	}
--	if p.Offset != q.Offset {
--		t.Errorf("%s: expected offset = %d; got %d", msg, q.Offset, p.Offset)
--	}
--	if p.Line != q.Line {
--		t.Errorf("%s: expected line = %d; got %d", msg, q.Line, p.Line)
--	}
--	if p.Column != q.Column {
--		t.Errorf("%s: expected column = %d; got %d", msg, q.Column, p.Column)
--	}
--}
--
--func TestNoPos(t *testing.T) {
--	if NoPos.IsValid() {
--		t.Errorf("NoPos should not be valid")
--	}
--	var fset *FileSet
--	checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
--	fset = NewFileSet()
--	checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
--}
--
--var tests = []struct {
--	filename string
--	source   []byte // may be nil
--	size     int
--	lines    []int
--}{
--	{"a", []byte{}, 0, []int{}},
--	{"b", []byte("01234"), 5, []int{0}},
--	{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
--	{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
--	{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
--	{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
--	{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
--	{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
--}
--
--func linecol(lines []int, offs int) (int, int) {
--	prevLineOffs := 0
--	for line, lineOffs := range lines {
--		if offs < lineOffs {
--			return line, offs - prevLineOffs + 1
--		}
--		prevLineOffs = lineOffs
--	}
--	return len(lines), offs - prevLineOffs + 1
--}
--
--func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
--	for offs := 0; offs < f.Size(); offs++ {
--		p := f.Pos(offs)
--		offs2 := f.Offset(p)
--		if offs2 != offs {
--			t.Errorf("%s, Offset: expected offset %d; got %d", f.Name(), offs, offs2)
--		}
--		line, col := linecol(lines, offs)
--		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
--		checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
--		checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
--	}
--}
--
--func makeTestSource(size int, lines []int) []byte {
--	src := make([]byte, size)
--	for _, offs := range lines {
--		if offs > 0 {
--			src[offs-1] = '\n'
--		}
--	}
--	return src
--}
--
--func TestPositions(t *testing.T) {
--	const delta = 7 // a non-zero base offset increment
--	fset := NewFileSet()
--	for _, test := range tests {
--		// verify consistency of test case
--		if test.source != nil && len(test.source) != test.size {
--			t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source))
--		}
--
--		// add file and verify name and size
--		f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
--		if f.Name() != test.filename {
--			t.Errorf("expected filename %q; got %q", test.filename, f.Name())
--		}
--		if f.Size() != test.size {
--			t.Errorf("%s: expected file size %d; got %d", f.Name(), test.size, f.Size())
--		}
--		if fset.File(f.Pos(0)) != f {
--			t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
--		}
--
--		// add lines individually and verify all positions
--		for i, offset := range test.lines {
--			f.AddLine(offset)
--			if f.LineCount() != i+1 {
--				t.Errorf("%s, AddLine: expected line count %d; got %d", f.Name(), i+1, f.LineCount())
--			}
--			// adding the same offset again should be ignored
--			f.AddLine(offset)
--			if f.LineCount() != i+1 {
--				t.Errorf("%s, AddLine: expected unchanged line count %d; got %d", f.Name(), i+1, f.LineCount())
--			}
--			verifyPositions(t, fset, f, test.lines[0:i+1])
--		}
--
--		// add lines with SetLines and verify all positions
--		if ok := f.SetLines(test.lines); !ok {
--			t.Errorf("%s: SetLines failed", f.Name())
--		}
--		if f.LineCount() != len(test.lines) {
--			t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
--		}
--		verifyPositions(t, fset, f, test.lines)
--
--		// add lines with SetLinesForContent and verify all positions
--		src := test.source
--		if src == nil {
--			// no test source available - create one from scratch
--			src = makeTestSource(test.size, test.lines)
--		}
--		f.SetLinesForContent(src)
--		if f.LineCount() != len(test.lines) {
--			t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
--		}
--		verifyPositions(t, fset, f, test.lines)
--	}
--}
--
--func TestLineInfo(t *testing.T) {
--	fset := NewFileSet()
--	f := fset.AddFile("foo", fset.Base(), 500)
--	lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
--	// add lines individually and provide alternative line information
--	for _, offs := range lines {
--		f.AddLine(offs)
--		f.AddLineInfo(offs, "bar", 42)
--	}
--	// verify positions for all offsets
--	for offs := 0; offs <= f.Size(); offs++ {
--		p := f.Pos(offs)
--		_, col := linecol(lines, offs)
--		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
--		checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
--		checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
--	}
--}
--
--func TestFiles(t *testing.T) {
--	fset := NewFileSet()
--	for i, test := range tests {
--		fset.AddFile(test.filename, fset.Base(), test.size)
--		j := 0
--		fset.Iterate(func(f *File) bool {
--			if f.Name() != tests[j].filename {
--				t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
--			}
--			j++
--			return true
--		})
--		if j != i+1 {
--			t.Errorf("expected %d files; got %d", i+1, j)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize.go b/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize.go
-deleted file mode 100644
-index 4adc8f9..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize.go
-+++ /dev/null
-@@ -1,56 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package token
--
--type serializedFile struct {
--	// fields correspond 1:1 to fields with same (lower-case) name in File
--	Name  string
--	Base  int
--	Size  int
--	Lines []int
--	Infos []lineInfo
--}
--
--type serializedFileSet struct {
--	Base  int
--	Files []serializedFile
--}
--
--// Read calls decode to deserialize a file set into s; s must not be nil.
--func (s *FileSet) Read(decode func(interface{}) error) error {
--	var ss serializedFileSet
--	if err := decode(&ss); err != nil {
--		return err
--	}
--
--	s.mutex.Lock()
--	s.base = ss.Base
--	files := make([]*File, len(ss.Files))
--	for i := 0; i < len(ss.Files); i++ {
--		f := &ss.Files[i]
--		files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
--	}
--	s.files = files
--	s.last = nil
--	s.mutex.Unlock()
--
--	return nil
--}
--
--// Write calls encode to serialize the file set s.
--func (s *FileSet) Write(encode func(interface{}) error) error {
--	var ss serializedFileSet
--
--	s.mutex.Lock()
--	ss.Base = s.base
--	files := make([]serializedFile, len(s.files))
--	for i, f := range s.files {
--		files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
--	}
--	ss.Files = files
--	s.mutex.Unlock()
--
--	return encode(ss)
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize_test.go
-deleted file mode 100644
-index 4e925ad..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/token/serialize_test.go
-+++ /dev/null
-@@ -1,111 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package token
--
--import (
--	"bytes"
--	"encoding/gob"
--	"fmt"
--	"testing"
--)
--
--// equal returns nil if p and q describe the same file set;
--// otherwise it returns an error describing the discrepancy.
--func equal(p, q *FileSet) error {
--	if p == q {
--		// avoid deadlock if p == q
--		return nil
--	}
--
--	// not strictly needed for the test
--	p.mutex.Lock()
--	q.mutex.Lock()
--	defer q.mutex.Unlock()
--	defer p.mutex.Unlock()
--
--	if p.base != q.base {
--		return fmt.Errorf("different bases: %d != %d", p.base, q.base)
--	}
--
--	if len(p.files) != len(q.files) {
--		return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
--	}
--
--	for i, f := range p.files {
--		g := q.files[i]
--		if f.set != p {
--			return fmt.Errorf("wrong fileset for %q", f.name)
--		}
--		if g.set != q {
--			return fmt.Errorf("wrong fileset for %q", g.name)
--		}
--		if f.name != g.name {
--			return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
--		}
--		if f.base != g.base {
--			return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
--		}
--		if f.size != g.size {
--			return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
--		}
--		for j, l := range f.lines {
--			m := g.lines[j]
--			if l != m {
--				return fmt.Errorf("different offsets for %q", f.name)
--			}
--		}
--		for j, l := range f.infos {
--			m := g.infos[j]
--			if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
--				return fmt.Errorf("different infos for %q", f.name)
--			}
--		}
--	}
--
--	// we don't care about .last - it's just a cache
--	return nil
--}
--
--func checkSerialize(t *testing.T, p *FileSet) {
--	var buf bytes.Buffer
--	encode := func(x interface{}) error {
--		return gob.NewEncoder(&buf).Encode(x)
--	}
--	if err := p.Write(encode); err != nil {
--		t.Errorf("writing fileset failed: %s", err)
--		return
--	}
--	q := NewFileSet()
--	decode := func(x interface{}) error {
--		return gob.NewDecoder(&buf).Decode(x)
--	}
--	if err := q.Read(decode); err != nil {
--		t.Errorf("reading fileset failed: %s", err)
--		return
--	}
--	if err := equal(p, q); err != nil {
--		t.Errorf("filesets not identical: %s", err)
--	}
--}
--
--func TestSerialization(t *testing.T) {
--	p := NewFileSet()
--	checkSerialize(t, p)
--	// add some files
--	for i := 0; i < 10; i++ {
--		f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
--		checkSerialize(t, p)
--		// add some lines and alternative file infos
--		line := 1000
--		for offs := 0; offs < f.Size(); offs += 40 + i {
--			f.AddLine(offs)
--			if offs%7 == 0 {
--				f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
--				line += 33
--			}
--		}
--		checkSerialize(t, p)
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/token/token.go b/Godeps/_workspace/src/code.google.com/p/gcfg/token/token.go
-deleted file mode 100644
-index b3c7c83..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/token/token.go
-+++ /dev/null
-@@ -1,83 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package token defines constants representing the lexical tokens of the gcfg
--// configuration syntax and basic operations on tokens (printing, predicates).
--//
--// Note that the API for the token package may change to accommodate new
--// features or implementation changes in gcfg.
--//
--package token
--
--import "strconv"
--
--// Token is the set of lexical tokens of the gcfg configuration syntax.
--type Token int
--
--// The list of tokens.
--const (
--	// Special tokens
--	ILLEGAL Token = iota
--	EOF
--	COMMENT
--
--	literal_beg
--	// Identifiers and basic type literals
--	// (these tokens stand for classes of literals)
--	IDENT  // section-name, variable-name
--	STRING // "subsection-name", variable value
--	literal_end
--
--	operator_beg
--	// Operators and delimiters
--	ASSIGN // =
--	LBRACK // [
--	RBRACK // ]
--	EOL    // \n
--	operator_end
--)
--
--var tokens = [...]string{
--	ILLEGAL: "ILLEGAL",
--
--	EOF:     "EOF",
--	COMMENT: "COMMENT",
--
--	IDENT:  "IDENT",
--	STRING: "STRING",
--
--	ASSIGN: "=",
--	LBRACK: "[",
--	RBRACK: "]",
--	EOL:    "\n",
--}
--
--// String returns the string corresponding to the token tok.
--// For operators and delimiters, the string is the actual token character
--// sequence (e.g., for the token ASSIGN, the string is "="). For all other
--// tokens the string corresponds to the token constant name (e.g. for the
--// token IDENT, the string is "IDENT").
--//
--func (tok Token) String() string {
--	s := ""
--	if 0 <= tok && tok < Token(len(tokens)) {
--		s = tokens[tok]
--	}
--	if s == "" {
--		s = "token(" + strconv.Itoa(int(tok)) + ")"
--	}
--	return s
--}
--
--// Predicates
--
--// IsLiteral returns true for tokens corresponding to identifiers
--// and basic type literals; it returns false otherwise.
--//
--func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
--
--// IsOperator returns true for tokens corresponding to operators and
--// delimiters; it returns false otherwise.
--//
--func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/bool.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/bool.go
-deleted file mode 100644
-index 8dcae0d..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/bool.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package types
--
--// BoolValues defines the name and value mappings for ParseBool.
--var BoolValues = map[string]interface{}{
--	"true": true, "yes": true, "on": true, "1": true,
--	"false": false, "no": false, "off": false, "0": false,
--}
--
--var boolParser = func() *EnumParser {
--	ep := &EnumParser{}
--	ep.AddVals(BoolValues)
--	return ep
--}()
--
--// ParseBool parses bool values according to the definitions in BoolValues.
--// Parsing is case-insensitive.
--func ParseBool(s string) (bool, error) {
--	v, err := boolParser.Parse(s)
--	if err != nil {
--		return false, err
--	}
--	return v.(bool), nil
--}
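
A short sketch of ParseBool in use, under the same assumption that the vendored types package is still importable:

package main

import (
	"fmt"

	"code.google.com/p/gcfg/types" // old vendored import path, assumed for illustration
)

func main() {
	// Parsing is case-insensitive per BoolValues, so "Yes" and "OFF" both resolve.
	for _, s := range []string{"Yes", "OFF", "1", "maybe"} {
		v, err := types.ParseBool(s)
		fmt.Printf("%q => %v (err: %v)\n", s, v, err) // "maybe" yields a parse error
	}
}
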
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/doc.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/doc.go
-deleted file mode 100644
-index 9f9c345..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/doc.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--// Package types defines helpers for type conversions.
--//
--// The API for this package is not finalized yet.
--package types
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum.go
-deleted file mode 100644
-index 1a0c7ef..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--package types
--
--import (
--	"fmt"
--	"reflect"
--	"strings"
--)
--
--// EnumParser parses "enum" values, i.e. it maps a predefined set of strings
--// to predefined values.
--type EnumParser struct {
--	Type      string // type name; if not set, use type of first value added
--	CaseMatch bool   // if true, matching of strings is case-sensitive
--	// PrefixMatch bool
--	vals map[string]interface{}
--}
--
--// AddVals adds strings and values to an EnumParser.
--func (ep *EnumParser) AddVals(vals map[string]interface{}) {
--	if ep.vals == nil {
--		ep.vals = make(map[string]interface{})
--	}
--	for k, v := range vals {
--		if ep.Type == "" {
--			ep.Type = reflect.TypeOf(v).Name()
--		}
--		if !ep.CaseMatch {
--			k = strings.ToLower(k)
--		}
--		ep.vals[k] = v
--	}
--}
--
--// Parse parses the string and returns the value or an error.
--func (ep EnumParser) Parse(s string) (interface{}, error) {
--	if !ep.CaseMatch {
--		s = strings.ToLower(s)
--	}
--	v, ok := ep.vals[s]
--	if !ok {
--		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
--	}
--	return v, nil
--}
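
A hedged sketch of the EnumParser API shown above; the "color" type name and its values are made up for illustration, and the import path is again the old vendored one:

package main

import (
	"fmt"

	"code.google.com/p/gcfg/types" // old vendored import path, assumed for illustration
)

func main() {
	ep := &types.EnumParser{Type: "color"} // hypothetical enum type
	ep.AddVals(map[string]interface{}{"red": 1, "green": 2})

	// CaseMatch defaults to false, so keys and input are lowercased before lookup.
	v, err := ep.Parse("RED")
	fmt.Println(v, err) // prints: 1 <nil>

	_, err = ep.Parse("blue")
	fmt.Println(err) // prints a "failed to parse color" error
}
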
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum_test.go
-deleted file mode 100644
-index 4bf135e..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/enum_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package types
--
--import (
--	"testing"
--)
--
--func TestEnumParserBool(t *testing.T) {
--	for _, tt := range []struct {
--		val string
--		res bool
--		ok  bool
--	}{
--		{val: "tRuE", res: true, ok: true},
--		{val: "False", res: false, ok: true},
--		{val: "t", ok: false},
--	} {
--		b, err := ParseBool(tt.val)
--		switch {
--		case tt.ok && err != nil:
--			t.Errorf("%q: got error %v, want %v", tt.val, err, tt.res)
--		case !tt.ok && err == nil:
--			t.Errorf("%q: got %v, want error", tt.val, b)
--		case tt.ok && b != tt.res:
--			t.Errorf("%q: got %v, want %v", tt.val, b, tt.res)
--		default:
--			t.Logf("%q: got %v, %v", tt.val, b, err)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/int.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/int.go
-deleted file mode 100644
-index af7e75c..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/int.go
-+++ /dev/null
-@@ -1,86 +0,0 @@
--package types
--
--import (
--	"fmt"
--	"strings"
--)
--
--// An IntMode is a mode for parsing integer values, representing a set of
--// accepted bases.
--type IntMode uint8
--
--// IntMode values for ParseInt; they can be combined using bitwise OR.
--const (
--	Dec IntMode = 1 << iota
--	Hex
--	Oct
--)
--
--// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
--func (m IntMode) String() string {
--	var modes []string
--	if m&Dec != 0 {
--		modes = append(modes, "Dec")
--	}
--	if m&Hex != 0 {
--		modes = append(modes, "Hex")
--	}
--	if m&Oct != 0 {
--		modes = append(modes, "Oct")
--	}
--	return "IntMode(" + strings.Join(modes, "|") + ")"
--}
--
--var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
--
--func prefix0(val string) bool {
--	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
--}
--
--func prefix0x(val string) bool {
--	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
--}
--
--// ParseInt parses val using mode into intptr, which must be a pointer to an
--// integer kind type. Non-decimal values require a prefix `0` or `0x` in cases
--// when mode permits ambiguity of base; otherwise the prefix can be omitted.
--func ParseInt(intptr interface{}, val string, mode IntMode) error {
--	val = strings.TrimSpace(val)
--	verb := byte(0)
--	switch mode {
--	case Dec:
--		verb = 'd'
--	case Dec + Hex:
--		if prefix0x(val) {
--			verb = 'v'
--		} else {
--			verb = 'd'
--		}
--	case Dec + Oct:
--		if prefix0(val) && !prefix0x(val) {
--			verb = 'v'
--		} else {
--			verb = 'd'
--		}
--	case Dec + Hex + Oct:
--		verb = 'v'
--	case Hex:
--		if prefix0x(val) {
--			verb = 'v'
--		} else {
--			verb = 'x'
--		}
--	case Oct:
--		verb = 'o'
--	case Hex + Oct:
--		if prefix0(val) {
--			verb = 'v'
--		} else {
--			return errIntAmbig
--		}
--	}
--	if verb == 0 {
--		panic("unsupported mode")
--	}
--	return ScanFully(intptr, val, verb)
--}
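
A small sketch of ParseInt with a combined mode, matching the behaviour exercised by the test table that follows (import path assumed as above):

package main

import (
	"fmt"
	"log"

	"code.google.com/p/gcfg/types" // old vendored import path, assumed for illustration
)

func main() {
	var n int

	// With Dec|Hex the "0x" prefix disambiguates the base, so this parses as hexadecimal.
	if err := types.ParseInt(&n, "0x10", types.Dec|types.Hex); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // prints: 16

	// Without a prefix the same mode falls back to decimal.
	if err := types.ParseInt(&n, "10", types.Dec|types.Hex); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // prints: 10
}
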
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/int_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/int_test.go
-deleted file mode 100644
-index b63dbcb..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/int_test.go
-+++ /dev/null
-@@ -1,67 +0,0 @@
--package types
--
--import (
--	"reflect"
--	"testing"
--)
--
--func elem(p interface{}) interface{} {
--	return reflect.ValueOf(p).Elem().Interface()
--}
--
--func TestParseInt(t *testing.T) {
--	for _, tt := range []struct {
--		val  string
--		mode IntMode
--		exp  interface{}
--		ok   bool
--	}{
--		{"0", Dec, int(0), true},
--		{"10", Dec, int(10), true},
--		{"-10", Dec, int(-10), true},
--		{"x", Dec, int(0), false},
--		{"0xa", Hex, int(0xa), true},
--		{"a", Hex, int(0xa), true},
--		{"10", Hex, int(0x10), true},
--		{"-0xa", Hex, int(-0xa), true},
--		{"0x", Hex, int(0x0), true},  // Scanf doesn't require digit behind 0x
--		{"-0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
--		{"-a", Hex, int(-0xa), true},
--		{"-10", Hex, int(-0x10), true},
--		{"x", Hex, int(0), false},
--		{"10", Oct, int(010), true},
--		{"010", Oct, int(010), true},
--		{"-10", Oct, int(-010), true},
--		{"-010", Oct, int(-010), true},
--		{"10", Dec | Hex, int(10), true},
--		{"010", Dec | Hex, int(10), true},
--		{"0x10", Dec | Hex, int(0x10), true},
--		{"10", Dec | Oct, int(10), true},
--		{"010", Dec | Oct, int(010), true},
--		{"0x10", Dec | Oct, int(0), false},
--		{"10", Hex | Oct, int(0), false}, // need prefix to distinguish Hex/Oct
--		{"010", Hex | Oct, int(010), true},
--		{"0x10", Hex | Oct, int(0x10), true},
--		{"10", Dec | Hex | Oct, int(10), true},
--		{"010", Dec | Hex | Oct, int(010), true},
--		{"0x10", Dec | Hex | Oct, int(0x10), true},
--	} {
--		typ := reflect.TypeOf(tt.exp)
--		res := reflect.New(typ).Interface()
--		err := ParseInt(res, tt.val, tt.mode)
--		switch {
--		case tt.ok && err != nil:
--			t.Errorf("ParseInt(%v, %#v, %v): fail; got error %v, want ok",
--				typ, tt.val, tt.mode, err)
--		case !tt.ok && err == nil:
--			t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want error",
--				typ, tt.val, tt.mode, elem(res))
--		case tt.ok && !reflect.DeepEqual(elem(res), tt.exp):
--			t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want %v",
--				typ, tt.val, tt.mode, elem(res), tt.exp)
--		default:
--			t.Logf("ParseInt(%v, %#v, %s): pass; got %v, error %v",
--				typ, tt.val, tt.mode, elem(res), err)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan.go
-deleted file mode 100644
-index db2f6ed..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package types
--
--import (
--	"fmt"
--	"io"
--	"reflect"
--)
--
--// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
--func ScanFully(ptr interface{}, val string, verb byte) error {
--	t := reflect.ValueOf(ptr).Elem().Type()
--	// attempt to read extra bytes to make sure the value is consumed
--	var b []byte
--	n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
--	switch {
--	case n < 1 || n == 1 && err != io.EOF:
--		return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
--	case n > 1:
--		return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
--	}
--	// n == 1 && err == io.EOF
--	return nil
--}
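
ScanFully is the low-level helper ParseInt delegates to; a brief sketch of its contract, under the same import-path assumption:

package main

import (
	"fmt"

	"code.google.com/p/gcfg/types" // old vendored import path, assumed for illustration
)

func main() {
	var n int

	// The verb must consume the whole string.
	err := types.ScanFully(&n, "42", 'd')
	fmt.Println(n, err) // prints: 42 <nil>

	// Trailing characters make the scan fail.
	err = types.ScanFully(&n, "42x", 'd')
	fmt.Println(err) // reports extra characters "x"
}
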
-diff --git a/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan_test.go b/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan_test.go
-deleted file mode 100644
-index a8083e0..0000000
---- a/Godeps/_workspace/src/code.google.com/p/gcfg/types/scan_test.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--package types
--
--import (
--	"reflect"
--	"testing"
--)
--
--func TestScanFully(t *testing.T) {
--	for _, tt := range []struct {
--		val  string
--		verb byte
--		res  interface{}
--		ok   bool
--	}{
--		{"a", 'v', int(0), false},
--		{"0x", 'v', int(0), true},
--		{"0x", 'd', int(0), false},
--	} {
--		d := reflect.New(reflect.TypeOf(tt.res)).Interface()
--		err := ScanFully(d, tt.val, tt.verb)
--		switch {
--		case tt.ok && err != nil:
--			t.Errorf("ScanFully(%T, %q, '%c'): want ok, got error %v",
--				d, tt.val, tt.verb, err)
--		case !tt.ok && err == nil:
--			t.Errorf("ScanFully(%T, %q, '%c'): want error, got %v",
--				d, tt.val, tt.verb, elem(d))
--		case tt.ok && err == nil && !reflect.DeepEqual(tt.res, elem(d)):
--			t.Errorf("ScanFully(%T, %q, '%c'): want %v, got %v",
--				d, tt.val, tt.verb, tt.res, elem(d))
--		default:
--			t.Logf("ScanFully(%T, %q, '%c') = %v; *ptr==%v",
--				d, tt.val, tt.verb, err, elem(d))
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
-deleted file mode 100644
-index ab6b011..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
-+++ /dev/null
-@@ -1,27 +0,0 @@
--Copyright (c) 2009 Google Inc. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
-deleted file mode 100644
-index 50a0f2d..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
-+++ /dev/null
-@@ -1,84 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"encoding/binary"
--	"fmt"
--	"os"
--)
--
--// A Domain represents a Version 2 domain
--type Domain byte
--
--// Domain constants for DCE Security (Version 2) UUIDs.
--const (
--	Person = Domain(0)
--	Group  = Domain(1)
--	Org    = Domain(2)
--)
--
--// NewDCESecurity returns a DCE Security (Version 2) UUID.
--//
--// The domain should be one of Person, Group or Org.
--// On a POSIX system the id should be the user's UID for the Person
--// domain and the user's GID for the Group.  The meaning of id for
--// the domain Org or on non-POSIX systems is site-defined.
--//
--// For a given domain/id pair the same token may be returned for up to
--// 7 minutes and 10 seconds.
--func NewDCESecurity(domain Domain, id uint32) UUID {
--	uuid := NewUUID()
--	if uuid != nil {
--		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
--		uuid[9] = byte(domain)
--		binary.BigEndian.PutUint32(uuid[0:], id)
--	}
--	return uuid
--}
--
--// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
--// domain with the id returned by os.Getuid.
--//
--//  NewDCESecurity(Person, uint32(os.Getuid()))
--func NewDCEPerson() UUID {
--	return NewDCESecurity(Person, uint32(os.Getuid()))
--}
--
--// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
--// domain with the id returned by os.Getgid.
--//
--//  NewDCESecurity(Group, uint32(os.Getgid()))
--func NewDCEGroup() UUID {
--	return NewDCESecurity(Group, uint32(os.Getgid()))
--}
--
--// Domain returns the domain for a Version 2 UUID or false.
--func (uuid UUID) Domain() (Domain, bool) {
--	if v, _ := uuid.Version(); v != 2 {
--		return 0, false
--	}
--	return Domain(uuid[9]), true
--}
--
--// Id returns the id for a Version 2 UUID or false.
--func (uuid UUID) Id() (uint32, bool) {
--	if v, _ := uuid.Version(); v != 2 {
--		return 0, false
--	}
--	return binary.BigEndian.Uint32(uuid[0:4]), true
--}
--
--func (d Domain) String() string {
--	switch d {
--	case Person:
--		return "Person"
--	case Group:
--		return "Group"
--	case Org:
--		return "Org"
--	}
--	return fmt.Sprintf("Domain%d", int(d))
--}
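
A sketch of the DCE Security helpers above; on a POSIX system the reported id is the caller's UID (vendored import path assumed, as with the gcfg examples):

package main

import (
	"fmt"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	u := uuid.NewDCEPerson() // Version 2 UUID in the Person domain
	if d, ok := u.Domain(); ok {
		fmt.Println(d) // prints: Person
	}
	if id, ok := u.Id(); ok {
		fmt.Println(id) // prints the calling user's UID
	}
}
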
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
-deleted file mode 100644
-index d8bd013..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// The uuid package generates and inspects UUIDs.
--//
--// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
--package uuid
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
-deleted file mode 100644
-index cdd4192..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"crypto/md5"
--	"crypto/sha1"
--	"hash"
--)
--
--// Well known Name Space IDs and UUIDs
--var (
--	NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
--	NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
--	NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
--	NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
--	NIL            = Parse("00000000-0000-0000-0000-000000000000")
--)
--
--// NewHash returns a new UUID derived from the hash of space concatenated with
--// data generated by h.  The hash should be at least 16 bytes in length.  The
--// first 16 bytes of the hash are used to form the UUID.  The version of the
--// UUID will be the lower 4 bits of version.  NewHash is used to implement
--// NewMD5 and NewSHA1.
--func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
--	h.Reset()
--	h.Write(space)
--	h.Write([]byte(data))
--	s := h.Sum(nil)
--	uuid := make([]byte, 16)
--	copy(uuid, s)
--	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
--	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
--	return uuid
--}
--
--// NewMD5 returns a new MD5 (Version 3) UUID based on the
--// supplied name space and data.
--//
--//  NewHash(md5.New(), space, data, 3)
--func NewMD5(space UUID, data []byte) UUID {
--	return NewHash(md5.New(), space, data, 3)
--}
--
--// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
--// supplied name space and data.
--//
--//  NewHash(sha1.New(), space, data, 5)
--func NewSHA1(space UUID, data []byte) UUID {
--	return NewHash(sha1.New(), space, data, 5)
--}
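
Name-based UUIDs are deterministic, so NewSHA1 and NewMD5 reproduce the values checked by TestSHA1 and TestMD5 further down; a sketch, vendored import path assumed:

package main

import (
	"fmt"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	u := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("python.org"))
	fmt.Println(u) // prints: 886313e1-3b8a-5372-9b90-0c9aee199e5d

	m := uuid.NewMD5(uuid.NameSpace_DNS, []byte("python.org"))
	fmt.Println(m) // prints: 6fa459ea-ee8a-3ca4-894e-db77e160355e
}
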
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
-deleted file mode 100644
-index dd0a8ac..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
-+++ /dev/null
-@@ -1,101 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import "net"
--
--var (
--	interfaces []net.Interface // cached list of interfaces
--	ifname     string          // name of interface being used
--	nodeID     []byte          // hardware address for version 1 UUIDs
--)
--
--// NodeInterface returns the name of the interface from which the NodeID was
--// derived.  The interface "user" is returned if the NodeID was set by
--// SetNodeID.
--func NodeInterface() string {
--	return ifname
--}
--
--// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
--// If name is "" then the first usable interface found will be used or a random
--// Node ID will be generated.  If a named interface cannot be found then false
--// is returned.
--//
--// SetNodeInterface never fails when name is "".
--func SetNodeInterface(name string) bool {
--	if interfaces == nil {
--		var err error
--		interfaces, err = net.Interfaces()
--		if err != nil && name != "" {
--			return false
--		}
--	}
--
--	for _, ifs := range interfaces {
--		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
--			if setNodeID(ifs.HardwareAddr) {
--				ifname = ifs.Name
--				return true
--			}
--		}
--	}
--
--	// We found no interfaces with a valid hardware address.  If name
--	// does not specify a specific interface generate a random Node ID
--	// (section 4.1.6)
--	if name == "" {
--		if nodeID == nil {
--			nodeID = make([]byte, 6)
--		}
--		randomBits(nodeID)
--		return true
--	}
--	return false
--}
--
--// NodeID returns a copy of the current Node ID as a byte slice, setting the
--// Node ID if not already set.
--func NodeID() []byte {
--	if nodeID == nil {
--		SetNodeInterface("")
--	}
--	nid := make([]byte, 6)
--	copy(nid, nodeID)
--	return nid
--}
--
--// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
--// of id are used.  If id is less than 6 bytes then false is returned and the
--// Node ID is not set.
--func SetNodeID(id []byte) bool {
--	if setNodeID(id) {
--		ifname = "user"
--		return true
--	}
--	return false
--}
--
--func setNodeID(id []byte) bool {
--	if len(id) < 6 {
--		return false
--	}
--	if nodeID == nil {
--		nodeID = make([]byte, 6)
--	}
--	copy(nodeID, id)
--	return true
--}
--
--// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
--// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
--func (uuid UUID) NodeID() []byte {
--	if len(uuid) != 16 {
--		return nil
--	}
--	node := make([]byte, 6)
--	copy(node, uuid[10:])
--	return node
--}
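
A sketch of the Node ID helpers; SetNodeID switches the reported interface name to "user", as TestNodeID below also checks (vendored import path assumed):

package main

import (
	"fmt"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	// Pick a hardware address automatically, or fall back to a random Node ID.
	uuid.SetNodeInterface("")
	fmt.Println(uuid.NodeInterface(), uuid.NodeID())

	// Override it with an explicit 6-byte Node ID.
	uuid.SetNodeID([]byte{1, 2, 3, 4, 5, 6})
	fmt.Println(uuid.NodeInterface()) // prints: user
	fmt.Printf("%x\n", uuid.NodeID()) // prints: 010203040506
}
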
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
-deleted file mode 100644
-index b9369c2..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
-+++ /dev/null
-@@ -1,132 +0,0 @@
--// Copyright 2014 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"encoding/binary"
--	"sync"
--	"time"
--)
--
--// A Time represents a time as the number of 100-nanosecond intervals since
--// 15 Oct 1582.
--type Time int64
--
--const (
--	lillian    = 2299160          // Julian day of 15 Oct 1582
--	unix       = 2440587          // Julian day of 1 Jan 1970
--	epoch      = unix - lillian   // Days between epochs
--	g1582      = epoch * 86400    // seconds between epochs
--	g1582ns100 = g1582 * 10000000 // 100-nanosecond intervals between epochs
--)
--
--var (
--	mu        sync.Mutex
--	lasttime  uint64 // last time we returned
--	clock_seq uint16 // clock sequence for this run
--
--	timeNow = time.Now // for testing
--)
--
--// UnixTime converts t to the number of seconds and nanoseconds since the Unix
--// epoch of 1 Jan 1970.
--func (t Time) UnixTime() (sec, nsec int64) {
--	sec = int64(t - g1582ns100)
--	nsec = (sec % 10000000) * 100
--	sec /= 10000000
--	return sec, nsec
--}
--
--// GetTime returns the current Time (in 100-nanosecond intervals since 15 Oct 1582) and
--// adjusts the clock sequence as needed.  An error is returned if the current
--// time cannot be determined.
--func GetTime() (Time, error) {
--	defer mu.Unlock()
--	mu.Lock()
--	return getTime()
--}
--
--func getTime() (Time, error) {
--	t := timeNow()
--
--	// If we don't have a clock sequence already, set one.
--	if clock_seq == 0 {
--		setClockSequence(-1)
--	}
--	now := uint64(t.UnixNano()/100) + g1582ns100
--
--	// If time has gone backwards with this clock sequence then we
--	// increment the clock sequence
--	if now <= lasttime {
--		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
--	}
--	lasttime = now
--	return Time(now), nil
--}
--
--// ClockSequence returns the current clock sequence, generating one if not
--// already set.  The clock sequence is only used for Version 1 UUIDs.
--//
--// The uuid package does not use global static storage for the clock sequence or
--// the last time a UUID was generated.  Unless SetClockSequence is called first,
--// a new random clock sequence is generated the first time a clock sequence is
--// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
--//
--func ClockSequence() int {
--	defer mu.Unlock()
--	mu.Lock()
--	return clockSequence()
--}
--
--func clockSequence() int {
--	if clock_seq == 0 {
--		setClockSequence(-1)
--	}
--	return int(clock_seq & 0x3fff)
--}
--
--// SetClockSequence sets the clock sequence to the lower 14 bits of seq.  Setting to
--// -1 causes a new sequence to be generated.
--func SetClockSequence(seq int) {
--	defer mu.Unlock()
--	mu.Lock()
--	setClockSequence(seq)
--}
--
--func setClockSequence(seq int) {
--	if seq == -1 {
--		var b [2]byte
--		randomBits(b[:]) // clock sequence
--		seq = int(b[0])<<8 | int(b[1])
--	}
--	old_seq := clock_seq
--	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
--	if old_seq != clock_seq {
--		lasttime = 0
--	}
--}
--
--// Time returns the time, in 100-nanosecond intervals since 15 Oct 1582, encoded in
--// uuid.  It returns false if uuid is not valid.  The time is only well defined
--// for version 1 and 2 UUIDs.
--func (uuid UUID) Time() (Time, bool) {
--	if len(uuid) != 16 {
--		return 0, false
--	}
--	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
--	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
--	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
--	return Time(time), true
--}
--
--// ClockSequence returns the clock sequence encoded in uuid.  It returns false
--// if uuid is not valid.  The clock sequence is only well defined for version 1
--// and 2 UUIDs.
--func (uuid UUID) ClockSequence() (int, bool) {
--	if len(uuid) != 16 {
--		return 0, false
--	}
--	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
--}
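
A sketch showing how the timestamp and clock sequence are read back out of a Version 1 UUID (NewUUID itself is defined in version1.go further down; vendored import path assumed):

package main

import (
	"fmt"
	"time"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	u := uuid.NewUUID() // Version 1 UUID; may be nil if the time cannot be determined
	if t, ok := u.Time(); ok {
		sec, nsec := t.UnixTime()
		fmt.Println(time.Unix(sec, nsec).UTC()) // roughly the wall-clock time of generation
	}
	if seq, ok := u.ClockSequence(); ok {
		fmt.Println(seq) // 14-bit clock sequence shared by UUIDs from this run
	}
}
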
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
-deleted file mode 100644
-index de40b10..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"io"
--)
--
--// randomBits completely fills slice b with random data.
--func randomBits(b []byte) {
--	if _, err := io.ReadFull(rander, b); err != nil {
--		panic(err.Error()) // rand should never fail
--	}
--}
--
--// xvalues maps a byte to its value as a hexadecimal digit, or to 255.
--var xvalues = []byte{
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
--	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
--}
--
--// xtob converts the first two hex bytes of x into a byte.
--func xtob(x string) (byte, bool) {
--	b1 := xvalues[x[0]]
--	b2 := xvalues[x[1]]
--	return (b1 << 4) | b2, b1 != 255 && b2 != 255
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
-deleted file mode 100644
-index 2920fae..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
-+++ /dev/null
-@@ -1,163 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"bytes"
--	"crypto/rand"
--	"fmt"
--	"io"
--	"strings"
--)
--
--// A UUID is a 128 bit (16 byte) Universally Unique IDentifier as defined in
--// RFC 4122.
--type UUID []byte
--
--// A Version represents a UUID's version.
--type Version byte
--
--// A Variant represents a UUID's variant.
--type Variant byte
--
--// Constants returned by Variant.
--const (
--	Invalid   = Variant(iota) // Invalid UUID
--	RFC4122                   // The variant specified in RFC4122
--	Reserved                  // Reserved, NCS backward compatibility.
--	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
--	Future                    // Reserved for future definition.
--)
--
--var rander = rand.Reader // random function
--
--// New returns a new random (version 4) UUID as a string.  It is a convenience
--// function for NewRandom().String().
--func New() string {
--	return NewRandom().String()
--}
--
--// Parse decodes s into a UUID or returns nil.  Both the UUID form of
--// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
--// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
--func Parse(s string) UUID {
--	if len(s) == 36+9 {
--		if strings.ToLower(s[:9]) != "urn:uuid:" {
--			return nil
--		}
--		s = s[9:]
--	} else if len(s) != 36 {
--		return nil
--	}
--	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
--		return nil
--	}
--	uuid := make([]byte, 16)
--	for i, x := range []int{
--		0, 2, 4, 6,
--		9, 11,
--		14, 16,
--		19, 21,
--		24, 26, 28, 30, 32, 34} {
--		if v, ok := xtob(s[x:]); !ok {
--			return nil
--		} else {
--			uuid[i] = v
--		}
--	}
--	return uuid
--}
--
--// Equal returns true if uuid1 and uuid2 are equal.
--func Equal(uuid1, uuid2 UUID) bool {
--	return bytes.Equal(uuid1, uuid2)
--}
--
--// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
--// or "" if uuid is invalid.
--func (uuid UUID) String() string {
--	if uuid == nil || len(uuid) != 16 {
--		return ""
--	}
--	b := []byte(uuid)
--	return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
--		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
--}
--
--// URN returns the RFC 2141 URN form of uuid,
--// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,  or "" if uuid is invalid.
--func (uuid UUID) URN() string {
--	if uuid == nil || len(uuid) != 16 {
--		return ""
--	}
--	b := []byte(uuid)
--	return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
--		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
--}
--
--// Variant returns the variant encoded in uuid.  It returns Invalid if
--// uuid is invalid.
--func (uuid UUID) Variant() Variant {
--	if len(uuid) != 16 {
--		return Invalid
--	}
--	switch {
--	case (uuid[8] & 0xc0) == 0x80:
--		return RFC4122
--	case (uuid[8] & 0xe0) == 0xc0:
--		return Microsoft
--	case (uuid[8] & 0xe0) == 0xe0:
--		return Future
--	default:
--		return Reserved
--	}
--	panic("unreachable")
--}
--
--// Version returns the version of uuid.  It returns false if uuid is not
--// valid.
--func (uuid UUID) Version() (Version, bool) {
--	if len(uuid) != 16 {
--		return 0, false
--	}
--	return Version(uuid[6] >> 4), true
--}
--
--func (v Version) String() string {
--	if v > 15 {
--		return fmt.Sprintf("BAD_VERSION_%d", v)
--	}
--	return fmt.Sprintf("VERSION_%d", v)
--}
--
--func (v Variant) String() string {
--	switch v {
--	case RFC4122:
--		return "RFC4122"
--	case Reserved:
--		return "Reserved"
--	case Microsoft:
--		return "Microsoft"
--	case Future:
--		return "Future"
--	case Invalid:
--		return "Invalid"
--	}
--	return fmt.Sprintf("BadVariant%d", int(v))
--}
--
--// SetRand sets the random number generator to r, which implements io.Reader.
--// If r.Read returns an error when the package requests random data then
--// a panic will be issued.
--//
--// Calling SetRand with nil sets the random number generator to the default
--// generator.
--func SetRand(r io.Reader) {
--	if r == nil {
--		rander = rand.Reader
--		return
--	}
--	rander = r
--}
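
The parse/format round trip exercised by TestCoding below, as a standalone sketch (vendored import path assumed):

package main

import (
	"fmt"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	u := uuid.Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
	fmt.Println(u.String())  // canonical form
	fmt.Println(u.URN())     // urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2
	fmt.Println(u.Variant()) // prints: RFC4122

	v, _ := u.Version()
	fmt.Println(v) // prints: VERSION_1
}
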
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
-deleted file mode 100644
-index 417ebeb..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
-+++ /dev/null
-@@ -1,390 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"bytes"
--	"fmt"
--	"os"
--	"strings"
--	"testing"
--	"time"
--)
--
--type test struct {
--	in      string
--	version Version
--	variant Variant
--	isuuid  bool
--}
--
--var tests = []test{
--	{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
--	{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
--	{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
--	{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
--	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
--	{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
--	{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
--	{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
--	{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
--	{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
--	{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
--	{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
--	{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
--	{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
--	{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
--	{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
--
--	{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
--	{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
--	{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
--	{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
--	{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
--	{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
--	{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
--	{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
--	{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
--	{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
--
--	{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
--	{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
--	{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
--	{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
--	{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
--	{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
--}
--
--var constants = []struct {
--	c    interface{}
--	name string
--}{
--	{Person, "Person"},
--	{Group, "Group"},
--	{Org, "Org"},
--	{Invalid, "Invalid"},
--	{RFC4122, "RFC4122"},
--	{Reserved, "Reserved"},
--	{Microsoft, "Microsoft"},
--	{Future, "Future"},
--	{Domain(17), "Domain17"},
--	{Variant(42), "BadVariant42"},
--}
--
--func testTest(t *testing.T, in string, tt test) {
--	uuid := Parse(in)
--	if ok := (uuid != nil); ok != tt.isuuid {
--		t.Errorf("Parse(%s) got %v expected %v\n", in, ok, tt.isuuid)
--	}
--	if uuid == nil {
--		return
--	}
--
--	if v := uuid.Variant(); v != tt.variant {
--		t.Errorf("Variant(%s) got %d expected %d\n", in, v, tt.variant)
--	}
--	if v, _ := uuid.Version(); v != tt.version {
--		t.Errorf("Version(%s) got %d expected %d\n", in, v, tt.version)
--	}
--}
--
--func TestUUID(t *testing.T) {
--	for _, tt := range tests {
--		testTest(t, tt.in, tt)
--		testTest(t, strings.ToUpper(tt.in), tt)
--	}
--}
--
--func TestConstants(t *testing.T) {
--	for x, tt := range constants {
--		v, ok := tt.c.(fmt.Stringer)
--		if !ok {
--			t.Errorf("%x: %v: not a stringer", x, v)
--		} else if s := v.String(); s != tt.name {
--			v, _ := tt.c.(int)
--			t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
--		}
--	}
--}
--
--func TestRandomUUID(t *testing.T) {
--	m := make(map[string]bool)
--	for x := 1; x < 32; x++ {
--		uuid := NewRandom()
--		s := uuid.String()
--		if m[s] {
--			t.Errorf("NewRandom returned duplicated UUID %s\n", s)
--		}
--		m[s] = true
--		if v, _ := uuid.Version(); v != 4 {
--			t.Errorf("Random UUID of version %s\n", v)
--		}
--		if uuid.Variant() != RFC4122 {
--			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
--		}
--	}
--}
--
--func TestNew(t *testing.T) {
--	m := make(map[string]bool)
--	for x := 1; x < 32; x++ {
--		s := New()
--		if m[s] {
--			t.Errorf("New returned duplicated UUID %s\n", s)
--		}
--		m[s] = true
--		uuid := Parse(s)
--		if uuid == nil {
--			t.Errorf("New returned %q which does not decode\n", s)
--			continue
--		}
--		if v, _ := uuid.Version(); v != 4 {
--			t.Errorf("Random UUID of version %s\n", v)
--		}
--		if uuid.Variant() != RFC4122 {
--			t.Errorf("Random UUID is variant %d\n", uuid.Variant())
--		}
--	}
--}
--
--func clockSeq(t *testing.T, uuid UUID) int {
--	seq, ok := uuid.ClockSequence()
--	if !ok {
--		t.Fatalf("%s: invalid clock sequence\n", uuid)
--	}
--	return seq
--}
--
--func TestClockSeq(t *testing.T) {
--	// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
--	defer func(orig func() time.Time) { timeNow = orig }(timeNow)
--	monTime := time.Now()
--	timeNow = func() time.Time {
--		monTime = monTime.Add(1 * time.Second)
--		return monTime
--	}
--
--	SetClockSequence(-1)
--	uuid1 := NewUUID()
--	uuid2 := NewUUID()
--
--	if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
--		t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
--	}
--
--	SetClockSequence(-1)
--	uuid2 = NewUUID()
--
--	// Just on the very off chance we generated the same sequence
--	// two times we try again.
--	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
--		SetClockSequence(-1)
--		uuid2 = NewUUID()
--	}
--	if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
--		t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
--	}
--
--	SetClockSequence(0x1234)
--	uuid1 = NewUUID()
--	if seq := clockSeq(t, uuid1); seq != 0x1234 {
--		t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
--	}
--}
--
--func TestCoding(t *testing.T) {
--	text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
--	urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
--	data := UUID{
--		0x7d, 0x44, 0x48, 0x40,
--		0x9d, 0xc0,
--		0x11, 0xd1,
--		0xb2, 0x45,
--		0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
--	}
--	if v := data.String(); v != text {
--		t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
--	}
--	if v := data.URN(); v != urn {
--		t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
--	}
--
--	uuid := Parse(text)
--	if !Equal(uuid, data) {
--		t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
--	}
--}
--
--func TestVersion1(t *testing.T) {
--	uuid1 := NewUUID()
--	uuid2 := NewUUID()
--
--	if Equal(uuid1, uuid2) {
--		t.Errorf("%s:duplicate uuid\n", uuid1)
--	}
--	if v, _ := uuid1.Version(); v != 1 {
--		t.Errorf("%s: version %s expected 1\n", uuid1, v)
--	}
--	if v, _ := uuid2.Version(); v != 1 {
--		t.Errorf("%s: version %s expected 1\n", uuid2, v)
--	}
--	n1 := uuid1.NodeID()
--	n2 := uuid2.NodeID()
--	if !bytes.Equal(n1, n2) {
--		t.Errorf("Different nodes %x != %x\n", n1, n2)
--	}
--	t1, ok := uuid1.Time()
--	if !ok {
--		t.Errorf("%s: invalid time\n", uuid1)
--	}
--	t2, ok := uuid2.Time()
--	if !ok {
--		t.Errorf("%s: invalid time\n", uuid2)
--	}
--	q1, ok := uuid1.ClockSequence()
--	if !ok {
--		t.Errorf("%s: invalid clock sequence\n", uuid1)
--	}
--	q2, ok := uuid2.ClockSequence()
--	if !ok {
--		t.Errorf("%s: invalid clock sequence", uuid2)
--	}
--
--	switch {
--	case t1 == t2 && q1 == q2:
--		t.Errorf("time stopped\n")
--	case t1 > t2 && q1 == q2:
--		t.Errorf("time reversed\n")
--	case t1 < t2 && q1 != q2:
--		t.Errorf("clock sequence changed unexpectedly\n")
--	}
--}
--
--func TestNodeAndTime(t *testing.T) {
--	// Time is February 5, 1998 12:30:23.136364800 AM GMT
--
--	uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
--	node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
--
--	ts, ok := uuid.Time()
--	if ok {
--		c := time.Unix(ts.UnixTime())
--		want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
--		if !c.Equal(want) {
--			t.Errorf("Got time %v, want %v", c, want)
--		}
--	} else {
--		t.Errorf("%s: bad time\n", uuid)
--	}
--	if !bytes.Equal(node, uuid.NodeID()) {
--		t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
--	}
--}
--
--func TestMD5(t *testing.T) {
--	uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
--	want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
--	if uuid != want {
--		t.Errorf("MD5: got %q expected %q\n", uuid, want)
--	}
--}
--
--func TestSHA1(t *testing.T) {
--	uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
--	want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
--	if uuid != want {
--		t.Errorf("SHA1: got %q expected %q\n", uuid, want)
--	}
--}
--
--func TestNodeID(t *testing.T) {
--	nid := []byte{1, 2, 3, 4, 5, 6}
--	SetNodeInterface("")
--	s := NodeInterface()
--	if s == "" || s == "user" {
--		t.Errorf("NodeInterface %q after SetNodeInterface\n", s)
--	}
--	node1 := NodeID()
--	if node1 == nil {
--		t.Errorf("NodeID nil after SetNodeInterface\n")
--	}
--	SetNodeID(nid)
--	s = NodeInterface()
--	if s != "user" {
--		t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
--	}
--	node2 := NodeID()
--	if node2 == nil {
--		t.Errorf("NodeID nil after SetNodeID\n")
--	}
--	if bytes.Equal(node1, node2) {
--		t.Errorf("NodeID not changed after SetNodeID\n")
--	} else if !bytes.Equal(nid, node2) {
--		t.Errorf("NodeID is %x, expected %x\n", node2, nid)
--	}
--}
--
--func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
--	if uuid == nil {
--		t.Errorf("%s failed\n", name)
--		return
--	}
--	if v, _ := uuid.Version(); v != 2 {
--		t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
--		return
--	}
--	if v, ok := uuid.Domain(); !ok || v != domain {
--		if !ok {
--			t.Errorf("%s: %d: Domain failed\n", name, uuid)
--		} else {
--			t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
--		}
--	}
--	if v, ok := uuid.Id(); !ok || v != id {
--		if !ok {
--			t.Errorf("%s: %d: Id failed\n", name, uuid)
--		} else {
--			t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
--		}
--	}
--}
--
--func TestDCE(t *testing.T) {
--	testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
--	testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
--	testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
--}
--
--type badRand struct{}
--
--func (r badRand) Read(buf []byte) (int, error) {
--	for i, _ := range buf {
--		buf[i] = byte(i)
--	}
--	return len(buf), nil
--}
--
--func TestBadRand(t *testing.T) {
--	SetRand(badRand{})
--	uuid1 := New()
--	uuid2 := New()
--	if uuid1 != uuid2 {
--		t.Errorf("expected duplicates, got %q and %q\n", uuid1, uuid2)
--	}
--	SetRand(nil)
--	uuid1 = New()
--	uuid2 = New()
--	if uuid1 == uuid2 {
--		t.Errorf("unexpected duplicates, got %q\n", uuid1)
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
-deleted file mode 100644
-index 6358004..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
-+++ /dev/null
-@@ -1,41 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--import (
--	"encoding/binary"
--)
--
--// NewUUID returns a Version 1 UUID based on the current NodeID and clock
--// sequence, and the current time.  If the NodeID has not been set by SetNodeID
--// or SetNodeInterface then it will be set automatically.  If the NodeID cannot
--// be set NewUUID returns nil.  If clock sequence has not been set by
--// SetClockSequence then it will be set automatically.  If GetTime fails to
--// return the current time then NewUUID returns nil.
--func NewUUID() UUID {
--	if nodeID == nil {
--		SetNodeInterface("")
--	}
--
--	now, err := GetTime()
--	if err != nil {
--		return nil
--	}
--
--	uuid := make([]byte, 16)
--
--	time_low := uint32(now & 0xffffffff)
--	time_mid := uint16((now >> 32) & 0xffff)
--	time_hi := uint16((now >> 48) & 0x0fff)
--	time_hi |= 0x1000 // Version 1
--
--	binary.BigEndian.PutUint32(uuid[0:], time_low)
--	binary.BigEndian.PutUint16(uuid[4:], time_mid)
--	binary.BigEndian.PutUint16(uuid[6:], time_hi)
--	binary.BigEndian.PutUint16(uuid[8:], clock_seq)
--	copy(uuid[10:], nodeID)
--
--	return uuid
--}
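
A minimal sketch of NewUUID, which ties together the node, time and clock-sequence pieces above (vendored import path assumed):

package main

import (
	"fmt"
	"log"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	u := uuid.NewUUID()
	if u == nil {
		log.Fatal("could not generate a Version 1 UUID")
	}
	v, _ := u.Version()
	fmt.Println(u, v) // e.g. xxxxxxxx-xxxx-1xxx-xxxx-xxxxxxxxxxxx VERSION_1
}
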
-diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
-deleted file mode 100644
-index b3d4a36..0000000
---- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
-+++ /dev/null
-@@ -1,25 +0,0 @@
--// Copyright 2011 Google Inc.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package uuid
--
--// NewRandom returns a Random (Version 4) UUID or panics.
--//
--// The strength of the UUIDs is based on the strength of the crypto/rand
--// package.
--//
--// A note about uniqueness derived from the UUID Wikipedia entry:
--//
--//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
--//  hit by a meteorite is estimated to be one chance in 17 billion, that
--//  means the probability is about 0.00000000006 (6 × 10−11),
--//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
--//  year and having one duplicate.
--func NewRandom() UUID {
--	uuid := make([]byte, 16)
--	randomBits([]byte(uuid))
--	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
--	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
--	return uuid
--}
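
And the random (Version 4) counterpart, again only as a hedged sketch:

package main

import (
	"fmt"

	"code.google.com/p/go-uuid/uuid" // old vendored import path, assumed for illustration
)

func main() {
	fmt.Println(uuid.New())       // random Version 4 UUID, already formatted as a string
	fmt.Println(uuid.NewRandom()) // UUID value; Println uses its String method
}
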
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go b/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
-deleted file mode 100644
-index ed3e10c..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
-+++ /dev/null
-@@ -1,172 +0,0 @@
--// Copyright 2013 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package serviceaccount provides support for making OAuth2-authorized
--// HTTP requests from Google Compute Engine instances using service accounts.
--//
--// See: https://developers.google.com/compute/docs/authentication
--//
--// Example usage:
--//
--//	client, err := serviceaccount.NewClient(&serviceaccount.Options{})
--//	if err != nil {
--//		c.Errorf("failed to create service account client: %q", err)
--//		return err
--//	}
--//	client.Post("https://www.googleapis.com/compute/...", ...)
--//	client.Post("https://www.googleapis.com/bigquery/...", ...)
--//
--package serviceaccount
--
--import (
--	"encoding/json"
--	"net/http"
--	"net/url"
--	"path"
--	"sync"
--	"time"
--
--	"code.google.com/p/goauth2/oauth"
--)
--
--const (
--	metadataServer     = "metadata"
--	serviceAccountPath = "/computeMetadata/v1/instance/service-accounts"
--)
--
--// Options configures a service account Client.
--type Options struct {
--	// Underlying transport of service account Client.
--	// If nil, http.DefaultTransport is used.
--	Transport http.RoundTripper
--
--	// Service account name.
--	// If empty, "default" is used.
--	Account string
--}
--
--// NewClient returns an *http.Client authorized with the service account
--// configured in the Google Compute Engine instance.
--func NewClient(opt *Options) (*http.Client, error) {
--	tr := http.DefaultTransport
--	account := "default"
--	if opt != nil {
--		if opt.Transport != nil {
--			tr = opt.Transport
--		}
--		if opt.Account != "" {
--			account = opt.Account
--		}
--	}
--	t := &transport{
--		Transport: tr,
--		Account:   account,
--	}
--	// Get the initial access token.
--	if _, err := fetchToken(t); err != nil {
--		return nil, err
--	}
--	return &http.Client{
--		Transport: t,
--	}, nil
--}
--
--type tokenData struct {
--	AccessToken string  `json:"access_token"`
--	ExpiresIn   float64 `json:"expires_in"`
--	TokenType   string  `json:"token_type"`
--}
--
--// transport is an oauth.Transport with a custom Refresh and RoundTrip implementation.
--type transport struct {
--	Transport http.RoundTripper
--	Account   string
--
--	mu sync.Mutex
--	*oauth.Token
--}
--
--// refresh renews the transport's AccessToken.
--// t.mu should be held when this is called.
--func (t *transport) refresh() error {
--	// https://developers.google.com/compute/docs/metadata#transitioning
--	// v1 requires "Metadata-Flavor: Google" header.
--	tokenURL := &url.URL{
--		Scheme: "http",
--		Host:   metadataServer,
--		Path:   path.Join(serviceAccountPath, t.Account, "token"),
--	}
--	req, err := http.NewRequest("GET", tokenURL.String(), nil)
--	if err != nil {
--		return err
--	}
--	req.Header.Add("Metadata-Flavor", "Google")
--	resp, err := http.DefaultClient.Do(req)
--	if err != nil {
--		return err
--	}
--	defer resp.Body.Close()
--	d := json.NewDecoder(resp.Body)
--	var token tokenData
--	err = d.Decode(&token)
--	if err != nil {
--		return err
--	}
--	t.Token = &oauth.Token{
--		AccessToken: token.AccessToken,
--		Expiry:      time.Now().Add(time.Duration(token.ExpiresIn) * time.Second),
--	}
--	return nil
--}
--
--// Refresh renews the transport's AccessToken.
--func (t *transport) Refresh() error {
--	t.mu.Lock()
--	defer t.mu.Unlock()
--	return t.refresh()
--}
--
--// Fetch token from cache or generate a new one if cache miss or expired.
--func fetchToken(t *transport) (*oauth.Token, error) {
--	// Get a new token using Refresh in case of a cache miss or if it has expired.
--	t.mu.Lock()
--	defer t.mu.Unlock()
--	if t.Token == nil || t.Expired() {
--		if err := t.refresh(); err != nil {
--			return nil, err
--		}
--	}
--	return t.Token, nil
--}
--
--// cloneRequest returns a clone of the provided *http.Request.
--// The clone is a shallow copy of the struct and its Header map.
--func cloneRequest(r *http.Request) *http.Request {
--	// shallow copy of the struct
--	r2 := new(http.Request)
--	*r2 = *r
--	// deep copy of the Header
--	r2.Header = make(http.Header)
--	for k, s := range r.Header {
--		r2.Header[k] = s
--	}
--	return r2
--}
--
--// RoundTrip issues an authorized HTTP request and returns its response.
--func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
--	token, err := fetchToken(t)
--	if err != nil {
--		return nil, err
--	}
--
--	// To set the Authorization header, we must make a copy of the Request
--	// so that we don't modify the Request we were given.
--	// This is required by the specification of http.RoundTripper.
--	newReq := cloneRequest(req)
--	newReq.Header.Set("Authorization", "Bearer "+token.AccessToken)
--
--	// Make the HTTP request.
--	return t.Transport.RoundTrip(newReq)
--}
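
A sketch of the serviceaccount client in use. It only works from inside a Google Compute Engine instance, since the token comes from the metadata server, and the project name and request URL below are purely illustrative:

package main

import (
	"io"
	"log"
	"os"

	"code.google.com/p/goauth2/compute/serviceaccount" // old vendored import path, assumed for illustration
)

func main() {
	// An empty Options.Account means the instance's "default" service account.
	client, err := serviceaccount.NewClient(&serviceaccount.Options{})
	if err != nil {
		log.Fatalf("failed to create service account client: %v", err)
	}

	// Hypothetical API call; any Google API allowed by the instance's scopes would do.
	resp, err := client.Get("https://www.googleapis.com/compute/v1/projects/my-project/zones")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body)
}
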
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/example/oauthreq.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/example/oauthreq.go
-deleted file mode 100644
-index f9651bd..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/example/oauthreq.go
-+++ /dev/null
-@@ -1,100 +0,0 @@
--// Copyright 2011 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// This program makes a call to the specified API, authenticated with OAuth2.
--// A list of example APIs can be found at https://code.google.com/oauthplayground/
--package main
--
--import (
--	"flag"
--	"fmt"
--	"io"
--	"log"
--	"os"
--
--	"code.google.com/p/goauth2/oauth"
--)
--
--var (
--	clientId     = flag.String("id", "", "Client ID")
--	clientSecret = flag.String("secret", "", "Client Secret")
--	scope        = flag.String("scope", "https://www.googleapis.com/auth/userinfo.profile", "OAuth scope")
--	redirectURL  = flag.String("redirect_url", "oob", "Redirect URL")
--	authURL      = flag.String("auth_url", "https://accounts.google.com/o/oauth2/auth", "Authentication URL")
--	tokenURL     = flag.String("token_url", "https://accounts.google.com/o/oauth2/token", "Token URL")
--	requestURL   = flag.String("request_url", "https://www.googleapis.com/oauth2/v1/userinfo", "API request")
--	code         = flag.String("code", "", "Authorization Code")
--	cachefile    = flag.String("cache", "cache.json", "Token cache file")
--)
--
--const usageMsg = `
--To obtain a request token you must specify both -id and -secret.
--
--To obtain Client ID and Secret, see the "OAuth 2 Credentials" section under
--the "API Access" tab on this page: https://code.google.com/apis/console/
--
--Once you have completed the OAuth flow, the credentials should be stored inside
--the file specified by -cache and you may run without the -id and -secret flags.
--`
--
--func main() {
--	flag.Parse()
--
--	// Set up a configuration.
--	config := &oauth.Config{
--		ClientId:     *clientId,
--		ClientSecret: *clientSecret,
--		RedirectURL:  *redirectURL,
--		Scope:        *scope,
--		AuthURL:      *authURL,
--		TokenURL:     *tokenURL,
--		TokenCache:   oauth.CacheFile(*cachefile),
--	}
--
--	// Set up a Transport using the config.
--	transport := &oauth.Transport{Config: config}
--
--	// Try to pull the token from the cache; if this fails, we need to get one.
--	token, err := config.TokenCache.Token()
--	if err != nil {
--		if *clientId == "" || *clientSecret == "" {
--			flag.Usage()
--			fmt.Fprint(os.Stderr, usageMsg)
--			os.Exit(2)
--		}
--		if *code == "" {
--			// Get an authorization code from the data provider.
--			// ("Please ask the user if I can access this resource.")
--			url := config.AuthCodeURL("")
--			fmt.Print("Visit this URL to get a code, then run again with -code=YOUR_CODE\n\n")
--			fmt.Println(url)
--			return
--		}
--		// Exchange the authorization code for an access token.
--		// ("Here's the code you gave the user, now give me a token!")
--		token, err = transport.Exchange(*code)
--		if err != nil {
--			log.Fatal("Exchange:", err)
--		}
--		// (The Exchange method will automatically cache the token.)
--		fmt.Printf("Token is cached in %v\n", config.TokenCache)
--	}
--
--	// Make the actual request using the cached token to authenticate.
--	// ("Here's the token, let me in!")
--	transport.Token = token
--
--	// Make the request.
--	r, err := transport.Client().Get(*requestURL)
--	if err != nil {
--		log.Fatal("Get:", err)
--	}
--	defer r.Body.Close()
--
--	// Write the response to standard output.
--	io.Copy(os.Stdout, r.Body)
--
--	// Send final carriage return, just to be neat.
--	fmt.Println()
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.client_secrets.json b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.client_secrets.json
-deleted file mode 100644
-index 2ea86f2..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.client_secrets.json
-+++ /dev/null
-@@ -1 +0,0 @@
--{"web":{"auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://accounts.google.com/o/oauth2/token","client_email":"XXXXXXXXXXXX at developer.gserviceaccount.com","client_x509_cert_url":"https://www.googleapis.com/robot/v1/metadata/x509/XXXXXXXXXXXX@developer.gserviceaccount.com","client_id":"XXXXXXXXXXXX.apps.googleusercontent.com","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs"}}
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.pem b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.pem
-deleted file mode 100644
-index 8f78b92..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/example.pem
-+++ /dev/null
-@@ -1,20 +0,0 @@
--Bag Attributes
--    friendlyName: privatekey
--    localKeyID: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
--Key Attributes: <No Attributes>
-------BEGIN PRIVATE KEY-----
--XXXXxyXXXXXXXxxyxxxX9y0XXYXXXXYXXxXyxxXxXxXXXyXXXXx4yx1xy1xyYxxY
--1XxYy38YxXxxxyXxyyxx+xxxxyx1Y1xYx7yx2/Y1XyyXYYYxY5YXxX0xY/Y642yX
--zYYxYXzXYxY0Y8y9YxyYXxxX40YyXxxXX4XXxx7XxXxxXyXxYYXxXyxX5XY0Yy2X
--1YX0XXyy6YXyXx9XxXxyXX9XXYXxXxXXXXXXxYXYY3Y8Yy311XYYY81XyY14Xyyx
--xXyx7xxXXXxxxxyyyX4YYYXyYyYXyxX4XYXYyxXYyx9xy23xXYyXyxYxXxx1XXXY
--y98yX6yYxyyyX4Xyx1Xy/0yxxYxXxYYx2xx7yYXXXxYXXXxyXyyYYxx5XX2xxyxy
--y6Yyyx0XX3YYYyx9YYXXXX7y0yxXXy+90XYz1y2xyx7yXxX+8X0xYxXXYxxyxYYy
--YXx8Yy4yX0Xyxxx6yYX92yxy1YYYzyyyyxy55x/yyXXXYYXYXXzXXxYYxyXY8XXX
--+y9+yXxX7XxxyYYxxXYxyY623xxXxYX59x5Y6yYyXYY4YxXXYXXXYxXYxXxXXx6x
--YXX7XxXX2X0XY7YXyYy1XXxYXxXxYY1xXXxxxyy+07zXYxYxxXyyxxyxXx1XYy5X
--5XYzyxYxXXYyX9XX7xX8xXxx+XXYyYXXXX5YY1x8Yxyx54Xy/1XXyyYXY5YxYyxY
--XyyxXyX/YxxXXXxXXYXxyxx63xX/xxyYXXyYzx0XY+YxX5xyYyyxxxXXYX/94XXy
--Xx63xYxXyXY3/XXxyyXX15XXXyz08XYY5YYXY/YXy/96x68XyyXXxYyXy4xYXx5x
--7yxxyxxYxXxyx3y=
-------END PRIVATE KEY-----
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/main.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/main.go
-deleted file mode 100644
-index 2256e9c..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/example/main.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--// Copyright 2011 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// This program makes a read only call to the Google Cloud Storage API,
--// authenticated with OAuth2. A list of example APIs can be found at
--// https://code.google.com/oauthplayground/
--package main
--
--import (
--	"encoding/json"
--	"flag"
--	"fmt"
--	"io/ioutil"
--	"log"
--	"net/http"
--	"strings"
--
--	"code.google.com/p/goauth2/oauth/jwt"
--)
--
--const scope = "https://www.googleapis.com/auth/devstorage.read_only"
--
--var (
--	secretsFile = flag.String("s", "", "JSON encoded secrets for the service account")
--	pemFile     = flag.String("k", "", "private pem key file for the service account")
--)
--
--const usageMsg = `
--You must specify -k and -s.
--
--To obtain client secrets and pem, see the "OAuth 2 Credentials" section under
--the "API Access" tab on this page: https://code.google.com/apis/console/
--
--Google Cloud Storage must also be turned on in the API console.
--`
--
--func main() {
--	flag.Parse()
--
--	if *secretsFile == "" || *pemFile == "" {
--		flag.Usage()
--		fmt.Println(usageMsg)
--		return
--	}
--
--	// Read the secret file bytes into the config.
--	secretBytes, err := ioutil.ReadFile(*secretsFile)
--	if err != nil {
--		log.Fatal("error reading secrets file:", err)
--	}
--	var config struct {
--		Web struct {
--			ClientEmail string `json:"client_email"`
--			ClientID    string `json:"client_id"`
--			TokenURI    string `json:"token_uri"`
--		}
--	}
--	err = json.Unmarshal(secretBytes, &config)
--	if err != nil {
--		log.Fatal("error unmarshalling secrets:", err)
--	}
--
--	// Get the project ID from the client ID.
--	projectID := strings.SplitN(config.Web.ClientID, "-", 2)[0]
--
--	// Read the pem file bytes for the private key.
--	keyBytes, err := ioutil.ReadFile(*pemFile)
--	if err != nil {
--		log.Fatal("error reading private key file:", err)
--	}
--
--	// Craft the ClaimSet and JWT token.
--	t := jwt.NewToken(config.Web.ClientEmail, scope, keyBytes)
--	t.ClaimSet.Aud = config.Web.TokenURI
--
--	// We need to provide a client.
--	c := &http.Client{}
--
--	// Get the access token.
--	o, err := t.Assert(c)
--	if err != nil {
--		log.Fatal("assertion error:", err)
--	}
--
--	// Refresh token will be missing, but this access_token will be good
--	// for one hour.
--	fmt.Printf("access_token = %v\n", o.AccessToken)
--	fmt.Printf("refresh_token = %v\n", o.RefreshToken)
--	fmt.Printf("expires %v\n", o.Expiry)
--
--	// Form the request to list Google Cloud Storage buckets.
--	req, err := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
--	if err != nil {
--		log.Fatal("http.NewRequest:", err)
--	}
--	req.Header.Set("Authorization", "OAuth "+o.AccessToken)
--	req.Header.Set("x-goog-api-version", "2")
--	req.Header.Set("x-goog-project-id", projectID)
--
--	// Make the request.
--	r, err := c.Do(req)
--	if err != nil {
--		log.Fatal("API request error:", err)
--	}
--	defer r.Body.Close()
--
--	// Write the response to standard output.
--	res, err := ioutil.ReadAll(r.Body)
--	if err != nil {
--		log.Fatal("error reading API request results:", err)
--	}
--	fmt.Printf("\nRESULT:\n%s\n", res)
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt.go
-deleted file mode 100644
-index 61bf5ce..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt.go
-+++ /dev/null
-@@ -1,511 +0,0 @@
--// Copyright 2012 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// The jwt package provides support for creating credentials for OAuth2 service
--// account requests.
--//
--// For examples of the package usage please see jwt_test.go.
--// Example usage (error handling omitted for brevity):
--//
--//	// Craft the ClaimSet and JWT token.
--//	iss := "XXXXXXXXXXXX@developer.gserviceaccount.com"
--//	scope := "https://www.googleapis.com/auth/devstorage.read_only"
--//	t := jwt.NewToken(iss, scope, pemKeyBytes)
--//
--//	// We need to provide a client.
--//	c := &http.Client{}
--//
--//	// Get the access token.
--//	o, _ := t.Assert(c)
--//
--//	// Form the request to the service.
--//	req, _ := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
--//	req.Header.Set("Authorization", "OAuth "+o.AccessToken)
--//	req.Header.Set("x-goog-api-version", "2")
--//	req.Header.Set("x-goog-project-id", "XXXXXXXXXXXX")
--//
--//	// Make the request.
--//	result, _ := c.Do(req)
--//
--// For info on OAuth2 service accounts please see the online documentation.
--// https://developers.google.com/accounts/docs/OAuth2ServiceAccount
--//
--package jwt
--
--import (
--	"bytes"
--	"crypto"
--	"crypto/rand"
--	"crypto/rsa"
--	"crypto/sha256"
--	"crypto/x509"
--	"encoding/base64"
--	"encoding/json"
--	"encoding/pem"
--	"errors"
--	"fmt"
--	"net/http"
--	"net/url"
--	"strings"
--	"time"
--
--	"code.google.com/p/goauth2/oauth"
--)
--
--// These are the default/standard values for this to work for Google service accounts.
--const (
--	stdAlgorithm     = "RS256"
--	stdType          = "JWT"
--	stdAssertionType = "http://oauth.net/grant_type/jwt/1.0/bearer"
--	stdGrantType     = "urn:ietf:params:oauth:grant-type:jwt-bearer"
--	stdAud           = "https://accounts.google.com/o/oauth2/token"
--)
--
--var (
--	ErrInvalidKey = errors.New("Invalid Key")
--)
--
--// base64Encode returns a Base64url encoded version of the input with any
--// trailing "=" stripped.
--func base64Encode(b []byte) string {
--	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
--}
--
--// base64Decode decodes the Base64url encoded string
--func base64Decode(s string) ([]byte, error) {
--	// add back missing padding
--	switch len(s) % 4 {
--	case 2:
--		s += "=="
--	case 3:
--		s += "="
--	}
--	return base64.URLEncoding.DecodeString(s)
--}
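For reference, a minimal runnable sketch of the base64url round trip that base64Encode and base64Decode above implement, using only the standard library (the sample payload is arbitrary): padding is stripped after encoding and restored before decoding.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	raw := []byte(`{"typ":"JWT"}`)

	// Encode, then strip the trailing "=" padding, as base64Encode does.
	enc := strings.TrimRight(base64.URLEncoding.EncodeToString(raw), "=")

	// Restore the padding before decoding, as base64Decode does.
	if m := len(enc) % 4; m != 0 {
		enc += strings.Repeat("=", 4-m)
	}
	dec, err := base64.URLEncoding.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec)) // {"typ":"JWT"}
}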
--
--// The JWT claim set contains information about the JWT including the
--// permissions being requested (scopes), the target of the token, the issuer,
--// the time the token was issued, and the lifetime of the token.
--//
--// Aud is usually https://accounts.google.com/o/oauth2/token
--type ClaimSet struct {
--	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
--	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
--	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
--	Prn   string `json:"prn,omitempty"`   // email for which the application is requesting delegated access (Optional).
--	Exp   int64  `json:"exp"`
--	Iat   int64  `json:"iat"`
--	Typ   string `json:"typ,omitempty"`
--	Sub   string `json:"sub,omitempty"` // Add support for googleapi delegation support
--
--	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
--	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
--	PrivateClaims map[string]interface{} `json:"-"`
--
--	exp time.Time
--	iat time.Time
--}
--
--// setTimes sets iat and exp to time.Now() and iat.Add(time.Hour) respectively.
--//
--// Note that these times have nothing to do with the expiration time for the
--// access_token returned by the server.  These have to do with the lifetime of
--// the encoded JWT.
--//
--// A JWT can be re-used for up to one hour after it was encoded.  The access
--// token that is granted will also be good for one hour so there is little point
--// in trying to use the JWT a second time.
--func (c *ClaimSet) setTimes(t time.Time) {
--	c.iat = t
--	c.exp = c.iat.Add(time.Hour)
--}
--
--var (
--	jsonStart = []byte{'{'}
--	jsonEnd   = []byte{'}'}
--)
--
--// encode returns the Base64url encoded form of the Signature.
--func (c *ClaimSet) encode() string {
--	if c.exp.IsZero() || c.iat.IsZero() {
--		c.setTimes(time.Now())
--	}
--	if c.Aud == "" {
--		c.Aud = stdAud
--	}
--	c.Exp = c.exp.Unix()
--	c.Iat = c.iat.Unix()
--
--	b, err := json.Marshal(c)
--	if err != nil {
--		panic(err)
--	}
--
--	if len(c.PrivateClaims) == 0 {
--		return base64Encode(b)
--	}
--
--	// Marshal private claim set and then append it to b.
--	prv, err := json.Marshal(c.PrivateClaims)
--	if err != nil {
--		panic(fmt.Errorf("Invalid map of private claims %v", c.PrivateClaims))
--	}
--
--	// Concatenate public and private claim JSON objects.
--	if !bytes.HasSuffix(b, jsonEnd) {
--		panic(fmt.Errorf("Invalid JSON %s", b))
--	}
--	if !bytes.HasPrefix(prv, jsonStart) {
--		panic(fmt.Errorf("Invalid JSON %s", prv))
--	}
--	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
--	b = append(b, prv[1:]...) // Append private claims.
--
--	return base64Encode(b)
--}
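A small self-contained sketch of the brace-splicing step encode() above uses to merge PrivateClaims into the marshalled claim set (the claim names below are made up): the closing brace of the public object is replaced with a comma and the private object is appended without its opening brace.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	pub, _ := json.Marshal(map[string]string{"iss": "svc@example.com"}) // public claims
	prv, _ := json.Marshal(map[string]int{"amount": 22})                // private claims

	merged := append([]byte{}, pub...)
	merged[len(merged)-1] = ','         // replace the closing brace with a comma
	merged = append(merged, prv[1:]...) // append the private claims minus their opening brace
	fmt.Println(string(merged))         // {"iss":"svc@example.com","amount":22}
}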
--
--// Header describes the algorithm and type of token being generated,
--// and optionally a KeyID describing additional parameters for the
--// signature.
--type Header struct {
--	Algorithm string `json:"alg"`
--	Type      string `json:"typ"`
--	KeyId     string `json:"kid,omitempty"`
--}
--
--func (h *Header) encode() string {
--	b, err := json.Marshal(h)
--	if err != nil {
--		panic(err)
--	}
--	return base64Encode(b)
--}
--
--// A JWT is composed of three parts: a header, a claim set, and a signature.
--// The well formed and encoded JWT can then be exchanged for an access token.
--//
--// The Token is not a JWT, but it is encoded to produce a well formed JWT.
--//
--// When obtaining a key from the Google API console it will be downloaded in a
--// PKCS12 encoding.  To use this key you will need to convert it to a PEM file.
--// This can be achieved with openssl.
--//
--//   $ openssl pkcs12 -in <key.p12> -nocerts -passin pass:notasecret -nodes -out <key.pem>
--//
--// The contents of this file can then be used as the Key.
--type Token struct {
--	ClaimSet *ClaimSet // claim set used to construct the JWT
--	Header   *Header   // header used to construct the JWT
--	Key      []byte    // PEM printable encoding of the private key
--	pKey     *rsa.PrivateKey
--
--	header string
--	claim  string
--	sig    string
--
--	useExternalSigner bool
--	signer            Signer
--}
--
--// NewToken returns a filled in *Token based on the standard header,
--// and sets the Iat and Exp times based on when the call to Assert is
--// made.
--func NewToken(iss, scope string, key []byte) *Token {
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--		Aud:   stdAud,
--	}
--	h := &Header{
--		Algorithm: stdAlgorithm,
--		Type:      stdType,
--	}
--	t := &Token{
--		ClaimSet: c,
--		Header:   h,
--		Key:      key,
--	}
--	return t
--}
--
--// Signer is an interface that, given a JWT token, returns the header &
--// claim (serialized and urlEncoded to a byte slice), along with the
--// signature and an error (if any occurred).  It could modify any data
--// to sign (typically the KeyID).
--//
--// Example usage where a SHA256 hash of the original url-encoded token
--// with an added KeyID and secret data is used as a signature:
--//
--//	var privateData = "secret data added to hash, indexed by KeyID"
--//
--//	type SigningService struct{}
--//
--//	func (ss *SigningService) Sign(in *jwt.Token) (newTokenData, sig []byte, err error) {
--//		in.Header.KeyId = "signing service"
--//		newTokenData = in.EncodeWithoutSignature()
--//		dataToSign := fmt.Sprintf("%s.%s", newTokenData, privateData)
--//		h := sha256.New()
--//		_, err = h.Write([]byte(dataToSign))
--//		sig = h.Sum(nil)
--//		return
--//	}
--type Signer interface {
--	Sign(in *Token) (tokenData, signature []byte, err error)
--}
--
--// NewSignerToken returns a *Token, using an external signer function
--func NewSignerToken(iss, scope string, signer Signer) *Token {
--	t := NewToken(iss, scope, nil)
--	t.useExternalSigner = true
--	t.signer = signer
--	return t
--}
--
--// Expired returns a boolean value letting us know if the token has expired.
--func (t *Token) Expired() bool {
--	return t.ClaimSet.exp.Before(time.Now())
--}
--
--// Encode constructs and signs a Token returning a JWT ready to use for
--// requesting an access token.
--func (t *Token) Encode() (string, error) {
--	var tok string
--	t.header = t.Header.encode()
--	t.claim = t.ClaimSet.encode()
--	err := t.sign()
--	if err != nil {
--		return tok, err
--	}
--	tok = fmt.Sprintf("%s.%s.%s", t.header, t.claim, t.sig)
--	return tok, nil
--}
--
--// EncodeWithoutSignature returns the url-encoded value of the Token
--// before signing has occurred (typically for use by external signers).
--func (t *Token) EncodeWithoutSignature() string {
--	t.header = t.Header.encode()
--	t.claim = t.ClaimSet.encode()
--	return fmt.Sprintf("%s.%s", t.header, t.claim)
--}
--
--// sign computes the signature for a Token.  The details for this can be found
--// in the OAuth2 Service Account documentation.
--// https://developers.google.com/accounts/docs/OAuth2ServiceAccount#computingsignature
--func (t *Token) sign() error {
--	if t.useExternalSigner {
--		fulldata, sig, err := t.signer.Sign(t)
--		if err != nil {
--			return err
--		}
--		split := strings.Split(string(fulldata), ".")
--		if len(split) != 2 {
--			return errors.New("no token returned")
--		}
--		t.header = split[0]
--		t.claim = split[1]
--		t.sig = base64Encode(sig)
--		return err
--	}
--	ss := fmt.Sprintf("%s.%s", t.header, t.claim)
--	if t.pKey == nil {
--		err := t.parsePrivateKey()
--		if err != nil {
--			return err
--		}
--	}
--	h := sha256.New()
--	h.Write([]byte(ss))
--	b, err := rsa.SignPKCS1v15(rand.Reader, t.pKey, crypto.SHA256, h.Sum(nil))
--	t.sig = base64Encode(b)
--	return err
--}
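For orientation, a rough standard-library sketch of what Encode and sign above boil down to: base64url(header) + "." + base64url(claim set), signed with RSASSA-PKCS1-v1_5 over SHA-256. A throwaway RSA key is generated instead of parsing a PEM file, and the issuer, scope and audience strings are placeholders.

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strings"
	"time"
)

func main() {
	// Throwaway key; the real code parses the service account's PEM key instead.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	b64 := func(b []byte) string {
		return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
	}

	now := time.Now().Unix()
	header := b64([]byte(`{"alg":"RS256","typ":"JWT"}`))
	claims := b64([]byte(fmt.Sprintf(
		`{"iss":"svc@example.iam.gserviceaccount.com","scope":"https://www.googleapis.com/auth/devstorage.read_only","aud":"https://accounts.google.com/o/oauth2/token","exp":%d,"iat":%d}`,
		now+3600, now)))

	// Sign "header.claims" and append the encoded signature.
	h := sha256.New()
	h.Write([]byte(header + "." + claims))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
	if err != nil {
		panic(err)
	}
	fmt.Println(header + "." + claims + "." + b64(sig))
}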
--
--// parsePrivateKey converts the Token's Key ([]byte) into a parsed
--// rsa.PrivateKey.  If the key is not well formed this method will return an
--// ErrInvalidKey error.
--func (t *Token) parsePrivateKey() error {
--	block, _ := pem.Decode(t.Key)
--	if block == nil {
--		return ErrInvalidKey
--	}
--	parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
--	if err != nil {
--		parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes)
--		if err != nil {
--			return err
--		}
--	}
--	var ok bool
--	t.pKey, ok = parsedKey.(*rsa.PrivateKey)
--	if !ok {
--		return ErrInvalidKey
--	}
--	return nil
--}
--
--// Assert obtains an *oauth.Token from the remote server by encoding and sending
--// a JWT.  The access_token will expire in one hour (3600 seconds) and cannot be
--// refreshed (no refresh_token is returned with the response).  Once this token
--// expires call this method again to get a fresh one.
--func (t *Token) Assert(c *http.Client) (*oauth.Token, error) {
--	var o *oauth.Token
--	t.ClaimSet.setTimes(time.Now())
--	u, v, err := t.buildRequest()
--	if err != nil {
--		return o, err
--	}
--	resp, err := c.PostForm(u, v)
--	if err != nil {
--		return o, err
--	}
--	o, err = handleResponse(resp)
--	return o, err
--}
--
--// buildRequest sets up the URL values and the proper URL string for making our
--// access_token request.
--func (t *Token) buildRequest() (string, url.Values, error) {
--	v := url.Values{}
--	j, err := t.Encode()
--	if err != nil {
--		return t.ClaimSet.Aud, v, err
--	}
--	v.Set("grant_type", stdGrantType)
--	v.Set("assertion", j)
--	return t.ClaimSet.Aud, v, nil
--}
--
--// Used for decoding the response body.
--type respBody struct {
--	IdToken   string        `json:"id_token"`
--	Access    string        `json:"access_token"`
--	Type      string        `json:"token_type"`
--	ExpiresIn time.Duration `json:"expires_in"`
--}
--
--// handleResponse returns a filled in *oauth.Token given the *http.Response from
--// a *http.Request created by buildRequest.
--func handleResponse(r *http.Response) (*oauth.Token, error) {
--	o := &oauth.Token{}
--	defer r.Body.Close()
--	if r.StatusCode != 200 {
--		return o, errors.New("invalid response: " + r.Status)
--	}
--	b := &respBody{}
--	err := json.NewDecoder(r.Body).Decode(b)
--	if err != nil {
--		return o, err
--	}
--	o.AccessToken = b.Access
--	if b.IdToken != "" {
--		// decode returned id token to get expiry
--		o.AccessToken = b.IdToken
--		s := strings.Split(b.IdToken, ".")
--		if len(s) < 2 {
--			return nil, errors.New("invalid token received")
--		}
--		d, err := base64Decode(s[1])
--		if err != nil {
--			return o, err
--		}
--		c := &ClaimSet{}
--		err = json.NewDecoder(bytes.NewBuffer(d)).Decode(c)
--		if err != nil {
--			return o, err
--		}
--		o.Expiry = time.Unix(c.Exp, 0)
--		return o, nil
--	}
--	o.Expiry = time.Now().Add(b.ExpiresIn * time.Second)
--	return o, nil
--}
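A compact sketch of the id_token branch above: the expiry is taken from the "exp" claim in the token's (unverified) payload segment. The sample token is fabricated so the snippet runs on its own, and base64.RawURLEncoding stands in for the padding handling done by base64Decode.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

func main() {
	// Fabricated id_token: header.payload.signature with payload {"exp":1328554385}.
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"exp":1328554385}`))
	idToken := "x." + payload + ".y"

	seg := strings.Split(idToken, ".")[1]
	raw, err := base64.RawURLEncoding.DecodeString(seg)
	if err != nil {
		panic(err)
	}
	var claims struct {
		Exp int64 `json:"exp"`
	}
	if err := json.Unmarshal(raw, &claims); err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(claims.Exp, 0))
}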
--
--// Transport implements http.RoundTripper. When configured with a valid
--// JWT and OAuth tokens it can be used to make authenticated HTTP requests.
--//
--//	t := &jwt.Transport{jwtToken, oauthToken}
--//	r, _, err := t.Client().Get("http://example.org/url/requiring/auth")
--//
--// It will automatically refresh the OAuth token if it can, updating in place.
--type Transport struct {
--	JWTToken   *Token
--	OAuthToken *oauth.Token
--
--	// Transport is the HTTP transport to use when making requests.
--	// It will default to http.DefaultTransport if nil.
--	Transport http.RoundTripper
--}
--
--// Creates a new authenticated transport.
--func NewTransport(token *Token) (*Transport, error) {
--	oa, err := token.Assert(new(http.Client))
--	if err != nil {
--		return nil, err
--	}
--	return &Transport{
--		JWTToken:   token,
--		OAuthToken: oa,
--	}, nil
--}
--
--// Client returns an *http.Client that makes OAuth-authenticated requests.
--func (t *Transport) Client() *http.Client {
--	return &http.Client{Transport: t}
--}
--
--// Fetches the internal transport.
--func (t *Transport) transport() http.RoundTripper {
--	if t.Transport != nil {
--		return t.Transport
--	}
--	return http.DefaultTransport
--}
--
--// RoundTrip executes a single HTTP transaction using the Transport's
--// OAuthToken as authorization headers.
--//
--// This method will attempt to renew the token if it has expired and may return
--// an error related to that token renewal before attempting the client request.
--// If the token cannot be renewed a non-nil error value will be returned.
--// If the token is invalid callers should expect HTTP-level errors,
--// as indicated by the Response's StatusCode.
--func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
--	// Sanity check the two tokens
--	if t.JWTToken == nil {
--		return nil, fmt.Errorf("no JWT token supplied")
--	}
--	if t.OAuthToken == nil {
--		return nil, fmt.Errorf("no OAuth token supplied")
--	}
--	// Refresh the OAuth token if it has expired
--	if t.OAuthToken.Expired() {
--		if oa, err := t.JWTToken.Assert(new(http.Client)); err != nil {
--			return nil, err
--		} else {
--			t.OAuthToken = oa
--		}
--	}
--	// To set the Authorization header, we must make a copy of the Request
--	// so that we don't modify the Request we were given.
--	// This is required by the specification of http.RoundTripper.
--	req = cloneRequest(req)
--	req.Header.Set("Authorization", "Bearer "+t.OAuthToken.AccessToken)
--
--	// Make the HTTP request.
--	return t.transport().RoundTrip(req)
--}
--
--// cloneRequest returns a clone of the provided *http.Request.
--// The clone is a shallow copy of the struct and its Header map.
--func cloneRequest(r *http.Request) *http.Request {
--	// shallow copy of the struct
--	r2 := new(http.Request)
--	*r2 = *r
--	// deep copy of the Header
--	r2.Header = make(http.Header)
--	for k, s := range r.Header {
--		r2.Header[k] = s
--	}
--	return r2
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt_test.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt_test.go
-deleted file mode 100644
-index 622843e..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/jwt/jwt_test.go
-+++ /dev/null
-@@ -1,486 +0,0 @@
--// Copyright 2012 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// For package documentation please see jwt.go.
--//
--package jwt
--
--import (
--	"bytes"
--	"crypto"
--	"crypto/rand"
--	"crypto/rsa"
--	"crypto/sha256"
--	"crypto/x509"
--	"encoding/json"
--	"encoding/pem"
--	"io/ioutil"
--	"net/http"
--	"testing"
--	"time"
--)
--
--const (
--	stdHeaderStr = `{"alg":"RS256","typ":"JWT"}`
--	iss          = "761326798069-r5mljlln1rd4lrbhg75efgigp36m78j5@developer.gserviceaccount.com"
--	scope        = "https://www.googleapis.com/auth/prediction"
--	exp          = 1328554385
--	iat          = 1328550785 // exp - 1 hour
--)
--
--// Base64url encoded Header
--const headerEnc = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9"
--
--// Base64url encoded ClaimSet
--const claimSetEnc = "eyJpc3MiOiI3NjEzMjY3OTgwNjktcjVtbGpsbG4xcmQ0bHJiaGc3NWVmZ2lncDM2bTc4ajVAZGV2ZWxvcGVyLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzY29wZSI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2F1dGgvcHJlZGljdGlvbiIsImF1ZCI6Imh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsImV4cCI6MTMyODU1NDM4NSwiaWF0IjoxMzI4NTUwNzg1fQ"
--
--// Base64url encoded Signature
--const sigEnc = "olukbHreNiYrgiGCTEmY3eWGeTvYDSUHYoE84Jz3BRPBSaMdZMNOn_0CYK7UHPO7OdvUofjwft1dH59UxE9GWS02pjFti1uAQoImaqjLZoTXr8qiF6O_kDa9JNoykklWlRAIwGIZkDupCS-8cTAnM_ksSymiH1coKJrLDUX_BM0x2f4iMFQzhL5vT1ll-ZipJ0lNlxb5QsyXxDYcxtHYguF12-vpv3ItgT0STfcXoWzIGQoEbhwB9SBp9JYcQ8Ygz6pYDjm0rWX9LrchmTyDArCodpKLFtutNgcIFUP9fWxvwd1C2dNw5GjLcKr9a_SAERyoJ2WnCR1_j9N0wD2o0g"
--
--// Base64url encoded Token
--const tokEnc = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3NjEzMjY3OTgwNjktcjVtbGpsbG4xcmQ0bHJiaGc3NWVmZ2lncDM2bTc4ajVAZGV2ZWxvcGVyLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzY29wZSI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2F1dGgvcHJlZGljdGlvbiIsImF1ZCI6Imh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsImV4cCI6MTMyODU1NDM4NSwiaWF0IjoxMzI4NTUwNzg1fQ.olukbHreNiYrgiGCTEmY3eWGeTvYDSUHYoE84Jz3BRPBSaMdZMNOn_0CYK7UHPO7OdvUofjwft1dH59UxE9GWS02pjFti1uAQoImaqjLZoTXr8qiF6O_kDa9JNoykklWlRAIwGIZkDupCS-8cTAnM_ksSymiH1coKJrLDUX_BM0x2f4iMFQzhL5vT1ll-ZipJ0lNlxb5QsyXxDYcxtHYguF12-vpv3ItgT0STfcXoWzIGQoEbhwB9SBp9JYcQ8Ygz6pYDjm0rWX9LrchmTyDArCodpKLFtutNgcIFUP9fWxvwd1C2dNw5GjLcKr9a_SAERyoJ2WnCR1_j9N0wD2o0g"
--
--// Private key for testing
--const privateKeyPem = `-----BEGIN RSA PRIVATE KEY-----
--MIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj
--7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/
--xmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs
--SliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18
--pe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk
--SBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk
--nQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq
--HD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y
--nHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9
--IisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2
--YCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU
--Z422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ
--vzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP
--B8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl
--aLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2
--eCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI
--aqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk
--klORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ
--CFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu
--UqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg
--soBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28
--bvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH
--504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL
--YXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx
--BeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==
-------END RSA PRIVATE KEY-----`
--
--// Public key to go with the private key for testing
--const publicKeyPem = `-----BEGIN CERTIFICATE-----
--MIIDIzCCAgugAwIBAgIJAMfISuBQ5m+5MA0GCSqGSIb3DQEBBQUAMBUxEzARBgNV
--BAMTCnVuaXQtdGVzdHMwHhcNMTExMjA2MTYyNjAyWhcNMjExMjAzMTYyNjAyWjAV
--MRMwEQYDVQQDEwp1bml0LXRlc3RzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
--CgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZgkdmM
--7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU1Wer
--uQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS5qQp
--gyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+zpyl4
--+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc//fy3
--ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABo3YwdDAdBgNVHQ4EFgQU2RQ8yO+O
--gN8oVW2SW7RLrfYd9jEwRQYDVR0jBD4wPIAU2RQ8yO+OgN8oVW2SW7RLrfYd9jGh
--GaQXMBUxEzARBgNVBAMTCnVuaXQtdGVzdHOCCQDHyErgUOZvuTAMBgNVHRMEBTAD
--AQH/MA0GCSqGSIb3DQEBBQUAA4IBAQBRv+M/6+FiVu7KXNjFI5pSN17OcW5QUtPr
--odJMlWrJBtynn/TA1oJlYu3yV5clc/71Vr/AxuX5xGP+IXL32YDF9lTUJXG/uUGk
--+JETpKmQviPbRsvzYhz4pf6ZIOZMc3/GIcNq92ECbseGO+yAgyWUVKMmZM0HqXC9
--ovNslqe0M8C1sLm1zAR5z/h/litE7/8O2ietija3Q/qtl2TOXJdCA6sgjJX2WUql
--ybrC55ct18NKf3qhpcEkGQvFU40rVYApJpi98DiZPYFdx1oBDp/f4uZ3ojpxRVFT
--cDwcJLfNRCPUhormsY7fDS9xSyThiHsW9mjJYdcaKQkwYZ0F11yB
-------END CERTIFICATE-----`
--
--var (
--	privateKeyPemBytes = []byte(privateKeyPem)
--	publicKeyPemBytes  = []byte(publicKeyPem)
--	stdHeader          = &Header{Algorithm: stdAlgorithm, Type: stdType}
--)
--
--// Testing the urlEncode function.
--func TestUrlEncode(t *testing.T) {
--	enc := base64Encode([]byte(stdHeaderStr))
--	b := []byte(enc)
--	if b[len(b)-1] == 61 {
--		t.Error("TestUrlEncode: last char == \"=\"")
--	}
--	if enc != headerEnc {
--		t.Error("TestUrlEncode: enc != headerEnc")
--		t.Errorf("        enc = %s", enc)
--		t.Errorf("  headerEnc = %s", headerEnc)
--	}
--}
--
--// Test that the times are set properly.
--func TestClaimSetSetTimes(t *testing.T) {
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--	}
--	iat := time.Unix(iat, 0)
--	c.setTimes(iat)
--	if c.exp.Unix() != exp {
--		t.Error("TestClaimSetSetTimes: c.exp != exp")
--		t.Errorf("  c.Exp = %d", c.exp.Unix())
--		t.Errorf("    exp = %d", exp)
--	}
--}
--
--// Given a well formed ClaimSet, test for proper encoding.
--func TestClaimSetEncode(t *testing.T) {
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--		exp:   time.Unix(exp, 0),
--		iat:   time.Unix(iat, 0),
--	}
--	enc := c.encode()
--	re, err := base64Decode(enc)
--	if err != nil {
--		t.Fatalf("error decoding encoded claim set: %v", err)
--	}
--
--	wa, err := base64Decode(claimSetEnc)
--	if err != nil {
--		t.Fatalf("error decoding encoded expected claim set: %v", err)
--	}
--
--	if enc != claimSetEnc {
--		t.Error("TestClaimSetEncode: enc != claimSetEnc")
--		t.Errorf("          enc = %s", string(re))
--		t.Errorf("  claimSetEnc = %s", string(wa))
--	}
--}
--
--// Test that claim sets with private claim names are encoded correctly.
--func TestClaimSetWithPrivateNameEncode(t *testing.T) {
--	iatT := time.Unix(iat, 0)
--	expT := time.Unix(exp, 0)
--
--	i, err := json.Marshal(iatT.Unix())
--	if err != nil {
--		t.Fatalf("error marshaling iatT value of %v: %v", iatT.Unix(), err)
--	}
--	iatStr := string(i)
--	e, err := json.Marshal(expT.Unix())
--	if err != nil {
--		t.Fatalf("error marshaling expT value of %v: %v", expT.Unix(), err)
--	}
--
--	expStr := string(e)
--
--	testCases := []struct {
--		desc  string
--		input map[string]interface{}
--		want  string
--	}{
--		// Test a simple int field.
--		{
--			"single simple field",
--			map[string]interface{}{"amount": 22},
--			`{` +
--				`"iss":"` + iss + `",` +
--				`"scope":"` + scope + `",` +
--				`"aud":"` + stdAud + `",` +
--				`"exp":` + expStr + `,` +
--				`"iat":` + iatStr + `,` +
--				`"amount":22` +
--				`}`,
--		},
--		{
--			"multiple simple fields",
--			map[string]interface{}{"tracking_code": "axZf", "amount": 22},
--			`{` +
--				`"iss":"` + iss + `",` +
--				`"scope":"` + scope + `",` +
--				`"aud":"` + stdAud + `",` +
--				`"exp":` + expStr + `,` +
--				`"iat":` + iatStr + `,` +
--				`"amount":22,` +
--				`"tracking_code":"axZf"` +
--				`}`,
--		},
--		{
--			"nested struct fields",
--			map[string]interface{}{
--				"tracking_code": "axZf",
--				"purchase": struct {
--					Description string `json:"desc"`
--					Quantity    int32  `json:"q"`
--					Time        int64  `json:"t"`
--				}{
--					"toaster",
--					5,
--					iat,
--				},
--			},
--			`{` +
--				`"iss":"` + iss + `",` +
--				`"scope":"` + scope + `",` +
--				`"aud":"` + stdAud + `",` +
--				`"exp":` + expStr + `,` +
--				`"iat":` + iatStr + `,` +
--				`"purchase":{"desc":"toaster","q":5,"t":` + iatStr + `},` +
--				`"tracking_code":"axZf"` +
--				`}`,
--		},
--	}
--
--	for _, testCase := range testCases {
--		c := &ClaimSet{
--			Iss:           iss,
--			Scope:         scope,
--			Aud:           stdAud,
--			iat:           iatT,
--			exp:           expT,
--			PrivateClaims: testCase.input,
--		}
--		cJSON, err := base64Decode(c.encode())
--		if err != nil {
--			t.Fatalf("error decoding claim set: %v", err)
--		}
--		if string(cJSON) != testCase.want {
--			t.Errorf("TestClaimSetWithPrivateNameEncode: enc != want in case %s", testCase.desc)
--			t.Errorf("    enc = %s", cJSON)
--			t.Errorf("    want = %s", testCase.want)
--		}
--	}
--}
--
--// Test the NewToken constructor.
--func TestNewToken(t *testing.T) {
--	tok := NewToken(iss, scope, privateKeyPemBytes)
--	if tok.ClaimSet.Iss != iss {
--		t.Error("TestNewToken: tok.ClaimSet.Iss != iss")
--		t.Errorf("  tok.ClaimSet.Iss = %s", tok.ClaimSet.Iss)
--		t.Errorf("               iss = %s", iss)
--	}
--	if tok.ClaimSet.Scope != scope {
--		t.Error("TestNewToken: tok.ClaimSet.Scope != scope")
--		t.Errorf("  tok.ClaimSet.Scope = %s", tok.ClaimSet.Scope)
--		t.Errorf("               scope = %s", scope)
--	}
--	if tok.ClaimSet.Aud != stdAud {
--		t.Error("TestNewToken: tok.ClaimSet.Aud != stdAud")
--		t.Errorf("  tok.ClaimSet.Aud = %s", tok.ClaimSet.Aud)
--		t.Errorf("            stdAud = %s", stdAud)
--	}
--	if !bytes.Equal(tok.Key, privateKeyPemBytes) {
--		t.Error("TestNewToken: tok.Key != privateKeyPemBytes")
--		t.Errorf("             tok.Key = %s", tok.Key)
--		t.Errorf("  privateKeyPemBytes = %s", privateKeyPemBytes)
--	}
--}
--
--// Make sure the private key parsing functions work.
--func TestParsePrivateKey(t *testing.T) {
--	tok := &Token{
--		Key: privateKeyPemBytes,
--	}
--	err := tok.parsePrivateKey()
--	if err != nil {
--		t.Errorf("TestParsePrivateKey:tok.parsePrivateKey: %v", err)
--	}
--}
--
--// Test that the token signature generated matches the golden standard.
--func TestTokenSign(t *testing.T) {
--	tok := &Token{
--		Key:    privateKeyPemBytes,
--		claim:  claimSetEnc,
--		header: headerEnc,
--	}
--	err := tok.parsePrivateKey()
--	if err != nil {
--		t.Errorf("TestTokenSign:tok.parsePrivateKey: %v", err)
--	}
--	err = tok.sign()
--	if err != nil {
--		t.Errorf("TestTokenSign:tok.sign: %v", err)
--	}
--	if tok.sig != sigEnc {
--		t.Error("TestTokenSign: tok.sig != sigEnc")
--		t.Errorf("  tok.sig = %s", tok.sig)
--		t.Errorf("   sigEnc = %s", sigEnc)
--	}
--}
--
--// Test that the token expiration function is working.
--func TestTokenExpired(t *testing.T) {
--	c := &ClaimSet{}
--	tok := &Token{
--		ClaimSet: c,
--	}
--	now := time.Now()
--	c.setTimes(now)
--	if tok.Expired() != false {
--		t.Error("TestTokenExpired: tok.Expired != false")
--	}
--	// Set the times as if they were set 2 hours ago.
--	c.setTimes(now.Add(-2 * time.Hour))
--	if tok.Expired() != true {
--		t.Error("TestTokenExpired: tok.Expired != true")
--	}
--}
--
--// Given a well formed Token, test for proper encoding.
--func TestTokenEncode(t *testing.T) {
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--		exp:   time.Unix(exp, 0),
--		iat:   time.Unix(iat, 0),
--	}
--	tok := &Token{
--		ClaimSet: c,
--		Header:   stdHeader,
--		Key:      privateKeyPemBytes,
--	}
--	enc, err := tok.Encode()
--	if err != nil {
--		t.Errorf("TestTokenEncode:tok.Assertion: %v", err)
--	}
--	if enc != tokEnc {
--		t.Error("TestTokenEncode: enc != tokEnc")
--		t.Errorf("     enc = %s", enc)
--		t.Errorf("  tokEnc = %s", tokEnc)
--	}
--}
--
--// Given a well formed Token we should get back a well formed request.
--func TestBuildRequest(t *testing.T) {
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--		exp:   time.Unix(exp, 0),
--		iat:   time.Unix(iat, 0),
--	}
--	tok := &Token{
--		ClaimSet: c,
--		Header:   stdHeader,
--		Key:      privateKeyPemBytes,
--	}
--	u, v, err := tok.buildRequest()
--	if err != nil {
--		t.Errorf("TestBuildRequest:BuildRequest: %v", err)
--	}
--	if u != c.Aud {
--		t.Error("TestBuildRequest: u != c.Aud")
--		t.Errorf("      u = %s", u)
--		t.Errorf("  c.Aud = %s", c.Aud)
--	}
--	if v.Get("grant_type") != stdGrantType {
--		t.Error("TestBuildRequest: grant_type != stdGrantType")
--		t.Errorf("    grant_type = %s", v.Get("grant_type"))
--		t.Errorf("  stdGrantType = %s", stdGrantType)
--	}
--	if v.Get("assertion") != tokEnc {
--		t.Error("TestBuildRequest: assertion != tokEnc")
--		t.Errorf("  assertion = %s", v.Get("assertion"))
--		t.Errorf("     tokEnc = %s", tokEnc)
--	}
--}
--
--// Given a well formed access request response we should get back a oauth.Token.
--func TestHandleResponse(t *testing.T) {
--	rb := &respBody{
--		Access:    "1/8xbJqaOZXSUZbHLl5EOtu1pxz3fmmetKx9W8CV4t79M",
--		Type:      "Bearer",
--		ExpiresIn: 3600,
--	}
--	b, err := json.Marshal(rb)
--	if err != nil {
--		t.Errorf("TestHandleResponse:json.Marshal: %v", err)
--	}
--	r := &http.Response{
--		Status:     "200 OK",
--		StatusCode: 200,
--		Body:       ioutil.NopCloser(bytes.NewReader(b)),
--	}
--	o, err := handleResponse(r)
--	if err != nil {
--		t.Errorf("TestHandleResponse:handleResponse: %v", err)
--	}
--	if o.AccessToken != rb.Access {
--		t.Error("TestHandleResponse: o.AccessToken != rb.Access")
--		t.Errorf("  o.AccessToken = %s", o.AccessToken)
--		t.Errorf("       rb.Access = %s", rb.Access)
--	}
--	if o.Expired() {
--		t.Error("TestHandleResponse: o.Expired == true")
--	}
--}
--
--// passthrough signature for test
--type FakeSigner struct{}
--
--func (f FakeSigner) Sign(tok *Token) ([]byte, []byte, error) {
--	block, _ := pem.Decode(privateKeyPemBytes)
--	pKey, _ := x509.ParsePKCS1PrivateKey(block.Bytes)
--	ss := headerEnc + "." + claimSetEnc
--	h := sha256.New()
--	h.Write([]byte(ss))
--	b, _ := rsa.SignPKCS1v15(rand.Reader, pKey, crypto.SHA256, h.Sum(nil))
--	return []byte(ss), b, nil
--}
--
--// Given an external signer, get back a valid and signed JWT
--func TestExternalSigner(t *testing.T) {
--	tok := NewSignerToken(iss, scope, FakeSigner{})
--	enc, _ := tok.Encode()
--	if enc != tokEnc {
--		t.Errorf("TestExternalSigner: enc != tokEnc")
--		t.Errorf("     enc = %s", enc)
--		t.Errorf("  tokEnc = %s", tokEnc)
--	}
--}
--
--func TestHandleResponseWithNewExpiry(t *testing.T) {
--	rb := &respBody{
--		IdToken: tokEnc,
--	}
--	b, err := json.Marshal(rb)
--	if err != nil {
--		t.Errorf("TestHandleResponse:json.Marshal: %v", err)
--	}
--	r := &http.Response{
--		Status:     "200 OK",
--		StatusCode: 200,
--		Body:       ioutil.NopCloser(bytes.NewReader(b)),
--	}
--	o, err := handleResponse(r)
--	if err != nil {
--		t.Errorf("TestHandleResponse:handleResponse: %v", err)
--	}
--	if o.Expiry != time.Unix(exp, 0) {
--		t.Error("TestHandleResponse: o.Expiry != exp")
--		t.Errorf("  o.Expiry = %s", o.Expiry)
--		t.Errorf("       exp = %s", time.Unix(exp, 0))
--	}
--}
--
--// Placeholder for future Assert tests.
--func TestAssert(t *testing.T) {
--	// Since this method makes a call to buildRequest, an http.Client, and
--	// finally handleResponse, there is not much more to test.  This is here
--	// as a placeholder if that changes.
--}
--
--// Benchmark for the end-to-end encoding of a well formed token.
--func BenchmarkTokenEncode(b *testing.B) {
--	b.StopTimer()
--	c := &ClaimSet{
--		Iss:   iss,
--		Scope: scope,
--		exp:   time.Unix(exp, 0),
--		iat:   time.Unix(iat, 0),
--	}
--	tok := &Token{
--		ClaimSet: c,
--		Key:      privateKeyPemBytes,
--	}
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		tok.Encode()
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth.go
-deleted file mode 100644
-index 79d603d..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth.go
-+++ /dev/null
-@@ -1,405 +0,0 @@
--// Copyright 2011 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// The oauth package provides support for making
--// OAuth2-authenticated HTTP requests.
--//
--// Example usage:
--//
--//	// Specify your configuration. (typically as a global variable)
--//	var config = &oauth.Config{
--//		ClientId:     YOUR_CLIENT_ID,
--//		ClientSecret: YOUR_CLIENT_SECRET,
--//		Scope:        "https://www.googleapis.com/auth/buzz",
--//		AuthURL:      "https://accounts.google.com/o/oauth2/auth",
--//		TokenURL:     "https://accounts.google.com/o/oauth2/token",
--//		RedirectURL:  "http://you.example.org/handler",
--//	}
--//
--//	// A landing page redirects to the OAuth provider to get the auth code.
--//	func landing(w http.ResponseWriter, r *http.Request) {
--//		http.Redirect(w, r, config.AuthCodeURL("foo"), http.StatusFound)
--//	}
--//
--//	// The user will be redirected back to this handler, which takes the
--//	// "code" query parameter and Exchanges it for an access token.
--//	func handler(w http.ResponseWriter, r *http.Request) {
--//		t := &oauth.Transport{Config: config}
--//		t.Exchange(r.FormValue("code"))
--//		// The Transport now has a valid Token. Create an *http.Client
--//		// with which we can make authenticated API requests.
--//		c := t.Client()
--//		c.Post(...)
--//		// ...
--//		// btw, r.FormValue("state") == "foo"
--//	}
--//
--package oauth
--
--import (
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"mime"
--	"net/http"
--	"net/url"
--	"os"
--	"strings"
--	"time"
--)
--
--type OAuthError struct {
--	prefix string
--	msg    string
--}
--
--func (oe OAuthError) Error() string {
--	return "OAuthError: " + oe.prefix + ": " + oe.msg
--}
--
--// Cache specifies the methods that implement a Token cache.
--type Cache interface {
--	Token() (*Token, error)
--	PutToken(*Token) error
--}
--
--// CacheFile implements Cache. Its value is the name of the file in which
--// the Token is stored in JSON format.
--type CacheFile string
--
--func (f CacheFile) Token() (*Token, error) {
--	file, err := os.Open(string(f))
--	if err != nil {
--		return nil, OAuthError{"CacheFile.Token", err.Error()}
--	}
--	defer file.Close()
--	tok := &Token{}
--	if err := json.NewDecoder(file).Decode(tok); err != nil {
--		return nil, OAuthError{"CacheFile.Token", err.Error()}
--	}
--	return tok, nil
--}
--
--func (f CacheFile) PutToken(tok *Token) error {
--	file, err := os.OpenFile(string(f), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
--	if err != nil {
--		return OAuthError{"CacheFile.PutToken", err.Error()}
--	}
--	if err := json.NewEncoder(file).Encode(tok); err != nil {
--		file.Close()
--		return OAuthError{"CacheFile.PutToken", err.Error()}
--	}
--	if err := file.Close(); err != nil {
--		return OAuthError{"CacheFile.PutToken", err.Error()}
--	}
--	return nil
--}
--
--// Config is the configuration of an OAuth consumer.
--type Config struct {
--	// ClientId is the OAuth client identifier used when communicating with
--	// the configured OAuth provider.
--	ClientId string
--
--	// ClientSecret is the OAuth client secret used when communicating with
--	// the configured OAuth provider.
--	ClientSecret string
--
--	// Scope identifies the level of access being requested. Multiple scope
--	// values should be provided as a space-delimited string.
--	Scope string
--
--	// AuthURL is the URL the user will be directed to in order to grant
--	// access.
--	AuthURL string
--
--	// TokenURL is the URL used to retrieve OAuth tokens.
--	TokenURL string
--
--	// RedirectURL is the URL to which the user will be returned after
--	// granting (or denying) access.
--	RedirectURL string
--
--	// TokenCache allows tokens to be cached for subsequent requests.
--	TokenCache Cache
--
--	AccessType string // Optional, "online" (default) or "offline", no refresh token if "online"
--
--	// ApprovalPrompt indicates whether the user should be
--	// re-prompted for consent. If set to "auto" (default) the
--	// user will be prompted only if they haven't previously
--	// granted consent and the code can only be exchanged for an
--	// access token.
--	// If set to "force" the user will always be prompted, and the
--	// code can be exchanged for a refresh token.
--	ApprovalPrompt string
--}
--
--// Token contains an end-user's tokens.
--// This is the data you must store to persist authentication.
--type Token struct {
--	AccessToken  string
--	RefreshToken string
--	Expiry       time.Time         // If zero the token has no (known) expiry time.
--	Extra        map[string]string // May be nil.
--}
--
--func (t *Token) Expired() bool {
--	if t.Expiry.IsZero() {
--		return false
--	}
--	return t.Expiry.Before(time.Now())
--}
--
--// Transport implements http.RoundTripper. When configured with a valid
--// Config and Token it can be used to make authenticated HTTP requests.
--//
--//	t := &oauth.Transport{config}
--//      t.Exchange(code)
--//      // t now contains a valid Token
--//	r, _, err := t.Client().Get("http://example.org/url/requiring/auth")
--//
--// It will automatically refresh the Token if it can,
--// updating the supplied Token in place.
--type Transport struct {
--	*Config
--	*Token
--
--	// Transport is the HTTP transport to use when making requests.
--	// It will default to http.DefaultTransport if nil.
--	// (It should never be an oauth.Transport.)
--	Transport http.RoundTripper
--}
--
--// Client returns an *http.Client that makes OAuth-authenticated requests.
--func (t *Transport) Client() *http.Client {
--	return &http.Client{Transport: t}
--}
--
--func (t *Transport) transport() http.RoundTripper {
--	if t.Transport != nil {
--		return t.Transport
--	}
--	return http.DefaultTransport
--}
--
--// AuthCodeURL returns a URL that the end-user should be redirected to,
--// so that they may obtain an authorization code.
--func (c *Config) AuthCodeURL(state string) string {
--	url_, err := url.Parse(c.AuthURL)
--	if err != nil {
--		panic("AuthURL malformed: " + err.Error())
--	}
--	q := url.Values{
--		"response_type":   {"code"},
--		"client_id":       {c.ClientId},
--		"redirect_uri":    {c.RedirectURL},
--		"scope":           {c.Scope},
--		"state":           {state},
--		"access_type":     {c.AccessType},
--		"approval_prompt": {c.ApprovalPrompt},
--	}.Encode()
--	if url_.RawQuery == "" {
--		url_.RawQuery = q
--	} else {
--		url_.RawQuery += "&" + q
--	}
--	return url_.String()
--}
--
--// Exchange takes a code and gets access Token from the remote server.
--func (t *Transport) Exchange(code string) (*Token, error) {
--	if t.Config == nil {
--		return nil, OAuthError{"Exchange", "no Config supplied"}
--	}
--
--	// If the transport or the cache already has a token, it is
--	// passed to `updateToken` to preserve the existing refresh token.
--	tok := t.Token
--	if tok == nil && t.TokenCache != nil {
--		tok, _ = t.TokenCache.Token()
--	}
--	if tok == nil {
--		tok = new(Token)
--	}
--	err := t.updateToken(tok, url.Values{
--		"grant_type":   {"authorization_code"},
--		"redirect_uri": {t.RedirectURL},
--		"scope":        {t.Scope},
--		"code":         {code},
--	})
--	if err != nil {
--		return nil, err
--	}
--	t.Token = tok
--	if t.TokenCache != nil {
--		return tok, t.TokenCache.PutToken(tok)
--	}
--	return tok, nil
--}
--
--// RoundTrip executes a single HTTP transaction using the Transport's
--// Token as authorization headers.
--//
--// This method will attempt to renew the Token if it has expired and may return
--// an error related to that Token renewal before attempting the client request.
--// If the Token cannot be renewed a non-nil error value will be returned.
--// If the Token is invalid callers should expect HTTP-level errors,
--// as indicated by the Response's StatusCode.
--func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
--	if t.Token == nil {
--		if t.Config == nil {
--			return nil, OAuthError{"RoundTrip", "no Config supplied"}
--		}
--		if t.TokenCache == nil {
--			return nil, OAuthError{"RoundTrip", "no Token supplied"}
--		}
--		var err error
--		t.Token, err = t.TokenCache.Token()
--		if err != nil {
--			return nil, err
--		}
--	}
--
--	// Refresh the Token if it has expired.
--	if t.Expired() {
--		if err := t.Refresh(); err != nil {
--			return nil, err
--		}
--	}
--
--	// To set the Authorization header, we must make a copy of the Request
--	// so that we don't modify the Request we were given.
--	// This is required by the specification of http.RoundTripper.
--	req = cloneRequest(req)
--	req.Header.Set("Authorization", "Bearer "+t.AccessToken)
--
--	// Make the HTTP request.
--	return t.transport().RoundTrip(req)
--}
--
--// cloneRequest returns a clone of the provided *http.Request.
--// The clone is a shallow copy of the struct and its Header map.
--func cloneRequest(r *http.Request) *http.Request {
--	// shallow copy of the struct
--	r2 := new(http.Request)
--	*r2 = *r
--	// deep copy of the Header
--	r2.Header = make(http.Header)
--	for k, s := range r.Header {
--		r2.Header[k] = s
--	}
--	return r2
--}
--
--// Refresh renews the Transport's AccessToken using its RefreshToken.
--func (t *Transport) Refresh() error {
--	if t.Token == nil {
--		return OAuthError{"Refresh", "no existing Token"}
--	}
--	if t.RefreshToken == "" {
--		return OAuthError{"Refresh", "Token expired; no Refresh Token"}
--	}
--	if t.Config == nil {
--		return OAuthError{"Refresh", "no Config supplied"}
--	}
--
--	err := t.updateToken(t.Token, url.Values{
--		"grant_type":    {"refresh_token"},
--		"refresh_token": {t.RefreshToken},
--	})
--	if err != nil {
--		return err
--	}
--	if t.TokenCache != nil {
--		return t.TokenCache.PutToken(t.Token)
--	}
--	return nil
--}
--
--// AuthenticateClient gets an access Token using the client_credentials grant
--// type.
--func (t *Transport) AuthenticateClient() error {
--	if t.Config == nil {
--		return OAuthError{"Exchange", "no Config supplied"}
--	}
--	if t.Token == nil {
--		t.Token = &Token{}
--	}
--	return t.updateToken(t.Token, url.Values{"grant_type": {"client_credentials"}})
--}
--
--func (t *Transport) updateToken(tok *Token, v url.Values) error {
--	v.Set("client_id", t.ClientId)
--	v.Set("client_secret", t.ClientSecret)
--	client := &http.Client{Transport: t.transport()}
--	req, err := http.NewRequest("POST", t.TokenURL, strings.NewReader(v.Encode()))
--	if err != nil {
--		return err
--	}
--	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
--	req.SetBasicAuth(t.ClientId, t.ClientSecret)
--	r, err := client.Do(req)
--	if err != nil {
--		return err
--	}
--	defer r.Body.Close()
--	if r.StatusCode != 200 {
--		return OAuthError{"updateToken", r.Status}
--	}
--	var b struct {
--		Access    string        `json:"access_token"`
--		Refresh   string        `json:"refresh_token"`
--		ExpiresIn time.Duration `json:"expires_in"`
--		Id        string        `json:"id_token"`
--	}
--
--	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
--	if err != nil {
--		return err
--	}
--
--	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
--	switch content {
--	case "application/x-www-form-urlencoded", "text/plain":
--		vals, err := url.ParseQuery(string(body))
--		if err != nil {
--			return err
--		}
--
--		b.Access = vals.Get("access_token")
--		b.Refresh = vals.Get("refresh_token")
--		b.ExpiresIn, _ = time.ParseDuration(vals.Get("expires_in") + "s")
--		b.Id = vals.Get("id_token")
--	default:
--		if err = json.Unmarshal(body, &b); err != nil {
--			return fmt.Errorf("got bad response from server: %q", body)
--		}
--		// The JSON parser treats the unitless ExpiresIn like 'ns' instead of 's' as above,
--		// so compensate here.
--		b.ExpiresIn *= time.Second
--	}
--	if b.Access == "" {
--		return errors.New("received empty access token from authorization server")
--	}
--	tok.AccessToken = b.Access
--	// Don't overwrite `RefreshToken` with an empty value
--	if len(b.Refresh) > 0 {
--		tok.RefreshToken = b.Refresh
--	}
--	if b.ExpiresIn == 0 {
--		tok.Expiry = time.Time{}
--	} else {
--		tok.Expiry = time.Now().Add(b.ExpiresIn)
--	}
--	if b.Id != "" {
--		if tok.Extra == nil {
--			tok.Extra = make(map[string]string)
--		}
--		tok.Extra["id_token"] = b.Id
--	}
--	return nil
--}
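A rough, self-contained sketch of the form-encoded token request that Exchange, Refresh and AuthenticateClient above all funnel through updateToken. The endpoint and credentials are placeholders, so a real run would get an error response back rather than a token.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"
)

func main() {
	form := url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {"REFRESH_TOKEN"},
		"client_id":     {"CLIENT_ID"},
		"client_secret": {"CLIENT_SECRET"},
	}
	req, err := http.NewRequest("POST", "https://accounts.google.com/o/oauth2/token",
		strings.NewReader(form.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A successful response carries access_token, an optional refresh_token
	// and expires_in (in seconds).
	var body struct {
		AccessToken  string `json:"access_token"`
		RefreshToken string `json:"refresh_token"`
		ExpiresIn    int64  `json:"expires_in"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println(body.AccessToken, time.Now().Add(time.Duration(body.ExpiresIn)*time.Second))
}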
-diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth_test.go b/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth_test.go
-deleted file mode 100644
-index b903c16..0000000
---- a/Godeps/_workspace/src/code.google.com/p/goauth2/oauth/oauth_test.go
-+++ /dev/null
-@@ -1,214 +0,0 @@
--// Copyright 2011 The goauth2 Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package oauth
--
--import (
--	"io"
--	"io/ioutil"
--	"net/http"
--	"net/http/httptest"
--	"net/url"
--	"os"
--	"path/filepath"
--	"runtime"
--	"testing"
--	"time"
--)
--
--var requests = []struct {
--	path, query, auth string // request
--	contenttype, body string // response
--}{
--	{
--		path:        "/token",
--		query:       "grant_type=authorization_code&code=c0d3&client_id=cl13nt1d&client_secret=s3cr3t",
--		contenttype: "application/json",
--		auth:        "Basic Y2wxM250MWQ6czNjcjN0",
--		body: `
--			{
--				"access_token":"token1",
--				"refresh_token":"refreshtoken1",
--				"id_token":"idtoken1",
--				"expires_in":3600
--			}
--		`,
--	},
--	{path: "/secure", auth: "Bearer token1", body: "first payload"},
--	{
--		path:        "/token",
--		query:       "grant_type=refresh_token&refresh_token=refreshtoken1&client_id=cl13nt1d&client_secret=s3cr3t",
--		contenttype: "application/json",
--		auth:        "Basic Y2wxM250MWQ6czNjcjN0",
--		body: `
--			{
--				"access_token":"token2",
--				"refresh_token":"refreshtoken2",
--				"id_token":"idtoken2",
--				"expires_in":3600
--			}
--		`,
--	},
--	{path: "/secure", auth: "Bearer token2", body: "second payload"},
--	{
--		path:        "/token",
--		query:       "grant_type=refresh_token&refresh_token=refreshtoken2&client_id=cl13nt1d&client_secret=s3cr3t",
--		contenttype: "application/x-www-form-urlencoded",
--		body:        "access_token=token3&refresh_token=refreshtoken3&id_token=idtoken3&expires_in=3600",
--		auth:        "Basic Y2wxM250MWQ6czNjcjN0",
--	},
--	{path: "/secure", auth: "Bearer token3", body: "third payload"},
--	{
--		path:        "/token",
--		query:       "grant_type=client_credentials&client_id=cl13nt1d&client_secret=s3cr3t",
--		contenttype: "application/json",
--		auth:        "Basic Y2wxM250MWQ6czNjcjN0",
--		body: `
--			{
--				"access_token":"token4",
--				"expires_in":3600
--			}
--		`,
--	},
--	{path: "/secure", auth: "Bearer token4", body: "fourth payload"},
--}
--
--func TestOAuth(t *testing.T) {
--	// Set up test server.
--	n := 0
--	handler := func(w http.ResponseWriter, r *http.Request) {
--		if n >= len(requests) {
--			t.Errorf("too many requests: %d", n)
--			return
--		}
--		req := requests[n]
--		n++
--
--		// Check request.
--		if g, w := r.URL.Path, req.path; g != w {
--			t.Errorf("request[%d] got path %s, want %s", n, g, w)
--		}
--		want, _ := url.ParseQuery(req.query)
--		for k := range want {
--			if g, w := r.FormValue(k), want.Get(k); g != w {
--				t.Errorf("query[%s] = %s, want %s", k, g, w)
--			}
--		}
--		if g, w := r.Header.Get("Authorization"), req.auth; w != "" && g != w {
--			t.Errorf("Authorization: %v, want %v", g, w)
--		}
--
--		// Send response.
--		w.Header().Set("Content-Type", req.contenttype)
--		io.WriteString(w, req.body)
--	}
--	server := httptest.NewServer(http.HandlerFunc(handler))
--	defer server.Close()
--
--	config := &Config{
--		ClientId:     "cl13nt1d",
--		ClientSecret: "s3cr3t",
--		Scope:        "https://example.net/scope",
--		AuthURL:      server.URL + "/auth",
--		TokenURL:     server.URL + "/token",
--	}
--
--	// TODO(adg): test AuthCodeURL
--
--	transport := &Transport{Config: config}
--	_, err := transport.Exchange("c0d3")
--	if err != nil {
--		t.Fatalf("Exchange: %v", err)
--	}
--	checkToken(t, transport.Token, "token1", "refreshtoken1", "idtoken1")
--
--	c := transport.Client()
--	resp, err := c.Get(server.URL + "/secure")
--	if err != nil {
--		t.Fatalf("Get: %v", err)
--	}
--	checkBody(t, resp, "first payload")
--
--	// test automatic refresh
--	transport.Expiry = time.Now().Add(-time.Hour)
--	resp, err = c.Get(server.URL + "/secure")
--	if err != nil {
--		t.Fatalf("Get: %v", err)
--	}
--	checkBody(t, resp, "second payload")
--	checkToken(t, transport.Token, "token2", "refreshtoken2", "idtoken2")
--
--	// refresh one more time, but get URL-encoded token instead of JSON
--	transport.Expiry = time.Now().Add(-time.Hour)
--	resp, err = c.Get(server.URL + "/secure")
--	if err != nil {
--		t.Fatalf("Get: %v", err)
--	}
--	checkBody(t, resp, "third payload")
--	checkToken(t, transport.Token, "token3", "refreshtoken3", "idtoken3")
--
--	transport.Token = &Token{}
--	err = transport.AuthenticateClient()
--	if err != nil {
--		t.Fatalf("AuthenticateClient: %v", err)
--	}
--	checkToken(t, transport.Token, "token4", "", "")
--	resp, err = c.Get(server.URL + "/secure")
--	if err != nil {
--		t.Fatalf("Get: %v", err)
--	}
--	checkBody(t, resp, "fourth payload")
--}
--
--func checkToken(t *testing.T, tok *Token, access, refresh, id string) {
--	if g, w := tok.AccessToken, access; g != w {
--		t.Errorf("AccessToken = %q, want %q", g, w)
--	}
--	if g, w := tok.RefreshToken, refresh; g != w {
--		t.Errorf("RefreshToken = %q, want %q", g, w)
--	}
--	if g, w := tok.Extra["id_token"], id; g != w {
--		t.Errorf("Extra['id_token'] = %q, want %q", g, w)
--	}
--	exp := tok.Expiry.Sub(time.Now())
--	if (time.Hour-time.Second) > exp || exp > time.Hour {
--		t.Errorf("Expiry = %v, want ~1 hour", exp)
--	}
--}
--
--func checkBody(t *testing.T, r *http.Response, body string) {
--	b, err := ioutil.ReadAll(r.Body)
--	if err != nil {
--		t.Errorf("reading response body: %v, want %q", err, body)
--	}
--	if g, w := string(b), body; g != w {
--		t.Errorf("request body mismatch: got %q, want %q", g, w)
--	}
--}
--
--func TestCachePermissions(t *testing.T) {
--	if runtime.GOOS == "windows" {
--		// Windows doesn't support file mode bits.
--		return
--	}
--
--	td, err := ioutil.TempDir("", "oauth-test")
--	if err != nil {
--		t.Fatalf("ioutil.TempDir: %v", err)
--	}
--	defer os.RemoveAll(td)
--	tempFile := filepath.Join(td, "cache-file")
--
--	cf := CacheFile(tempFile)
--	if err := cf.PutToken(new(Token)); err != nil {
--		t.Fatalf("PutToken: %v", err)
--	}
--	fi, err := os.Stat(tempFile)
--	if err != nil {
--		t.Fatalf("os.Stat: %v", err)
--	}
--	if fi.Mode()&0077 != 0 {
--		t.Errorf("Created cache file has mode %#o, want non-accessible to group+other", fi.Mode())
--	}
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/AUTHORS b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/AUTHORS
-deleted file mode 100644
-index f73b725..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/AUTHORS
-+++ /dev/null
-@@ -1,10 +0,0 @@
--# This is the official list of authors for copyright purposes.
--# This file is distinct from the CONTRIBUTORS files.
--# See the latter for an explanation.
--
--# Names should be added to this file as
--#	Name or Organization <email address>
--# The email address is not required for organizations.
--
--# Please keep the list sorted.
--Google Inc.
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/CONTRIBUTORS b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/CONTRIBUTORS
-deleted file mode 100644
-index 4af9298..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/CONTRIBUTORS
-+++ /dev/null
-@@ -1,46 +0,0 @@
--# This is the official list of people who can contribute
--# (and typically have contributed) code to the repository.
--# The AUTHORS file lists the copyright holders; this file
--# lists people.  For example, Google employees are listed here
--# but not in AUTHORS, because Google holds the copyright.
--#
--# The submission process automatically checks to make sure
--# that people submitting code are listed in this file (by email address).
--#
--# Names should be added to this file only after verifying that
--# the individual or the individual's organization has agreed to
--# the appropriate Contributor License Agreement, found here:
--#
--#     http://code.google.com/legal/individual-cla-v1.0.html
--#     http://code.google.com/legal/corporate-cla-v1.0.html
--#
--# The agreement for individuals can be filled out on the web.
--#
--# When adding J Random Contributor's name to this file,
--# either J's name or J's organization's name should be
--# added to the AUTHORS file, depending on whether the
--# individual or corporate CLA was used.
--
--# Names should be added to this file like so:
--#     Name <email address>
--#
--# An entry with two email addresses specifies that the
--# first address should be used in the submit logs and
--# that the second address should be recognized as the
--# same person when interacting with Rietveld.
--
--# Please keep the list sorted.
--
--Alain Vongsouvanhalainv <alainv at google.com>
--Andrew Gerrand <adg at golang.org>
--Brad Fitzpatrick <bradfitz at golang.org>
--Francesc Campoy <campoy at golang.org>
--Garrick Evans <garrick at google.com>
--Glenn Lewis <gmlewis at google.com>
--Ivan Krasin <krasin at golang.org>
--Jason Hall <jasonhall at google.com>
--Johan Euphrosine <proppy at google.com>
--Kostik Shtoyk <kostik at google.com>
--Nick Craig-Wood <nickcw at gmail.com>
--Scott Van Woudenberg <scottvw at google.com>
--Takashi Matsuo <tmatsuo at google.com>
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/LICENSE b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/LICENSE
-deleted file mode 100644
-index 263aa7a..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/LICENSE
-+++ /dev/null
-@@ -1,27 +0,0 @@
--Copyright (c) 2011 Google Inc. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/Makefile b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/Makefile
-deleted file mode 100644
-index 20ce8a5..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/Makefile
-+++ /dev/null
-@@ -1,9 +0,0 @@
--all:
--	go install code.google.com/p/google-api-go-client/googleapi
--	go install code.google.com/p/google-api-go-client/google-api-go-generator
--	$(GOPATH)/bin/google-api-go-generator -cache=false -install -api=*
--
--cached:
--	go install code.google.com/p/google-api-go-client/googleapi
--	go install code.google.com/p/google-api-go-client/google-api-go-generator
--	$(GOPATH)/bin/google-api-go-generator -cache=true -install -api=*
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/NOTES b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/NOTES
-deleted file mode 100644
-index 3b10889..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/NOTES
-+++ /dev/null
-@@ -1,13 +0,0 @@
--Discovery Service:
--http://code.google.com/apis/discovery/
--http://code.google.com/apis/discovery/v1/reference.html
--
--The "type" key:
--http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
--
--The "format" key:
--http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.23
--http://code.google.com/apis/discovery/v1/reference.html#parameter-format-summary
--
--Google JSON format docs:
--http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/README b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/README
-deleted file mode 100644
-index 9aca57b..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/README
-+++ /dev/null
-@@ -1,10 +0,0 @@
--Most of this project is auto-generated.
--
--The notable directories which are not auto-generated:
--
--   google-api-go-generator/ -- the generator itself
--   google-api/              -- shared common code, used by auto-generated code
--   examples/                -- sample code
--
--When changing the generator, re-compile all APIs and submit the
--modified APIs in the same CL as the generator changes itself.
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/TODO b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/TODO
-deleted file mode 100644
-index af55f14..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/TODO
-+++ /dev/null
-@@ -1,2 +0,0 @@
--Moved to:
--http://code.google.com/p/google-api-go-client/issues/
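
For reference, a minimal usage sketch for a Go client generated from the compute discovery document deleted below. The service and type names (compute.New, Firewalls.Insert, compute.Firewall, compute.FirewallAllowed) follow the generator's usual naming convention and are assumptions here, not something this patch shows; the fields mirror the Firewall schema in that document (name, network, allowed IPProtocol/ports, sourceRanges).

package main

import (
	"log"
	"net/http"

	compute "code.google.com/p/google-api-go-client/compute/v1"
)

func main() {
	// In real use the http.Client would carry OAuth2 credentials (see the
	// oauth sketch earlier); the default client is only a placeholder.
	svc, err := compute.New(http.DefaultClient)
	if err != nil {
		log.Fatalf("compute.New: %v", err)
	}

	fw := &compute.Firewall{
		// Name must match the RFC1035 pattern given in the Firewall schema.
		Name:    "allow-http",
		Network: "global/networks/default",
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"80", "443"}},
		},
		SourceRanges: []string{"0.0.0.0/0"},
	}

	// Insert returns an Operation resource describing the asynchronous change.
	op, err := svc.Firewalls.Insert("my-project", fw).Do()
	if err != nil {
		log.Fatalf("Firewalls.Insert: %v", err)
	}
	log.Printf("operation: %s", op.Name)
}
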
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-api.json b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-api.json
-deleted file mode 100644
-index 726a0ac..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-api.json
-+++ /dev/null
-@@ -1,9526 +0,0 @@
--{
-- "kind": "discovery#restDescription",
-- "etag": "\"l66ggWbucbkBw9Lpos72oziyefE/qp3DHGvWPpREzEdWk7WwxnpgC9w\"",
-- "discoveryVersion": "v1",
-- "id": "compute:v1",
-- "name": "compute",
-- "version": "v1",
-- "revision": "20141014",
-- "title": "Compute Engine API",
-- "description": "API for the Google Compute Engine service.",
-- "ownerDomain": "google.com",
-- "ownerName": "Google",
-- "icons": {
--  "x16": "https://www.google.com/images/icons/product/compute_engine-16.png",
--  "x32": "https://www.google.com/images/icons/product/compute_engine-32.png"
-- },
-- "documentationLink": "https://developers.google.com/compute/docs/reference/latest/",
-- "protocol": "rest",
-- "baseUrl": "https://www.googleapis.com/compute/v1/projects/",
-- "basePath": "/compute/v1/projects/",
-- "rootUrl": "https://www.googleapis.com/",
-- "servicePath": "compute/v1/projects/",
-- "batchPath": "batch",
-- "parameters": {
--  "alt": {
--   "type": "string",
--   "description": "Data format for the response.",
--   "default": "json",
--   "enum": [
--    "json"
--   ],
--   "enumDescriptions": [
--    "Responses with Content-Type of application/json"
--   ],
--   "location": "query"
--  },
--  "fields": {
--   "type": "string",
--   "description": "Selector specifying which fields to include in a partial response.",
--   "location": "query"
--  },
--  "key": {
--   "type": "string",
--   "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
--   "location": "query"
--  },
--  "oauth_token": {
--   "type": "string",
--   "description": "OAuth 2.0 token for the current user.",
--   "location": "query"
--  },
--  "prettyPrint": {
--   "type": "boolean",
--   "description": "Returns response with indentations and line breaks.",
--   "default": "true",
--   "location": "query"
--  },
--  "quotaUser": {
--   "type": "string",
--   "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
--   "location": "query"
--  },
--  "userIp": {
--   "type": "string",
--   "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
--   "location": "query"
--  }
-- },
-- "auth": {
--  "oauth2": {
--   "scopes": {
--    "https://www.googleapis.com/auth/compute": {
--     "description": "View and manage your Google Compute Engine resources"
--    },
--    "https://www.googleapis.com/auth/compute.readonly": {
--     "description": "View your Google Compute Engine resources"
--    },
--    "https://www.googleapis.com/auth/devstorage.full_control": {
--     "description": "Manage your data and permissions in Google Cloud Storage"
--    },
--    "https://www.googleapis.com/auth/devstorage.read_only": {
--     "description": "View your data in Google Cloud Storage"
--    },
--    "https://www.googleapis.com/auth/devstorage.read_write": {
--     "description": "Manage your data in Google Cloud Storage"
--    }
--   }
--  }
-- },
-- "schemas": {
--  "AccessConfig": {
--   "id": "AccessConfig",
--   "type": "object",
--   "description": "An access configuration attached to an instance's network interface.",
--   "properties": {
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#accessConfig"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of this access configuration."
--    },
--    "natIP": {
--     "type": "string",
--     "description": "An external IP address associated with this instance. Specify an unused static IP address available to the project. If not specified, the external IP will be drawn from a shared ephemeral pool."
--    },
--    "type": {
--     "type": "string",
--     "description": "Type of configuration. Must be set to \"ONE_TO_ONE_NAT\". This configures port-for-port NAT to the internet.",
--     "default": "ONE_TO_ONE_NAT",
--     "enum": [
--      "ONE_TO_ONE_NAT"
--     ],
--     "enumDescriptions": [
--      ""
--     ]
--    }
--   }
--  },
--  "Address": {
--   "id": "Address",
--   "type": "object",
--   "description": "A reserved address resource.",
--   "properties": {
--    "address": {
--     "type": "string",
--     "description": "The IP address represented by this resource."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#address"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.addresses.insert"
--      ]
--     }
--    },
--    "region": {
--     "type": "string",
--     "description": "URL of the region where the regional address resides (output only). This field is not applicable to global addresses."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "status": {
--     "type": "string",
--     "description": "The status of the address (output only).",
--     "enum": [
--      "IN_USE",
--      "RESERVED"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "users": {
--     "type": "array",
--     "description": "The resources that are using this address resource.",
--     "items": {
--      "type": "string"
--     }
--    }
--   }
--  },
--  "AddressAggregatedList": {
--   "id": "AddressAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped address lists.",
--     "additionalProperties": {
--      "$ref": "AddressesScopedList",
--      "description": "Name of the scope containing this set of addresses."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#addressAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "AddressList": {
--   "id": "AddressList",
--   "type": "object",
--   "description": "Contains a list of address resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The address resources.",
--     "items": {
--      "$ref": "Address"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#addressList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    }
--   }
--  },
--  "AddressesScopedList": {
--   "id": "AddressesScopedList",
--   "type": "object",
--   "properties": {
--    "addresses": {
--     "type": "array",
--     "description": "List of addresses contained in this scope.",
--     "items": {
--      "$ref": "Address"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of addresses when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "AttachedDisk": {
--   "id": "AttachedDisk",
--   "type": "object",
--   "description": "An instance-attached disk resource.",
--   "properties": {
--    "autoDelete": {
--     "type": "boolean",
--     "description": "Whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)."
--    },
--    "boot": {
--     "type": "boolean",
--     "description": "Indicates that this is a boot disk. VM will use the first partition of the disk for its root filesystem."
--    },
--    "deviceName": {
--     "type": "string",
--     "description": "Persistent disk only; must be unique within the instance when specified. This represents a unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, a default will be chosen by the system."
--    },
--    "index": {
--     "type": "integer",
--     "description": "A zero-based index to assign to this disk, where 0 is reserved for the boot disk. If not specified, the server will choose an appropriate value (output only).",
--     "format": "int32"
--    },
--    "initializeParams": {
--     "$ref": "AttachedDiskInitializeParams",
--     "description": "Initialization parameters."
--    },
--    "interface": {
--     "type": "string",
--     "enum": [
--      "NVME",
--      "SCSI"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#attachedDisk"
--    },
--    "licenses": {
--     "type": "array",
--     "description": "Public visible licenses.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "mode": {
--     "type": "string",
--     "description": "The mode in which to attach this disk, either \"READ_WRITE\" or \"READ_ONLY\".",
--     "enum": [
--      "READ_ONLY",
--      "READ_WRITE"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "source": {
--     "type": "string",
--     "description": "Persistent disk only; the URL of the persistent disk resource."
--    },
--    "type": {
--     "type": "string",
--     "description": "Type of the disk, either \"SCRATCH\" or \"PERSISTENT\". Note that persistent disks must be created before you can specify them here.",
--     "enum": [
--      "PERSISTENT",
--      "SCRATCH"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ],
--     "annotations": {
--      "required": [
--       "compute.instances.insert"
--      ]
--     }
--    }
--   }
--  },
--  "AttachedDiskInitializeParams": {
--   "id": "AttachedDiskInitializeParams",
--   "type": "object",
--   "description": "Initialization parameters for the new disk (input-only). Can only be specified on the boot disk or local SSDs. Mutually exclusive with 'source'.",
--   "properties": {
--    "diskName": {
--     "type": "string",
--     "description": "Name of the disk (when not provided defaults to the name of the instance)."
--    },
--    "diskSizeGb": {
--     "type": "string",
--     "description": "Size of the disk in base-2 GB.",
--     "format": "int64"
--    },
--    "diskType": {
--     "type": "string",
--     "description": "URL of the disk type resource describing which disk type to use to create the disk; provided by the client when the disk is created."
--    },
--    "sourceImage": {
--     "type": "string",
--     "description": "The source image used to create this disk."
--    }
--   }
--  },
--  "Backend": {
--   "id": "Backend",
--   "type": "object",
--   "description": "Message containing information of one individual backend.",
--   "properties": {
--    "balancingMode": {
--     "type": "string",
--     "description": "The balancing mode of this backend, default is UTILIZATION.",
--     "enum": [
--      "RATE",
--      "UTILIZATION"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "capacityScaler": {
--     "type": "number",
--     "description": "The multiplier (a value between 0 and 1e6) of the max capacity (CPU or RPS, depending on 'balancingMode') the group should serve up to. 0 means the group is totally drained. Default value is 1. Valid range is [0, 1e6].",
--     "format": "float"
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource, which is provided by the client when the resource is created."
--    },
--    "group": {
--     "type": "string",
--     "description": "URL of a zonal Cloud Resource View resource. This resource view defines the list of instances that serve traffic. Member virtual machine instances from each resource view must live in the same zone as the resource view itself. No two backends in a backend service are allowed to use same Resource View resource."
--    },
--    "maxRate": {
--     "type": "integer",
--     "description": "The max RPS of the group. Can be used with either balancing mode, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.",
--     "format": "int32"
--    },
--    "maxRatePerInstance": {
--     "type": "number",
--     "description": "The max RPS that a single backed instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.",
--     "format": "float"
--    },
--    "maxUtilization": {
--     "type": "number",
--     "description": "Used when 'balancingMode' is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0, 1].",
--     "format": "float"
--    }
--   }
--  },
--  "BackendService": {
--   "id": "BackendService",
--   "type": "object",
--   "description": "A BackendService resource. This resource defines a group of backend VMs together with their serving capacity.",
--   "properties": {
--    "backends": {
--     "type": "array",
--     "description": "The list of backends that serve this BackendService.",
--     "items": {
--      "$ref": "Backend"
--     }
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "fingerprint": {
--     "type": "string",
--     "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService.",
--     "format": "byte"
--    },
--    "healthChecks": {
--     "type": "array",
--     "description": "The list of URLs to the HttpHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#backendService"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "port": {
--     "type": "integer",
--     "description": "Deprecated in favor of port_name. The TCP port to connect on the backend. The default value is 80.",
--     "format": "int32"
--    },
--    "portName": {
--     "type": "string",
--     "description": "Name of backend port. The same name should appear in the resource views referenced by this service. Required."
--    },
--    "protocol": {
--     "type": "string",
--     "enum": [
--      "HTTP"
--     ],
--     "enumDescriptions": [
--      ""
--     ]
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "timeoutSec": {
--     "type": "integer",
--     "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.",
--     "format": "int32"
--    }
--   }
--  },
--  "BackendServiceGroupHealth": {
--   "id": "BackendServiceGroupHealth",
--   "type": "object",
--   "properties": {
--    "healthStatus": {
--     "type": "array",
--     "items": {
--      "$ref": "HealthStatus"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#backendServiceGroupHealth"
--    }
--   }
--  },
--  "BackendServiceList": {
--   "id": "BackendServiceList",
--   "type": "object",
--   "description": "Contains a list of BackendService resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The BackendService resources.",
--     "items": {
--      "$ref": "BackendService"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#backendServiceList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "DeprecationStatus": {
--   "id": "DeprecationStatus",
--   "type": "object",
--   "description": "Deprecation status for a public resource.",
--   "properties": {
--    "deleted": {
--     "type": "string",
--     "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED."
--    },
--    "deprecated": {
--     "type": "string",
--     "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED."
--    },
--    "obsolete": {
--     "type": "string",
--     "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE."
--    },
--    "replacement": {
--     "type": "string",
--     "description": "A URL of the suggested replacement for the deprecated resource. The deprecated resource and its replacement must be resources of the same kind."
--    },
--    "state": {
--     "type": "string",
--     "description": "The deprecation state. Can be \"DEPRECATED\", \"OBSOLETE\", or \"DELETED\". Operations which create a new resource using a \"DEPRECATED\" resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. New uses of \"OBSOLETE\" or \"DELETED\" resources will result in an error.",
--     "enum": [
--      "DELETED",
--      "DEPRECATED",
--      "OBSOLETE"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      ""
--     ]
--    }
--   }
--  },
--  "Disk": {
--   "id": "Disk",
--   "type": "object",
--   "description": "A persistent disk resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#disk"
--    },
--    "licenses": {
--     "type": "array",
--     "description": "Public visible licenses.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.disks.insert"
--      ]
--     }
--    },
--    "options": {
--     "type": "string",
--     "description": "Internal use only."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "sizeGb": {
--     "type": "string",
--     "description": "Size of the persistent disk, specified in GB. This parameter is optional when creating a disk from a disk image or a snapshot, otherwise it is required.",
--     "format": "int64"
--    },
--    "sourceImage": {
--     "type": "string",
--     "description": "The source image used to create this disk."
--    },
--    "sourceImageId": {
--     "type": "string",
--     "description": "The 'id' value of the image used to create this disk. This value may be used to determine whether the disk was created from the current or a previous instance of a given image."
--    },
--    "sourceSnapshot": {
--     "type": "string",
--     "description": "The source snapshot used to create this disk."
--    },
--    "sourceSnapshotId": {
--     "type": "string",
--     "description": "The 'id' value of the snapshot used to create this disk. This value may be used to determine whether the disk was created from the current or a previous instance of a given disk snapshot."
--    },
--    "status": {
--     "type": "string",
--     "description": "The status of disk creation (output only).",
--     "enum": [
--      "CREATING",
--      "FAILED",
--      "READY",
--      "RESTORING"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "type": {
--     "type": "string",
--     "description": "URL of the disk type resource describing which disk type to use to create the disk; provided by the client when the disk is created."
--    },
--    "zone": {
--     "type": "string",
--     "description": "URL of the zone where the disk resides (output only)."
--    }
--   }
--  },
--  "DiskAggregatedList": {
--   "id": "DiskAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped disk lists.",
--     "additionalProperties": {
--      "$ref": "DisksScopedList",
--      "description": "Name of the scope containing this set of disks."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#diskAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "DiskList": {
--   "id": "DiskList",
--   "type": "object",
--   "description": "Contains a list of persistent disk resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The persistent disk resources.",
--     "items": {
--      "$ref": "Disk"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#diskList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "DiskType": {
--   "id": "DiskType",
--   "type": "object",
--   "description": "A disk type resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "defaultDiskSizeGb": {
--     "type": "string",
--     "description": "Server defined default disk size in gb (output only).",
--     "format": "int64"
--    },
--    "deprecated": {
--     "$ref": "DeprecationStatus",
--     "description": "The deprecation status associated with this disk type."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#diskType"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "validDiskSize": {
--     "type": "string",
--     "description": "An optional textual descroption of the valid disk size, e.g., \"10GB-10TB\"."
--    },
--    "zone": {
--     "type": "string",
--     "description": "Url of the zone where the disk type resides (output only)."
--    }
--   }
--  },
--  "DiskTypeAggregatedList": {
--   "id": "DiskTypeAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped disk type lists.",
--     "additionalProperties": {
--      "$ref": "DiskTypesScopedList",
--      "description": "Name of the scope containing this set of disk types."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#diskTypeAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "DiskTypeList": {
--   "id": "DiskTypeList",
--   "type": "object",
--   "description": "Contains a list of disk type resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The disk type resources.",
--     "items": {
--      "$ref": "DiskType"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#diskTypeList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "DiskTypesScopedList": {
--   "id": "DiskTypesScopedList",
--   "type": "object",
--   "properties": {
--    "diskTypes": {
--     "type": "array",
--     "description": "List of disk types contained in this scope.",
--     "items": {
--      "$ref": "DiskType"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of disk types when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "DisksScopedList": {
--   "id": "DisksScopedList",
--   "type": "object",
--   "properties": {
--    "disks": {
--     "type": "array",
--     "description": "List of disks contained in this scope.",
--     "items": {
--      "$ref": "Disk"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of disks when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "Firewall": {
--   "id": "Firewall",
--   "type": "object",
--   "description": "A firewall resource.",
--   "properties": {
--    "allowed": {
--     "type": "array",
--     "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.",
--     "items": {
--      "type": "object",
--      "properties": {
--       "IPProtocol": {
--        "type": "string",
--        "description": "Required; this is the IP protocol that is allowed for this rule. This can either be one of the following well known protocol strings [\"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"sctp\"], or the IP protocol number."
--       },
--       "ports": {
--        "type": "array",
--        "description": "An optional list of ports which are allowed. It is an error to specify this for any protocol that isn't UDP or TCP. Each entry must be either an integer or a range. If not specified, connections through any port are allowed.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"] and [\"12345-12349\"].",
--        "items": {
--         "type": "string"
--        }
--       }
--      }
--     }
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#firewall"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.firewalls.insert",
--       "compute.firewalls.patch"
--      ]
--     }
--    },
--    "network": {
--     "type": "string",
--     "description": "URL of the network to which this firewall is applied; provided by the client when the firewall is created."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "sourceRanges": {
--     "type": "array",
--     "description": "A list of IP address blocks expressed in CIDR format which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "sourceTags": {
--     "type": "array",
--     "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "targetTags": {
--     "type": "array",
--     "description": "A list of instance tags indicating sets of instances located on network which may make network connections as specified in allowed. If no targetTags are specified, the firewall rule applies to all instances on the specified network.",
--     "items": {
--      "type": "string"
--     }
--    }
--   }
--  },
--  "FirewallList": {
--   "id": "FirewallList",
--   "type": "object",
--   "description": "Contains a list of firewall resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The firewall resources.",
--     "items": {
--      "$ref": "Firewall"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#firewallList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "ForwardingRule": {
--   "id": "ForwardingRule",
--   "type": "object",
--   "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target VMs to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple.",
--   "properties": {
--    "IPAddress": {
--     "type": "string",
--     "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned."
--    },
--    "IPProtocol": {
--     "type": "string",
--     "description": "The IP protocol to which this rule applies, valid options are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.",
--     "enum": [
--      "AH",
--      "ESP",
--      "SCTP",
--      "TCP",
--      "UDP"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#forwardingRule"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "portRange": {
--     "type": "string",
--     "description": "Applicable only when 'IPProtocol' is 'TCP', 'UDP' or 'SCTP', only packets addressed to ports in the specified range will be forwarded to 'target'. If 'portRange' is left empty (default value), all ports are forwarded. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges."
--    },
--    "region": {
--     "type": "string",
--     "description": "URL of the region where the regional forwarding rule resides (output only). This field is not applicable to global forwarding rules."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "target": {
--     "type": "string",
--     "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy resource."
--    }
--   }
--  },
--  "ForwardingRuleAggregatedList": {
--   "id": "ForwardingRuleAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped forwarding rule lists.",
--     "additionalProperties": {
--      "$ref": "ForwardingRulesScopedList",
--      "description": "Name of the scope containing this set of addresses."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#forwardingRuleAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "ForwardingRuleList": {
--   "id": "ForwardingRuleList",
--   "type": "object",
--   "description": "Contains a list of ForwardingRule resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The ForwardingRule resources.",
--     "items": {
--      "$ref": "ForwardingRule"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#forwardingRuleList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "ForwardingRulesScopedList": {
--   "id": "ForwardingRulesScopedList",
--   "type": "object",
--   "properties": {
--    "forwardingRules": {
--     "type": "array",
--     "description": "List of forwarding rules contained in this scope.",
--     "items": {
--      "$ref": "ForwardingRule"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of forwarding rules when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "HealthCheckReference": {
--   "id": "HealthCheckReference",
--   "type": "object",
--   "properties": {
--    "healthCheck": {
--     "type": "string"
--    }
--   }
--  },
--  "HealthStatus": {
--   "id": "HealthStatus",
--   "type": "object",
--   "properties": {
--    "healthState": {
--     "type": "string",
--     "description": "Health state of the instance.",
--     "enum": [
--      "HEALTHY",
--      "UNHEALTHY"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "instance": {
--     "type": "string",
--     "description": "URL of the instance resource."
--    },
--    "ipAddress": {
--     "type": "string",
--     "description": "The IP address represented by this resource."
--    },
--    "port": {
--     "type": "integer",
--     "description": "The port on the instance.",
--     "format": "int32"
--    }
--   }
--  },
--  "HostRule": {
--   "id": "HostRule",
--   "type": "object",
--   "description": "A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.",
--   "properties": {
--    "description": {
--     "type": "string"
--    },
--    "hosts": {
--     "type": "array",
--     "description": "The list of host patterns to match. They must be valid hostnames except that they may start with *. or *-. The * acts like a glob and will match any string of atoms (separated by .s and -s) to the left.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "pathMatcher": {
--     "type": "string",
--     "description": "The name of the PathMatcher to match the path portion of the URL, if the this HostRule matches the URL's host portion."
--    }
--   }
--  },
--  "HttpHealthCheck": {
--   "id": "HttpHealthCheck",
--   "type": "object",
--   "description": "An HttpHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTP.",
--   "properties": {
--    "checkIntervalSec": {
--     "type": "integer",
--     "description": "How often (in seconds) to send a health check. The default value is 5 seconds.",
--     "format": "int32"
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "healthyThreshold": {
--     "type": "integer",
--     "description": "A so-far unhealthy VM will be marked healthy after this many consecutive successes. The default value is 2.",
--     "format": "int32"
--    },
--    "host": {
--     "type": "string",
--     "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#httpHealthCheck"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "port": {
--     "type": "integer",
--     "description": "The TCP port number for the HTTP health check request. The default value is 80.",
--     "format": "int32"
--    },
--    "requestPath": {
--     "type": "string",
--     "description": "The request path of the HTTP health check request. The default value is \"/\"."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "timeoutSec": {
--     "type": "integer",
--     "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds.",
--     "format": "int32"
--    },
--    "unhealthyThreshold": {
--     "type": "integer",
--     "description": "A so-far healthy VM will be marked unhealthy after this many consecutive failures. The default value is 2.",
--     "format": "int32"
--    }
--   }
--  },
--  "HttpHealthCheckList": {
--   "id": "HttpHealthCheckList",
--   "type": "object",
--   "description": "Contains a list of HttpHealthCheck resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The HttpHealthCheck resources.",
--     "items": {
--      "$ref": "HttpHealthCheck"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#httpHealthCheckList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "Image": {
--   "id": "Image",
--   "type": "object",
--   "description": "A disk image resource.",
--   "properties": {
--    "archiveSizeBytes": {
--     "type": "string",
--     "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).",
--     "format": "int64"
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "deprecated": {
--     "$ref": "DeprecationStatus",
--     "description": "The deprecation status associated with this image."
--    },
--    "description": {
--     "type": "string",
--     "description": "Textual description of the resource; provided by the client when the resource is created."
--    },
--    "diskSizeGb": {
--     "type": "string",
--     "description": "Size of the image when restored onto a disk (in GiB).",
--     "format": "int64"
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#image"
--    },
--    "licenses": {
--     "type": "array",
--     "description": "Public visible licenses.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.images.insert"
--      ]
--     }
--    },
--    "rawDisk": {
--     "type": "object",
--     "description": "The raw disk image parameters.",
--     "properties": {
--      "containerType": {
--       "type": "string",
--       "description": "The format used to encode and transmit the block device. Should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.",
--       "enum": [
--        "TAR"
--       ],
--       "enumDescriptions": [
--        ""
--       ]
--      },
--      "sha1Checksum": {
--       "type": "string",
--       "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.",
--       "pattern": "[a-f0-9]{40}"
--      },
--      "source": {
--       "type": "string",
--       "description": "The full Google Cloud Storage URL where the disk image is stored; provided by the client when the disk image is created.",
--       "annotations": {
--        "required": [
--         "compute.images.insert"
--        ]
--       }
--      }
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "sourceDisk": {
--     "type": "string",
--     "description": "The source disk used to create this image."
--    },
--    "sourceDiskId": {
--     "type": "string",
--     "description": "The 'id' value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name."
--    },
--    "sourceType": {
--     "type": "string",
--     "description": "Must be \"RAW\"; provided by the client when the disk image is created.",
--     "default": "RAW",
--     "enum": [
--      "RAW"
--     ],
--     "enumDescriptions": [
--      ""
--     ]
--    },
--    "status": {
--     "type": "string",
--     "description": "Status of the image (output only). It will be one of the following: READY - after the image has been successfully created and is ready for use; FAILED - if creating the image fails for some reason; PENDING - the image creation is in progress. An image can be used to create other resources such as instances only after the image has been successfully created and the status is set to READY.",
--     "enum": [
--      "FAILED",
--      "PENDING",
--      "READY"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      ""
--     ]
--    }
--   }
--  },
--  "ImageList": {
--   "id": "ImageList",
--   "type": "object",
--   "description": "Contains a list of disk image resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The disk image resources.",
--     "items": {
--      "$ref": "Image"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#imageList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "Instance": {
--   "id": "Instance",
--   "type": "object",
--   "description": "An instance resource.",
--   "properties": {
--    "canIpForward": {
--     "type": "boolean",
--     "description": "Allows this instance to send packets with source IP addresses other than its own and receive packets with destination IP addresses other than its own. If this instance will be used as an IP gateway or it will be set as the next-hop in a Route resource, say true. If unsure, leave this set to false."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "disks": {
--     "type": "array",
--     "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.",
--     "items": {
--      "$ref": "AttachedDisk"
--     }
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#instance"
--    },
--    "machineType": {
--     "type": "string",
--     "description": "URL of the machine type resource describing which machine type to use to host the instance; provided by the client when the instance is created.",
--     "annotations": {
--      "required": [
--       "compute.instances.insert"
--      ]
--     }
--    },
--    "metadata": {
--     "$ref": "Metadata",
--     "description": "Metadata key/value pairs assigned to this instance. Consists of custom metadata or predefined keys; see Instance documentation for more information."
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.instances.insert"
--      ]
--     }
--    },
--    "networkInterfaces": {
--     "type": "array",
--     "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.",
--     "items": {
--      "$ref": "NetworkInterface"
--     }
--    },
--    "scheduling": {
--     "$ref": "Scheduling",
--     "description": "Scheduling options for this instance."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    },
--    "serviceAccounts": {
--     "type": "array",
--     "description": "A list of service accounts each with specified scopes, for which access tokens are to be made available to the instance through metadata queries.",
--     "items": {
--      "$ref": "ServiceAccount"
--     }
--    },
--    "status": {
--     "type": "string",
--     "description": "Instance status. One of the following values: \"PROVISIONING\", \"STAGING\", \"RUNNING\", \"STOPPING\", \"STOPPED\", \"TERMINATED\" (output only).",
--     "enum": [
--      "PROVISIONING",
--      "RUNNING",
--      "STAGING",
--      "STOPPED",
--      "STOPPING",
--      "TERMINATED"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "statusMessage": {
--     "type": "string",
--     "description": "An optional, human-readable explanation of the status (output only)."
--    },
--    "tags": {
--     "$ref": "Tags",
--     "description": "A list of tags to be applied to this instance. Used to identify valid sources or targets for network firewalls. Provided by the client on instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035."
--    },
--    "zone": {
--     "type": "string",
--     "description": "URL of the zone where the instance resides (output only)."
--    }
--   }
--  },
--  "InstanceAggregatedList": {
--   "id": "InstanceAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped instance lists.",
--     "additionalProperties": {
--      "$ref": "InstancesScopedList",
--      "description": "Name of the scope containing this set of instances."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#instanceAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "InstanceList": {
--   "id": "InstanceList",
--   "type": "object",
--   "description": "Contains a list of instance resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "A list of instance resources.",
--     "items": {
--      "$ref": "Instance"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#instanceList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "InstanceProperties": {
--   "id": "InstanceProperties",
--   "type": "object",
--   "description": "",
--   "properties": {
--    "canIpForward": {
--     "type": "boolean",
--     "description": "Allows instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or will be set as the next-hop in a Route resource, say true. If unsure, leave this set to false."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description for the instances created based on the instance template resource; provided by the client when the template is created."
--    },
--    "disks": {
--     "type": "array",
--     "description": "Array of disks associated with instance created based on this template.",
--     "items": {
--      "$ref": "AttachedDisk"
--     }
--    },
--    "machineType": {
--     "type": "string",
--     "description": "Name of the machine type resource describing which machine type to use to host the instances created based on this template; provided by the client when the instance template is created.",
--     "annotations": {
--      "required": [
--       "compute.instanceTemplates.insert"
--      ]
--     }
--    },
--    "metadata": {
--     "$ref": "Metadata",
--     "description": "Metadata key/value pairs assigned to instances created based on this template. Consists of custom metadata or predefined keys; see Instance documentation for more information."
--    },
--    "networkInterfaces": {
--     "type": "array",
--     "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then the instances created based on this template will have no external internet access.",
--     "items": {
--      "$ref": "NetworkInterface"
--     }
--    },
--    "scheduling": {
--     "$ref": "Scheduling",
--     "description": "Scheduling options for the instances created based on this template."
--    },
--    "serviceAccounts": {
--     "type": "array",
--     "description": "A list of service accounts each with specified scopes, for which access tokens are to be made available to the instances created based on this template, through metadata queries.",
--     "items": {
--      "$ref": "ServiceAccount"
--     }
--    },
--    "tags": {
--     "$ref": "Tags",
--     "description": "A list of tags to be applied to the instances created based on this template, used to identify valid sources or targets for network firewalls. Provided by the client on instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035."
--    }
--   }
--  },
--  "InstanceReference": {
--   "id": "InstanceReference",
--   "type": "object",
--   "properties": {
--    "instance": {
--     "type": "string"
--    }
--   }
--  },
--  "InstanceTemplate": {
--   "id": "InstanceTemplate",
--   "type": "object",
--   "description": "An Instance Template resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the instance template resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#instanceTemplate"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the instance template resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.instanceTemplates.insert"
--      ]
--     }
--    },
--    "properties": {
--     "$ref": "InstanceProperties",
--     "description": "The instance properties portion of this instance template resource."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    }
--   }
--  },
--  "InstanceTemplateList": {
--   "id": "InstanceTemplateList",
--   "type": "object",
--   "description": "Contains a list of instance template resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "A list of instance template resources.",
--     "items": {
--      "$ref": "InstanceTemplate"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#instanceTemplateList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "InstancesScopedList": {
--   "id": "InstancesScopedList",
--   "type": "object",
--   "properties": {
--    "instances": {
--     "type": "array",
--     "description": "List of instances contained in this scope.",
--     "items": {
--      "$ref": "Instance"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of instances when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "License": {
--   "id": "License",
--   "type": "object",
--   "description": "A license resource.",
--   "properties": {
--    "chargesUseFee": {
--     "type": "boolean",
--     "description": "If true, the customer will be charged license fee for running software that contains this license on an instance."
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#license"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.images.insert"
--      ]
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    }
--   }
--  },
--  "MachineType": {
--   "id": "MachineType",
--   "type": "object",
--   "description": "A machine type resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "deprecated": {
--     "$ref": "DeprecationStatus",
--     "description": "The deprecation status associated with this machine type."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource."
--    },
--    "guestCpus": {
--     "type": "integer",
--     "description": "Count of CPUs exposed to the instance.",
--     "format": "int32"
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "imageSpaceGb": {
--     "type": "integer",
--     "description": "Space allotted for the image, defined in GB.",
--     "format": "int32"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#machineType"
--    },
--    "maximumPersistentDisks": {
--     "type": "integer",
--     "description": "Maximum persistent disks allowed.",
--     "format": "int32"
--    },
--    "maximumPersistentDisksSizeGb": {
--     "type": "string",
--     "description": "Maximum total persistent disks size (GB) allowed.",
--     "format": "int64"
--    },
--    "memoryMb": {
--     "type": "integer",
--     "description": "Physical memory assigned to the instance, defined in MB.",
--     "format": "int32"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "scratchDisks": {
--     "type": "array",
--     "description": "List of extended scratch disks assigned to the instance.",
--     "items": {
--      "type": "object",
--      "properties": {
--       "diskGb": {
--        "type": "integer",
--        "description": "Size of the scratch disk, defined in GB.",
--        "format": "int32"
--       }
--      }
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "zone": {
--     "type": "string",
--     "description": "URL of the zone where the machine type resides (output only)."
--    }
--   }
--  },
--  "MachineTypeAggregatedList": {
--   "id": "MachineTypeAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped machine type lists.",
--     "additionalProperties": {
--      "$ref": "MachineTypesScopedList",
--      "description": "Name of the scope containing this set of machine types."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#machineTypeAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "MachineTypeList": {
--   "id": "MachineTypeList",
--   "type": "object",
--   "description": "Contains a list of machine type resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The machine type resources.",
--     "items": {
--      "$ref": "MachineType"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#machineTypeList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "MachineTypesScopedList": {
--   "id": "MachineTypesScopedList",
--   "type": "object",
--   "properties": {
--    "machineTypes": {
--     "type": "array",
--     "description": "List of machine types contained in this scope.",
--     "items": {
--      "$ref": "MachineType"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of machine types when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "Metadata": {
--   "id": "Metadata",
--   "type": "object",
--   "description": "A metadata key/value entry.",
--   "properties": {
--    "fingerprint": {
--     "type": "string",
--     "description": "Fingerprint of this resource. A hash of the metadata's contents. This field is used for optimistic locking. An up-to-date metadata fingerprint must be provided in order to modify metadata.",
--     "format": "byte"
--    },
--    "items": {
--     "type": "array",
--     "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.",
--     "items": {
--      "type": "object",
--      "properties": {
--       "key": {
--        "type": "string",
--        "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.",
--        "pattern": "[a-zA-Z0-9-_]{1,128}",
--        "annotations": {
--         "required": [
--          "compute.instances.insert",
--          "compute.projects.setCommonInstanceMetadata"
--         ]
--        }
--       },
--       "value": {
--        "type": "string",
--        "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.",
--        "annotations": {
--         "required": [
--          "compute.instances.insert",
--          "compute.projects.setCommonInstanceMetadata"
--         ]
--        }
--       }
--      }
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#metadata"
--    }
--   }
--  },
--  "Network": {
--   "id": "Network",
--   "type": "object",
--   "description": "A network resource.",
--   "properties": {
--    "IPv4Range": {
--     "type": "string",
--     "description": "Required; The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.",
--     "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}",
--     "annotations": {
--      "required": [
--       "compute.networks.insert"
--      ]
--     }
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "gatewayIPv4": {
--     "type": "string",
--     "description": "An optional address that is used for default routing to other networks. This must be within the range specified by IPv4Range, and is typically the first usable address in that range. If not specified, the default value is the first usable address in IPv4Range.",
--     "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}"
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#network"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.networks.insert"
--      ]
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    }
--   }
--  },
--  "NetworkInterface": {
--   "id": "NetworkInterface",
--   "type": "object",
--   "description": "A network interface resource attached to an instance.",
--   "properties": {
--    "accessConfigs": {
--     "type": "array",
--     "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.",
--     "items": {
--      "$ref": "AccessConfig"
--     }
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the network interface, determined by the server; for network devices, these are e.g. eth0, eth1, etc. (output only)."
--    },
--    "network": {
--     "type": "string",
--     "description": "URL of the network resource attached to this interface.",
--     "annotations": {
--      "required": [
--       "compute.instances.insert"
--      ]
--     }
--    },
--    "networkIP": {
--     "type": "string",
--     "description": "An optional IPV4 internal network address assigned to the instance for this network interface (output only)."
--    }
--   }
--  },
--  "NetworkList": {
--   "id": "NetworkList",
--   "type": "object",
--   "description": "Contains a list of network resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The network resources.",
--     "items": {
--      "$ref": "Network"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#networkList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "Operation": {
--   "id": "Operation",
--   "type": "object",
--   "description": "An operation resource, used to manage asynchronous API requests.",
--   "properties": {
--    "clientOperationId": {
--     "type": "string",
--     "description": "An optional identifier specified by the client when the mutation was initiated. Must be unique for all operation resources in the project (output only)."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "endTime": {
--     "type": "string",
--     "description": "The time that this operation was completed. This is in RFC 3339 format (output only)."
--    },
--    "error": {
--     "type": "object",
--     "description": "If errors occurred during processing of this operation, this field will be populated (output only).",
--     "properties": {
--      "errors": {
--       "type": "array",
--       "description": "The array of errors encountered while processing this operation.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "code": {
--          "type": "string",
--          "description": "The error type identifier for this error."
--         },
--         "location": {
--          "type": "string",
--          "description": "Indicates the field in the request which caused the error. This property is optional."
--         },
--         "message": {
--          "type": "string",
--          "description": "An optional, human-readable error message."
--         }
--        }
--       }
--      }
--     }
--    },
--    "httpErrorMessage": {
--     "type": "string",
--     "description": "If operation fails, the HTTP error message returned, e.g. NOT FOUND. (output only)."
--    },
--    "httpErrorStatusCode": {
--     "type": "integer",
--     "description": "If operation fails, the HTTP error status code returned, e.g. 404. (output only).",
--     "format": "int32"
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "insertTime": {
--     "type": "string",
--     "description": "The time that this operation was requested. This is in RFC 3339 format (output only)."
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#operation"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource (output only)."
--    },
--    "operationType": {
--     "type": "string",
--     "description": "Type of the operation. Examples include \"insert\", \"update\", and \"delete\" (output only)."
--    },
--    "progress": {
--     "type": "integer",
--     "description": "An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess at when the operation will be complete. This number should be monotonically increasing as the operation progresses (output only).",
--     "format": "int32"
--    },
--    "region": {
--     "type": "string",
--     "description": "URL of the region where the operation resides (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "startTime": {
--     "type": "string",
--     "description": "The time that this operation was started by the server. This is in RFC 3339 format (output only)."
--    },
--    "status": {
--     "type": "string",
--     "description": "Status of the operation. Can be one of the following: \"PENDING\", \"RUNNING\", or \"DONE\" (output only).",
--     "enum": [
--      "DONE",
--      "PENDING",
--      "RUNNING"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      ""
--     ]
--    },
--    "statusMessage": {
--     "type": "string",
--     "description": "An optional textual description of the current status of the operation (output only)."
--    },
--    "targetId": {
--     "type": "string",
--     "description": "Unique target id which identifies a particular incarnation of the target (output only).",
--     "format": "uint64"
--    },
--    "targetLink": {
--     "type": "string",
--     "description": "URL of the resource the operation is mutating (output only)."
--    },
--    "user": {
--     "type": "string",
--     "description": "User who requested the operation, for example \"user@example.com\" (output only)."
--    },
--    "warnings": {
--     "type": "array",
--     "description": "If warning messages are generated during processing of this operation, this field will be populated (output only).",
--     "items": {
--      "type": "object",
--      "properties": {
--       "code": {
--        "type": "string",
--        "description": "The warning type identifier for this warning.",
--        "enum": [
--         "DEPRECATED_RESOURCE_USED",
--         "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--         "INJECTED_KERNELS_DEPRECATED",
--         "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--         "NEXT_HOP_CANNOT_IP_FORWARD",
--         "NEXT_HOP_INSTANCE_NOT_FOUND",
--         "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--         "NEXT_HOP_NOT_RUNNING",
--         "NO_RESULTS_ON_PAGE",
--         "REQUIRED_TOS_AGREEMENT",
--         "RESOURCE_NOT_DELETED",
--         "UNREACHABLE"
--        ],
--        "enumDescriptions": [
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         ""
--        ]
--       },
--       "data": {
--        "type": "array",
--        "description": "Metadata for this warning in 'key: value' format.",
--        "items": {
--         "type": "object",
--         "properties": {
--          "key": {
--           "type": "string",
--           "description": "A key for the warning data."
--          },
--          "value": {
--           "type": "string",
--           "description": "A warning data value corresponding to the key."
--          }
--         }
--        }
--       },
--       "message": {
--        "type": "string",
--        "description": "Optional human-readable details for this warning."
--       }
--      }
--     }
--    },
--    "zone": {
--     "type": "string",
--     "description": "URL of the zone where the operation resides (output only)."
--    }
--   }
--  },
--  "OperationAggregatedList": {
--   "id": "OperationAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped operation lists.",
--     "additionalProperties": {
--      "$ref": "OperationsScopedList",
--      "description": "Name of the scope containing this set of operations."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#operationAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "OperationList": {
--   "id": "OperationList",
--   "type": "object",
--   "description": "Contains a list of operation resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The operation resources.",
--     "items": {
--      "$ref": "Operation"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#operationList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "OperationsScopedList": {
--   "id": "OperationsScopedList",
--   "type": "object",
--   "properties": {
--    "operations": {
--     "type": "array",
--     "description": "List of operations contained in this scope.",
--     "items": {
--      "$ref": "Operation"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of operations when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "PathMatcher": {
--   "id": "PathMatcher",
--   "type": "object",
--   "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default_service will be used.",
--   "properties": {
--    "defaultService": {
--     "type": "string",
--     "description": "The URL to the BackendService resource. This will be used if none of the 'pathRules' defined by this PathMatcher is met by the URL's path portion."
--    },
--    "description": {
--     "type": "string"
--    },
--    "name": {
--     "type": "string",
--     "description": "The name to which this PathMatcher is referred by the HostRule."
--    },
--    "pathRules": {
--     "type": "array",
--     "description": "The list of path rules.",
--     "items": {
--      "$ref": "PathRule"
--     }
--    }
--   }
--  },
--  "PathRule": {
--   "id": "PathRule",
--   "type": "object",
--   "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.",
--   "properties": {
--    "paths": {
--     "type": "array",
--     "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "service": {
--     "type": "string",
--     "description": "The URL of the BackendService resource if this rule is matched."
--    }
--   }
--  },
--  "Project": {
--   "id": "Project",
--   "type": "object",
--   "description": "A project resource. Projects can be created only in the APIs Console. Unless marked otherwise, values can only be modified in the console.",
--   "properties": {
--    "commonInstanceMetadata": {
--     "$ref": "Metadata",
--     "description": "Metadata key/value pairs available to all instances contained in this project."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#project"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource."
--    },
--    "quotas": {
--     "type": "array",
--     "description": "Quotas assigned to this project.",
--     "items": {
--      "$ref": "Quota"
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "usageExportLocation": {
--     "$ref": "UsageExportLocation",
--     "description": "The location in Cloud Storage and naming method of the daily usage report."
--    }
--   }
--  },
--  "Quota": {
--   "id": "Quota",
--   "type": "object",
--   "description": "A quotas entry.",
--   "properties": {
--    "limit": {
--     "type": "number",
--     "description": "Quota limit for this metric.",
--     "format": "double"
--    },
--    "metric": {
--     "type": "string",
--     "description": "Name of the quota metric.",
--     "enum": [
--      "BACKEND_SERVICES",
--      "CPUS",
--      "DISKS",
--      "DISKS_TOTAL_GB",
--      "EPHEMERAL_ADDRESSES",
--      "FIREWALLS",
--      "FORWARDING_RULES",
--      "HEALTH_CHECKS",
--      "IMAGES",
--      "IMAGES_TOTAL_GB",
--      "INSTANCES",
--      "IN_USE_ADDRESSES",
--      "KERNELS",
--      "KERNELS_TOTAL_GB",
--      "LOCAL_SSD_TOTAL_GB",
--      "NETWORKS",
--      "OPERATIONS",
--      "ROUTES",
--      "SNAPSHOTS",
--      "SSD_TOTAL_GB",
--      "STATIC_ADDRESSES",
--      "TARGET_HTTP_PROXIES",
--      "TARGET_INSTANCES",
--      "TARGET_POOLS",
--      "URL_MAPS"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "usage": {
--     "type": "number",
--     "description": "Current usage of this metric.",
--     "format": "double"
--    }
--   }
--  },
--  "Region": {
--   "id": "Region",
--   "type": "object",
--   "description": "Region resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "deprecated": {
--     "$ref": "DeprecationStatus",
--     "description": "The deprecation status associated with this region."
--    },
--    "description": {
--     "type": "string",
--     "description": "Textual description of the resource."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#region"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource."
--    },
--    "quotas": {
--     "type": "array",
--     "description": "Quotas assigned to this region.",
--     "items": {
--      "$ref": "Quota"
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "status": {
--     "type": "string",
--     "description": "Status of the region, \"UP\" or \"DOWN\".",
--     "enum": [
--      "DOWN",
--      "UP"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "zones": {
--     "type": "array",
--     "description": "A list of zones homed in this region, in the form of resource URLs.",
--     "items": {
--      "type": "string"
--     }
--    }
--   }
--  },
--  "RegionList": {
--   "id": "RegionList",
--   "type": "object",
--   "description": "Contains a list of region resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The region resources.",
--     "items": {
--      "$ref": "Region"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#regionList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "ResourceGroupReference": {
--   "id": "ResourceGroupReference",
--   "type": "object",
--   "properties": {
--    "group": {
--     "type": "string",
--     "description": "A URI referencing one of the resource views listed in the backend service."
--    }
--   }
--  },
--  "Route": {
--   "id": "Route",
--   "type": "object",
--   "description": "The route resource. A Route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with VMs by tag and the set of Routes for a particular VM is called its routing table. For each packet leaving a VM, the system searches that VM's routing table for a single best matching Route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the Route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching Routes. The packet is then forwarded as specified by the next_hop field of the winning Route -- either to another VM destination, a VM gateway or a GCE operated gateway. Packets that do not match any Route in the sending VM's routing table will be dropped.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "destRange": {
--     "type": "string",
--     "description": "Which packets does this route apply to?",
--     "annotations": {
--      "required": [
--       "compute.routes.insert"
--      ]
--     }
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#route"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--     "annotations": {
--      "required": [
--       "compute.routes.insert"
--      ]
--     }
--    },
--    "network": {
--     "type": "string",
--     "description": "URL of the network to which this route is applied; provided by the client when the route is created.",
--     "annotations": {
--      "required": [
--       "compute.routes.insert"
--      ]
--     }
--    },
--    "nextHopGateway": {
--     "type": "string",
--     "description": "The URL to a gateway that should handle matching packets."
--    },
--    "nextHopInstance": {
--     "type": "string",
--     "description": "The URL to an instance that should handle matching packets."
--    },
--    "nextHopIp": {
--     "type": "string",
--     "description": "The network IP address of an instance that should handle matching packets."
--    },
--    "nextHopNetwork": {
--     "type": "string",
--     "description": "The URL of the local network if it should handle matching packets."
--    },
--    "priority": {
--     "type": "integer",
--     "description": "Breaks ties between Routes of equal specificity. Routes with smaller values win when tied with routes with larger values.",
--     "format": "uint32",
--     "annotations": {
--      "required": [
--       "compute.routes.insert"
--      ]
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "tags": {
--     "type": "array",
--     "description": "A list of instance tags to which this route applies.",
--     "items": {
--      "type": "string"
--     },
--     "annotations": {
--      "required": [
--       "compute.routes.insert"
--      ]
--     }
--    },
--    "warnings": {
--     "type": "array",
--     "description": "If potential misconfigurations are detected for this route, this field will be populated with warning messages.",
--     "items": {
--      "type": "object",
--      "properties": {
--       "code": {
--        "type": "string",
--        "description": "The warning type identifier for this warning.",
--        "enum": [
--         "DEPRECATED_RESOURCE_USED",
--         "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--         "INJECTED_KERNELS_DEPRECATED",
--         "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--         "NEXT_HOP_CANNOT_IP_FORWARD",
--         "NEXT_HOP_INSTANCE_NOT_FOUND",
--         "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--         "NEXT_HOP_NOT_RUNNING",
--         "NO_RESULTS_ON_PAGE",
--         "REQUIRED_TOS_AGREEMENT",
--         "RESOURCE_NOT_DELETED",
--         "UNREACHABLE"
--        ],
--        "enumDescriptions": [
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         "",
--         ""
--        ]
--       },
--       "data": {
--        "type": "array",
--        "description": "Metadata for this warning in 'key: value' format.",
--        "items": {
--         "type": "object",
--         "properties": {
--          "key": {
--           "type": "string",
--           "description": "A key for the warning data."
--          },
--          "value": {
--           "type": "string",
--           "description": "A warning data value corresponding to the key."
--          }
--         }
--        }
--       },
--       "message": {
--        "type": "string",
--        "description": "Optional human-readable details for this warning."
--       }
--      }
--     }
--    }
--   }
--  },
--  "RouteList": {
--   "id": "RouteList",
--   "type": "object",
--   "description": "Contains a list of route resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The route resources.",
--     "items": {
--      "$ref": "Route"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#routeList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "Scheduling": {
--   "id": "Scheduling",
--   "type": "object",
--   "description": "Scheduling options for an Instance.",
--   "properties": {
--    "automaticRestart": {
--     "type": "boolean",
--     "description": "Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user)."
--    },
--    "onHostMaintenance": {
--     "type": "string",
--     "description": "How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance.",
--     "enum": [
--      "MIGRATE",
--      "TERMINATE"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    }
--   }
--  },
--  "SerialPortOutput": {
--   "id": "SerialPortOutput",
--   "type": "object",
--   "description": "An instance serial console output.",
--   "properties": {
--    "contents": {
--     "type": "string",
--     "description": "The contents of the console output."
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#serialPortOutput"
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    }
--   }
--  },
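
As a rough illustration of the SerialPortOutput schema, the instances.getSerialPortOutput method returns it directly; a small sketch reusing a *compute.Service built as in the route example (project, zone and instance names are placeholders):

package gceexample

import (
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// printConsole fetches and prints the serial console output of one VM.
// The project, zone and instance identifiers are hypothetical.
func printConsole(svc *compute.Service) error {
    out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-a", "my-vm").Do()
    if err != nil {
        return err
    }
    fmt.Println(out.Contents) // the "contents" property described above
    return nil
}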
--  "ServiceAccount": {
--   "id": "ServiceAccount",
--   "type": "object",
--   "description": "A service account.",
--   "properties": {
--    "email": {
--     "type": "string",
--     "description": "Email address of the service account."
--    },
--    "scopes": {
--     "type": "array",
--     "description": "The list of scopes to be made available for this service account.",
--     "items": {
--      "type": "string"
--     }
--    }
--   }
--  },
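
The Scheduling and ServiceAccount schemas end up as fields of the generated compute.Instance struct; a hedged sketch of how they might be filled in when building an instance body (the email, scope and maintenance policy are assumptions, not values from this document):

package gceexample

import compute "google.golang.org/api/compute/v1"

// withScheduling attaches example scheduling and service-account settings to
// an instance body. All concrete values are placeholders.
func withScheduling(inst *compute.Instance) {
    inst.Scheduling = &compute.Scheduling{
        OnHostMaintenance: "MIGRATE", // one of the enum values listed above
    }
    inst.ServiceAccounts = []*compute.ServiceAccount{{
        Email:  "default",
        Scopes: []string{"https://www.googleapis.com/auth/devstorage.read_only"},
    }}
}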
--  "Snapshot": {
--   "id": "Snapshot",
--   "type": "object",
--   "description": "A persistent disk snapshot resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "diskSizeGb": {
--     "type": "string",
--     "description": "Size of the persistent disk snapshot, specified in GB (output only).",
--     "format": "int64"
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#snapshot"
--    },
--    "licenses": {
--     "type": "array",
--     "description": "Public visible licenses.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "sourceDisk": {
--     "type": "string",
--     "description": "The source disk used to create this snapshot."
--    },
--    "sourceDiskId": {
--     "type": "string",
--     "description": "The 'id' value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name."
--    },
--    "status": {
--     "type": "string",
--     "description": "The status of the persistent disk snapshot (output only).",
--     "enum": [
--      "CREATING",
--      "DELETING",
--      "FAILED",
--      "READY",
--      "UPLOADING"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "storageBytes": {
--     "type": "string",
--     "description": "Size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.",
--     "format": "int64"
--    },
--    "storageBytesStatus": {
--     "type": "string",
--     "description": "An indicator of whether storageBytes is in a stable state or is being adjusted as a result of shared storage reallocation.",
--     "enum": [
--      "UPDATING",
--      "UP_TO_DATE"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    }
--   }
--  },
--  "SnapshotList": {
--   "id": "SnapshotList",
--   "type": "object",
--   "description": "Contains a list of persistent disk snapshot resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The persistent snapshot resources.",
--     "items": {
--      "$ref": "Snapshot"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#snapshotList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "Tags": {
--   "id": "Tags",
--   "type": "object",
--   "description": "A set of instance tags.",
--   "properties": {
--    "fingerprint": {
--     "type": "string",
--     "description": "Fingerprint of this resource. A hash of the tags stored in this object. This field is used for optimistic locking. An up-to-date tags fingerprint must be provided in order to modify tags.",
--     "format": "byte"
--    },
--    "items": {
--     "type": "array",
--     "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.",
--     "items": {
--      "type": "string"
--     }
--    }
--   }
--  },
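
The fingerprint note above is the usual read-modify-write pattern: fetch the instance, reuse its current tags fingerprint, and send the new tag list back through instances.setTags. A sketch under the same assumptions as the earlier examples:

package gceexample

import compute "google.golang.org/api/compute/v1"

// addTag appends one tag to an instance, passing the current fingerprint so
// the server can detect concurrent modifications (optimistic locking).
func addTag(svc *compute.Service, project, zone, instance, tag string) error {
    inst, err := svc.Instances.Get(project, zone, instance).Do()
    if err != nil {
        return err
    }
    tags := &compute.Tags{
        Items:       append(inst.Tags.Items, tag),
        Fingerprint: inst.Tags.Fingerprint, // must be up to date, per the description above
    }
    _, err = svc.Instances.SetTags(project, zone, instance, tags).Do()
    return err
}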
--  "TargetHttpProxy": {
--   "id": "TargetHttpProxy",
--   "type": "object",
--   "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#targetHttpProxy"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "urlMap": {
--     "type": "string",
--     "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService."
--    }
--   }
--  },
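
A TargetHttpProxy carries little more than a name and a reference to a UrlMap, so creating one in the generated Go client is short; a hedged sketch (the map's selfLink and the proxy name are placeholders):

package gceexample

import compute "google.golang.org/api/compute/v1"

// createProxy points a new HTTP proxy at an existing UrlMap, identified by
// its selfLink URL. Both names are placeholders.
func createProxy(svc *compute.Service, project, urlMapLink string) error {
    proxy := &compute.TargetHttpProxy{
        Name:   "example-proxy",
        UrlMap: urlMapLink, // e.g. the selfLink of a previously inserted UrlMap
    }
    _, err := svc.TargetHttpProxies.Insert(project, proxy).Do()
    return err
}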
--  "TargetHttpProxyList": {
--   "id": "TargetHttpProxyList",
--   "type": "object",
--   "description": "Contains a list of TargetHttpProxy resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The TargetHttpProxy resources.",
--     "items": {
--      "$ref": "TargetHttpProxy"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetHttpProxyList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "TargetInstance": {
--   "id": "TargetInstance",
--   "type": "object",
--   "description": "A TargetInstance resource. This resource defines an endpoint VM that terminates traffic of certain protocols.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "instance": {
--     "type": "string",
--     "description": "The URL to the instance that terminates the relevant traffic."
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#targetInstance"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "natPolicy": {
--     "type": "string",
--     "description": "NAT option controlling how IPs are NAT'ed to the VM. Currently only NO_NAT (default value) is supported.",
--     "enum": [
--      "NO_NAT"
--     ],
--     "enumDescriptions": [
--      ""
--     ]
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "zone": {
--     "type": "string",
--     "description": "URL of the zone where the target instance resides (output only)."
--    }
--   }
--  },
--  "TargetInstanceAggregatedList": {
--   "id": "TargetInstanceAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped target instance lists.",
--     "additionalProperties": {
--      "$ref": "TargetInstancesScopedList",
--      "description": "Name of the scope containing this set of target instances."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetInstanceAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "TargetInstanceList": {
--   "id": "TargetInstanceList",
--   "type": "object",
--   "description": "Contains a list of TargetInstance resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The TargetInstance resources.",
--     "items": {
--      "$ref": "TargetInstance"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetInstanceList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "TargetInstancesScopedList": {
--   "id": "TargetInstancesScopedList",
--   "type": "object",
--   "properties": {
--    "targetInstances": {
--     "type": "array",
--     "description": "List of target instances contained in this scope.",
--     "items": {
--      "$ref": "TargetInstance"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of target instances when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "TargetPool": {
--   "id": "TargetPool",
--   "type": "object",
--   "description": "A TargetPool resource. This resource defines a pool of VMs, associated HttpHealthCheck resources, and the fallback TargetPool.",
--   "properties": {
--    "backupPool": {
--     "type": "string",
--     "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its 'failoverRatio' field is properly set to a value between [0, 1].\n\n'backupPool' and 'failoverRatio' together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below 'failoverRatio', traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' and 'backupPool' are not set, or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "failoverRatio": {
--     "type": "number",
--     "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, 'backupPool' must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.",
--     "format": "float"
--    },
--    "healthChecks": {
--     "type": "array",
--     "description": "A list of URLs to the HttpHealthCheck resource. A member VM in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member VMs will be considered healthy at all times.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "instances": {
--     "type": "array",
--     "description": "A list of resource URLs to the member VMs serving this pool. They must live in zones contained in the same region as this pool.",
--     "items": {
--      "type": "string"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#targetPool"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "region": {
--     "type": "string",
--     "description": "URL of the region where the target pool resides (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "sessionAffinity": {
--     "type": "string",
--     "description": "Session affinity option, must be one of the following values: 'NONE': Connections from the same client IP may go to any VM in the pool; 'CLIENT_IP': Connections from the same client IP will go to the same VM in the pool while that VM remains healthy. 'CLIENT_IP_PROTO': Connections from the same client IP with the same IP protocol will go to the same VM in the pool while that VM remains healthy.",
--     "enum": [
--      "CLIENT_IP",
--      "CLIENT_IP_PROTO",
--      "NONE"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      ""
--     ]
--    }
--   }
--  },
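
Read together, 'backupPool', 'failoverRatio' and 'sessionAffinity' describe the pool's failover and stickiness behaviour; a rough sketch of creating a primary pool with a backup, assuming the backup pool, health check and member instance already exist (all URLs and names are placeholders):

package gceexample

import compute "google.golang.org/api/compute/v1"

// createPrimaryPool creates a target pool that fails over to backupPoolLink
// once fewer than half of its members are healthy.
func createPrimaryPool(svc *compute.Service, project, region, backupPoolLink string) error {
    pool := &compute.TargetPool{
        Name:            "example-primary-pool",
        HealthChecks:    []string{"https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/basic-check"},
        Instances:       []string{"https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/my-vm"},
        SessionAffinity: "CLIENT_IP",
        BackupPool:      backupPoolLink,
        FailoverRatio:   0.5, // below 50% healthy members, traffic shifts to the backup pool
    }
    _, err := svc.TargetPools.Insert(project, region, pool).Do()
    return err
}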
--  "TargetPoolAggregatedList": {
--   "id": "TargetPoolAggregatedList",
--   "type": "object",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "object",
--     "description": "A map of scoped target pool lists.",
--     "additionalProperties": {
--      "$ref": "TargetPoolsScopedList",
--      "description": "Name of the scope containing this set of target pools."
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetPoolAggregatedList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "TargetPoolInstanceHealth": {
--   "id": "TargetPoolInstanceHealth",
--   "type": "object",
--   "properties": {
--    "healthStatus": {
--     "type": "array",
--     "items": {
--      "$ref": "HealthStatus"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetPoolInstanceHealth"
--    }
--   }
--  },
--  "TargetPoolList": {
--   "id": "TargetPoolList",
--   "type": "object",
--   "description": "Contains a list of TargetPool resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The TargetPool resources.",
--     "items": {
--      "$ref": "TargetPool"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#targetPoolList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "TargetPoolsAddHealthCheckRequest": {
--   "id": "TargetPoolsAddHealthCheckRequest",
--   "type": "object",
--   "properties": {
--    "healthChecks": {
--     "type": "array",
--     "description": "Health check URLs to be added to targetPool.",
--     "items": {
--      "$ref": "HealthCheckReference"
--     }
--    }
--   }
--  },
--  "TargetPoolsAddInstanceRequest": {
--   "id": "TargetPoolsAddInstanceRequest",
--   "type": "object",
--   "properties": {
--    "instances": {
--     "type": "array",
--     "description": "URLs of the instances to be added to targetPool.",
--     "items": {
--      "$ref": "InstanceReference"
--     }
--    }
--   }
--  },
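
The add/remove request bodies above wrap plain lists of references, so adding a VM to a pool reduces to one call; a sketch where the instance URL is a placeholder:

package gceexample

import compute "google.golang.org/api/compute/v1"

// addToPool adds one instance, identified by its full resource URL, to an
// existing target pool.
func addToPool(svc *compute.Service, project, region, pool, instanceURL string) error {
    req := &compute.TargetPoolsAddInstanceRequest{
        Instances: []*compute.InstanceReference{{Instance: instanceURL}},
    }
    _, err := svc.TargetPools.AddInstance(project, region, pool, req).Do()
    return err
}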
--  "TargetPoolsRemoveHealthCheckRequest": {
--   "id": "TargetPoolsRemoveHealthCheckRequest",
--   "type": "object",
--   "properties": {
--    "healthChecks": {
--     "type": "array",
--     "description": "Health check URLs to be removed from targetPool.",
--     "items": {
--      "$ref": "HealthCheckReference"
--     }
--    }
--   }
--  },
--  "TargetPoolsRemoveInstanceRequest": {
--   "id": "TargetPoolsRemoveInstanceRequest",
--   "type": "object",
--   "properties": {
--    "instances": {
--     "type": "array",
--     "description": "URLs of the instances to be removed from targetPool.",
--     "items": {
--      "$ref": "InstanceReference"
--     }
--    }
--   }
--  },
--  "TargetPoolsScopedList": {
--   "id": "TargetPoolsScopedList",
--   "type": "object",
--   "properties": {
--    "targetPools": {
--     "type": "array",
--     "description": "List of target pools contained in this scope.",
--     "items": {
--      "$ref": "TargetPool"
--     }
--    },
--    "warning": {
--     "type": "object",
--     "description": "Informational warning which replaces the list of target pools when the list is empty.",
--     "properties": {
--      "code": {
--       "type": "string",
--       "description": "The warning type identifier for this warning.",
--       "enum": [
--        "DEPRECATED_RESOURCE_USED",
--        "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
--        "INJECTED_KERNELS_DEPRECATED",
--        "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
--        "NEXT_HOP_CANNOT_IP_FORWARD",
--        "NEXT_HOP_INSTANCE_NOT_FOUND",
--        "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
--        "NEXT_HOP_NOT_RUNNING",
--        "NO_RESULTS_ON_PAGE",
--        "REQUIRED_TOS_AGREEMENT",
--        "RESOURCE_NOT_DELETED",
--        "UNREACHABLE"
--       ],
--       "enumDescriptions": [
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        "",
--        ""
--       ]
--      },
--      "data": {
--       "type": "array",
--       "description": "Metadata for this warning in 'key: value' format.",
--       "items": {
--        "type": "object",
--        "properties": {
--         "key": {
--          "type": "string",
--          "description": "A key for the warning data."
--         },
--         "value": {
--          "type": "string",
--          "description": "A warning data value corresponding to the key."
--         }
--        }
--       }
--      },
--      "message": {
--       "type": "string",
--       "description": "Optional human-readable details for this warning."
--      }
--     }
--    }
--   }
--  },
--  "TargetReference": {
--   "id": "TargetReference",
--   "type": "object",
--   "properties": {
--    "target": {
--     "type": "string"
--    }
--   }
--  },
--  "TestFailure": {
--   "id": "TestFailure",
--   "type": "object",
--   "properties": {
--    "actualService": {
--     "type": "string"
--    },
--    "expectedService": {
--     "type": "string"
--    },
--    "host": {
--     "type": "string"
--    },
--    "path": {
--     "type": "string"
--    }
--   }
--  },
--  "UrlMap": {
--   "id": "UrlMap",
--   "type": "object",
--   "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "defaultService": {
--     "type": "string",
--     "description": "The URL of the BackendService resource if none of the hostRules match."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional textual description of the resource; provided by the client when the resource is created."
--    },
--    "fingerprint": {
--     "type": "string",
--     "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.",
--     "format": "byte"
--    },
--    "hostRules": {
--     "type": "array",
--     "description": "The list of HostRules to use against the URL.",
--     "items": {
--      "$ref": "HostRule"
--     }
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#urlMap"
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
--     "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
--    },
--    "pathMatchers": {
--     "type": "array",
--     "description": "The list of named PathMatchers to use against the URL.",
--     "items": {
--      "$ref": "PathMatcher"
--     }
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "tests": {
--     "type": "array",
--     "description": "The list of expected URL mappings. A request to update this UrlMap will succeed only if all of the test cases pass.",
--     "items": {
--      "$ref": "UrlMapTest"
--     }
--    }
--   }
--  },
--  "UrlMapList": {
--   "id": "UrlMapList",
--   "type": "object",
--   "description": "Contains a list of UrlMap resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The UrlMap resources.",
--     "items": {
--      "$ref": "UrlMap"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#urlMapList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  },
--  "UrlMapReference": {
--   "id": "UrlMapReference",
--   "type": "object",
--   "properties": {
--    "urlMap": {
--     "type": "string"
--    }
--   }
--  },
--  "UrlMapTest": {
--   "id": "UrlMapTest",
--   "type": "object",
--   "description": "Message for the expected URL mappings.",
--   "properties": {
--    "description": {
--     "type": "string",
--     "description": "Description of this test case."
--    },
--    "host": {
--     "type": "string",
--     "description": "Host portion of the URL."
--    },
--    "path": {
--     "type": "string",
--     "description": "Path portion of the URL."
--    },
--    "service": {
--     "type": "string",
--     "description": "Expected BackendService resource the given URL should be mapped to."
--    }
--   }
--  },
--  "UrlMapValidationResult": {
--   "id": "UrlMapValidationResult",
--   "type": "object",
--   "description": "Message representing the validation result for a UrlMap.",
--   "properties": {
--    "loadErrors": {
--     "type": "array",
--     "items": {
--      "type": "string"
--     }
--    },
--    "loadSucceeded": {
--     "type": "boolean",
--     "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons."
--    },
--    "testFailures": {
--     "type": "array",
--     "items": {
--      "$ref": "TestFailure"
--     }
--    },
--    "testPassed": {
--     "type": "boolean",
--     "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures' indicates the reason for failure."
--    }
--   }
--  },
--  "UrlMapsValidateRequest": {
--   "id": "UrlMapsValidateRequest",
--   "type": "object",
--   "properties": {
--    "resource": {
--     "$ref": "UrlMap",
--     "description": "Content of the UrlMap to be validated."
--    }
--   }
--  },
--  "UrlMapsValidateResponse": {
--   "id": "UrlMapsValidateResponse",
--   "type": "object",
--   "properties": {
--    "result": {
--     "$ref": "UrlMapValidationResult"
--    }
--   }
--  },
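
The UrlMapTest and UrlMapsValidate* schemas support dry-run validation of a map before it is saved; a hedged sketch that validates one expected mapping (host, path and backend service link are placeholders):

package gceexample

import (
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// validateMap checks that a static path on example.com would be routed to the
// given backend service before the UrlMap is actually updated.
func validateMap(svc *compute.Service, project string, m *compute.UrlMap, backendLink string) error {
    m.Tests = []*compute.UrlMapTest{{
        Host:    "example.com",
        Path:    "/static/index.html",
        Service: backendLink,
    }}
    resp, err := svc.UrlMaps.Validate(project, m.Name, &compute.UrlMapsValidateRequest{Resource: m}).Do()
    if err != nil {
        return err
    }
    if !resp.Result.LoadSucceeded || !resp.Result.TestPassed {
        return fmt.Errorf("url map validation failed: %+v", resp.Result.TestFailures)
    }
    return nil
}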
--  "UsageExportLocation": {
--   "id": "UsageExportLocation",
--   "type": "object",
--   "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.",
--   "properties": {
--    "bucketName": {
--     "type": "string",
--     "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This is simply the bucket name, with no \"gs://\" or \"https://storage.googleapis.com/\" in front of it."
--    },
--    "reportNamePrefix": {
--     "type": "string",
--     "description": "An optional prefix for the name of the usage report object stored in bucket_name. If not supplied, defaults to \"usage_\". The report is stored as a CSV file named <report_name_prefix>_gce_<YYYYMMDD>.csv, where <YYYYMMDD> is the day of the usage according to Pacific Time. The prefix should conform to Cloud Storage object naming conventions."
--    }
--   }
--  },
--  "Zone": {
--   "id": "Zone",
--   "type": "object",
--   "description": "A zone resource.",
--   "properties": {
--    "creationTimestamp": {
--     "type": "string",
--     "description": "Creation timestamp in RFC3339 text format (output only)."
--    },
--    "deprecated": {
--     "$ref": "DeprecationStatus",
--     "description": "The deprecation status associated with this zone."
--    },
--    "description": {
--     "type": "string",
--     "description": "Textual description of the resource."
--    },
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only).",
--     "format": "uint64"
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of the resource.",
--     "default": "compute#zone"
--    },
--    "maintenanceWindows": {
--     "type": "array",
--     "description": "Scheduled maintenance windows for the zone. When the zone is in a maintenance window, all resources which reside in the zone will be unavailable.",
--     "items": {
--      "type": "object",
--      "properties": {
--       "beginTime": {
--        "type": "string",
--        "description": "Begin time of the maintenance window, in RFC 3339 format."
--       },
--       "description": {
--        "type": "string",
--        "description": "Textual description of the maintenance window."
--       },
--       "endTime": {
--        "type": "string",
--        "description": "End time of the maintenance window, in RFC 3339 format."
--       },
--       "name": {
--        "type": "string",
--        "description": "Name of the maintenance window."
--       }
--      }
--     }
--    },
--    "name": {
--     "type": "string",
--     "description": "Name of the resource."
--    },
--    "region": {
--     "type": "string",
--     "description": "Full URL reference to the region which hosts the zone (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for the resource (output only)."
--    },
--    "status": {
--     "type": "string",
--     "description": "Status of the zone. \"UP\" or \"DOWN\".",
--     "enum": [
--      "DOWN",
--      "UP"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    }
--   }
--  },
--  "ZoneList": {
--   "id": "ZoneList",
--   "type": "object",
--   "description": "Contains a list of zone resources.",
--   "properties": {
--    "id": {
--     "type": "string",
--     "description": "Unique identifier for the resource; defined by the server (output only)."
--    },
--    "items": {
--     "type": "array",
--     "description": "The zone resources.",
--     "items": {
--      "$ref": "Zone"
--     }
--    },
--    "kind": {
--     "type": "string",
--     "description": "Type of resource.",
--     "default": "compute#zoneList"
--    },
--    "nextPageToken": {
--     "type": "string",
--     "description": "A token used to continue a truncated list request (output only)."
--    },
--    "selfLink": {
--     "type": "string",
--     "description": "Server defined URL for this resource (output only)."
--    }
--   }
--  }
-- },
-- "resources": {
--  "addresses": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.addresses.aggregatedList",
--     "path": "{project}/aggregated/addresses",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of addresses grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "AddressAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "delete": {
--     "id": "compute.addresses.delete",
--     "path": "{project}/regions/{region}/addresses/{address}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified address resource.",
--     "parameters": {
--      "address": {
--       "type": "string",
--       "description": "Name of the address resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "address"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.addresses.get",
--     "path": "{project}/regions/{region}/addresses/{address}",
--     "httpMethod": "GET",
--     "description": "Returns the specified address resource.",
--     "parameters": {
--      "address": {
--       "type": "string",
--       "description": "Name of the address resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "address"
--     ],
--     "response": {
--      "$ref": "Address"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.addresses.insert",
--     "path": "{project}/regions/{region}/addresses",
--     "httpMethod": "POST",
--     "description": "Creates an address resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "request": {
--      "$ref": "Address"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.addresses.list",
--     "path": "{project}/regions/{region}/addresses",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of address resources contained within the specified region.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "response": {
--      "$ref": "AddressList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
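
The filter/maxResults/pageToken parameters above follow the usual list-pagination pattern; a minimal sketch that walks every page of addresses in one region, reusing a *compute.Service built as in the first example (project and region are placeholders):

package gceexample

import (
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// listAddresses prints every reserved address in a region, following
// nextPageToken until the listing is exhausted.
func listAddresses(svc *compute.Service, project, region string) error {
    token := ""
    for {
        call := svc.Addresses.List(project, region).MaxResults(500)
        if token != "" {
            call = call.PageToken(token)
        }
        resp, err := call.Do()
        if err != nil {
            return err
        }
        for _, a := range resp.Items {
            fmt.Println(a.Name, a.Address, a.Status)
        }
        if resp.NextPageToken == "" {
            return nil
        }
        token = resp.NextPageToken
    }
}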
--  "backendServices": {
--   "methods": {
--    "delete": {
--     "id": "compute.backendServices.delete",
--     "path": "{project}/global/backendServices/{backendService}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified BackendService resource.",
--     "parameters": {
--      "backendService": {
--       "type": "string",
--       "description": "Name of the BackendService resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "backendService"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.backendServices.get",
--     "path": "{project}/global/backendServices/{backendService}",
--     "httpMethod": "GET",
--     "description": "Returns the specified BackendService resource.",
--     "parameters": {
--      "backendService": {
--       "type": "string",
--       "description": "Name of the BackendService resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "backendService"
--     ],
--     "response": {
--      "$ref": "BackendService"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "getHealth": {
--     "id": "compute.backendServices.getHealth",
--     "path": "{project}/global/backendServices/{backendService}/getHealth",
--     "httpMethod": "POST",
--     "description": "Gets the most recent health check results for this BackendService.",
--     "parameters": {
--      "backendService": {
--       "type": "string",
--       "description": "Name of the BackendService resource to which the queried instance belongs.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "backendService"
--     ],
--     "request": {
--      "$ref": "ResourceGroupReference"
--     },
--     "response": {
--      "$ref": "BackendServiceGroupHealth"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.backendServices.insert",
--     "path": "{project}/global/backendServices",
--     "httpMethod": "POST",
--     "description": "Creates a BackendService resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "BackendService"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.backendServices.list",
--     "path": "{project}/global/backendServices",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of BackendService resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "BackendServiceList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "patch": {
--     "id": "compute.backendServices.patch",
--     "path": "{project}/global/backendServices/{backendService}",
--     "httpMethod": "PATCH",
--     "description": "Update the entire content of the BackendService resource. This method supports patch semantics.",
--     "parameters": {
--      "backendService": {
--       "type": "string",
--       "description": "Name of the BackendService resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "backendService"
--     ],
--     "request": {
--      "$ref": "BackendService"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "update": {
--     "id": "compute.backendServices.update",
--     "path": "{project}/global/backendServices/{backendService}",
--     "httpMethod": "PUT",
--     "description": "Update the entire content of the BackendService resource.",
--     "parameters": {
--      "backendService": {
--       "type": "string",
--       "description": "Name of the BackendService resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "backendService"
--     ],
--     "request": {
--      "$ref": "BackendService"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
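
getHealth is the one backendServices method here that takes a request body (ResourceGroupReference); a sketch that prints the health of every instance in one backend group (the group URL and service name are placeholders):

package gceexample

import (
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// printBackendHealth asks for the most recent health-check results of one
// instance group behind a backend service.
func printBackendHealth(svc *compute.Service, project, backendService, groupURL string) error {
    ref := &compute.ResourceGroupReference{Group: groupURL}
    health, err := svc.BackendServices.GetHealth(project, backendService, ref).Do()
    if err != nil {
        return err
    }
    for _, hs := range health.HealthStatus {
        fmt.Println(hs.Instance, hs.HealthState)
    }
    return nil
}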
--  "diskTypes": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.diskTypes.aggregatedList",
--     "path": "{project}/aggregated/diskTypes",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of disk type resources grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "DiskTypeAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "get": {
--     "id": "compute.diskTypes.get",
--     "path": "{project}/zones/{zone}/diskTypes/{diskType}",
--     "httpMethod": "GET",
--     "description": "Returns the specified disk type resource.",
--     "parameters": {
--      "diskType": {
--       "type": "string",
--       "description": "Name of the disk type resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "diskType"
--     ],
--     "response": {
--      "$ref": "DiskType"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.diskTypes.list",
--     "path": "{project}/zones/{zone}/diskTypes",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of disk type resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "DiskTypeList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
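
aggregatedList returns a map keyed by scope rather than a flat list; a small sketch showing how that shape is consumed in the generated Go client (project is a placeholder):

package gceexample

import (
    "fmt"

    compute "google.golang.org/api/compute/v1"
)

// listDiskTypes prints every disk type in every zone of the project, using
// the scope-keyed map that aggregatedList returns.
func listDiskTypes(svc *compute.Service, project string) error {
    resp, err := svc.DiskTypes.AggregatedList(project).Do()
    if err != nil {
        return err
    }
    for scope, scoped := range resp.Items {
        for _, dt := range scoped.DiskTypes {
            fmt.Println(scope, dt.Name)
        }
    }
    return nil
}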
--  "disks": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.disks.aggregatedList",
--     "path": "{project}/aggregated/disks",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of disks grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "DiskAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "createSnapshot": {
--     "id": "compute.disks.createSnapshot",
--     "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot",
--     "httpMethod": "POST",
--     "parameters": {
--      "disk": {
--       "type": "string",
--       "description": "Name of the persistent disk resource to snapshot.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "disk"
--     ],
--     "request": {
--      "$ref": "Snapshot"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "delete": {
--     "id": "compute.disks.delete",
--     "path": "{project}/zones/{zone}/disks/{disk}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified persistent disk resource.",
--     "parameters": {
--      "disk": {
--       "type": "string",
--       "description": "Name of the persistent disk resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "disk"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.disks.get",
--     "path": "{project}/zones/{zone}/disks/{disk}",
--     "httpMethod": "GET",
--     "description": "Returns the specified persistent disk resource.",
--     "parameters": {
--      "disk": {
--       "type": "string",
--       "description": "Name of the persistent disk resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "disk"
--     ],
--     "response": {
--      "$ref": "Disk"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.disks.insert",
--     "path": "{project}/zones/{zone}/disks",
--     "httpMethod": "POST",
--     "description": "Creates a persistent disk resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "sourceImage": {
--       "type": "string",
--       "description": "Optional. Source image to restore onto a disk.",
--       "location": "query"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "request": {
--      "$ref": "Disk"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.disks.list",
--     "path": "{project}/zones/{zone}/disks",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of persistent disk resources contained within the specified zone.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "DiskList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
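[Editorial note] The compute.disks block removed above covers aggregatedList, createSnapshot, delete, get, insert and list. A hypothetical helper showing insert followed by createSnapshot; svc is the authenticated *compute.Service from the earlier sketch, and the names and size are invented:

// Sketch only: create a 10 GB disk, then snapshot it.
func createAndSnapshotDisk(svc *compute.Service, project, zone string) error {
	disk := &compute.Disk{Name: "example-disk", SizeGb: 10}
	if _, err := svc.Disks.Insert(project, zone, disk).Do(); err != nil {
		return err
	}
	snap := &compute.Snapshot{Name: "example-snapshot"}
	_, err := svc.Disks.CreateSnapshot(project, zone, "example-disk", snap).Do()
	return err
}

Both calls return an Operation rather than the finished resource; production code would poll the insert Operation to DONE before snapshotting (see the operations sketch further below).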
--  "firewalls": {
--   "methods": {
--    "delete": {
--     "id": "compute.firewalls.delete",
--     "path": "{project}/global/firewalls/{firewall}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified firewall resource.",
--     "parameters": {
--      "firewall": {
--       "type": "string",
--       "description": "Name of the firewall resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "firewall"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.firewalls.get",
--     "path": "{project}/global/firewalls/{firewall}",
--     "httpMethod": "GET",
--     "description": "Returns the specified firewall resource.",
--     "parameters": {
--      "firewall": {
--       "type": "string",
--       "description": "Name of the firewall resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "firewall"
--     ],
--     "response": {
--      "$ref": "Firewall"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.firewalls.insert",
--     "path": "{project}/global/firewalls",
--     "httpMethod": "POST",
--     "description": "Creates a firewall resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Firewall"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.firewalls.list",
--     "path": "{project}/global/firewalls",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of firewall resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "FirewallList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "patch": {
--     "id": "compute.firewalls.patch",
--     "path": "{project}/global/firewalls/{firewall}",
--     "httpMethod": "PATCH",
--     "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.",
--     "parameters": {
--      "firewall": {
--       "type": "string",
--       "description": "Name of the firewall resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "firewall"
--     ],
--     "request": {
--      "$ref": "Firewall"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "update": {
--     "id": "compute.firewalls.update",
--     "path": "{project}/global/firewalls/{firewall}",
--     "httpMethod": "PUT",
--     "description": "Updates the specified firewall resource with the data included in the request.",
--     "parameters": {
--      "firewall": {
--       "type": "string",
--       "description": "Name of the firewall resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "firewall"
--     ],
--     "request": {
--      "$ref": "Firewall"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
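[Editorial note] The compute.firewalls methods above include both patch (partial update via PATCH) and update (full replacement via PUT) alongside the usual CRUD calls. A minimal, hypothetical insert; the rule contents are made up for illustration:

// Sketch only: open TCP port 80 from anywhere.
func insertFirewall(svc *compute.Service, project string) error {
	fw := &compute.Firewall{
		Name:         "allow-http",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"80"}},
		},
	}
	_, err := svc.Firewalls.Insert(project, fw).Do()
	return err
}

The patch and update variants map to svc.Firewalls.Patch and svc.Firewalls.Update with the same Firewall body; patch leaves unspecified fields untouched.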
--  "forwardingRules": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.forwardingRules.aggregatedList",
--     "path": "{project}/aggregated/forwardingRules",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of forwarding rules grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "ForwardingRuleAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "delete": {
--     "id": "compute.forwardingRules.delete",
--     "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified ForwardingRule resource.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "forwardingRule"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.forwardingRules.get",
--     "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
--     "httpMethod": "GET",
--     "description": "Returns the specified ForwardingRule resource.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "forwardingRule"
--     ],
--     "response": {
--      "$ref": "ForwardingRule"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.forwardingRules.insert",
--     "path": "{project}/regions/{region}/forwardingRules",
--     "httpMethod": "POST",
--     "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "request": {
--      "$ref": "ForwardingRule"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.forwardingRules.list",
--     "path": "{project}/regions/{region}/forwardingRules",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "response": {
--      "$ref": "ForwardingRuleList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "setTarget": {
--     "id": "compute.forwardingRules.setTarget",
--     "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget",
--     "httpMethod": "POST",
--     "description": "Changes target url for forwarding rule.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource in which target is to be set.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "forwardingRule"
--     ],
--     "request": {
--      "$ref": "TargetReference"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
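[Editorial note] compute.forwardingRules adds a region path parameter to every call and a setTarget action that takes a TargetReference body. A hedged sketch; the rule name, region and target URL arguments are placeholders:

// Sketch only: repoint a regional forwarding rule at a different target.
func retargetRule(svc *compute.Service, project, region, rule, targetURL string) error {
	ref := &compute.TargetReference{Target: targetURL}
	_, err := svc.ForwardingRules.SetTarget(project, region, rule, ref).Do()
	return err
}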
--  "globalAddresses": {
--   "methods": {
--    "delete": {
--     "id": "compute.globalAddresses.delete",
--     "path": "{project}/global/addresses/{address}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified address resource.",
--     "parameters": {
--      "address": {
--       "type": "string",
--       "description": "Name of the address resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "address"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.globalAddresses.get",
--     "path": "{project}/global/addresses/{address}",
--     "httpMethod": "GET",
--     "description": "Returns the specified address resource.",
--     "parameters": {
--      "address": {
--       "type": "string",
--       "description": "Name of the address resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "address"
--     ],
--     "response": {
--      "$ref": "Address"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.globalAddresses.insert",
--     "path": "{project}/global/addresses",
--     "httpMethod": "POST",
--     "description": "Creates an address resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Address"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.globalAddresses.list",
--     "path": "{project}/global/addresses",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of global address resources.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "AddressList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
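[Editorial note] compute.globalAddresses is the regionless counterpart of the address methods; insert takes only a project plus an Address body. A hypothetical example reserving and then listing global addresses, with an invented name and filter, assuming the fmt import from the first sketch:

// Sketch only: reserve a global address, then list matching ones.
func reserveGlobalAddress(svc *compute.Service, project string) error {
	if _, err := svc.GlobalAddresses.Insert(project, &compute.Address{Name: "example-ip"}).Do(); err != nil {
		return err
	}
	list, err := svc.GlobalAddresses.List(project).Filter("name eq example-.*").Do()
	if err != nil {
		return err
	}
	for _, a := range list.Items {
		fmt.Println(a.Name, a.Address)
	}
	return nil
}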
--  "globalForwardingRules": {
--   "methods": {
--    "delete": {
--     "id": "compute.globalForwardingRules.delete",
--     "path": "{project}/global/forwardingRules/{forwardingRule}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified ForwardingRule resource.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "forwardingRule"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.globalForwardingRules.get",
--     "path": "{project}/global/forwardingRules/{forwardingRule}",
--     "httpMethod": "GET",
--     "description": "Returns the specified ForwardingRule resource.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "forwardingRule"
--     ],
--     "response": {
--      "$ref": "ForwardingRule"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.globalForwardingRules.insert",
--     "path": "{project}/global/forwardingRules",
--     "httpMethod": "POST",
--     "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "ForwardingRule"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.globalForwardingRules.list",
--     "path": "{project}/global/forwardingRules",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of ForwardingRule resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "ForwardingRuleList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "setTarget": {
--     "id": "compute.globalForwardingRules.setTarget",
--     "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget",
--     "httpMethod": "POST",
--     "description": "Changes target url for forwarding rule.",
--     "parameters": {
--      "forwardingRule": {
--       "type": "string",
--       "description": "Name of the ForwardingRule resource in which target is to be set.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "forwardingRule"
--     ],
--     "request": {
--      "$ref": "TargetReference"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
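[Editorial note] compute.globalForwardingRules mirrors the regional forwardingRules group minus the region parameter, exposing delete, get, insert, list and setTarget. A short sketch of the list call, again assuming svc and the fmt import; printing Name and Target is just for illustration:

// Sketch only: global rules drop the region argument entirely.
func listGlobalRules(svc *compute.Service, project string) error {
	list, err := svc.GlobalForwardingRules.List(project).Do()
	if err != nil {
		return err
	}
	for _, r := range list.Items {
		fmt.Println(r.Name, r.Target)
	}
	return nil
}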
--  "globalOperations": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.globalOperations.aggregatedList",
--     "path": "{project}/aggregated/operations",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of all operations grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "OperationAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "delete": {
--     "id": "compute.globalOperations.delete",
--     "path": "{project}/global/operations/{operation}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "operation"
--     ],
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.globalOperations.get",
--     "path": "{project}/global/operations/{operation}",
--     "httpMethod": "GET",
--     "description": "Retrieves the specified operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "operation"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.globalOperations.list",
--     "path": "{project}/global/operations",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of operation resources contained within the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "OperationList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
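[Editorial note] Nearly every mutating method in this document returns an Operation rather than the final resource, and compute.globalOperations.get is how callers poll global operations to completion. A rough polling loop, assuming the time and fmt packages are imported and opName came from an earlier global call:

// Sketch only: poll a global Operation until its Status reaches DONE.
func waitForGlobalOp(svc *compute.Service, project, opName string) error {
	for {
		op, err := svc.GlobalOperations.Get(project, opName).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			// Failures are reported inside the Operation body, not as an HTTP error.
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation failed: %s", op.Error.Errors[0].Message)
			}
			return nil
		}
		time.Sleep(2 * time.Second)
	}
}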
--  "httpHealthChecks": {
--   "methods": {
--    "delete": {
--     "id": "compute.httpHealthChecks.delete",
--     "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified HttpHealthCheck resource.",
--     "parameters": {
--      "httpHealthCheck": {
--       "type": "string",
--       "description": "Name of the HttpHealthCheck resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "httpHealthCheck"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.httpHealthChecks.get",
--     "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--     "httpMethod": "GET",
--     "description": "Returns the specified HttpHealthCheck resource.",
--     "parameters": {
--      "httpHealthCheck": {
--       "type": "string",
--       "description": "Name of the HttpHealthCheck resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "httpHealthCheck"
--     ],
--     "response": {
--      "$ref": "HttpHealthCheck"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.httpHealthChecks.insert",
--     "path": "{project}/global/httpHealthChecks",
--     "httpMethod": "POST",
--     "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "HttpHealthCheck"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.httpHealthChecks.list",
--     "path": "{project}/global/httpHealthChecks",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "HttpHealthCheckList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "patch": {
--     "id": "compute.httpHealthChecks.patch",
--     "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--     "httpMethod": "PATCH",
--     "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.",
--     "parameters": {
--      "httpHealthCheck": {
--       "type": "string",
--       "description": "Name of the HttpHealthCheck resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "httpHealthCheck"
--     ],
--     "request": {
--      "$ref": "HttpHealthCheck"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "update": {
--     "id": "compute.httpHealthChecks.update",
--     "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--     "httpMethod": "PUT",
--     "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.",
--     "parameters": {
--      "httpHealthCheck": {
--       "type": "string",
--       "description": "Name of the HttpHealthCheck resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "httpHealthCheck"
--     ],
--     "request": {
--      "$ref": "HttpHealthCheck"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
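[Editorial note] compute.httpHealthChecks is the legacy HTTP health-check resource used by target pools; like firewalls it offers both patch and update. A hypothetical insert, with the port and request path chosen arbitrarily:

// Sketch only: a basic HTTP health check on port 8080.
func insertHealthCheck(svc *compute.Service, project string) error {
	hc := &compute.HttpHealthCheck{
		Name:        "example-check",
		Port:        8080,
		RequestPath: "/healthz",
	}
	_, err := svc.HttpHealthChecks.Insert(project, hc).Do()
	return err
}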
--  "images": {
--   "methods": {
--    "delete": {
--     "id": "compute.images.delete",
--     "path": "{project}/global/images/{image}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified image resource.",
--     "parameters": {
--      "image": {
--       "type": "string",
--       "description": "Name of the image resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "image"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "deprecate": {
--     "id": "compute.images.deprecate",
--     "path": "{project}/global/images/{image}/deprecate",
--     "httpMethod": "POST",
--     "description": "Sets the deprecation status of an image. If no message body is given, clears the deprecation status instead.",
--     "parameters": {
--      "image": {
--       "type": "string",
--       "description": "Image name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "image"
--     ],
--     "request": {
--      "$ref": "DeprecationStatus"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.images.get",
--     "path": "{project}/global/images/{image}",
--     "httpMethod": "GET",
--     "description": "Returns the specified image resource.",
--     "parameters": {
--      "image": {
--       "type": "string",
--       "description": "Name of the image resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "image"
--     ],
--     "response": {
--      "$ref": "Image"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.images.insert",
--     "path": "{project}/global/images",
--     "httpMethod": "POST",
--     "description": "Creates an image resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Image"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/devstorage.full_control",
--      "https://www.googleapis.com/auth/devstorage.read_only",
--      "https://www.googleapis.com/auth/devstorage.read_write"
--     ]
--    },
--    "list": {
--     "id": "compute.images.list",
--     "path": "{project}/global/images",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of image resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "ImageList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
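[Editorial note] The compute.images.deprecate method above is unusual in that an empty request body clears the deprecation status rather than setting it. A sketch of marking one image deprecated in favor of another; both image names and the replacement URL are placeholders:

// Sketch only: deprecate old-image and point users at new-image.
func deprecateImage(svc *compute.Service, project string) error {
	status := &compute.DeprecationStatus{
		State:       "DEPRECATED",
		Replacement: "https://www.googleapis.com/compute/v1/projects/" + project + "/global/images/new-image",
	}
	_, err := svc.Images.Deprecate(project, "old-image", status).Do()
	return err
}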
--  "instanceTemplates": {
--   "methods": {
--    "delete": {
--     "id": "compute.instanceTemplates.delete",
--     "path": "{project}/global/instanceTemplates/{instanceTemplate}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified instance template resource.",
--     "parameters": {
--      "instanceTemplate": {
--       "type": "string",
--       "description": "Name of the instance template resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "instanceTemplate"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.instanceTemplates.get",
--     "path": "{project}/global/instanceTemplates/{instanceTemplate}",
--     "httpMethod": "GET",
--     "description": "Returns the specified instance template resource.",
--     "parameters": {
--      "instanceTemplate": {
--       "type": "string",
--       "description": "Name of the instance template resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "instanceTemplate"
--     ],
--     "response": {
--      "$ref": "InstanceTemplate"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.instanceTemplates.insert",
--     "path": "{project}/global/instanceTemplates",
--     "httpMethod": "POST",
--     "description": "Creates an instance template resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "InstanceTemplate"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.instanceTemplates.list",
--     "path": "{project}/global/instanceTemplates",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of instance template resources contained within the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "InstanceTemplateList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
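[Editorial note] The maxResults and pageToken parameters that appear on every list method in this document implement simple token pagination, and compute.instanceTemplates.list is a convenient place to show the loop. A sketch, assuming the fmt import and the authenticated svc from the first block:

// Sketch only: walk all pages of instance templates in a project.
func listAllTemplates(svc *compute.Service, project string) error {
	token := ""
	for {
		call := svc.InstanceTemplates.List(project)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, t := range page.Items {
			fmt.Println(t.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		token = page.NextPageToken
	}
}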
--  "instances": {
--   "methods": {
--    "addAccessConfig": {
--     "id": "compute.instances.addAccessConfig",
--     "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig",
--     "httpMethod": "POST",
--     "description": "Adds an access config to an instance's network interface.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "networkInterface": {
--       "type": "string",
--       "description": "Network interface name.",
--       "required": true,
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance",
--      "networkInterface"
--     ],
--     "request": {
--      "$ref": "AccessConfig"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "aggregatedList": {
--     "id": "compute.instances.aggregatedList",
--     "path": "{project}/aggregated/instances",
--     "httpMethod": "GET",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "InstanceAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "attachDisk": {
--     "id": "compute.instances.attachDisk",
--     "path": "{project}/zones/{zone}/instances/{instance}/attachDisk",
--     "httpMethod": "POST",
--     "description": "Attaches a disk resource to an instance.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "request": {
--      "$ref": "AttachedDisk"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "delete": {
--     "id": "compute.instances.delete",
--     "path": "{project}/zones/{zone}/instances/{instance}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified instance resource.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "deleteAccessConfig": {
--     "id": "compute.instances.deleteAccessConfig",
--     "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig",
--     "httpMethod": "POST",
--     "description": "Deletes an access config from an instance's network interface.",
--     "parameters": {
--      "accessConfig": {
--       "type": "string",
--       "description": "Access config name.",
--       "required": true,
--       "location": "query"
--      },
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "networkInterface": {
--       "type": "string",
--       "description": "Network interface name.",
--       "required": true,
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance",
--      "accessConfig",
--      "networkInterface"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "detachDisk": {
--     "id": "compute.instances.detachDisk",
--     "path": "{project}/zones/{zone}/instances/{instance}/detachDisk",
--     "httpMethod": "POST",
--     "description": "Detaches a disk from an instance.",
--     "parameters": {
--      "deviceName": {
--       "type": "string",
--       "description": "Disk device name to detach.",
--       "required": true,
--       "pattern": "\\w[\\w.-]{0,254}",
--       "location": "query"
--      },
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance",
--      "deviceName"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.instances.get",
--     "path": "{project}/zones/{zone}/instances/{instance}",
--     "httpMethod": "GET",
--     "description": "Returns the specified instance resource.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "response": {
--      "$ref": "Instance"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "getSerialPortOutput": {
--     "id": "compute.instances.getSerialPortOutput",
--     "path": "{project}/zones/{zone}/instances/{instance}/serialPort",
--     "httpMethod": "GET",
--     "description": "Returns the specified instance's serial port output.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "response": {
--      "$ref": "SerialPortOutput"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.instances.insert",
--     "path": "{project}/zones/{zone}/instances",
--     "httpMethod": "POST",
--     "description": "Creates an instance resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "request": {
--      "$ref": "Instance"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.instances.list",
--     "path": "{project}/zones/{zone}/instances",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of instance resources contained within the specified zone.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "InstanceList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "reset": {
--     "id": "compute.instances.reset",
--     "path": "{project}/zones/{zone}/instances/{instance}/reset",
--     "httpMethod": "POST",
--     "description": "Performs a hard reset on the instance.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setDiskAutoDelete": {
--     "id": "compute.instances.setDiskAutoDelete",
--     "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete",
--     "httpMethod": "POST",
--     "description": "Sets the auto-delete flag for a disk attached to an instance",
--     "parameters": {
--      "autoDelete": {
--       "type": "boolean",
--       "description": "Whether to auto-delete the disk when the instance is deleted.",
--       "required": true,
--       "location": "query"
--      },
--      "deviceName": {
--       "type": "string",
--       "description": "Disk device name to modify.",
--       "required": true,
--       "pattern": "\\w[\\w.-]{0,254}",
--       "location": "query"
--      },
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance",
--      "autoDelete",
--      "deviceName"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setMetadata": {
--     "id": "compute.instances.setMetadata",
--     "path": "{project}/zones/{zone}/instances/{instance}/setMetadata",
--     "httpMethod": "POST",
--     "description": "Sets metadata for the specified instance to the data included in the request.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "request": {
--      "$ref": "Metadata"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setScheduling": {
--     "id": "compute.instances.setScheduling",
--     "path": "{project}/zones/{zone}/instances/{instance}/setScheduling",
--     "httpMethod": "POST",
--     "description": "Sets an instance's scheduling options.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Instance name.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Project name.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "request": {
--      "$ref": "Scheduling"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setTags": {
--     "id": "compute.instances.setTags",
--     "path": "{project}/zones/{zone}/instances/{instance}/setTags",
--     "httpMethod": "POST",
--     "description": "Sets tags for the specified instance to the data included in the request.",
--     "parameters": {
--      "instance": {
--       "type": "string",
--       "description": "Name of the instance scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "instance"
--     ],
--     "request": {
--      "$ref": "Tags"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
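The "instances" methods above are the REST surface from which the generated Go client in google.golang.org/api/compute/v1 is produced (the discovery document is the generator's input). A minimal sketch of how the "get" and "list" methods map onto that client, assuming Application Default Credentials and placeholder project/zone/instance names:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"golang.org/x/oauth2/google"
    	compute "google.golang.org/api/compute/v1"
    )

    func main() {
    	ctx := context.Background()
    	// Application Default Credentials with the compute scope listed above.
    	client, err := google.DefaultClient(ctx, compute.ComputeScope)
    	if err != nil {
    		log.Fatal(err)
    	}
    	svc, err := compute.New(client)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// compute.instances.get -> GET {project}/zones/{zone}/instances/{instance}
    	inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-instance").Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(inst.Name, inst.Status)

    	// compute.instances.list, using the optional filter/maxResults query parameters.
    	list, err := svc.Instances.List("my-project", "us-central1-a").
    		Filter("name eq my-instance.*").
    		MaxResults(500).
    		Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, i := range list.Items {
    		fmt.Println(i.Name)
    	}
    }

Note that the mutating methods (insert, delete, setMetadata, and so on) respond with an Operation rather than the final resource; see the polling sketch after the regionOperations block below.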
--  "licenses": {
--   "methods": {
--    "get": {
--     "id": "compute.licenses.get",
--     "path": "{project}/global/licenses/{license}",
--     "httpMethod": "GET",
--     "description": "Returns the specified license resource.",
--     "parameters": {
--      "license": {
--       "type": "string",
--       "description": "Name of the license resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "license"
--     ],
--     "response": {
--      "$ref": "License"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "machineTypes": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.machineTypes.aggregatedList",
--     "path": "{project}/aggregated/machineTypes",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of machine type resources grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "MachineTypeAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "get": {
--     "id": "compute.machineTypes.get",
--     "path": "{project}/zones/{zone}/machineTypes/{machineType}",
--     "httpMethod": "GET",
--     "description": "Returns the specified machine type resource.",
--     "parameters": {
--      "machineType": {
--       "type": "string",
--       "description": "Name of the machine type resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "machineType"
--     ],
--     "response": {
--      "$ref": "MachineType"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.machineTypes.list",
--     "path": "{project}/zones/{zone}/machineTypes",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of machine type resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "MachineTypeList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "networks": {
--   "methods": {
--    "delete": {
--     "id": "compute.networks.delete",
--     "path": "{project}/global/networks/{network}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified network resource.",
--     "parameters": {
--      "network": {
--       "type": "string",
--       "description": "Name of the network resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "network"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.networks.get",
--     "path": "{project}/global/networks/{network}",
--     "httpMethod": "GET",
--     "description": "Returns the specified network resource.",
--     "parameters": {
--      "network": {
--       "type": "string",
--       "description": "Name of the network resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "network"
--     ],
--     "response": {
--      "$ref": "Network"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.networks.insert",
--     "path": "{project}/global/networks",
--     "httpMethod": "POST",
--     "description": "Creates a network resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Network"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.networks.list",
--     "path": "{project}/global/networks",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of network resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "NetworkList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
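Every list method in this document carries the same filter/maxResults/pageToken parameters. A short sketch of driving pagination for the networks.list method above, reusing svc (and the imports) from the instances sketch earlier; the project name is a placeholder:

    func listAllNetworks(svc *compute.Service, project string) error {
    	token := ""
    	for {
    		call := svc.Networks.List(project).MaxResults(500)
    		if token != "" {
    			call = call.PageToken(token)
    		}
    		res, err := call.Do()
    		if err != nil {
    			return err
    		}
    		for _, n := range res.Items {
    			fmt.Println(n.Name)
    		}
    		// NextPageToken in the response feeds the pageToken parameter of the next request.
    		if res.NextPageToken == "" {
    			return nil
    		}
    		token = res.NextPageToken
    	}
    }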
--  "projects": {
--   "methods": {
--    "get": {
--     "id": "compute.projects.get",
--     "path": "{project}",
--     "httpMethod": "GET",
--     "description": "Returns the specified project resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project resource to retrieve.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "Project"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "setCommonInstanceMetadata": {
--     "id": "compute.projects.setCommonInstanceMetadata",
--     "path": "{project}/setCommonInstanceMetadata",
--     "httpMethod": "POST",
--     "description": "Sets metadata common to all instances within the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Metadata"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setUsageExportBucket": {
--     "id": "compute.projects.setUsageExportBucket",
--     "path": "{project}/setUsageExportBucket",
--     "httpMethod": "POST",
--     "description": "Sets usage export location",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "UsageExportLocation"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/devstorage.full_control",
--      "https://www.googleapis.com/auth/devstorage.read_only",
--      "https://www.googleapis.com/auth/devstorage.read_write"
--     ]
--    }
--   }
--  },
--  "regionOperations": {
--   "methods": {
--    "delete": {
--     "id": "compute.regionOperations.delete",
--     "path": "{project}/regions/{region}/operations/{operation}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified region-specific operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "operation"
--     ],
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.regionOperations.get",
--     "path": "{project}/regions/{region}/operations/{operation}",
--     "httpMethod": "GET",
--     "description": "Retrieves the specified region-specific operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "operation"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.regionOperations.list",
--     "path": "{project}/regions/{region}/operations",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of operation resources contained within the specified region.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "response": {
--      "$ref": "OperationList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
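Mutating methods throughout this document return an Operation, and region-scoped operations are polled through the compute.regionOperations.get method above until their status reaches DONE. A minimal polling sketch, assuming the same client setup as the instances sketch plus the standard "time" package; resource names are placeholders:

    func waitForRegionOperation(svc *compute.Service, project, region, opName string) error {
    	for {
    		op, err := svc.RegionOperations.Get(project, region, opName).Do()
    		if err != nil {
    			return err
    		}
    		if op.Status == "DONE" {
    			if op.Error != nil && len(op.Error.Errors) > 0 {
    				return fmt.Errorf("operation %s: %s", opName, op.Error.Errors[0].Message)
    			}
    			return nil
    		}
    		time.Sleep(2 * time.Second) // a fixed delay keeps the sketch simple
    	}
    }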
--  "regions": {
--   "methods": {
--    "get": {
--     "id": "compute.regions.get",
--     "path": "{project}/regions/{region}",
--     "httpMethod": "GET",
--     "description": "Returns the specified region resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "response": {
--      "$ref": "Region"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.regions.list",
--     "path": "{project}/regions",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of region resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "RegionList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "routes": {
--   "methods": {
--    "delete": {
--     "id": "compute.routes.delete",
--     "path": "{project}/global/routes/{route}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified route resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "route": {
--       "type": "string",
--       "description": "Name of the route resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "route"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.routes.get",
--     "path": "{project}/global/routes/{route}",
--     "httpMethod": "GET",
--     "description": "Returns the specified route resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "route": {
--       "type": "string",
--       "description": "Name of the route resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "route"
--     ],
--     "response": {
--      "$ref": "Route"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.routes.insert",
--     "path": "{project}/global/routes",
--     "httpMethod": "POST",
--     "description": "Creates a route resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "Route"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.routes.list",
--     "path": "{project}/global/routes",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of route resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "RouteList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "snapshots": {
--   "methods": {
--    "delete": {
--     "id": "compute.snapshots.delete",
--     "path": "{project}/global/snapshots/{snapshot}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified persistent disk snapshot resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "snapshot": {
--       "type": "string",
--       "description": "Name of the persistent disk snapshot resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "snapshot"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.snapshots.get",
--     "path": "{project}/global/snapshots/{snapshot}",
--     "httpMethod": "GET",
--     "description": "Returns the specified persistent disk snapshot resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "snapshot": {
--       "type": "string",
--       "description": "Name of the persistent disk snapshot resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "snapshot"
--     ],
--     "response": {
--      "$ref": "Snapshot"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.snapshots.list",
--     "path": "{project}/global/snapshots",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of persistent disk snapshot resources contained within the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "SnapshotList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "targetHttpProxies": {
--   "methods": {
--    "delete": {
--     "id": "compute.targetHttpProxies.delete",
--     "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified TargetHttpProxy resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "targetHttpProxy": {
--       "type": "string",
--       "description": "Name of the TargetHttpProxy resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "targetHttpProxy"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.targetHttpProxies.get",
--     "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
--     "httpMethod": "GET",
--     "description": "Returns the specified TargetHttpProxy resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "targetHttpProxy": {
--       "type": "string",
--       "description": "Name of the TargetHttpProxy resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "targetHttpProxy"
--     ],
--     "response": {
--      "$ref": "TargetHttpProxy"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.targetHttpProxies.insert",
--     "path": "{project}/global/targetHttpProxies",
--     "httpMethod": "POST",
--     "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "TargetHttpProxy"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.targetHttpProxies.list",
--     "path": "{project}/global/targetHttpProxies",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "TargetHttpProxyList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "setUrlMap": {
--     "id": "compute.targetHttpProxies.setUrlMap",
--     "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap",
--     "httpMethod": "POST",
--     "description": "Changes the URL map for TargetHttpProxy.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "targetHttpProxy": {
--       "type": "string",
--       "description": "Name of the TargetHttpProxy resource whose URL map is to be set.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "targetHttpProxy"
--     ],
--     "request": {
--      "$ref": "UrlMapReference"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
--  "targetInstances": {
--   "methods": {
--    "aggregatedList": {
--     "id": "compute.targetInstances.aggregatedList",
--     "path": "{project}/aggregated/targetInstances",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of target instances grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "TargetInstanceAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "delete": {
--     "id": "compute.targetInstances.delete",
--     "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified TargetInstance resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "targetInstance": {
--       "type": "string",
--       "description": "Name of the TargetInstance resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "targetInstance"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.targetInstances.get",
--     "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
--     "httpMethod": "GET",
--     "description": "Returns the specified TargetInstance resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "targetInstance": {
--       "type": "string",
--       "description": "Name of the TargetInstance resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "targetInstance"
--     ],
--     "response": {
--      "$ref": "TargetInstance"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.targetInstances.insert",
--     "path": "{project}/zones/{zone}/targetInstances",
--     "httpMethod": "POST",
--     "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "request": {
--      "$ref": "TargetInstance"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.targetInstances.list",
--     "path": "{project}/zones/{zone}/targetInstances",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "TargetInstanceList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "targetPools": {
--   "methods": {
--    "addHealthCheck": {
--     "id": "compute.targetPools.addHealthCheck",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck",
--     "httpMethod": "POST",
--     "description": "Adds health check URL to targetPool.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to which health_check_url is to be added.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "TargetPoolsAddHealthCheckRequest"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "addInstance": {
--     "id": "compute.targetPools.addInstance",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance",
--     "httpMethod": "POST",
--     "description": "Adds instance url to targetPool.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to which instance_url is to be added.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "TargetPoolsAddInstanceRequest"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "aggregatedList": {
--     "id": "compute.targetPools.aggregatedList",
--     "path": "{project}/aggregated/targetPools",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of target pools grouped by scope.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "TargetPoolAggregatedList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "delete": {
--     "id": "compute.targetPools.delete",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified TargetPool resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.targetPools.get",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}",
--     "httpMethod": "GET",
--     "description": "Returns the specified TargetPool resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "response": {
--      "$ref": "TargetPool"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "getHealth": {
--     "id": "compute.targetPools.getHealth",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth",
--     "httpMethod": "POST",
--     "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to which the queried instance belongs.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "InstanceReference"
--     },
--     "response": {
--      "$ref": "TargetPoolInstanceHealth"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.targetPools.insert",
--     "path": "{project}/regions/{region}/targetPools",
--     "httpMethod": "POST",
--     "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "request": {
--      "$ref": "TargetPool"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.targetPools.list",
--     "path": "{project}/regions/{region}/targetPools",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of TargetPool resources available to the specified project and region.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region"
--     ],
--     "response": {
--      "$ref": "TargetPoolList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "removeHealthCheck": {
--     "id": "compute.targetPools.removeHealthCheck",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck",
--     "httpMethod": "POST",
--     "description": "Removes health check URL from targetPool.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to which health_check_url is to be removed.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "TargetPoolsRemoveHealthCheckRequest"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "removeInstance": {
--     "id": "compute.targetPools.removeInstance",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance",
--     "httpMethod": "POST",
--     "description": "Removes instance URL from targetPool.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource to which instance_url is to be removed.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "TargetPoolsRemoveInstanceRequest"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "setBackup": {
--     "id": "compute.targetPools.setBackup",
--     "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup",
--     "httpMethod": "POST",
--     "description": "Changes backup pool configurations.",
--     "parameters": {
--      "failoverRatio": {
--       "type": "number",
--       "description": "New failoverRatio value for the containing target pool.",
--       "format": "float",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "region": {
--       "type": "string",
--       "description": "Name of the region scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "targetPool": {
--       "type": "string",
--       "description": "Name of the TargetPool resource for which the backup is to be set.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "region",
--      "targetPool"
--     ],
--     "request": {
--      "$ref": "TargetReference"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
--  "urlMaps": {
--   "methods": {
--    "delete": {
--     "id": "compute.urlMaps.delete",
--     "path": "{project}/global/urlMaps/{urlMap}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified UrlMap resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "urlMap": {
--       "type": "string",
--       "description": "Name of the UrlMap resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "urlMap"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.urlMaps.get",
--     "path": "{project}/global/urlMaps/{urlMap}",
--     "httpMethod": "GET",
--     "description": "Returns the specified UrlMap resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "urlMap": {
--       "type": "string",
--       "description": "Name of the UrlMap resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "urlMap"
--     ],
--     "response": {
--      "$ref": "UrlMap"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "insert": {
--     "id": "compute.urlMaps.insert",
--     "path": "{project}/global/urlMaps",
--     "httpMethod": "POST",
--     "description": "Creates a UrlMap resource in the specified project using the data included in the request.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "request": {
--      "$ref": "UrlMap"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "list": {
--     "id": "compute.urlMaps.list",
--     "path": "{project}/global/urlMaps",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of UrlMap resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "UrlMapList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "patch": {
--     "id": "compute.urlMaps.patch",
--     "path": "{project}/global/urlMaps/{urlMap}",
--     "httpMethod": "PATCH",
--     "description": "Update the entire content of the UrlMap resource. This method supports patch semantics.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "urlMap": {
--       "type": "string",
--       "description": "Name of the UrlMap resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "urlMap"
--     ],
--     "request": {
--      "$ref": "UrlMap"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "update": {
--     "id": "compute.urlMaps.update",
--     "path": "{project}/global/urlMaps/{urlMap}",
--     "httpMethod": "PUT",
--     "description": "Update the entire content of the UrlMap resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "urlMap": {
--       "type": "string",
--       "description": "Name of the UrlMap resource to update.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "urlMap"
--     ],
--     "request": {
--      "$ref": "UrlMap"
--     },
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "validate": {
--     "id": "compute.urlMaps.validate",
--     "path": "{project}/global/urlMaps/{urlMap}/validate",
--     "httpMethod": "POST",
--     "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "urlMap": {
--       "type": "string",
--       "description": "Name of the UrlMap resource to be validated as.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "urlMap"
--     ],
--     "request": {
--      "$ref": "UrlMapsValidateRequest"
--     },
--     "response": {
--      "$ref": "UrlMapsValidateResponse"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    }
--   }
--  },
--  "zoneOperations": {
--   "methods": {
--    "delete": {
--     "id": "compute.zoneOperations.delete",
--     "path": "{project}/zones/{zone}/operations/{operation}",
--     "httpMethod": "DELETE",
--     "description": "Deletes the specified zone-specific operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to delete.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "operation"
--     ],
--     "scopes": [
--      "https://www.googleapis.com/auth/compute"
--     ]
--    },
--    "get": {
--     "id": "compute.zoneOperations.get",
--     "path": "{project}/zones/{zone}/operations/{operation}",
--     "httpMethod": "GET",
--     "description": "Retrieves the specified zone-specific operation resource.",
--     "parameters": {
--      "operation": {
--       "type": "string",
--       "description": "Name of the operation resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone",
--      "operation"
--     ],
--     "response": {
--      "$ref": "Operation"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.zoneOperations.list",
--     "path": "{project}/zones/{zone}/operations",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of operation resources contained within the specified zone.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone scoping this request.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "OperationList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  },
--  "zones": {
--   "methods": {
--    "get": {
--     "id": "compute.zones.get",
--     "path": "{project}/zones/{zone}",
--     "httpMethod": "GET",
--     "description": "Returns the specified zone resource.",
--     "parameters": {
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      },
--      "zone": {
--       "type": "string",
--       "description": "Name of the zone resource to return.",
--       "required": true,
--       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project",
--      "zone"
--     ],
--     "response": {
--      "$ref": "Zone"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    },
--    "list": {
--     "id": "compute.zones.list",
--     "path": "{project}/zones",
--     "httpMethod": "GET",
--     "description": "Retrieves the list of zone resources available to the specified project.",
--     "parameters": {
--      "filter": {
--       "type": "string",
--       "description": "Optional. Filter expression for filtering listed resources.",
--       "location": "query"
--      },
--      "maxResults": {
--       "type": "integer",
--       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--       "default": "500",
--       "format": "uint32",
--       "minimum": "0",
--       "maximum": "500",
--       "location": "query"
--      },
--      "pageToken": {
--       "type": "string",
--       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--       "location": "query"
--      },
--      "project": {
--       "type": "string",
--       "description": "Name of the project scoping this request.",
--       "required": true,
--       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--       "location": "path"
--      }
--     },
--     "parameterOrder": [
--      "project"
--     ],
--     "response": {
--      "$ref": "ZoneList"
--     },
--     "scopes": [
--      "https://www.googleapis.com/auth/compute",
--      "https://www.googleapis.com/auth/compute.readonly"
--     ]
--    }
--   }
--  }
-- }
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-gen.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-gen.go
-deleted file mode 100644
-index 7d193b5..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/compute-gen.go
-+++ /dev/null
-@@ -1,16952 +0,0 @@
--// Package compute provides access to the Compute Engine API.
--//
--// See https://developers.google.com/compute/docs/reference/latest/
--//
--// Usage example:
--//
--//   import "code.google.com/p/google-api-go-client/compute/v1"
--//   ...
--//   computeService, err := compute.New(oauthHttpClient)
--package compute
--
--import (
--	"bytes"
--	"code.google.com/p/google-api-go-client/googleapi"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"net/http"
--	"net/url"
--	"strconv"
--	"strings"
--)
--
--// Always reference these packages, just in case the auto-generated code
--// below doesn't.
--var _ = bytes.NewBuffer
--var _ = strconv.Itoa
--var _ = fmt.Sprintf
--var _ = json.NewDecoder
--var _ = io.Copy
--var _ = url.Parse
--var _ = googleapi.Version
--var _ = errors.New
--var _ = strings.Replace
--
--const apiId = "compute:v1"
--const apiName = "compute"
--const apiVersion = "v1"
--const basePath = "https://www.googleapis.com/compute/v1/projects/"
--
--// OAuth2 scopes used by this API.
--const (
--	// View and manage your Google Compute Engine resources
--	ComputeScope = "https://www.googleapis.com/auth/compute"
--
--	// View your Google Compute Engine resources
--	ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly"
--
--	// Manage your data and permissions in Google Cloud Storage
--	DevstorageFull_controlScope = "https://www.googleapis.com/auth/devstorage.full_control"
--
--	// View your data in Google Cloud Storage
--	DevstorageRead_onlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
--
--	// Manage your data in Google Cloud Storage
--	DevstorageRead_writeScope = "https://www.googleapis.com/auth/devstorage.read_write"
--)
--
--func New(client *http.Client) (*Service, error) {
--	if client == nil {
--		return nil, errors.New("client is nil")
--	}
--	s := &Service{client: client, BasePath: basePath}
--	s.Addresses = NewAddressesService(s)
--	s.BackendServices = NewBackendServicesService(s)
--	s.DiskTypes = NewDiskTypesService(s)
--	s.Disks = NewDisksService(s)
--	s.Firewalls = NewFirewallsService(s)
--	s.ForwardingRules = NewForwardingRulesService(s)
--	s.GlobalAddresses = NewGlobalAddressesService(s)
--	s.GlobalForwardingRules = NewGlobalForwardingRulesService(s)
--	s.GlobalOperations = NewGlobalOperationsService(s)
--	s.HttpHealthChecks = NewHttpHealthChecksService(s)
--	s.Images = NewImagesService(s)
--	s.InstanceTemplates = NewInstanceTemplatesService(s)
--	s.Instances = NewInstancesService(s)
--	s.Licenses = NewLicensesService(s)
--	s.MachineTypes = NewMachineTypesService(s)
--	s.Networks = NewNetworksService(s)
--	s.Projects = NewProjectsService(s)
--	s.RegionOperations = NewRegionOperationsService(s)
--	s.Regions = NewRegionsService(s)
--	s.Routes = NewRoutesService(s)
--	s.Snapshots = NewSnapshotsService(s)
--	s.TargetHttpProxies = NewTargetHttpProxiesService(s)
--	s.TargetInstances = NewTargetInstancesService(s)
--	s.TargetPools = NewTargetPoolsService(s)
--	s.UrlMaps = NewUrlMapsService(s)
--	s.ZoneOperations = NewZoneOperationsService(s)
--	s.Zones = NewZonesService(s)
--	return s, nil
--}
--
--type Service struct {
--	client   *http.Client
--	BasePath string // API endpoint base URL
--
--	Addresses *AddressesService
--
--	BackendServices *BackendServicesService
--
--	DiskTypes *DiskTypesService
--
--	Disks *DisksService
--
--	Firewalls *FirewallsService
--
--	ForwardingRules *ForwardingRulesService
--
--	GlobalAddresses *GlobalAddressesService
--
--	GlobalForwardingRules *GlobalForwardingRulesService
--
--	GlobalOperations *GlobalOperationsService
--
--	HttpHealthChecks *HttpHealthChecksService
--
--	Images *ImagesService
--
--	InstanceTemplates *InstanceTemplatesService
--
--	Instances *InstancesService
--
--	Licenses *LicensesService
--
--	MachineTypes *MachineTypesService
--
--	Networks *NetworksService
--
--	Projects *ProjectsService
--
--	RegionOperations *RegionOperationsService
--
--	Regions *RegionsService
--
--	Routes *RoutesService
--
--	Snapshots *SnapshotsService
--
--	TargetHttpProxies *TargetHttpProxiesService
--
--	TargetInstances *TargetInstancesService
--
--	TargetPools *TargetPoolsService
--
--	UrlMaps *UrlMapsService
--
--	ZoneOperations *ZoneOperationsService
--
--	Zones *ZonesService
--}
--
--func NewAddressesService(s *Service) *AddressesService {
--	rs := &AddressesService{s: s}
--	return rs
--}
--
--type AddressesService struct {
--	s *Service
--}
--
--func NewBackendServicesService(s *Service) *BackendServicesService {
--	rs := &BackendServicesService{s: s}
--	return rs
--}
--
--type BackendServicesService struct {
--	s *Service
--}
--
--func NewDiskTypesService(s *Service) *DiskTypesService {
--	rs := &DiskTypesService{s: s}
--	return rs
--}
--
--type DiskTypesService struct {
--	s *Service
--}
--
--func NewDisksService(s *Service) *DisksService {
--	rs := &DisksService{s: s}
--	return rs
--}
--
--type DisksService struct {
--	s *Service
--}
--
--func NewFirewallsService(s *Service) *FirewallsService {
--	rs := &FirewallsService{s: s}
--	return rs
--}
--
--type FirewallsService struct {
--	s *Service
--}
--
--func NewForwardingRulesService(s *Service) *ForwardingRulesService {
--	rs := &ForwardingRulesService{s: s}
--	return rs
--}
--
--type ForwardingRulesService struct {
--	s *Service
--}
--
--func NewGlobalAddressesService(s *Service) *GlobalAddressesService {
--	rs := &GlobalAddressesService{s: s}
--	return rs
--}
--
--type GlobalAddressesService struct {
--	s *Service
--}
--
--func NewGlobalForwardingRulesService(s *Service) *GlobalForwardingRulesService {
--	rs := &GlobalForwardingRulesService{s: s}
--	return rs
--}
--
--type GlobalForwardingRulesService struct {
--	s *Service
--}
--
--func NewGlobalOperationsService(s *Service) *GlobalOperationsService {
--	rs := &GlobalOperationsService{s: s}
--	return rs
--}
--
--type GlobalOperationsService struct {
--	s *Service
--}
--
--func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService {
--	rs := &HttpHealthChecksService{s: s}
--	return rs
--}
--
--type HttpHealthChecksService struct {
--	s *Service
--}
--
--func NewImagesService(s *Service) *ImagesService {
--	rs := &ImagesService{s: s}
--	return rs
--}
--
--type ImagesService struct {
--	s *Service
--}
--
--func NewInstanceTemplatesService(s *Service) *InstanceTemplatesService {
--	rs := &InstanceTemplatesService{s: s}
--	return rs
--}
--
--type InstanceTemplatesService struct {
--	s *Service
--}
--
--func NewInstancesService(s *Service) *InstancesService {
--	rs := &InstancesService{s: s}
--	return rs
--}
--
--type InstancesService struct {
--	s *Service
--}
--
--func NewLicensesService(s *Service) *LicensesService {
--	rs := &LicensesService{s: s}
--	return rs
--}
--
--type LicensesService struct {
--	s *Service
--}
--
--func NewMachineTypesService(s *Service) *MachineTypesService {
--	rs := &MachineTypesService{s: s}
--	return rs
--}
--
--type MachineTypesService struct {
--	s *Service
--}
--
--func NewNetworksService(s *Service) *NetworksService {
--	rs := &NetworksService{s: s}
--	return rs
--}
--
--type NetworksService struct {
--	s *Service
--}
--
--func NewProjectsService(s *Service) *ProjectsService {
--	rs := &ProjectsService{s: s}
--	return rs
--}
--
--type ProjectsService struct {
--	s *Service
--}
--
--func NewRegionOperationsService(s *Service) *RegionOperationsService {
--	rs := &RegionOperationsService{s: s}
--	return rs
--}
--
--type RegionOperationsService struct {
--	s *Service
--}
--
--func NewRegionsService(s *Service) *RegionsService {
--	rs := &RegionsService{s: s}
--	return rs
--}
--
--type RegionsService struct {
--	s *Service
--}
--
--func NewRoutesService(s *Service) *RoutesService {
--	rs := &RoutesService{s: s}
--	return rs
--}
--
--type RoutesService struct {
--	s *Service
--}
--
--func NewSnapshotsService(s *Service) *SnapshotsService {
--	rs := &SnapshotsService{s: s}
--	return rs
--}
--
--type SnapshotsService struct {
--	s *Service
--}
--
--func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService {
--	rs := &TargetHttpProxiesService{s: s}
--	return rs
--}
--
--type TargetHttpProxiesService struct {
--	s *Service
--}
--
--func NewTargetInstancesService(s *Service) *TargetInstancesService {
--	rs := &TargetInstancesService{s: s}
--	return rs
--}
--
--type TargetInstancesService struct {
--	s *Service
--}
--
--func NewTargetPoolsService(s *Service) *TargetPoolsService {
--	rs := &TargetPoolsService{s: s}
--	return rs
--}
--
--type TargetPoolsService struct {
--	s *Service
--}
--
--func NewUrlMapsService(s *Service) *UrlMapsService {
--	rs := &UrlMapsService{s: s}
--	return rs
--}
--
--type UrlMapsService struct {
--	s *Service
--}
--
--func NewZoneOperationsService(s *Service) *ZoneOperationsService {
--	rs := &ZoneOperationsService{s: s}
--	return rs
--}
--
--type ZoneOperationsService struct {
--	s *Service
--}
--
--func NewZonesService(s *Service) *ZonesService {
--	rs := &ZonesService{s: s}
--	return rs
--}
--
--type ZonesService struct {
--	s *Service
--}
--
--type AccessConfig struct {
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of this access configuration.
--	Name string `json:"name,omitempty"`
--
--	// NatIP: An external IP address associated with this instance. Specify
--	// an unused static IP address available to the project. If not
--	// specified, the external IP will be drawn from a shared ephemeral
--	// pool.
--	NatIP string `json:"natIP,omitempty"`
--
--	// Type: Type of configuration. Must be set to "ONE_TO_ONE_NAT". This
--	// configures port-for-port NAT to the internet.
--	Type string `json:"type,omitempty"`
--}
--
--type Address struct {
--	// Address: The IP address represented by this resource.
--	Address string `json:"address,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Region: URL of the region where the regional address resides (output
--	// only). This field is not applicable to global addresses.
--	Region string `json:"region,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Status: The status of the address (output only).
--	Status string `json:"status,omitempty"`
--
--	// Users: The resources that are using this address resource.
--	Users []string `json:"users,omitempty"`
--}
--
--type AddressAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped address lists.
--	Items map[string]AddressesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type AddressList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The address resources.
--	Items []*Address `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type AddressesScopedList struct {
--	// Addresses: List of addresses contained in this scope.
--	Addresses []*Address `json:"addresses,omitempty"`
--
--	// Warning: Informational warning which replaces the list of addresses
--	// when the list is empty.
--	Warning *AddressesScopedListWarning `json:"warning,omitempty"`
--}
--
--type AddressesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*AddressesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type AddressesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type AttachedDisk struct {
--	// AutoDelete: Whether the disk will be auto-deleted when the instance
--	// is deleted (but not when the disk is detached from the instance).
--	AutoDelete bool `json:"autoDelete,omitempty"`
--
--	// Boot: Indicates that this is a boot disk. VM will use the first
--	// partition of the disk for its root filesystem.
--	Boot bool `json:"boot,omitempty"`
--
--	// DeviceName: Persistent disk only; must be unique within the instance
--	// when specified. This represents a unique device name that is
--	// reflected into the /dev/ tree of a Linux operating system running
--	// within the instance. If not specified, a default will be chosen by
--	// the system.
--	DeviceName string `json:"deviceName,omitempty"`
--
--	// Index: A zero-based index to assign to this disk, where 0 is reserved
--	// for the boot disk. If not specified, the server will choose an
--	// appropriate value (output only).
--	Index int64 `json:"index,omitempty"`
--
--	// InitializeParams: Initialization parameters.
--	InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"`
--
--	Interface string `json:"interface,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Licenses: Public visible licenses.
--	Licenses []string `json:"licenses,omitempty"`
--
--	// Mode: The mode in which to attach this disk, either "READ_WRITE" or
--	// "READ_ONLY".
--	Mode string `json:"mode,omitempty"`
--
--	// Source: Persistent disk only; the URL of the persistent disk
--	// resource.
--	Source string `json:"source,omitempty"`
--
--	// Type: Type of the disk, either "SCRATCH" or "PERSISTENT". Note that
--	// persistent disks must be created before you can specify them here.
--	Type string `json:"type,omitempty"`
--}
--
--type AttachedDiskInitializeParams struct {
--	// DiskName: Name of the disk (when not provided defaults to the name of
--	// the instance).
--	DiskName string `json:"diskName,omitempty"`
--
--	// DiskSizeGb: Size of the disk in base-2 GB.
--	DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
--
--	// DiskType: URL of the disk type resource describing which disk type to
--	// use to create the disk; provided by the client when the disk is
--	// created.
--	DiskType string `json:"diskType,omitempty"`
--
--	// SourceImage: The source image used to create this disk.
--	SourceImage string `json:"sourceImage,omitempty"`
--}
--
--type Backend struct {
--	// BalancingMode: The balancing mode of this backend, default is
--	// UTILIZATION.
--	BalancingMode string `json:"balancingMode,omitempty"`
--
--	// CapacityScaler: The multiplier (a value between 0 and 1e6) of the max
--	// capacity (CPU or RPS, depending on 'balancingMode') the group should
--	// serve up to. 0 means the group is totally drained. Default value is
--	// 1. Valid range is [0, 1e6].
--	CapacityScaler float64 `json:"capacityScaler,omitempty"`
--
--	// Description: An optional textual description of the resource, which
--	// is provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Group: URL of a zonal Cloud Resource View resource. This resource
--	// view defines the list of instances that serve traffic. Member virtual
--	// machine instances from each resource view must live in the same zone
--	// as the resource view itself. No two backends in a backend service are
--	// allowed to use same Resource View resource.
--	Group string `json:"group,omitempty"`
--
--	// MaxRate: The max RPS of the group. Can be used with either balancing
--	// mode, but required if RATE mode. For RATE mode, either maxRate or
--	// maxRatePerInstance must be set.
--	MaxRate int64 `json:"maxRate,omitempty"`
--
--	// MaxRatePerInstance: The max RPS that a single backed instance can
--	// handle. This is used to calculate the capacity of the group. Can be
--	// used in either balancing mode. For RATE mode, either maxRate or
--	// maxRatePerInstance must be set.
--	MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"`
--
--	// MaxUtilization: Used when 'balancingMode' is UTILIZATION. This ratio
--	// defines the CPU utilization target for the group. The default is 0.8.
--	// Valid range is [0, 1].
--	MaxUtilization float64 `json:"maxUtilization,omitempty"`
--}
--
--type BackendService struct {
--	// Backends: The list of backends that serve this BackendService.
--	Backends []*Backend `json:"backends,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Fingerprint: Fingerprint of this resource. A hash of the contents
--	// stored in this object. This field is used in optimistic locking. This
--	// field will be ignored when inserting a BackendService. An up-to-date
--	// fingerprint must be provided in order to update the BackendService.
--	Fingerprint string `json:"fingerprint,omitempty"`
--
--	// HealthChecks: The list of URLs to the HttpHealthCheck resource for
--	// health checking this BackendService. Currently at most one health
--	// check can be specified, and a health check is required.
--	HealthChecks []string `json:"healthChecks,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Port: Deprecated in favor of port_name. The TCP port to connect on
--	// the backend. The default value is 80.
--	Port int64 `json:"port,omitempty"`
--
--	// PortName: Name of backend port. The same name should appear in the
--	// resource views referenced by this service. Required.
--	PortName string `json:"portName,omitempty"`
--
--	Protocol string `json:"protocol,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// TimeoutSec: How many seconds to wait for the backend before
--	// considering it a failed request. Default is 30 seconds.
--	TimeoutSec int64 `json:"timeoutSec,omitempty"`
--}
--
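As a rough illustration of how the Backend and BackendService fields above fit
together, a hypothetical helper in the same package might assemble a
RATE-balanced service as in the sketch below; every name, URL and number is a
placeholder.

// exampleBackendService is a hypothetical helper; it only shows how the
// documented fields relate (RATE mode requires maxRate or maxRatePerInstance,
// capacityScaler 1 means "serve full capacity", timeoutSec defaults to 30).
func exampleBackendService() *BackendService {
	return &BackendService{
		Name:       "web-backend",
		Protocol:   "HTTP",
		PortName:   "http",
		TimeoutSec: 30, // default per the comment above
		HealthChecks: []string{
			// Placeholder URL; at most one health check, and one is required.
			"https://www.googleapis.com/compute/v1/projects/p/global/httpHealthChecks/hc",
		},
		Backends: []*Backend{{
			BalancingMode:  "RATE",
			MaxRate:        100, // required in RATE mode (or MaxRatePerInstance)
			CapacityScaler: 1,   // 1 = full capacity, 0 = drained
			// Placeholder URL of the zonal resource view serving traffic.
			Group: "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-a/resourceViews/rv",
		}},
	}
}
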
--type BackendServiceGroupHealth struct {
--	HealthStatus []*HealthStatus `json:"healthStatus,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--}
--
--type BackendServiceList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The BackendService resources.
--	Items []*BackendService `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type DeprecationStatus struct {
--	// Deleted: An optional RFC3339 timestamp on or after which the
--	// deprecation state of this resource will be changed to DELETED.
--	Deleted string `json:"deleted,omitempty"`
--
--	// Deprecated: An optional RFC3339 timestamp on or after which the
--	// deprecation state of this resource will be changed to DEPRECATED.
--	Deprecated string `json:"deprecated,omitempty"`
--
--	// Obsolete: An optional RFC3339 timestamp on or after which the
--	// deprecation state of this resource will be changed to OBSOLETE.
--	Obsolete string `json:"obsolete,omitempty"`
--
--	// Replacement: A URL of the suggested replacement for the deprecated
--	// resource. The deprecated resource and its replacement must be
--	// resources of the same kind.
--	Replacement string `json:"replacement,omitempty"`
--
--	// State: The deprecation state. Can be "DEPRECATED", "OBSOLETE", or
--	// "DELETED". Operations which create a new resource using a
--	// "DEPRECATED" resource will return successfully, but with a warning
--	// indicating the deprecated resource and recommending its replacement.
--	// New uses of "OBSOLETE" or "DELETED" resources will result in an
--	// error.
--	State string `json:"state,omitempty"`
--}
--
--type Disk struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Licenses: Public visible licenses.
--	Licenses []string `json:"licenses,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Options: Internal use only.
--	Options string `json:"options,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// SizeGb: Size of the persistent disk, specified in GB. This parameter
--	// is optional when creating a disk from a disk image or a snapshot,
--	// otherwise it is required.
--	SizeGb int64 `json:"sizeGb,omitempty,string"`
--
--	// SourceImage: The source image used to create this disk.
--	SourceImage string `json:"sourceImage,omitempty"`
--
--	// SourceImageId: The 'id' value of the image used to create this disk.
--	// This value may be used to determine whether the disk was created from
--	// the current or a previous instance of a given image.
--	SourceImageId string `json:"sourceImageId,omitempty"`
--
--	// SourceSnapshot: The source snapshot used to create this disk.
--	SourceSnapshot string `json:"sourceSnapshot,omitempty"`
--
--	// SourceSnapshotId: The 'id' value of the snapshot used to create this
--	// disk. This value may be used to determine whether the disk was
--	// created from the current or a previous instance of a given disk
--	// snapshot.
--	SourceSnapshotId string `json:"sourceSnapshotId,omitempty"`
--
--	// Status: The status of disk creation (output only).
--	Status string `json:"status,omitempty"`
--
--	// Type: URL of the disk type resource describing which disk type to use
--	// to create the disk; provided by the client when the disk is created.
--	Type string `json:"type,omitempty"`
--
--	// Zone: URL of the zone where the disk resides (output only).
--	Zone string `json:"zone,omitempty"`
--}
--
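A minimal, hypothetical sketch of a Disk built from the fields above; names
and URLs are placeholders. Per the sizeGb comment, the size may be omitted
when a source image or snapshot is given.

// exampleDisk is a hypothetical helper; all values are placeholders.
func exampleDisk() *Disk {
	return &Disk{
		Name:        "data-disk-1", // 1-63 chars, RFC1035-compliant
		SizeGb:      100,           // optional when SourceImage/SourceSnapshot is set
		SourceImage: "https://www.googleapis.com/compute/v1/projects/p/global/images/base-image",                     // placeholder URL
		Type:        "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-a/diskTypes/pd-standard",    // placeholder URL
		Description: "example disk created from an image",
	}
}
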
--type DiskAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped disk lists.
--	Items map[string]DisksScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type DiskList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The persistent disk resources.
--	Items []*Disk `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type DiskType struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// DefaultDiskSizeGb: Server defined default disk size in GB (output
--	// only).
--	DefaultDiskSizeGb int64 `json:"defaultDiskSizeGb,omitempty,string"`
--
--	// Deprecated: The deprecation status associated with this disk type.
--	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
--
--	// Description: An optional textual description of the resource.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource.
--	Name string `json:"name,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// ValidDiskSize: An optional textual description of the valid disk
--	// size, e.g., "10GB-10TB".
--	ValidDiskSize string `json:"validDiskSize,omitempty"`
--
--	// Zone: URL of the zone where the disk type resides (output only).
--	Zone string `json:"zone,omitempty"`
--}
--
--type DiskTypeAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped disk type lists.
--	Items map[string]DiskTypesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type DiskTypeList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The disk type resources.
--	Items []*DiskType `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type DiskTypesScopedList struct {
--	// DiskTypes: List of disk types contained in this scope.
--	DiskTypes []*DiskType `json:"diskTypes,omitempty"`
--
--	// Warning: Informational warning which replaces the list of disk types
--	// when the list is empty.
--	Warning *DiskTypesScopedListWarning `json:"warning,omitempty"`
--}
--
--type DiskTypesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*DiskTypesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type DiskTypesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type DisksScopedList struct {
--	// Disks: List of disks contained in this scope.
--	Disks []*Disk `json:"disks,omitempty"`
--
--	// Warning: Informational warning which replaces the list of disks when
--	// the list is empty.
--	Warning *DisksScopedListWarning `json:"warning,omitempty"`
--}
--
--type DisksScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*DisksScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type DisksScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type Firewall struct {
--	// Allowed: The list of rules specified by this firewall. Each rule
--	// specifies a protocol and port-range tuple that describes a permitted
--	// connection.
--	Allowed []*FirewallAllowed `json:"allowed,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Network: URL of the network to which this firewall is applied;
--	// provided by the client when the firewall is created.
--	Network string `json:"network,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// SourceRanges: A list of IP address blocks expressed in CIDR format
--	// which this rule applies to. One or both of sourceRanges and
--	// sourceTags may be set; an inbound connection is allowed if either the
--	// range or the tag of the source matches.
--	SourceRanges []string `json:"sourceRanges,omitempty"`
--
--	// SourceTags: A list of instance tags which this rule applies to. One
--	// or both of sourceRanges and sourceTags may be set; an inbound
--	// connection is allowed if either the range or the tag of the source
--	// matches.
--	SourceTags []string `json:"sourceTags,omitempty"`
--
--	// TargetTags: A list of instance tags indicating sets of instances
--	// located on network which may make network connections as specified in
--	// allowed. If no targetTags are specified, the firewall rule applies to
--	// all instances on the specified network.
--	TargetTags []string `json:"targetTags,omitempty"`
--}
--
--type FirewallAllowed struct {
--	// IPProtocol: Required; this is the IP protocol that is allowed for
--	// this rule. This can either be one of the following well known
--	// protocol strings ["tcp", "udp", "icmp", "esp", "ah", "sctp"], or the
--	// IP protocol number.
--	IPProtocol string `json:"IPProtocol,omitempty"`
--
--	// Ports: An optional list of ports which are allowed. It is an error to
--	// specify this for any protocol that isn't UDP or TCP. Each entry must
--	// be either an integer or a range. If not specified, connections
--	// through any port are allowed.
--	//
--	// Example inputs include: ["22"],
--	// ["80","443"] and ["12345-12349"].
--	Ports []string `json:"ports,omitempty"`
--}
--
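To make the sourceRanges / sourceTags / targetTags semantics above concrete,
a hypothetical helper might define a rule admitting TCP 80 and 443 from
either a CIDR block or a tag; all literal values are placeholders.

// exampleFirewall is a hypothetical helper; all values are placeholders.
func exampleFirewall() *Firewall {
	return &Firewall{
		Name:    "allow-web",
		Network: "https://www.googleapis.com/compute/v1/projects/p/global/networks/default", // placeholder URL
		// An inbound connection is allowed if the source matches either the
		// range or the tag below.
		SourceRanges: []string{"10.0.0.0/8"},
		SourceTags:   []string{"frontend"},
		// Only instances carrying this tag are covered; omit TargetTags to
		// apply the rule to every instance on the network.
		TargetTags: []string{"web"},
		Allowed: []*FirewallAllowed{{
			IPProtocol: "tcp",
			Ports:      []string{"80", "443"}, // single ports or ranges like "12345-12349"
		}},
	}
}
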
--type FirewallList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The firewall resources.
--	Items []*Firewall `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type ForwardingRule struct {
--	// IPAddress: Value of the reserved IP address that this forwarding rule
--	// is serving on behalf of. For global forwarding rules, the address
--	// must be a global IP; for regional forwarding rules, the address must
--	// live in the same region as the forwarding rule. If left empty
--	// (default value), an ephemeral IP from the same scope (global or
--	// regional) will be assigned.
--	IPAddress string `json:"IPAddress,omitempty"`
--
--	// IPProtocol: The IP protocol to which this rule applies, valid options
--	// are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.
--	IPProtocol string `json:"IPProtocol,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// PortRange: Applicable only when 'IPProtocol' is 'TCP', 'UDP' or
--	// 'SCTP', only packets addressed to ports in the specified range will
--	// be forwarded to 'target'. If 'portRange' is left empty (default
--	// value), all ports are forwarded. Forwarding rules with the same
--	// [IPAddress, IPProtocol] pair must have disjoint port ranges.
--	PortRange string `json:"portRange,omitempty"`
--
--	// Region: URL of the region where the regional forwarding rule resides
--	// (output only). This field is not applicable to global forwarding
--	// rules.
--	Region string `json:"region,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Target: The URL of the target resource to receive the matched
--	// traffic. For regional forwarding rules, this target must live in the
--	// same region as the forwarding rule. For global forwarding rules, this
--	// target must be a global TargetHttpProxy resource.
--	Target string `json:"target,omitempty"`
--}
--
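A short, hypothetical sketch of a regional forwarding rule using the fields
above; names and URLs are placeholders. Leaving IPAddress empty means an
ephemeral IP in the rule's scope is assigned, as the comment notes.

// exampleForwardingRule is a hypothetical helper; values are placeholders.
func exampleForwardingRule() *ForwardingRule {
	return &ForwardingRule{
		Name:       "web-rule",
		IPProtocol: "TCP",
		PortRange:  "80-80", // only packets to these ports reach the target
		// IPAddress left empty: an ephemeral regional IP is assigned.
		// Target must live in the same region for a regional rule.
		Target: "https://www.googleapis.com/compute/v1/projects/p/regions/us-central1/targetPools/tp", // placeholder URL
	}
}
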
--type ForwardingRuleAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped forwarding rule lists.
--	Items map[string]ForwardingRulesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type ForwardingRuleList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The ForwardingRule resources.
--	Items []*ForwardingRule `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type ForwardingRulesScopedList struct {
--	// ForwardingRules: List of forwarding rules contained in this scope.
--	ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"`
--
--	// Warning: Informational warning which replaces the list of forwarding
--	// rules when the list is empty.
--	Warning *ForwardingRulesScopedListWarning `json:"warning,omitempty"`
--}
--
--type ForwardingRulesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*ForwardingRulesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type ForwardingRulesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type HealthCheckReference struct {
--	HealthCheck string `json:"healthCheck,omitempty"`
--}
--
--type HealthStatus struct {
--	// HealthState: Health state of the instance.
--	HealthState string `json:"healthState,omitempty"`
--
--	// Instance: URL of the instance resource.
--	Instance string `json:"instance,omitempty"`
--
--	// IpAddress: The IP address represented by this resource.
--	IpAddress string `json:"ipAddress,omitempty"`
--
--	// Port: The port on the instance.
--	Port int64 `json:"port,omitempty"`
--}
--
--type HostRule struct {
--	Description string `json:"description,omitempty"`
--
--	// Hosts: The list of host patterns to match. They must be valid
--	// hostnames except that they may start with *. or *-. The * acts like a
--	// glob and will match any string of atoms (separated by .s and -s) to
--	// the left.
--	Hosts []string `json:"hosts,omitempty"`
--
--	// PathMatcher: The name of the PathMatcher to match the path portion of
--	// the URL, if this HostRule matches the URL's host portion.
--	PathMatcher string `json:"pathMatcher,omitempty"`
--}
--
--type HttpHealthCheck struct {
--	// CheckIntervalSec: How often (in seconds) to send a health check. The
--	// default value is 5 seconds.
--	CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// HealthyThreshold: A so-far unhealthy VM will be marked healthy after
--	// this many consecutive successes. The default value is 2.
--	HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
--
--	// Host: The value of the host header in the HTTP health check request.
--	// If left empty (default value), the public IP on behalf of which this
--	// health check is performed will be used.
--	Host string `json:"host,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Port: The TCP port number for the HTTP health check request. The
--	// default value is 80.
--	Port int64 `json:"port,omitempty"`
--
--	// RequestPath: The request path of the HTTP health check request. The
--	// default value is "/".
--	RequestPath string `json:"requestPath,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// TimeoutSec: How long (in seconds) to wait before claiming failure.
--	// The default value is 5 seconds.
--	TimeoutSec int64 `json:"timeoutSec,omitempty"`
--
--	// UnhealthyThreshold: A so-far healthy VM will be marked unhealthy
--	// after this many consecutive failures. The default value is 2.
--	UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"`
--}
--
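The defaults documented above, spelled out in one place as a hypothetical
helper; the check name is a placeholder.

// exampleHealthCheck is a hypothetical helper that simply spells out the
// documented defaults.
func exampleHealthCheck() *HttpHealthCheck {
	return &HttpHealthCheck{
		Name:               "http-basic-check",
		Port:               80,  // default
		RequestPath:        "/", // default
		CheckIntervalSec:   5,   // default, in seconds
		TimeoutSec:         5,   // default, in seconds
		HealthyThreshold:   2,   // consecutive successes to mark a VM healthy
		UnhealthyThreshold: 2,   // consecutive failures to mark a VM unhealthy
	}
}
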
--type HttpHealthCheckList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The HttpHealthCheck resources.
--	Items []*HttpHealthCheck `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type Image struct {
--	// ArchiveSizeBytes: Size of the image tar.gz archive stored in Google
--	// Cloud Storage (in bytes).
--	ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Deprecated: The deprecation status associated with this image.
--	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
--
--	// Description: Textual description of the resource; provided by the
--	// client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// DiskSizeGb: Size of the image when restored onto a disk (in GiB).
--	DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Licenses: Public visible licenses.
--	Licenses []string `json:"licenses,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// RawDisk: The raw disk image parameters.
--	RawDisk *ImageRawDisk `json:"rawDisk,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// SourceDisk: The source disk used to create this image.
--	SourceDisk string `json:"sourceDisk,omitempty"`
--
--	// SourceDiskId: The 'id' value of the disk used to create this image.
--	// This value may be used to determine whether the image was taken from
--	// the current or a previous instance of a given disk name.
--	SourceDiskId string `json:"sourceDiskId,omitempty"`
--
--	// SourceType: Must be "RAW"; provided by the client when the disk image
--	// is created.
--	SourceType string `json:"sourceType,omitempty"`
--
--	// Status: Status of the image (output only). It will be one of the
--	// following: READY - after the image has been successfully created and
--	// is ready for use; FAILED - if creating the image fails for some
--	// reason; PENDING - the image creation is in progress. An image can be
--	// used to create other resources such as instances only after the
--	// image has been successfully created and the status is set to READY.
--	Status string `json:"status,omitempty"`
--}
--
--type ImageRawDisk struct {
--	// ContainerType: The format used to encode and transmit the block
--	// device. Should be TAR. This is just a container and transmission
--	// format and not a runtime format. Provided by the client when the disk
--	// image is created.
--	ContainerType string `json:"containerType,omitempty"`
--
--	// Sha1Checksum: An optional SHA1 checksum of the disk image before
--	// unpackaging; provided by the client when the disk image is created.
--	Sha1Checksum string `json:"sha1Checksum,omitempty"`
--
--	// Source: The full Google Cloud Storage URL where the disk image is
--	// stored; provided by the client when the disk image is created.
--	Source string `json:"source,omitempty"`
--}
--
--type ImageList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The disk image resources.
--	Items []*Image `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type Instance struct {
--	// CanIpForward: Allows this instance to send packets with source IP
--	// addresses other than its own and receive packets with destination IP
--	// addresses other than its own. If this instance will be used as an IP
--	// gateway or it will be set as the next-hop in a Route resource, say
--	// true. If unsure, leave this set to false.
--	CanIpForward bool `json:"canIpForward,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Disks: Array of disks associated with this instance. Persistent disks
--	// must be created before you can assign them.
--	Disks []*AttachedDisk `json:"disks,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// MachineType: URL of the machine type resource describing which
--	// machine type to use to host the instance; provided by the client when
--	// the instance is created.
--	MachineType string `json:"machineType,omitempty"`
--
--	// Metadata: Metadata key/value pairs assigned to this instance.
--	// Consists of custom metadata or predefined keys; see Instance
--	// documentation for more information.
--	Metadata *Metadata `json:"metadata,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// NetworkInterfaces: Array of configurations for this interface. This
--	// specifies how this interface is configured to interact with other
--	// network services, such as connecting to the internet. Currently,
--	// ONE_TO_ONE_NAT is the only access config supported. If there are no
--	// accessConfigs specified, then this instance will have no external
--	// internet access.
--	NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
--
--	// Scheduling: Scheduling options for this instance.
--	Scheduling *Scheduling `json:"scheduling,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// ServiceAccounts: A list of service accounts each with specified
--	// scopes, for which access tokens are to be made available to the
--	// instance through metadata queries.
--	ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
--
--	// Status: Instance status. One of the following values: "PROVISIONING",
--	// "STAGING", "RUNNING", "STOPPING", "STOPPED", "TERMINATED" (output
--	// only).
--	Status string `json:"status,omitempty"`
--
--	// StatusMessage: An optional, human-readable explanation of the status
--	// (output only).
--	StatusMessage string `json:"statusMessage,omitempty"`
--
--	// Tags: A list of tags to be applied to this instance. Used to identify
--	// valid sources or targets for network firewalls. Provided by the
--	// client on instance creation. The tags can be later modified by the
--	// setTags method. Each tag within the list must comply with RFC1035.
--	Tags *Tags `json:"tags,omitempty"`
--
--	// Zone: URL of the zone where the instance resides (output only).
--	Zone string `json:"zone,omitempty"`
--}
--
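A minimal, hypothetical sketch of an Instance using only fields shown above;
disks, service accounts and access configs are omitted, so (per the
NetworkInterfaces comment) the instance would have no external internet
access. Names and URLs are placeholders.

// exampleInstance is a hypothetical helper; all values are placeholders.
func exampleInstance() *Instance {
	return &Instance{
		Name:        "worker-1", // 1-63 chars, RFC1035-compliant
		MachineType: "https://www.googleapis.com/compute/v1/projects/p/zones/us-central1-a/machineTypes/n1-standard-1", // placeholder URL
		NetworkInterfaces: []*NetworkInterface{{
			Network: "https://www.googleapis.com/compute/v1/projects/p/global/networks/default", // placeholder URL
			// No AccessConfigs: the instance gets no external internet access.
		}},
		Metadata: &Metadata{
			Items: []*MetadataItems{{Key: "env", Value: "staging"}}, // placeholder
		},
	}
}
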
--type InstanceAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped instance lists.
--	Items map[string]InstancesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type InstanceList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A list of instance resources.
--	Items []*Instance `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type InstanceProperties struct {
--	// CanIpForward: Allows instances created based on this template to send
--	// packets with source IP addresses other than their own and receive
--	// packets with destination IP addresses other than their own. If these
--	// instances will be used as an IP gateway or will be set as the
--	// next-hop in a Route resource, say true. If unsure, leave this set to
--	// false.
--	CanIpForward bool `json:"canIpForward,omitempty"`
--
--	// Description: An optional textual description for the instances
--	// created based on the instance template resource; provided by the
--	// client when the template is created.
--	Description string `json:"description,omitempty"`
--
--	// Disks: Array of disks associated with instance created based on this
--	// template.
--	Disks []*AttachedDisk `json:"disks,omitempty"`
--
--	// MachineType: Name of the machine type resource describing which
--	// machine type to use to host the instances created based on this
--	// template; provided by the client when the instance template is
--	// created.
--	MachineType string `json:"machineType,omitempty"`
--
--	// Metadata: Metadata key/value pairs assigned to instances created
--	// based on this template. Consists of custom metadata or predefined
--	// keys; see Instance documentation for more information.
--	Metadata *Metadata `json:"metadata,omitempty"`
--
--	// NetworkInterfaces: Array of configurations for this interface. This
--	// specifies how this interface is configured to interact with other
--	// network services, such as connecting to the internet. Currently,
--	// ONE_TO_ONE_NAT is the only access config supported. If there are no
--	// accessConfigs specified, then instances created based on
--	// this template will have no external internet access.
--	NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
--
--	// Scheduling: Scheduling options for the instances created based on
--	// this template.
--	Scheduling *Scheduling `json:"scheduling,omitempty"`
--
--	// ServiceAccounts: A list of service accounts each with specified
--	// scopes, for which access tokens are to be made available to the
--	// instances created based on this template, through metadata queries.
--	ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
--
--	// Tags: A list of tags to be applied to the instances created based on
--	// this template used to identify valid sources or targets for network
--	// firewalls. Provided by the client on instance creation. The tags can
--	// be later modified by the setTags method. Each tag within the list
--	// must comply with RFC1035.
--	Tags *Tags `json:"tags,omitempty"`
--}
--
--type InstanceReference struct {
--	Instance string `json:"instance,omitempty"`
--}
--
--type InstanceTemplate struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the instance template
--	// resource; provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the instance template resource; provided by the client
--	// when the resource is created. The name must be 1-63 characters long,
--	// and comply with RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Properties: The instance properties portion of this instance template
--	// resource.
--	Properties *InstanceProperties `json:"properties,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type InstanceTemplateList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A list of instance template resources.
--	Items []*InstanceTemplate `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type InstancesScopedList struct {
--	// Instances: List of instances contained in this scope.
--	Instances []*Instance `json:"instances,omitempty"`
--
--	// Warning: Informational warning which replaces the list of instances
--	// when the list is empty.
--	Warning *InstancesScopedListWarning `json:"warning,omitempty"`
--}
--
--type InstancesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*InstancesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type InstancesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type License struct {
--	// ChargesUseFee: If true, the customer will be charged license fee for
--	// running software that contains this license on an instance.
--	ChargesUseFee bool `json:"chargesUseFee,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type MachineType struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Deprecated: The deprecation status associated with this machine type.
--	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
--
--	// Description: An optional textual description of the resource.
--	Description string `json:"description,omitempty"`
--
--	// GuestCpus: Count of CPUs exposed to the instance.
--	GuestCpus int64 `json:"guestCpus,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// ImageSpaceGb: Space allotted for the image, defined in GB.
--	ImageSpaceGb int64 `json:"imageSpaceGb,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// MaximumPersistentDisks: Maximum persistent disks allowed.
--	MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"`
--
--	// MaximumPersistentDisksSizeGb: Maximum total persistent disks size
--	// (GB) allowed.
--	MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"`
--
--	// MemoryMb: Physical memory assigned to the instance, defined in MB.
--	MemoryMb int64 `json:"memoryMb,omitempty"`
--
--	// Name: Name of the resource.
--	Name string `json:"name,omitempty"`
--
--	// ScratchDisks: List of extended scratch disks assigned to the
--	// instance.
--	ScratchDisks []*MachineTypeScratchDisks `json:"scratchDisks,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Zone: URL of the zone where the machine type resides (output only).
--	Zone string `json:"zone,omitempty"`
--}
--
--type MachineTypeScratchDisks struct {
--	// DiskGb: Size of the scratch disk, defined in GB.
--	DiskGb int64 `json:"diskGb,omitempty"`
--}
--
--type MachineTypeAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped machine type lists.
--	Items map[string]MachineTypesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type MachineTypeList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The machine type resources.
--	Items []*MachineType `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type MachineTypesScopedList struct {
--	// MachineTypes: List of machine types contained in this scope.
--	MachineTypes []*MachineType `json:"machineTypes,omitempty"`
--
--	// Warning: Informational warning which replaces the list of machine
--	// types when the list is empty.
--	Warning *MachineTypesScopedListWarning `json:"warning,omitempty"`
--}
--
--type MachineTypesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*MachineTypesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type MachineTypesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type Metadata struct {
--	// Fingerprint: Fingerprint of this resource. A hash of the metadata's
--	// contents. This field is used for optimistic locking. An up-to-date
--	// metadata fingerprint must be provided in order to modify metadata.
--	Fingerprint string `json:"fingerprint,omitempty"`
--
--	// Items: Array of key/value pairs. The total size of all keys and
--	// values must be less than 512 KB.
--	Items []*MetadataItems `json:"items,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--}
--
--type MetadataItems struct {
--	// Key: Key for the metadata entry. Keys must conform to the following
--	// regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is
--	// reflected as part of a URL in the metadata server. Additionally, to
--	// avoid ambiguity, keys must not conflict with any other metadata keys
--	// for the project.
--	Key string `json:"key,omitempty"`
--
--	// Value: Value for the metadata entry. These are free-form strings, and
--	// only have meaning as interpreted by the image running in the
--	// instance. The only restriction placed on values is that their size
--	// must be less than or equal to 32768 bytes.
--	Value string `json:"value,omitempty"`
--}
--
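The fingerprint field above implements optimistic locking: an update must
echo the server's current fingerprint to be accepted. A hypothetical helper
might build the replacement Metadata like this; the key and value are
placeholders.

// withUpdatedItem is a hypothetical helper: it builds a replacement Metadata
// carrying the current fingerprint (required for updates) plus one new item.
func withUpdatedItem(current *Metadata, key, value string) *Metadata {
	return &Metadata{
		// Echo the server's fingerprint so the update is accepted only if
		// the metadata has not changed in the meantime (optimistic locking).
		Fingerprint: current.Fingerprint,
		Items:       append(current.Items, &MetadataItems{Key: key, Value: value}),
	}
}
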
--type Network struct {
--	// IPv4Range: Required; The range of internal addresses that are legal
--	// on this network. This range is a CIDR specification, for example:
--	// 192.168.0.0/16. Provided by the client when the network is created.
--	IPv4Range string `json:"IPv4Range,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// GatewayIPv4: An optional address that is used for default routing to
--	// other networks. This must be within the range specified by IPv4Range,
--	// and is typically the first usable address in that range. If not
--	// specified, the default value is the first usable address in
--	// IPv4Range.
--	GatewayIPv4 string `json:"gatewayIPv4,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type NetworkInterface struct {
--	// AccessConfigs: Array of configurations for this interface. This
--	// specifies how this interface is configured to interact with other
--	// network services, such as connecting to the internet. Currently,
--	// ONE_TO_ONE_NAT is the only access config supported. If there are no
--	// accessConfigs specified, then this instance will have no external
--	// internet access.
--	AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"`
--
--	// Name: Name of the network interface, determined by the server; for
--	// network devices, these are e.g. eth0, eth1, etc. (output only).
--	Name string `json:"name,omitempty"`
--
--	// Network: URL of the network resource attached to this interface.
--	Network string `json:"network,omitempty"`
--
--	// NetworkIP: An optional IPV4 internal network address assigned to the
--	// instance for this network interface (output only).
--	NetworkIP string `json:"networkIP,omitempty"`
--}
--
--type NetworkList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The network resources.
--	Items []*Network `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type Operation struct {
--	// ClientOperationId: An optional identifier specified by the client
--	// when the mutation was initiated. Must be unique for all operation
--	// resources in the project (output only).
--	ClientOperationId string `json:"clientOperationId,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// EndTime: The time that this operation was completed. This is in RFC
--	// 3339 format (output only).
--	EndTime string `json:"endTime,omitempty"`
--
--	// Error: If errors occurred during processing of this operation, this
--	// field will be populated (output only).
--	Error *OperationError `json:"error,omitempty"`
--
--	// HttpErrorMessage: If operation fails, the HTTP error message
--	// returned, e.g. NOT FOUND. (output only).
--	HttpErrorMessage string `json:"httpErrorMessage,omitempty"`
--
--	// HttpErrorStatusCode: If operation fails, the HTTP error status code
--	// returned, e.g. 404. (output only).
--	HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// InsertTime: The time that this operation was requested. This is in
--	// RFC 3339 format (output only).
--	InsertTime string `json:"insertTime,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource (output only).
--	Name string `json:"name,omitempty"`
--
--	// OperationType: Type of the operation. Examples include "insert",
--	// "update", and "delete" (output only).
--	OperationType string `json:"operationType,omitempty"`
--
--	// Progress: An optional progress indicator that ranges from 0 to 100.
--	// There is no requirement that this be linear or support any
--	// granularity of operations. This should not be used to guess at when
--	// the operation will be complete. This number should be monotonically
--	// increasing as the operation progresses (output only).
--	Progress int64 `json:"progress,omitempty"`
--
--	// Region: URL of the region where the operation resides (output only).
--	Region string `json:"region,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// StartTime: The time that this operation was started by the server.
--	// This is in RFC 3339 format (output only).
--	StartTime string `json:"startTime,omitempty"`
--
--	// Status: Status of the operation. Can be one of the following:
--	// "PENDING", "RUNNING", or "DONE" (output only).
--	Status string `json:"status,omitempty"`
--
--	// StatusMessage: An optional textual description of the current status
--	// of the operation (output only).
--	StatusMessage string `json:"statusMessage,omitempty"`
--
--	// TargetId: Unique target id which identifies a particular incarnation
--	// of the target (output only).
--	TargetId uint64 `json:"targetId,omitempty,string"`
--
--	// TargetLink: URL of the resource the operation is mutating (output
--	// only).
--	TargetLink string `json:"targetLink,omitempty"`
--
--	// User: User who requested the operation, for example
--	// "user@example.com" (output only).
--	User string `json:"user,omitempty"`
--
--	// Warnings: If warning messages were generated during processing of
--	// this operation, this field will be populated (output only).
--	Warnings []*OperationWarnings `json:"warnings,omitempty"`
--
--	// Zone: URL of the zone where the operation resides (output only).
--	Zone string `json:"zone,omitempty"`
--}
--
--type OperationError struct {
--	// Errors: The array of errors encountered while processing this
--	// operation.
--	Errors []*OperationErrorErrors `json:"errors,omitempty"`
--}
--
--type OperationErrorErrors struct {
--	// Code: The error type identifier for this error.
--	Code string `json:"code,omitempty"`
--
--	// Location: Indicates the field in the request which caused the error.
--	// This property is optional.
--	Location string `json:"location,omitempty"`
--
--	// Message: An optional, human-readable error message.
--	Message string `json:"message,omitempty"`
--}
--
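Given the Status and Error fields described above, a caller polling an
Operation typically treats "PENDING" and "RUNNING" as keep-waiting states and
"DONE" as terminal, then inspects Error. A hypothetical helper:

// operationDone is a hypothetical helper: it reports whether the operation
// has reached the DONE state and, if so, surfaces the first recorded error.
func operationDone(op *Operation) (done bool, firstErr *OperationErrorErrors) {
	if op.Status != "DONE" { // "PENDING" or "RUNNING": keep polling
		return false, nil
	}
	if op.Error != nil && len(op.Error.Errors) > 0 {
		return true, op.Error.Errors[0]
	}
	return true, nil // DONE with no errors: the mutation succeeded
}
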
--type OperationWarnings struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*OperationWarningsData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type OperationWarningsData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type OperationAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped operation lists.
--	Items map[string]OperationsScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type OperationList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The operation resources.
--	Items []*Operation `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type OperationsScopedList struct {
--	// Operations: List of operations contained in this scope.
--	Operations []*Operation `json:"operations,omitempty"`
--
--	// Warning: Informational warning which replaces the list of operations
--	// when the list is empty.
--	Warning *OperationsScopedListWarning `json:"warning,omitempty"`
--}
--
--type OperationsScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*OperationsScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type OperationsScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type PathMatcher struct {
--	// DefaultService: The URL to the BackendService resource. This will be
--	// used if none of the 'pathRules' defined by this PathMatcher is met by
--	// the URL's path portion.
--	DefaultService string `json:"defaultService,omitempty"`
--
--	Description string `json:"description,omitempty"`
--
--	// Name: The name to which this PathMatcher is referred by the HostRule.
--	Name string `json:"name,omitempty"`
--
--	// PathRules: The list of path rules.
--	PathRules []*PathRule `json:"pathRules,omitempty"`
--}
--
--type PathRule struct {
--	// Paths: The list of path patterns to match. Each must start with / and
--	// the only place a * is allowed is at the end following a /. The string
--	// fed to the path matcher does not include any text after the first ?
--	// or #, and those chars are not allowed here.
--	Paths []string `json:"paths,omitempty"`
--
--	// Service: The URL of the BackendService resource if this rule is
--	// matched.
--	Service string `json:"service,omitempty"`
--}
--
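How a HostRule hands matching requests to a PathMatcher, as a hypothetical
sketch; host patterns, matcher names and backend-service URLs are
placeholders.

// exampleRouting is a hypothetical helper pairing a HostRule with the
// PathMatcher it names; URLs whose path does not hit a pathRule fall back to
// DefaultService.
func exampleRouting() (*HostRule, *PathMatcher) {
	host := &HostRule{
		Hosts:       []string{"*.example.com"}, // leading * globs whole atoms
		PathMatcher: "example-paths",
	}
	matcher := &PathMatcher{
		Name:           "example-paths",
		DefaultService: "https://www.googleapis.com/compute/v1/projects/p/global/backendServices/default", // placeholder URL
		PathRules: []*PathRule{{
			Paths:   []string{"/static/*"}, // must start with /; * only at the end after /
			Service: "https://www.googleapis.com/compute/v1/projects/p/global/backendServices/static", // placeholder URL
		}},
	}
	return host, matcher
}
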
--type Project struct {
--	// CommonInstanceMetadata: Metadata key/value pairs available to all
--	// instances contained in this project.
--	CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource.
--	Name string `json:"name,omitempty"`
--
--	// Quotas: Quotas assigned to this project.
--	Quotas []*Quota `json:"quotas,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// UsageExportLocation: The location in Cloud Storage and naming method
--	// of the daily usage report.
--	UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"`
--}
--
--type Quota struct {
--	// Limit: Quota limit for this metric.
--	Limit float64 `json:"limit,omitempty"`
--
--	// Metric: Name of the quota metric.
--	Metric string `json:"metric,omitempty"`
--
--	// Usage: Current usage of this metric.
--	Usage float64 `json:"usage,omitempty"`
--}
--
--type Region struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Deprecated: The deprecation status associated with this region.
--	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
--
--	// Description: Textual description of the resource.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource.
--	Name string `json:"name,omitempty"`
--
--	// Quotas: Quotas assigned to this region.
--	Quotas []*Quota `json:"quotas,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Status: Status of the region, "UP" or "DOWN".
--	Status string `json:"status,omitempty"`
--
--	// Zones: A list of zones homed in this region, in the form of resource
--	// URLs.
--	Zones []string `json:"zones,omitempty"`
--}
--
--type RegionList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The region resources.
--	Items []*Region `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type ResourceGroupReference struct {
--	// Group: A URI referencing one of the resource views listed in the
--	// backend service.
--	Group string `json:"group,omitempty"`
--}
--
--type Route struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// DestRange: The destination range of outgoing packets that this
--	// route applies to.
--	DestRange string `json:"destRange,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Network: URL of the network to which this route is applied; provided
--	// by the client when the route is created.
--	Network string `json:"network,omitempty"`
--
--	// NextHopGateway: The URL to a gateway that should handle matching
--	// packets.
--	NextHopGateway string `json:"nextHopGateway,omitempty"`
--
--	// NextHopInstance: The URL to an instance that should handle matching
--	// packets.
--	NextHopInstance string `json:"nextHopInstance,omitempty"`
--
--	// NextHopIp: The network IP address of an instance that should handle
--	// matching packets.
--	NextHopIp string `json:"nextHopIp,omitempty"`
--
--	// NextHopNetwork: The URL of the local network if it should handle
--	// matching packets.
--	NextHopNetwork string `json:"nextHopNetwork,omitempty"`
--
--	// Priority: Breaks ties between Routes of equal specificity. Routes
--	// with smaller values win when tied with routes with larger values.
--	Priority int64 `json:"priority,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Tags: A list of instance tags to which this route applies.
--	Tags []string `json:"tags,omitempty"`
--
--	// Warnings: If potential misconfigurations are detected for this route,
--	// this field will be populated with warning messages.
--	Warnings []*RouteWarnings `json:"warnings,omitempty"`
--}
--
--type RouteWarnings struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*RouteWarningsData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type RouteWarningsData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type RouteList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The route resources.
--	Items []*Route `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type Scheduling struct {
--	// AutomaticRestart: Whether the Instance should be automatically
--	// restarted whenever it is terminated by Compute Engine (not terminated
--	// by user).
--	AutomaticRestart bool `json:"automaticRestart,omitempty"`
--
--	// OnHostMaintenance: How the instance should behave when the host
--	// machine undergoes maintenance that may temporarily impact instance
--	// performance.
--	OnHostMaintenance string `json:"onHostMaintenance,omitempty"`
--}
--
--type SerialPortOutput struct {
--	// Contents: The contents of the console output.
--	Contents string `json:"contents,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type ServiceAccount struct {
--	// Email: Email address of the service account.
--	Email string `json:"email,omitempty"`
--
--	// Scopes: The list of scopes to be made available for this service
--	// account.
--	Scopes []string `json:"scopes,omitempty"`
--}
--
--type Snapshot struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// DiskSizeGb: Size of the persistent disk snapshot, specified in GB
--	// (output only).
--	DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Licenses: Public visible licenses.
--	Licenses []string `json:"licenses,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// SourceDisk: The source disk used to create this snapshot.
--	SourceDisk string `json:"sourceDisk,omitempty"`
--
--	// SourceDiskId: The 'id' value of the disk used to create this
--	// snapshot. This value may be used to determine whether the snapshot
--	// was taken from the current or a previous instance of a given disk
--	// name.
--	SourceDiskId string `json:"sourceDiskId,omitempty"`
--
--	// Status: The status of the persistent disk snapshot (output only).
--	Status string `json:"status,omitempty"`
--
--	// StorageBytes: The size of the storage used by the snapshot. As
--	// snapshots share storage, this number is expected to change with
--	// snapshot creation/deletion.
--	StorageBytes int64 `json:"storageBytes,omitempty,string"`
--
--	// StorageBytesStatus: An indicator of whether storageBytes is in a
--	// stable state or is being adjusted as a result of shared storage
--	// reallocation.
--	StorageBytesStatus string `json:"storageBytesStatus,omitempty"`
--}
--
--type SnapshotList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The persistent snapshot resources.
--	Items []*Snapshot `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type Tags struct {
--	// Fingerprint: Fingerprint of this resource. A hash of the tags stored
--	// in this object. This field is used for optimistic locking. An
--	// up-to-date tags fingerprint must be provided in order to modify tags.
--	Fingerprint string `json:"fingerprint,omitempty"`
--
--	// Items: An array of tags. Each tag must be 1-63 characters long, and
--	// comply with RFC1035.
--	Items []string `json:"items,omitempty"`
--}
--
--type TargetHttpProxy struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// UrlMap: URL to the UrlMap resource that defines the mapping from URL
--	// to the BackendService.
--	UrlMap string `json:"urlMap,omitempty"`
--}
--
--type TargetHttpProxyList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The TargetHttpProxy resources.
--	Items []*TargetHttpProxy `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type TargetInstance struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Instance: The URL to the instance that terminates the relevant
--	// traffic.
--	Instance string `json:"instance,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// NatPolicy: NAT option controlling how IPs are NAT'ed to the VM.
--	// Currently only NO_NAT (default value) is supported.
--	NatPolicy string `json:"natPolicy,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Zone: URL of the zone where the target instance resides (output
--	// only).
--	Zone string `json:"zone,omitempty"`
--}
--
--type TargetInstanceAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped target instance lists.
--	Items map[string]TargetInstancesScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type TargetInstanceList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The TargetInstance resources.
--	Items []*TargetInstance `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type TargetInstancesScopedList struct {
--	// TargetInstances: List of target instances contained in this scope.
--	TargetInstances []*TargetInstance `json:"targetInstances,omitempty"`
--
--	// Warning: Informational warning which replaces the list of target
--	// instances when the list is empty.
--	Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"`
--}
--
--type TargetInstancesScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type TargetInstancesScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type TargetPool struct {
--	// BackupPool: This field is applicable only when the containing target
--	// pool is serving a forwarding rule as the primary pool, and its
--	// 'failoverRatio' field is properly set to a value between [0,
--	// 1].
--	//
--	// 'backupPool' and 'failoverRatio' together define the fallback
--	// behavior of the primary target pool: if the ratio of the healthy VMs
--	// in the primary pool is at or below 'failoverRatio', traffic arriving
--	// at the load-balanced IP will be directed to the backup pool.
--	//
--	// In the case where 'failoverRatio' and 'backupPool' are not set, or
--	// all the VMs in the backup pool are unhealthy, the traffic will be
--	// directed back to the primary pool in "force" mode, where traffic is
--	// spread to the healthy VMs on a best-effort basis, or to all VMs when
--	// no VM is healthy.
--	BackupPool string `json:"backupPool,omitempty"`
--
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// FailoverRatio: This field is applicable only when the containing
--	// target pool is serving a forwarding rule as the primary pool (i.e.,
--	// not as a backup pool to some other target pool). The value of the
--	// field must be in [0, 1].
--	//
--	// If set, 'backupPool' must also be set. They
--	// together define the fallback behavior of the primary target pool: if
--	// the ratio of the healthy VMs in the primary pool is at or below this
--	// number, traffic arriving at the load-balanced IP will be directed to
--	// the backup pool.
--	//
--	// In the case where 'failoverRatio' is not set or all the VMs in the
--	// backup pool are unhealthy, the traffic will be directed back to the
--	// primary pool in "force" mode, where traffic is spread to the healthy
--	// VMs on a best-effort basis, or to all VMs when no VM is healthy.
--	FailoverRatio float64 `json:"failoverRatio,omitempty"`
--
--	// HealthChecks: A list of URLs to the HttpHealthCheck resource. A
--	// member VM in this pool is considered healthy if and only if all
--	// specified health checks pass. An empty list means all member VMs will
--	// be considered healthy at all times.
--	HealthChecks []string `json:"healthChecks,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Instances: A list of resource URLs to the member VMs serving this
--	// pool. They must live in zones contained in the same region as this
--	// pool.
--	Instances []string `json:"instances,omitempty"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// Region: URL of the region where the target pool resides (output
--	// only).
--	Region string `json:"region,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// SessionAffinity: Session affinity option, must be one of the
--	// following values: 'NONE': Connections from the same client IP may go
--	// to any VM in the pool; 'CLIENT_IP': Connections from the same client
--	// IP will go to the same VM in the pool while that VM remains healthy.
--	// 'CLIENT_IP_PROTO': Connections from the same client IP with the same
--	// IP protocol will go to the same VM in the pool while that VM remains
--	// healthy.
--	SessionAffinity string `json:"sessionAffinity,omitempty"`
--}
--
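As a rough illustration of the backupPool/failoverRatio behaviour described above, a primary pool might be configured like this (a sketch with placeholder names and URLs; only fields defined in this struct are used):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Placeholder values: when fewer than 30% of the primary pool's VMs are
	// healthy, traffic arriving at the load-balanced IP goes to the backup
	// pool instead.
	primary := &compute.TargetPool{
		Name:            "web-primary",
		HealthChecks:    []string{"global/httpHealthChecks/web-check"},
		Instances:       []string{"zones/us-central1-a/instances/web-1"},
		BackupPool:      "regions/us-central1/targetPools/web-backup",
		FailoverRatio:   0.3,
		SessionAffinity: "CLIENT_IP",
	}
	fmt.Printf("%s fails over below %.0f%% healthy\n", primary.Name, primary.FailoverRatio*100)
}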
--type TargetPoolAggregatedList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: A map of scoped target pool lists.
--	Items map[string]TargetPoolsScopedList `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type TargetPoolInstanceHealth struct {
--	HealthStatus []*HealthStatus `json:"healthStatus,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--}
--
--type TargetPoolList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The TargetPool resources.
--	Items []*TargetPool `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type TargetPoolsAddHealthCheckRequest struct {
--	// HealthChecks: Health check URLs to be added to targetPool.
--	HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"`
--}
--
--type TargetPoolsAddInstanceRequest struct {
--	// Instances: URLs of the instances to be added to targetPool.
--	Instances []*InstanceReference `json:"instances,omitempty"`
--}
--
--type TargetPoolsRemoveHealthCheckRequest struct {
--	// HealthChecks: Health check URLs to be removed from targetPool.
--	HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"`
--}
--
--type TargetPoolsRemoveInstanceRequest struct {
--	// Instances: URLs of the instances to be removed from targetPool.
--	Instances []*InstanceReference `json:"instances,omitempty"`
--}
--
--type TargetPoolsScopedList struct {
--	// TargetPools: List of target pools contained in this scope.
--	TargetPools []*TargetPool `json:"targetPools,omitempty"`
--
--	// Warning: Informational warning which replaces the list of target
--	// pools when the list is empty.
--	Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"`
--}
--
--type TargetPoolsScopedListWarning struct {
--	// Code: The warning type identifier for this warning.
--	Code string `json:"code,omitempty"`
--
--	// Data: Metadata for this warning in 'key: value' format.
--	Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"`
--
--	// Message: Optional human-readable details for this warning.
--	Message string `json:"message,omitempty"`
--}
--
--type TargetPoolsScopedListWarningData struct {
--	// Key: A key for the warning data.
--	Key string `json:"key,omitempty"`
--
--	// Value: A warning data value corresponding to the key.
--	Value string `json:"value,omitempty"`
--}
--
--type TargetReference struct {
--	Target string `json:"target,omitempty"`
--}
--
--type TestFailure struct {
--	ActualService string `json:"actualService,omitempty"`
--
--	ExpectedService string `json:"expectedService,omitempty"`
--
--	Host string `json:"host,omitempty"`
--
--	Path string `json:"path,omitempty"`
--}
--
--type UrlMap struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// DefaultService: The URL of the BackendService resource if none of the
--	// hostRules match.
--	DefaultService string `json:"defaultService,omitempty"`
--
--	// Description: An optional textual description of the resource;
--	// provided by the client when the resource is created.
--	Description string `json:"description,omitempty"`
--
--	// Fingerprint: Fingerprint of this resource. A hash of the contents
--	// stored in this object. This field is used in optimistic locking. This
--	// field will be ignored when inserting a UrlMap. An up-to-date
--	// fingerprint must be provided in order to update the UrlMap.
--	Fingerprint string `json:"fingerprint,omitempty"`
--
--	// HostRules: The list of HostRules to use against the URL.
--	HostRules []*HostRule `json:"hostRules,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// Name: Name of the resource; provided by the client when the resource
--	// is created. The name must be 1-63 characters long, and comply with
--	// RFC1035.
--	Name string `json:"name,omitempty"`
--
--	// PathMatchers: The list of named PathMatchers to use against the URL.
--	PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Tests: The list of expected URL mappings. A request to update this
--	// UrlMap will succeed only if all of the test cases pass.
--	Tests []*UrlMapTest `json:"tests,omitempty"`
--}
--
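A sketch of how a UrlMap ties these pieces together, including a test case that an update must satisfy (names and service URLs are placeholders; the HostRule fields Hosts and PathMatcher are assumed from the same generated package):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	um := &compute.UrlMap{
		Name:           "web-map",
		DefaultService: "global/backendServices/default-backend",
		HostRules: []*compute.HostRule{{
			Hosts:       []string{"example.com"},
			PathMatcher: "static-matcher",
		}},
		PathMatchers: []*compute.PathMatcher{{
			Name: "static-matcher",
			PathRules: []*compute.PathRule{{
				Paths:   []string{"/static/*"},
				Service: "global/backendServices/static-backend",
			}},
		}},
		// An update succeeds only if every test resolves to its expected service.
		Tests: []*compute.UrlMapTest{{
			Host:    "example.com",
			Path:    "/static/logo.png",
			Service: "global/backendServices/static-backend",
		}},
	}
	fmt.Println(um.Name, len(um.Tests))
}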
--type UrlMapList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The UrlMap resources.
--	Items []*UrlMap `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--type UrlMapReference struct {
--	UrlMap string `json:"urlMap,omitempty"`
--}
--
--type UrlMapTest struct {
--	// Description: Description of this test case.
--	Description string `json:"description,omitempty"`
--
--	// Host: Host portion of the URL.
--	Host string `json:"host,omitempty"`
--
--	// Path: Path portion of the URL.
--	Path string `json:"path,omitempty"`
--
--	// Service: Expected BackendService resource the given URL should be
--	// mapped to.
--	Service string `json:"service,omitempty"`
--}
--
--type UrlMapValidationResult struct {
--	LoadErrors []string `json:"loadErrors,omitempty"`
--
--	// LoadSucceeded: Whether the given UrlMap can be successfully loaded.
--	// If false, 'loadErrors' indicates the reasons.
--	LoadSucceeded bool `json:"loadSucceeded,omitempty"`
--
--	TestFailures []*TestFailure `json:"testFailures,omitempty"`
--
--	// TestPassed: If successfully loaded, this field indicates whether the
--	// tests passed. If false, 'testFailures' indicates the reasons for
--	// failure.
--	TestPassed bool `json:"testPassed,omitempty"`
--}
--
--type UrlMapsValidateRequest struct {
--	// Resource: Content of the UrlMap to be validated.
--	Resource *UrlMap `json:"resource,omitempty"`
--}
--
--type UrlMapsValidateResponse struct {
--	Result *UrlMapValidationResult `json:"result,omitempty"`
--}
--
--type UsageExportLocation struct {
--	// BucketName: The name of an existing bucket in Cloud Storage where the
--	// usage report object is stored. The Google Service Account is granted
--	// write access to this bucket. This is simply the bucket name, with no
--	// "gs://" or "https://storage.googleapis.com/" in front of it.
--	BucketName string `json:"bucketName,omitempty"`
--
--	// ReportNamePrefix: An optional prefix for the name of the usage report
--	// object stored in bucket_name. If not supplied, defaults to "usage_".
--	// The report is stored as a CSV file named _gce_.csv. where  is the day
--	// of the usage according to Pacific Time. The prefix should conform to
--	// Cloud Storage object naming conventions.
--	ReportNamePrefix string `json:"reportNamePrefix,omitempty"`
--}
--
--type Zone struct {
--	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
--	// only).
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Deprecated: The deprecation status associated with this zone.
--	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
--
--	// Description: Textual description of the resource.
--	Description string `json:"description,omitempty"`
--
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id uint64 `json:"id,omitempty,string"`
--
--	// Kind: Type of the resource.
--	Kind string `json:"kind,omitempty"`
--
--	// MaintenanceWindows: Scheduled maintenance windows for the zone. When
--	// the zone is in a maintenance window, all resources which reside in
--	// the zone will be unavailable.
--	MaintenanceWindows []*ZoneMaintenanceWindows `json:"maintenanceWindows,omitempty"`
--
--	// Name: Name of the resource.
--	Name string `json:"name,omitempty"`
--
--	// Region: Full URL reference to the region which hosts the zone (output
--	// only).
--	Region string `json:"region,omitempty"`
--
--	// SelfLink: Server defined URL for the resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--
--	// Status: Status of the zone. "UP" or "DOWN".
--	Status string `json:"status,omitempty"`
--}
--
--type ZoneMaintenanceWindows struct {
--	// BeginTime: Begin time of the maintenance window, in RFC 3339 format.
--	BeginTime string `json:"beginTime,omitempty"`
--
--	// Description: Textual description of the maintenance window.
--	Description string `json:"description,omitempty"`
--
--	// EndTime: End time of the maintenance window, in RFC 3339 format.
--	EndTime string `json:"endTime,omitempty"`
--
--	// Name: Name of the maintenance window.
--	Name string `json:"name,omitempty"`
--}
--
--type ZoneList struct {
--	// Id: Unique identifier for the resource; defined by the server (output
--	// only).
--	Id string `json:"id,omitempty"`
--
--	// Items: The zone resources.
--	Items []*Zone `json:"items,omitempty"`
--
--	// Kind: Type of resource.
--	Kind string `json:"kind,omitempty"`
--
--	// NextPageToken: A token used to continue a truncated list request
--	// (output only).
--	NextPageToken string `json:"nextPageToken,omitempty"`
--
--	// SelfLink: Server defined URL for this resource (output only).
--	SelfLink string `json:"selfLink,omitempty"`
--}
--
--// method id "compute.addresses.aggregatedList":
--
--type AddressesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of addresses grouped by scope.
--func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall {
--	c := &AddressesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *AddressesAggregatedListCall) Do() (*AddressAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *AddressAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of addresses grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.addresses.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/addresses",
--	//   "response": {
--	//     "$ref": "AddressAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
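A hedged usage sketch for this call pattern, chaining the optional parameters before Do() (the generated compute.New constructor is assumed, the *http.Client must already be authorized, and the project name and filter expression are illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// http.DefaultClient stands in for an OAuth2-authorized client here.
	svc, err := compute.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	list, err := svc.Addresses.AggregatedList("my-project").
		Filter("status eq RESERVED"). // illustrative filter expression
		MaxResults(100).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for scope := range list.Items {
		fmt.Println("scope:", scope)
	}
}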
--// method id "compute.addresses.delete":
--
--type AddressesDeleteCall struct {
--	s       *Service
--	project string
--	region  string
--	address string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified address resource.
--func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall {
--	c := &AddressesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *AddressesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--		"address": c.address,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified address resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.addresses.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "address"
--	//   ],
--	//   "parameters": {
--	//     "address": {
--	//       "description": "Name of the address resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/addresses/{address}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.addresses.get":
--
--type AddressesGetCall struct {
--	s       *Service
--	project string
--	region  string
--	address string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified address resource.
--func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall {
--	c := &AddressesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *AddressesGetCall) Do() (*Address, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--		"address": c.address,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Address
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified address resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.addresses.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "address"
--	//   ],
--	//   "parameters": {
--	//     "address": {
--	//       "description": "Name of the address resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/addresses/{address}",
--	//   "response": {
--	//     "$ref": "Address"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.addresses.insert":
--
--type AddressesInsertCall struct {
--	s       *Service
--	project string
--	region  string
--	address *Address
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates an address resource in the specified project using
--// the data included in the request.
--func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall {
--	c := &AddressesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *AddressesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates an address resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.addresses.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/addresses",
--	//   "request": {
--	//     "$ref": "Address"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
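For completeness, a sketch of driving the insert call above (same assumption of an authorized client; the project, region, and address name are placeholders, and the returned Operation is only printed, not polled):

package main

import (
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // assumes an authorized client
	if err != nil {
		log.Fatal(err)
	}
	addr := &compute.Address{Name: "my-static-ip"} // placeholder resource
	op, err := svc.Addresses.Insert("my-project", "us-central1", addr).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Insert returns a long-running Operation; a real caller would poll it.
	log.Printf("operation %s: %s", op.Name, op.Status)
}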
--// method id "compute.addresses.list":
--
--type AddressesListCall struct {
--	s       *Service
--	project string
--	region  string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of address resources contained within the
--// specified region.
--func (r *AddressesService) List(project string, region string) *AddressesListCall {
--	c := &AddressesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *AddressesListCall) Filter(filter string) *AddressesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *AddressesListCall) Do() (*AddressList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *AddressList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of address resources contained within the specified region.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.addresses.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/addresses",
--	//   "response": {
--	//     "$ref": "AddressList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
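Because list responses are truncated at maxResults, callers typically loop on NextPageToken; a sketch under the same assumptions as above:

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // assumes an authorized client
	if err != nil {
		log.Fatal(err)
	}
	token := ""
	for {
		call := svc.Addresses.List("my-project", "us-central1").MaxResults(500)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, a := range page.Items {
			fmt.Println(a.Name)
		}
		if page.NextPageToken == "" {
			break
		}
		token = page.NextPageToken
	}
}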
--// method id "compute.backendServices.delete":
--
--type BackendServicesDeleteCall struct {
--	s              *Service
--	project        string
--	backendService string
--	opt_           map[string]interface{}
--}
--
--// Delete: Deletes the specified BackendService resource.
--func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall {
--	c := &BackendServicesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendService = backendService
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"backendService": c.backendService,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified BackendService resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.backendServices.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "backendService"
--	//   ],
--	//   "parameters": {
--	//     "backendService": {
--	//       "description": "Name of the BackendService resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices/{backendService}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.backendServices.get":
--
--type BackendServicesGetCall struct {
--	s              *Service
--	project        string
--	backendService string
--	opt_           map[string]interface{}
--}
--
--// Get: Returns the specified BackendService resource.
--func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall {
--	c := &BackendServicesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendService = backendService
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesGetCall) Do() (*BackendService, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"backendService": c.backendService,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *BackendService
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified BackendService resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.backendServices.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "backendService"
--	//   ],
--	//   "parameters": {
--	//     "backendService": {
--	//       "description": "Name of the BackendService resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices/{backendService}",
--	//   "response": {
--	//     "$ref": "BackendService"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.backendServices.getHealth":
--
--type BackendServicesGetHealthCall struct {
--	s                      *Service
--	project                string
--	backendService         string
--	resourcegroupreference *ResourceGroupReference
--	opt_                   map[string]interface{}
--}
--
--// GetHealth: Gets the most recent health check results for this
--// BackendService.
--func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall {
--	c := &BackendServicesGetHealthCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendService = backendService
--	c.resourcegroupreference = resourcegroupreference
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesGetHealthCall) Do() (*BackendServiceGroupHealth, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"backendService": c.backendService,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *BackendServiceGroupHealth
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Gets the most recent health check results for this BackendService.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.backendServices.getHealth",
--	//   "parameterOrder": [
--	//     "project",
--	//     "backendService"
--	//   ],
--	//   "parameters": {
--	//     "backendService": {
--	//       "description": "Name of the BackendService resource to which the queried instance belongs.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices/{backendService}/getHealth",
--	//   "request": {
--	//     "$ref": "ResourceGroupReference"
--	//   },
--	//   "response": {
--	//     "$ref": "BackendServiceGroupHealth"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
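A sketch of driving this health query (the resource-view URL, project, and backend service name are placeholders; the response is simply dumped rather than interpreted):

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // assumes an authorized client
	if err != nil {
		log.Fatal(err)
	}
	ref := &compute.ResourceGroupReference{
		Group: "zones/us-central1-a/resourceViews/web-view", // placeholder URI
	}
	health, err := svc.BackendServices.GetHealth("my-project", "web-backend", ref).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", health)
}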
--// method id "compute.backendServices.insert":
--
--type BackendServicesInsertCall struct {
--	s              *Service
--	project        string
--	backendservice *BackendService
--	opt_           map[string]interface{}
--}
--
--// Insert: Creates a BackendService resource in the specified project
--// using the data included in the request.
--func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall {
--	c := &BackendServicesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendservice = backendservice
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a BackendService resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.backendServices.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices",
--	//   "request": {
--	//     "$ref": "BackendService"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.backendServices.list":
--
--type BackendServicesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of BackendService resources available to the
--// specified project.
--func (r *BackendServicesService) List(project string) *BackendServicesListCall {
--	c := &BackendServicesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesListCall) Do() (*BackendServiceList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *BackendServiceList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of BackendService resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.backendServices.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices",
--	//   "response": {
--	//     "$ref": "BackendServiceList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.backendServices.patch":
--
--type BackendServicesPatchCall struct {
--	s              *Service
--	project        string
--	backendService string
--	backendservice *BackendService
--	opt_           map[string]interface{}
--}
--
--// Patch: Update the entire content of the BackendService resource. This
--// method supports patch semantics.
--func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall {
--	c := &BackendServicesPatchCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendService = backendService
--	c.backendservice = backendservice
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesPatchCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PATCH", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"backendService": c.backendService,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Update the entire content of the BackendService resource. This method supports patch semantics.",
--	//   "httpMethod": "PATCH",
--	//   "id": "compute.backendServices.patch",
--	//   "parameterOrder": [
--	//     "project",
--	//     "backendService"
--	//   ],
--	//   "parameters": {
--	//     "backendService": {
--	//       "description": "Name of the BackendService resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices/{backendService}",
--	//   "request": {
--	//     "$ref": "BackendService"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.backendServices.update":
--
--type BackendServicesUpdateCall struct {
--	s              *Service
--	project        string
--	backendService string
--	backendservice *BackendService
--	opt_           map[string]interface{}
--}
--
--// Update: Update the entire content of the BackendService resource.
--func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall {
--	c := &BackendServicesUpdateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.backendService = backendService
--	c.backendservice = backendservice
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *BackendServicesUpdateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PUT", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"backendService": c.backendService,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Update the entire content of the BackendService resource.",
--	//   "httpMethod": "PUT",
--	//   "id": "compute.backendServices.update",
--	//   "parameterOrder": [
--	//     "project",
--	//     "backendService"
--	//   ],
--	//   "parameters": {
--	//     "backendService": {
--	//       "description": "Name of the BackendService resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/backendServices/{backendService}",
--	//   "request": {
--	//     "$ref": "BackendService"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.diskTypes.aggregatedList":
--
--type DiskTypesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of disk type resources grouped by
--// scope.
--func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall {
--	c := &DiskTypesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DiskTypesAggregatedListCall) Do() (*DiskTypeAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *DiskTypeAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of disk type resources grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.diskTypes.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/diskTypes",
--	//   "response": {
--	//     "$ref": "DiskTypeAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.diskTypes.get":
--
--type DiskTypesGetCall struct {
--	s        *Service
--	project  string
--	zone     string
--	diskType string
--	opt_     map[string]interface{}
--}
--
--// Get: Returns the specified disk type resource.
--func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall {
--	c := &DiskTypesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.diskType = diskType
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DiskTypesGetCall) Do() (*DiskType, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"diskType": c.diskType,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *DiskType
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified disk type resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.diskTypes.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "diskType"
--	//   ],
--	//   "parameters": {
--	//     "diskType": {
--	//       "description": "Name of the disk type resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/diskTypes/{diskType}",
--	//   "response": {
--	//     "$ref": "DiskType"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.diskTypes.list":
--
--type DiskTypesListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of disk type resources available to the
--// specified project.
--func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall {
--	c := &DiskTypesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DiskTypesListCall) Do() (*DiskTypeList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *DiskTypeList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of disk type resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.diskTypes.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/diskTypes",
--	//   "response": {
--	//     "$ref": "DiskTypeList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.aggregatedList":
--
--type DisksAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of disks grouped by scope.
--func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall {
--	c := &DisksAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksAggregatedListCall) Do() (*DiskAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *DiskAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of disks grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.disks.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/disks",
--	//   "response": {
--	//     "$ref": "DiskAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.createSnapshot":
--
--type DisksCreateSnapshotCall struct {
--	s        *Service
--	project  string
--	zone     string
--	disk     string
--	snapshot *Snapshot
--	opt_     map[string]interface{}
--}
--
--// CreateSnapshot:
--func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall {
--	c := &DisksCreateSnapshotCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.disk = disk
--	c.snapshot = snapshot
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksCreateSnapshotCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--		"disk":    c.disk,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "httpMethod": "POST",
--	//   "id": "compute.disks.createSnapshot",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "disk"
--	//   ],
--	//   "parameters": {
--	//     "disk": {
--	//       "description": "Name of the persistent disk resource to snapshot.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot",
--	//   "request": {
--	//     "$ref": "Snapshot"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.delete":
--
--type DisksDeleteCall struct {
--	s       *Service
--	project string
--	zone    string
--	disk    string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified persistent disk resource.
--func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall {
--	c := &DisksDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.disk = disk
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--		"disk":    c.disk,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified persistent disk resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.disks.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "disk"
--	//   ],
--	//   "parameters": {
--	//     "disk": {
--	//       "description": "Name of the persistent disk resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/disks/{disk}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.get":
--
--type DisksGetCall struct {
--	s       *Service
--	project string
--	zone    string
--	disk    string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified persistent disk resource.
--func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall {
--	c := &DisksGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.disk = disk
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksGetCall) Do() (*Disk, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--		"disk":    c.disk,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Disk
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified persistent disk resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.disks.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "disk"
--	//   ],
--	//   "parameters": {
--	//     "disk": {
--	//       "description": "Name of the persistent disk resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/disks/{disk}",
--	//   "response": {
--	//     "$ref": "Disk"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.insert":
--
--type DisksInsertCall struct {
--	s       *Service
--	project string
--	zone    string
--	disk    *Disk
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates a persistent disk resource in the specified project
--// using the data included in the request.
--func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall {
--	c := &DisksInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.disk = disk
--	return c
--}
--
--// SourceImage sets the optional parameter "sourceImage": Source image
--// to restore onto a disk.
--func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall {
--	c.opt_["sourceImage"] = sourceImage
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["sourceImage"]; ok {
--		params.Set("sourceImage", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a persistent disk resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.disks.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "sourceImage": {
--	//       "description": "Optional. Source image to restore onto a disk.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/disks",
--	//   "request": {
--	//     "$ref": "Disk"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.disks.list":
--
--type DisksListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of persistent disk resources contained
--// within the specified zone.
--func (r *DisksService) List(project string, zone string) *DisksListCall {
--	c := &DisksListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *DisksListCall) Filter(filter string) *DisksListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *DisksListCall) PageToken(pageToken string) *DisksListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *DisksListCall) Do() (*DiskList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *DiskList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of persistent disk resources contained within the specified zone.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.disks.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/disks",
--	//   "response": {
--	//     "$ref": "DiskList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.delete":
--
--type FirewallsDeleteCall struct {
--	s        *Service
--	project  string
--	firewall string
--	opt_     map[string]interface{}
--}
--
--// Delete: Deletes the specified firewall resource.
--func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall {
--	c := &FirewallsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.firewall = firewall
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"firewall": c.firewall,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified firewall resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.firewalls.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "firewall"
--	//   ],
--	//   "parameters": {
--	//     "firewall": {
--	//       "description": "Name of the firewall resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls/{firewall}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.get":
--
--type FirewallsGetCall struct {
--	s        *Service
--	project  string
--	firewall string
--	opt_     map[string]interface{}
--}
--
--// Get: Returns the specified firewall resource.
--func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall {
--	c := &FirewallsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.firewall = firewall
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsGetCall) Do() (*Firewall, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"firewall": c.firewall,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Firewall
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified firewall resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.firewalls.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "firewall"
--	//   ],
--	//   "parameters": {
--	//     "firewall": {
--	//       "description": "Name of the firewall resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls/{firewall}",
--	//   "response": {
--	//     "$ref": "Firewall"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.insert":
--
--type FirewallsInsertCall struct {
--	s        *Service
--	project  string
--	firewall *Firewall
--	opt_     map[string]interface{}
--}
--
--// Insert: Creates a firewall resource in the specified project using
--// the data included in the request.
--func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall {
--	c := &FirewallsInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.firewall = firewall
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a firewall resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.firewalls.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls",
--	//   "request": {
--	//     "$ref": "Firewall"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.list":
--
--type FirewallsListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of firewall resources available to the
--// specified project.
--func (r *FirewallsService) List(project string) *FirewallsListCall {
--	c := &FirewallsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsListCall) Do() (*FirewallList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *FirewallList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of firewall resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.firewalls.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls",
--	//   "response": {
--	//     "$ref": "FirewallList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.patch":
--
--type FirewallsPatchCall struct {
--	s         *Service
--	project   string
--	firewall  string
--	firewall2 *Firewall
--	opt_      map[string]interface{}
--}
--
--// Patch: Updates the specified firewall resource with the data included
--// in the request. This method supports patch semantics.
--func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall {
--	c := &FirewallsPatchCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.firewall = firewall
--	c.firewall2 = firewall2
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsPatchCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PATCH", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"firewall": c.firewall,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.",
--	//   "httpMethod": "PATCH",
--	//   "id": "compute.firewalls.patch",
--	//   "parameterOrder": [
--	//     "project",
--	//     "firewall"
--	//   ],
--	//   "parameters": {
--	//     "firewall": {
--	//       "description": "Name of the firewall resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls/{firewall}",
--	//   "request": {
--	//     "$ref": "Firewall"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.firewalls.update":
--
--type FirewallsUpdateCall struct {
--	s         *Service
--	project   string
--	firewall  string
--	firewall2 *Firewall
--	opt_      map[string]interface{}
--}
--
--// Update: Updates the specified firewall resource with the data
--// included in the request.
--func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall {
--	c := &FirewallsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.firewall = firewall
--	c.firewall2 = firewall2
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *FirewallsUpdateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PUT", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"firewall": c.firewall,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Updates the specified firewall resource with the data included in the request.",
--	//   "httpMethod": "PUT",
--	//   "id": "compute.firewalls.update",
--	//   "parameterOrder": [
--	//     "project",
--	//     "firewall"
--	//   ],
--	//   "parameters": {
--	//     "firewall": {
--	//       "description": "Name of the firewall resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/firewalls/{firewall}",
--	//   "request": {
--	//     "$ref": "Firewall"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.aggregatedList":
--
--type ForwardingRulesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of forwarding rules grouped by
--// scope.
--func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall {
--	c := &ForwardingRulesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesAggregatedListCall) Do() (*ForwardingRuleAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ForwardingRuleAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of forwarding rules grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.forwardingRules.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/forwardingRules",
--	//   "response": {
--	//     "$ref": "ForwardingRuleAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.delete":
--
--type ForwardingRulesDeleteCall struct {
--	s              *Service
--	project        string
--	region         string
--	forwardingRule string
--	opt_           map[string]interface{}
--}
--
--// Delete: Deletes the specified ForwardingRule resource.
--func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall {
--	c := &ForwardingRulesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.forwardingRule = forwardingRule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"region":         c.region,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified ForwardingRule resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.forwardingRules.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.get":
--
--type ForwardingRulesGetCall struct {
--	s              *Service
--	project        string
--	region         string
--	forwardingRule string
--	opt_           map[string]interface{}
--}
--
--// Get: Returns the specified ForwardingRule resource.
--func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall {
--	c := &ForwardingRulesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.forwardingRule = forwardingRule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesGetCall) Do() (*ForwardingRule, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"region":         c.region,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ForwardingRule
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified ForwardingRule resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.forwardingRules.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
--	//   "response": {
--	//     "$ref": "ForwardingRule"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.insert":
--
--type ForwardingRulesInsertCall struct {
--	s              *Service
--	project        string
--	region         string
--	forwardingrule *ForwardingRule
--	opt_           map[string]interface{}
--}
--
--// Insert: Creates a ForwardingRule resource in the specified project
--// and region using the data included in the request.
--func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall {
--	c := &ForwardingRulesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.forwardingrule = forwardingrule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.forwardingRules.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/forwardingRules",
--	//   "request": {
--	//     "$ref": "ForwardingRule"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.list":
--
--type ForwardingRulesListCall struct {
--	s       *Service
--	project string
--	region  string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of ForwardingRule resources available to the
--// specified project and region.
--func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall {
--	c := &ForwardingRulesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesListCall) Do() (*ForwardingRuleList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ForwardingRuleList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.forwardingRules.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/forwardingRules",
--	//   "response": {
--	//     "$ref": "ForwardingRuleList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.forwardingRules.setTarget":
--
--type ForwardingRulesSetTargetCall struct {
--	s               *Service
--	project         string
--	region          string
--	forwardingRule  string
--	targetreference *TargetReference
--	opt_            map[string]interface{}
--}
--
--// SetTarget: Changes target url for forwarding rule.
--func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall {
--	c := &ForwardingRulesSetTargetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.forwardingRule = forwardingRule
--	c.targetreference = targetreference
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ForwardingRulesSetTargetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"region":         c.region,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Changes target url for forwarding rule.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.forwardingRules.setTarget",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource in which target is to be set.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget",
--	//   "request": {
--	//     "$ref": "TargetReference"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalAddresses.delete":
--
--type GlobalAddressesDeleteCall struct {
--	s       *Service
--	project string
--	address string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified address resource.
--func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
--	c := &GlobalAddressesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalAddressesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"address": c.address,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified address resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.globalAddresses.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "address"
--	//   ],
--	//   "parameters": {
--	//     "address": {
--	//       "description": "Name of the address resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/addresses/{address}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalAddresses.get":
--
--type GlobalAddressesGetCall struct {
--	s       *Service
--	project string
--	address string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified address resource.
--func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
--	c := &GlobalAddressesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalAddressesGetCall) Do() (*Address, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"address": c.address,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Address
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified address resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalAddresses.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "address"
--	//   ],
--	//   "parameters": {
--	//     "address": {
--	//       "description": "Name of the address resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/addresses/{address}",
--	//   "response": {
--	//     "$ref": "Address"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalAddresses.insert":
--
--type GlobalAddressesInsertCall struct {
--	s       *Service
--	project string
--	address *Address
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates an address resource in the specified project using
--// the data included in the request.
--func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
--	c := &GlobalAddressesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.address = address
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalAddressesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates an address resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.globalAddresses.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/addresses",
--	//   "request": {
--	//     "$ref": "Address"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalAddresses.list":
--
--type GlobalAddressesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of global address resources.
--func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall {
--	c := &GlobalAddressesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalAddressesListCall) Do() (*AddressList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *AddressList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of global address resources.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalAddresses.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/addresses",
--	//   "response": {
--	//     "$ref": "AddressList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalForwardingRules.delete":
--
--type GlobalForwardingRulesDeleteCall struct {
--	s              *Service
--	project        string
--	forwardingRule string
--	opt_           map[string]interface{}
--}
--
--// Delete: Deletes the specified ForwardingRule resource.
--func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall {
--	c := &GlobalForwardingRulesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.forwardingRule = forwardingRule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalForwardingRulesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified ForwardingRule resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.globalForwardingRules.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/forwardingRules/{forwardingRule}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalForwardingRules.get":
--
--type GlobalForwardingRulesGetCall struct {
--	s              *Service
--	project        string
--	forwardingRule string
--	opt_           map[string]interface{}
--}
--
--// Get: Returns the specified ForwardingRule resource.
--func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall {
--	c := &GlobalForwardingRulesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.forwardingRule = forwardingRule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalForwardingRulesGetCall) Do() (*ForwardingRule, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ForwardingRule
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified ForwardingRule resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalForwardingRules.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/forwardingRules/{forwardingRule}",
--	//   "response": {
--	//     "$ref": "ForwardingRule"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalForwardingRules.insert":
--
--type GlobalForwardingRulesInsertCall struct {
--	s              *Service
--	project        string
--	forwardingrule *ForwardingRule
--	opt_           map[string]interface{}
--}
--
--// Insert: Creates a ForwardingRule resource in the specified project
--// and region using the data included in the request.
--func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall {
--	c := &GlobalForwardingRulesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.forwardingrule = forwardingrule
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalForwardingRulesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.globalForwardingRules.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/forwardingRules",
--	//   "request": {
--	//     "$ref": "ForwardingRule"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalForwardingRules.list":
--
--type GlobalForwardingRulesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of ForwardingRule resources available to the
--// specified project.
--func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall {
--	c := &GlobalForwardingRulesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalForwardingRulesListCall) Do() (*ForwardingRuleList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ForwardingRuleList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of ForwardingRule resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalForwardingRules.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/forwardingRules",
--	//   "response": {
--	//     "$ref": "ForwardingRuleList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalForwardingRules.setTarget":
--
--type GlobalForwardingRulesSetTargetCall struct {
--	s               *Service
--	project         string
--	forwardingRule  string
--	targetreference *TargetReference
--	opt_            map[string]interface{}
--}
--
--// SetTarget: Changes target url for forwarding rule.
--func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall {
--	c := &GlobalForwardingRulesSetTargetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.forwardingRule = forwardingRule
--	c.targetreference = targetreference
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalForwardingRulesSetTargetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"forwardingRule": c.forwardingRule,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Changes target url for forwarding rule.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.globalForwardingRules.setTarget",
--	//   "parameterOrder": [
--	//     "project",
--	//     "forwardingRule"
--	//   ],
--	//   "parameters": {
--	//     "forwardingRule": {
--	//       "description": "Name of the ForwardingRule resource in which target is to be set.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget",
--	//   "request": {
--	//     "$ref": "TargetReference"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalOperations.aggregatedList":
--
--type GlobalOperationsAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of all operations grouped by
--// scope.
--func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall {
--	c := &GlobalOperationsAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalOperationsAggregatedListCall) Do() (*OperationAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *OperationAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of all operations grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalOperations.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/operations",
--	//   "response": {
--	//     "$ref": "OperationAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalOperations.delete":
--
--type GlobalOperationsDeleteCall struct {
--	s         *Service
--	project   string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Delete: Deletes the specified operation resource.
--func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall {
--	c := &GlobalOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalOperationsDeleteCall) Do() error {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return err
--	}
--	return nil
--	// {
--	//   "description": "Deletes the specified operation resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.globalOperations.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/operations/{operation}",
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalOperations.get":
--
--type GlobalOperationsGetCall struct {
--	s         *Service
--	project   string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Get: Retrieves the specified operation resource.
--func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall {
--	c := &GlobalOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalOperationsGetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the specified operation resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalOperations.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/operations/{operation}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.globalOperations.list":
--
--type GlobalOperationsListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of operation resources contained within the
--// specified project.
--func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall {
--	c := &GlobalOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *GlobalOperationsListCall) Do() (*OperationList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *OperationList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of operation resources contained within the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.globalOperations.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/operations",
--	//   "response": {
--	//     "$ref": "OperationList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.delete":
--
--type HttpHealthChecksDeleteCall struct {
--	s               *Service
--	project         string
--	httpHealthCheck string
--	opt_            map[string]interface{}
--}
--
--// Delete: Deletes the specified HttpHealthCheck resource.
--func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall {
--	c := &HttpHealthChecksDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.httpHealthCheck = httpHealthCheck
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"httpHealthCheck": c.httpHealthCheck,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified HttpHealthCheck resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.httpHealthChecks.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "httpHealthCheck"
--	//   ],
--	//   "parameters": {
--	//     "httpHealthCheck": {
--	//       "description": "Name of the HttpHealthCheck resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.get":
--
--type HttpHealthChecksGetCall struct {
--	s               *Service
--	project         string
--	httpHealthCheck string
--	opt_            map[string]interface{}
--}
--
--// Get: Returns the specified HttpHealthCheck resource.
--func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall {
--	c := &HttpHealthChecksGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.httpHealthCheck = httpHealthCheck
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksGetCall) Do() (*HttpHealthCheck, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"httpHealthCheck": c.httpHealthCheck,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *HttpHealthCheck
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified HttpHealthCheck resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.httpHealthChecks.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "httpHealthCheck"
--	//   ],
--	//   "parameters": {
--	//     "httpHealthCheck": {
--	//       "description": "Name of the HttpHealthCheck resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--	//   "response": {
--	//     "$ref": "HttpHealthCheck"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.insert":
--
--type HttpHealthChecksInsertCall struct {
--	s               *Service
--	project         string
--	httphealthcheck *HttpHealthCheck
--	opt_            map[string]interface{}
--}
--
--// Insert: Creates a HttpHealthCheck resource in the specified project
--// using the data included in the request.
--func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall {
--	c := &HttpHealthChecksInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.httphealthcheck = httphealthcheck
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.httpHealthChecks.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks",
--	//   "request": {
--	//     "$ref": "HttpHealthCheck"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.list":
--
--type HttpHealthChecksListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of HttpHealthCheck resources available to
--// the specified project.
--func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall {
--	c := &HttpHealthChecksListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksListCall) Do() (*HttpHealthCheckList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *HttpHealthCheckList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.httpHealthChecks.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks",
--	//   "response": {
--	//     "$ref": "HttpHealthCheckList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.patch":
--
--type HttpHealthChecksPatchCall struct {
--	s               *Service
--	project         string
--	httpHealthCheck string
--	httphealthcheck *HttpHealthCheck
--	opt_            map[string]interface{}
--}
--
--// Patch: Updates a HttpHealthCheck resource in the specified project
--// using the data included in the request. This method supports patch
--// semantics.
--func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall {
--	c := &HttpHealthChecksPatchCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.httpHealthCheck = httpHealthCheck
--	c.httphealthcheck = httphealthcheck
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksPatchCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PATCH", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"httpHealthCheck": c.httpHealthCheck,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.",
--	//   "httpMethod": "PATCH",
--	//   "id": "compute.httpHealthChecks.patch",
--	//   "parameterOrder": [
--	//     "project",
--	//     "httpHealthCheck"
--	//   ],
--	//   "parameters": {
--	//     "httpHealthCheck": {
--	//       "description": "Name of the HttpHealthCheck resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--	//   "request": {
--	//     "$ref": "HttpHealthCheck"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.httpHealthChecks.update":
--
--type HttpHealthChecksUpdateCall struct {
--	s               *Service
--	project         string
--	httpHealthCheck string
--	httphealthcheck *HttpHealthCheck
--	opt_            map[string]interface{}
--}
--
--// Update: Updates a HttpHealthCheck resource in the specified project
--// using the data included in the request.
--func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall {
--	c := &HttpHealthChecksUpdateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.httpHealthCheck = httpHealthCheck
--	c.httphealthcheck = httphealthcheck
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *HttpHealthChecksUpdateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PUT", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"httpHealthCheck": c.httpHealthCheck,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.",
--	//   "httpMethod": "PUT",
--	//   "id": "compute.httpHealthChecks.update",
--	//   "parameterOrder": [
--	//     "project",
--	//     "httpHealthCheck"
--	//   ],
--	//   "parameters": {
--	//     "httpHealthCheck": {
--	//       "description": "Name of the HttpHealthCheck resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
--	//   "request": {
--	//     "$ref": "HttpHealthCheck"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.images.delete":
--
--type ImagesDeleteCall struct {
--	s       *Service
--	project string
--	image   string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified image resource.
--func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall {
--	c := &ImagesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.image = image
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ImagesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"image":   c.image,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified image resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.images.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "image"
--	//   ],
--	//   "parameters": {
--	//     "image": {
--	//       "description": "Name of the image resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/images/{image}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.images.deprecate":
--
--type ImagesDeprecateCall struct {
--	s                 *Service
--	project           string
--	image             string
--	deprecationstatus *DeprecationStatus
--	opt_              map[string]interface{}
--}
--
--// Deprecate: Sets the deprecation status of an image. If no message
--// body is given, clears the deprecation status instead.
--func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall {
--	c := &ImagesDeprecateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.image = image
--	c.deprecationstatus = deprecationstatus
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ImagesDeprecateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"image":   c.image,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets the deprecation status of an image. If no message body is given, clears the deprecation status instead.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.images.deprecate",
--	//   "parameterOrder": [
--	//     "project",
--	//     "image"
--	//   ],
--	//   "parameters": {
--	//     "image": {
--	//       "description": "Image name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/images/{image}/deprecate",
--	//   "request": {
--	//     "$ref": "DeprecationStatus"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.images.get":
--
--type ImagesGetCall struct {
--	s       *Service
--	project string
--	image   string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified image resource.
--func (r *ImagesService) Get(project string, image string) *ImagesGetCall {
--	c := &ImagesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.image = image
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ImagesGetCall) Do() (*Image, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"image":   c.image,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Image
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified image resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.images.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "image"
--	//   ],
--	//   "parameters": {
--	//     "image": {
--	//       "description": "Name of the image resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/images/{image}",
--	//   "response": {
--	//     "$ref": "Image"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.images.insert":
--
--type ImagesInsertCall struct {
--	s       *Service
--	project string
--	image   *Image
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates an image resource in the specified project using the
--// data included in the request.
--func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall {
--	c := &ImagesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.image = image
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ImagesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.image)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates an image resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.images.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/images",
--	//   "request": {
--	//     "$ref": "Image"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/devstorage.full_control",
--	//     "https://www.googleapis.com/auth/devstorage.read_only",
--	//     "https://www.googleapis.com/auth/devstorage.read_write"
--	//   ]
--	// }
--
--}
--
--// method id "compute.images.list":
--
--type ImagesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of image resources available to the
--// specified project.
--func (r *ImagesService) List(project string) *ImagesListCall {
--	c := &ImagesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *ImagesListCall) Filter(filter string) *ImagesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ImagesListCall) Do() (*ImageList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ImageList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of image resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.images.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/images",
--	//   "response": {
--	//     "$ref": "ImageList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
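For orientation, a minimal sketch of how this generated Images.List builder is typically driven: the MaxResults and PageToken setters map onto the query parameters assembled in Do above, and NextPageToken from one response feeds the next request. Assumes Application Default Credentials, the upstream import path google.golang.org/api/compute/v1, and a hypothetical project ID.

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	// OAuth2-authenticated HTTP client via Application Default Credentials.
	client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	project := "my-project" // hypothetical project ID
	pageToken := ""
	for {
		call := svc.Images.List(project).MaxResults(50)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		list, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, img := range list.Items {
			fmt.Println(img.Name)
		}
		if list.NextPageToken == "" {
			break
		}
		pageToken = list.NextPageToken // continue the truncated listing
	}
}
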
--// method id "compute.instanceTemplates.delete":
--
--type InstanceTemplatesDeleteCall struct {
--	s                *Service
--	project          string
--	instanceTemplate string
--	opt_             map[string]interface{}
--}
--
--// Delete: Deletes the specified instance template resource.
--func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall {
--	c := &InstanceTemplatesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.instanceTemplate = instanceTemplate
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstanceTemplatesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":          c.project,
--		"instanceTemplate": c.instanceTemplate,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified instance template resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.instanceTemplates.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "instanceTemplate"
--	//   ],
--	//   "parameters": {
--	//     "instanceTemplate": {
--	//       "description": "Name of the instance template resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/instanceTemplates/{instanceTemplate}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instanceTemplates.get":
--
--type InstanceTemplatesGetCall struct {
--	s                *Service
--	project          string
--	instanceTemplate string
--	opt_             map[string]interface{}
--}
--
--// Get: Returns the specified instance template resource.
--func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall {
--	c := &InstanceTemplatesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.instanceTemplate = instanceTemplate
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstanceTemplatesGetCall) Do() (*InstanceTemplate, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":          c.project,
--		"instanceTemplate": c.instanceTemplate,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *InstanceTemplate
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified instance template resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.instanceTemplates.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "instanceTemplate"
--	//   ],
--	//   "parameters": {
--	//     "instanceTemplate": {
--	//       "description": "Name of the instance template resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/instanceTemplates/{instanceTemplate}",
--	//   "response": {
--	//     "$ref": "InstanceTemplate"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instanceTemplates.insert":
--
--type InstanceTemplatesInsertCall struct {
--	s                *Service
--	project          string
--	instancetemplate *InstanceTemplate
--	opt_             map[string]interface{}
--}
--
--// Insert: Creates an instance template resource in the specified
--// project using the data included in the request.
--func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall {
--	c := &InstanceTemplatesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.instancetemplate = instancetemplate
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstanceTemplatesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates an instance template resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instanceTemplates.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/instanceTemplates",
--	//   "request": {
--	//     "$ref": "InstanceTemplate"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instanceTemplates.list":
--
--type InstanceTemplatesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of instance template resources contained
--// within the specified project.
--func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall {
--	c := &InstanceTemplatesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstanceTemplatesListCall) Do() (*InstanceTemplateList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *InstanceTemplateList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of instance template resources contained within the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.instanceTemplates.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/instanceTemplates",
--	//   "response": {
--	//     "$ref": "InstanceTemplateList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
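The four InstanceTemplates calls above form the usual create/read/delete cycle; because Insert only returns a global Operation, the sketch below polls GlobalOperations.Get before reading the template back. Same imports and authenticated svc as the earlier sketch (plus time); every name and the source image are hypothetical.

func templateLifecycle(svc *compute.Service, project string) error {
	tmpl := &compute.InstanceTemplate{
		Name: "demo-template", // hypothetical
		Properties: &compute.InstanceProperties{
			MachineType: "n1-standard-1",
			Disks: []*compute.AttachedDisk{{
				Boot:       true,
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/debian-7-wheezy-v20150127", // hypothetical
				},
			}},
			NetworkInterfaces: []*compute.NetworkInterface{{Network: "global/networks/default"}},
		},
	}
	op, err := svc.InstanceTemplates.Insert(project, tmpl).Do()
	if err != nil {
		return err
	}
	// Wait for the global operation to finish before reading the template back.
	for op.Status != "DONE" {
		time.Sleep(time.Second)
		if op, err = svc.GlobalOperations.Get(project, op.Name).Do(); err != nil {
			return err
		}
	}
	got, err := svc.InstanceTemplates.Get(project, tmpl.Name).Do()
	if err != nil {
		return err
	}
	fmt.Println("created:", got.Name)
	_, err = svc.InstanceTemplates.Delete(project, tmpl.Name).Do()
	return err
}
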
--// method id "compute.instances.addAccessConfig":
--
--type InstancesAddAccessConfigCall struct {
--	s                *Service
--	project          string
--	zone             string
--	instance         string
--	networkInterface string
--	accessconfig     *AccessConfig
--	opt_             map[string]interface{}
--}
--
--// AddAccessConfig: Adds an access config to an instance's network
--// interface.
--func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall {
--	c := &InstancesAddAccessConfigCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.networkInterface = networkInterface
--	c.accessconfig = accessconfig
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesAddAccessConfigCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	params.Set("networkInterface", fmt.Sprintf("%v", c.networkInterface))
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Adds an access config to an instance's network interface.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.addAccessConfig",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance",
--	//     "networkInterface"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "networkInterface": {
--	//       "description": "Network interface name.",
--	//       "location": "query",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig",
--	//   "request": {
--	//     "$ref": "AccessConfig"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
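A sketch of the common use of AddAccessConfig: granting an instance an ephemeral external IP on its first network interface. Same authenticated svc as above; "nic0" and the config name are the conventional but hypothetical choices here.

// addExternalIP attaches a one-to-one NAT access config to the first NIC.
func addExternalIP(svc *compute.Service, project, zone, instance string) error {
	cfg := &compute.AccessConfig{
		Name: "External NAT",
		Type: "ONE_TO_ONE_NAT", // the only access-config type in the v1 API
	}
	_, err := svc.Instances.AddAccessConfig(project, zone, instance, "nic0", cfg).Do()
	return err
}
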
--// method id "compute.instances.aggregatedList":
--
--type InstancesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList:
--// AggregatedList: Retrieves an aggregated list of instance resources across all zones in the specified project.
--func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall {
--	c := &InstancesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesAggregatedListCall) Do() (*InstanceAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *InstanceAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "httpMethod": "GET",
--	//   "id": "compute.instances.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/instances",
--	//   "response": {
--	//     "$ref": "InstanceAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
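AggregatedList returns one response keyed by scope ("zones/<zone>") rather than a flat list; a sketch that tallies instances per zone, under the same setup assumptions as the earlier sketches:

// countByZone walks the per-zone map in the aggregated response.
func countByZone(svc *compute.Service, project string) error {
	agg, err := svc.Instances.AggregatedList(project).Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		if len(scoped.Instances) > 0 {
			fmt.Printf("%s: %d instance(s)\n", scope, len(scoped.Instances))
		}
	}
	return nil
}
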
--// method id "compute.instances.attachDisk":
--
--type InstancesAttachDiskCall struct {
--	s            *Service
--	project      string
--	zone         string
--	instance     string
--	attacheddisk *AttachedDisk
--	opt_         map[string]interface{}
--}
--
--// AttachDisk: Attaches a disk resource to an instance.
--func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall {
--	c := &InstancesAttachDiskCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.attacheddisk = attacheddisk
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesAttachDiskCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Attaches a disk resource to an instance.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.attachDisk",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/attachDisk",
--	//   "request": {
--	//     "$ref": "AttachedDisk"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instances.delete":
--
--type InstancesDeleteCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	opt_     map[string]interface{}
--}
--
--// Delete: Deletes the specified instance resource.
--func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall {
--	c := &InstancesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified instance resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.instances.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
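Delete, like most mutating calls in this file, returns a zone-scoped Operation instead of blocking; a small polling helper built on ZoneOperations.Get (an assumption about the companion service in this package), same setup as the earlier sketches plus time:

// waitZoneOp polls a zone-scoped Operation until it reaches DONE.
func waitZoneOp(svc *compute.Service, project, zone string, op *compute.Operation) error {
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		var err error
		if op, err = svc.ZoneOperations.Get(project, zone, op.Name).Do(); err != nil {
			return err
		}
	}
	if op.Error != nil {
		return fmt.Errorf("operation %s failed", op.Name)
	}
	return nil
}

// deleteInstance removes an instance and waits for the operation to settle.
func deleteInstance(svc *compute.Service, project, zone, name string) error {
	op, err := svc.Instances.Delete(project, zone, name).Do()
	if err != nil {
		return err
	}
	return waitZoneOp(svc, project, zone, op)
}
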
--// method id "compute.instances.deleteAccessConfig":
--
--type InstancesDeleteAccessConfigCall struct {
--	s                *Service
--	project          string
--	zone             string
--	instance         string
--	accessConfig     string
--	networkInterface string
--	opt_             map[string]interface{}
--}
--
--// DeleteAccessConfig: Deletes an access config from an instance's
--// network interface.
--func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall {
--	c := &InstancesDeleteAccessConfigCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.accessConfig = accessConfig
--	c.networkInterface = networkInterface
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesDeleteAccessConfigCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	params.Set("accessConfig", fmt.Sprintf("%v", c.accessConfig))
--	params.Set("networkInterface", fmt.Sprintf("%v", c.networkInterface))
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes an access config from an instance's network interface.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.deleteAccessConfig",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance",
--	//     "accessConfig",
--	//     "networkInterface"
--	//   ],
--	//   "parameters": {
--	//     "accessConfig": {
--	//       "description": "Access config name.",
--	//       "location": "query",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "networkInterface": {
--	//       "description": "Network interface name.",
--	//       "location": "query",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
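And the inverse of the addExternalIP sketch above, dropping the named config from the first interface; the argument order follows the accessConfig-then-networkInterface parameters of DeleteAccessConfig:

// removeExternalIP deletes the named access config from nic0 (names hypothetical).
func removeExternalIP(svc *compute.Service, project, zone, instance string) error {
	_, err := svc.Instances.DeleteAccessConfig(project, zone, instance, "External NAT", "nic0").Do()
	return err
}
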
--// method id "compute.instances.detachDisk":
--
--type InstancesDetachDiskCall struct {
--	s          *Service
--	project    string
--	zone       string
--	instance   string
--	deviceName string
--	opt_       map[string]interface{}
--}
--
--// DetachDisk: Detaches a disk from an instance.
--func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall {
--	c := &InstancesDetachDiskCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.deviceName = deviceName
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesDetachDiskCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	params.Set("deviceName", fmt.Sprintf("%v", c.deviceName))
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Detaches a disk from an instance.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.detachDisk",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance",
--	//     "deviceName"
--	//   ],
--	//   "parameters": {
--	//     "deviceName": {
--	//       "description": "Disk device name to detach.",
--	//       "location": "query",
--	//       "pattern": "\\w[\\w.-]{0,254}",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/detachDisk",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
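AttachDisk takes a full AttachedDisk body while DetachDisk only needs the device name, so round-tripping a disk looks like the sketch below (same setup assumptions; the relative source URL and names are hypothetical):

// attachDetach attaches an existing persistent disk under a device name,
// then detaches it by that same device name (the deviceName query parameter
// seen in Do above).
func attachDetach(svc *compute.Service, project, zone, instance, diskName string) error {
	disk := &compute.AttachedDisk{
		Type:       "PERSISTENT",
		Mode:       "READ_WRITE",
		Source:     fmt.Sprintf("zones/%s/disks/%s", zone, diskName), // hypothetical partial URL
		DeviceName: diskName,
	}
	if _, err := svc.Instances.AttachDisk(project, zone, instance, disk).Do(); err != nil {
		return err
	}
	_, err := svc.Instances.DetachDisk(project, zone, instance, diskName).Do()
	return err
}
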
--// method id "compute.instances.get":
--
--type InstancesGetCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	opt_     map[string]interface{}
--}
--
--// Get: Returns the specified instance resource.
--func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall {
--	c := &InstancesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesGetCall) Do() (*Instance, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Instance
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified instance resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.instances.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}",
--	//   "response": {
--	//     "$ref": "Instance"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
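The Fields setter threaded through every call becomes most useful on Get, where a partial response can trim the instance payload to a couple of fields; a sketch under the same assumptions:

// instanceStatus fetches only the status and external IPs of an instance.
func instanceStatus(svc *compute.Service, project, zone, name string) error {
	inst, err := svc.Instances.Get(project, zone, name).
		Fields("status", "networkInterfaces(accessConfigs/natIP)").Do()
	if err != nil {
		return err
	}
	fmt.Println(name, inst.Status)
	for _, ni := range inst.NetworkInterfaces {
		for _, ac := range ni.AccessConfigs {
			fmt.Println("  external IP:", ac.NatIP)
		}
	}
	return nil
}
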
--// method id "compute.instances.getSerialPortOutput":
--
--type InstancesGetSerialPortOutputCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	opt_     map[string]interface{}
--}
--
--// GetSerialPortOutput: Returns the specified instance's serial port
--// output.
--func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall {
--	c := &InstancesGetSerialPortOutputCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesGetSerialPortOutputCall) Do() (*SerialPortOutput, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *SerialPortOutput
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified instance's serial port output.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.instances.getSerialPortOutput",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/serialPort",
--	//   "response": {
--	//     "$ref": "SerialPortOutput"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
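GetSerialPortOutput returns the console log as a single Contents string, so dumping it is a thin wrapper around the call above (same setup assumptions):

// dumpSerial prints an instance's serial console output.
func dumpSerial(svc *compute.Service, project, zone, name string) error {
	out, err := svc.Instances.GetSerialPortOutput(project, zone, name).Do()
	if err != nil {
		return err
	}
	fmt.Print(out.Contents)
	return nil
}
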
--// method id "compute.instances.insert":
--
--type InstancesInsertCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance *Instance
--	opt_     map[string]interface{}
--}
--
--// Insert: Creates an instance resource in the specified project using
--// the data included in the request.
--func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall {
--	c := &InstancesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates an instance resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances",
--	//   "request": {
--	//     "$ref": "Instance"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
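A sketch of a minimal Insert payload: one auto-created boot disk and the default network, mirroring the Instance request body documented above. Same setup assumptions; every name, the machine type and the source image are hypothetical placeholders.

// createInstance creates a small instance and returns the pending zone Operation.
func createInstance(svc *compute.Service, project, zone, name string) (*compute.Operation, error) {
	inst := &compute.Instance{
		Name:        name,
		MachineType: fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", zone),
		Disks: []*compute.AttachedDisk{{
			Boot:       true,
			AutoDelete: true,
			InitializeParams: &compute.AttachedDiskInitializeParams{
				SourceImage: "projects/debian-cloud/global/images/debian-7-wheezy-v20150127", // hypothetical
			},
		}},
		NetworkInterfaces: []*compute.NetworkInterface{{
			Network:       fmt.Sprintf("projects/%s/global/networks/default", project),
			AccessConfigs: []*compute.AccessConfig{{Type: "ONE_TO_ONE_NAT", Name: "External NAT"}},
		}},
	}
	return svc.Instances.Insert(project, zone, inst).Do()
}
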
--// method id "compute.instances.list":
--
--type InstancesListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of instance resources contained within the
--// specified zone.
--func (r *InstancesService) List(project string, zone string) *InstancesListCall {
--	c := &InstancesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *InstancesListCall) Filter(filter string) *InstancesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesListCall) Do() (*InstanceList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *InstanceList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of instance resources contained within the specified zone.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.instances.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances",
--	//   "response": {
--	//     "$ref": "InstanceList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
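List is per zone and accepts the filter expression mentioned above; the sketch below uses the "field eq <RE2 regex>" form the v1 API took at the time (an assumption about the filter dialect, not something this file enforces):

// listByPrefix lists instances in one zone whose names match a prefix.
func listByPrefix(svc *compute.Service, project, zone, prefix string) error {
	list, err := svc.Instances.List(project, zone).
		Filter(fmt.Sprintf("name eq %s.*", prefix)).Do()
	if err != nil {
		return err
	}
	for _, inst := range list.Items {
		fmt.Println(inst.Name, inst.Status)
	}
	return nil
}
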
--// method id "compute.instances.reset":
--
--type InstancesResetCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	opt_     map[string]interface{}
--}
--
--// Reset: Performs a hard reset on the instance.
--func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall {
--	c := &InstancesResetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesResetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Performs a hard reset on the instance.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.reset",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/reset",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
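Reset has no request body, so under the same assumptions the builder chains straight into Do:

// resetInstance performs a hard reset and discards the returned Operation.
func resetInstance(svc *compute.Service, project, zone, name string) error {
	_, err := svc.Instances.Reset(project, zone, name).Do()
	return err
}
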
--// method id "compute.instances.setDiskAutoDelete":
--
--type InstancesSetDiskAutoDeleteCall struct {
--	s          *Service
--	project    string
--	zone       string
--	instance   string
--	autoDelete bool
--	deviceName string
--	opt_       map[string]interface{}
--}
--
--// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to
--// an instance
--// an instance.
--func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall {
--	c := &InstancesSetDiskAutoDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.autoDelete = autoDelete
--	c.deviceName = deviceName
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesSetDiskAutoDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	params.Set("autoDelete", fmt.Sprintf("%v", c.autoDelete))
--	params.Set("deviceName", fmt.Sprintf("%v", c.deviceName))
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets the auto-delete flag for a disk attached to an instance",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.setDiskAutoDelete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance",
--	//     "autoDelete",
--	//     "deviceName"
--	//   ],
--	//   "parameters": {
--	//     "autoDelete": {
--	//       "description": "Whether to auto-delete the disk when the instance is deleted.",
--	//       "location": "query",
--	//       "required": true,
--	//       "type": "boolean"
--	//     },
--	//     "deviceName": {
--	//       "description": "Disk device name to modify.",
--	//       "location": "query",
--	//       "pattern": "\\w[\\w.-]{0,254}",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instances.setMetadata":
--
--type InstancesSetMetadataCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	metadata *Metadata
--	opt_     map[string]interface{}
--}
--
--// SetMetadata: Sets metadata for the specified instance to the data
--// included in the request.
--func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall {
--	c := &InstancesSetMetadataCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.metadata = metadata
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesSetMetadataCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets metadata for the specified instance to the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.setMetadata",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/setMetadata",
--	//   "request": {
--	//     "$ref": "Metadata"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
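The deleted generated client follows one call-builder pattern throughout this hunk: the service method returns a typed *...Call value, optional parameters are chained onto it, and Do() encodes the request, sends it, and decodes the JSON response. A minimal, hedged sketch of driving the SetMetadata call above; the compute.New constructor, the Service.Instances field, and the Metadata type are defined elsewhere in this generated file, and the project/zone/instance names and the client setup are placeholders, not part of this patch:

    package main

    import (
    	"log"
    	"net/http"

    	compute "google.golang.org/api/compute/v1"
    )

    func main() {
    	// A real caller needs an OAuth2-authorized client (golang.org/x/oauth2);
    	// http.DefaultClient is only a stand-in so the sketch compiles.
    	authed := http.DefaultClient

    	svc, err := compute.New(authed) // generated constructor, returns *compute.Service
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Empty Metadata value as a placeholder; its fields are defined
    	// earlier in the generated file, outside this hunk.
    	md := &compute.Metadata{}

    	// Builder pattern from the code above: SetMetadata returns an
    	// *InstancesSetMetadataCall, and Do() POSTs the JSON body and
    	// decodes the resulting Operation.
    	op, err := svc.Instances.SetMetadata("my-project", "us-central1-a", "my-instance", md).Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("metadata update started: %v", op)
    }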
--
--// method id "compute.instances.setScheduling":
--
--type InstancesSetSchedulingCall struct {
--	s          *Service
--	project    string
--	zone       string
--	instance   string
--	scheduling *Scheduling
--	opt_       map[string]interface{}
--}
--
--// SetScheduling: Sets an instance's scheduling options.
--func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall {
--	c := &InstancesSetSchedulingCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.scheduling = scheduling
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesSetSchedulingCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets an instance's scheduling options.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.setScheduling",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Instance name.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Project name.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/setScheduling",
--	//   "request": {
--	//     "$ref": "Scheduling"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.instances.setTags":
--
--type InstancesSetTagsCall struct {
--	s        *Service
--	project  string
--	zone     string
--	instance string
--	tags     *Tags
--	opt_     map[string]interface{}
--}
--
--// SetTags: Sets tags for the specified instance to the data included in
--// the request.
--func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall {
--	c := &InstancesSetTagsCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.instance = instance
--	c.tags = tags
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *InstancesSetTagsCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"zone":     c.zone,
--		"instance": c.instance,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets tags for the specified instance to the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.instances.setTags",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "instance"
--	//   ],
--	//   "parameters": {
--	//     "instance": {
--	//       "description": "Name of the instance scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/instances/{instance}/setTags",
--	//   "request": {
--	//     "$ref": "Tags"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.licenses.get":
--
--type LicensesGetCall struct {
--	s       *Service
--	project string
--	license string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified license resource.
--func (r *LicensesService) Get(project string, license string) *LicensesGetCall {
--	c := &LicensesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.license = license
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *LicensesGetCall) Do() (*License, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"license": c.license,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *License
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified license resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.licenses.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "license"
--	//   ],
--	//   "parameters": {
--	//     "license": {
--	//       "description": "Name of the license resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/licenses/{license}",
--	//   "response": {
--	//     "$ref": "License"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
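Every call builder in this file, LicensesGetCall included, exposes the same Fields helper, which folds its arguments into the "fields" query parameter via googleapi.CombineFields to request a partial response. A short fragment reusing the svc value from the SetMetadata sketch further up; the field selectors are illustrative, and googleapi.Field is a string type, so plain string literals convert directly:

    // Ask only for a subset of the License resource (partial response).
    lic, err := svc.Licenses.Get("my-project", "my-license").
    	Fields("name", "selfLink"). // placeholder field selectors
    	Do()
    if err != nil {
    	log.Fatal(err)
    }
    log.Printf("license: %v", lic)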
--
--// method id "compute.machineTypes.aggregatedList":
--
--type MachineTypesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of machine type resources grouped
--// by scope.
--func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall {
--	c := &MachineTypesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *MachineTypesAggregatedListCall) Do() (*MachineTypeAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *MachineTypeAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of machine type resources grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.machineTypes.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/machineTypes",
--	//   "response": {
--	//     "$ref": "MachineTypeAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.machineTypes.get":
--
--type MachineTypesGetCall struct {
--	s           *Service
--	project     string
--	zone        string
--	machineType string
--	opt_        map[string]interface{}
--}
--
--// Get: Returns the specified machine type resource.
--func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall {
--	c := &MachineTypesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.machineType = machineType
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *MachineTypesGetCall) Do() (*MachineType, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":     c.project,
--		"zone":        c.zone,
--		"machineType": c.machineType,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *MachineType
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified machine type resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.machineTypes.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "machineType"
--	//   ],
--	//   "parameters": {
--	//     "machineType": {
--	//       "description": "Name of the machine type resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/machineTypes/{machineType}",
--	//   "response": {
--	//     "$ref": "MachineType"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.machineTypes.list":
--
--type MachineTypesListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of machine type resources available to the
--// specified project.
--func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall {
--	c := &MachineTypesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *MachineTypesListCall) Do() (*MachineTypeList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *MachineTypeList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of machine type resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.machineTypes.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/machineTypes",
--	//   "response": {
--	//     "$ref": "MachineTypeList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
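The list-style calls above (aggregatedList and list) all take the same optional filter/maxResults/pageToken trio and cap each page at 500 results. A paging sketch, again reusing svc from the SetMetadata sketch; the Items and NextPageToken fields of MachineTypeList are assumed from the generated types earlier in this file, and the filter is only an example of the "field eq value" form the API documents:

    pageToken := ""
    for {
    	call := svc.MachineTypes.List("my-project", "us-central1-a").
    		Filter("guestCpus eq 2"). // illustrative filter expression
    		MaxResults(100)
    	if pageToken != "" {
    		call = call.PageToken(pageToken) // continue a truncated listing
    	}
    	page, err := call.Do()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("page with %d machine types", len(page.Items))
    	if page.NextPageToken == "" {
    		break
    	}
    	pageToken = page.NextPageToken
    }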
--
--// method id "compute.networks.delete":
--
--type NetworksDeleteCall struct {
--	s       *Service
--	project string
--	network string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified network resource.
--func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall {
--	c := &NetworksDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.network = network
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *NetworksDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"network": c.network,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified network resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.networks.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "network"
--	//   ],
--	//   "parameters": {
--	//     "network": {
--	//       "description": "Name of the network resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/networks/{network}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.networks.get":
--
--type NetworksGetCall struct {
--	s       *Service
--	project string
--	network string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified network resource.
--func (r *NetworksService) Get(project string, network string) *NetworksGetCall {
--	c := &NetworksGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.network = network
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *NetworksGetCall) Do() (*Network, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"network": c.network,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Network
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified network resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.networks.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "network"
--	//   ],
--	//   "parameters": {
--	//     "network": {
--	//       "description": "Name of the network resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/networks/{network}",
--	//   "response": {
--	//     "$ref": "Network"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.networks.insert":
--
--type NetworksInsertCall struct {
--	s       *Service
--	project string
--	network *Network
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates a network resource in the specified project using the
--// data included in the request.
--func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall {
--	c := &NetworksInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.network = network
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *NetworksInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.network)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a network resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.networks.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/networks",
--	//   "request": {
--	//     "$ref": "Network"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
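Insert-style calls such as NetworksInsertCall.Do above serialize the request resource with WithoutDataWrapper.JSONReader and POST it with a JSON content type. A hedged fragment creating a network, reusing svc from the SetMetadata sketch; the Network struct's Name and IPv4Range fields are assumptions taken from the generated types earlier in this file, not shown in this hunk:

    // Field names are assumed; see the Network type earlier in the file.
    op, err := svc.Networks.Insert("my-project", &compute.Network{
    	Name:      "demo-network",
    	IPv4Range: "10.240.0.0/16",
    }).Do()
    if err != nil {
    	log.Fatal(err)
    }
    log.Printf("network insert started: %v", op)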
--
--// method id "compute.networks.list":
--
--type NetworksListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of network resources available to the
--// specified project.
--func (r *NetworksService) List(project string) *NetworksListCall {
--	c := &NetworksListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *NetworksListCall) Filter(filter string) *NetworksListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *NetworksListCall) Do() (*NetworkList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *NetworkList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of network resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.networks.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/networks",
--	//   "response": {
--	//     "$ref": "NetworkList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.projects.get":
--
--type ProjectsGetCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified project resource.
--func (r *ProjectsService) Get(project string) *ProjectsGetCall {
--	c := &ProjectsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsGetCall) Do() (*Project, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Project
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified project resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.projects.get",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project resource to retrieve.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}",
--	//   "response": {
--	//     "$ref": "Project"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.projects.setCommonInstanceMetadata":
--
--type ProjectsSetCommonInstanceMetadataCall struct {
--	s        *Service
--	project  string
--	metadata *Metadata
--	opt_     map[string]interface{}
--}
--
--// SetCommonInstanceMetadata: Sets metadata common to all instances
--// within the specified project using the data included in the request.
--func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
--	c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.metadata = metadata
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsSetCommonInstanceMetadataCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets metadata common to all instances within the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.projects.setCommonInstanceMetadata",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/setCommonInstanceMetadata",
--	//   "request": {
--	//     "$ref": "Metadata"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.projects.setUsageExportBucket":
--
--type ProjectsSetUsageExportBucketCall struct {
--	s                   *Service
--	project             string
--	usageexportlocation *UsageExportLocation
--	opt_                map[string]interface{}
--}
--
--// SetUsageExportBucket: Sets usage export location
--func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
--	c := &ProjectsSetUsageExportBucketCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.usageexportlocation = usageexportlocation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsSetUsageExportBucketCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Sets usage export location",
--	//   "httpMethod": "POST",
--	//   "id": "compute.projects.setUsageExportBucket",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/setUsageExportBucket",
--	//   "request": {
--	//     "$ref": "UsageExportLocation"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/devstorage.full_control",
--	//     "https://www.googleapis.com/auth/devstorage.read_only",
--	//     "https://www.googleapis.com/auth/devstorage.read_write"
--	//   ]
--	// }
--
--}
--
--// method id "compute.regionOperations.delete":
--
--type RegionOperationsDeleteCall struct {
--	s         *Service
--	project   string
--	region    string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Delete: Deletes the specified region-specific operation resource.
--func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall {
--	c := &RegionOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RegionOperationsDeleteCall) Do() error {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"region":    c.region,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return err
--	}
--	return nil
--	// {
--	//   "description": "Deletes the specified region-specific operation resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.regionOperations.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/operations/{operation}",
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
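Unlike the other calls in this hunk, the operation-delete calls return no response body, so Do() yields only an error after CheckResponse, as the code above shows. A one-line fragment, reusing svc from the SetMetadata sketch with placeholder names:

    // Remove a finished region-scoped operation; only an error comes back.
    if err := svc.RegionOperations.Delete("my-project", "us-central1", "operation-12345").Do(); err != nil {
    	log.Fatal(err)
    }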
--
--// method id "compute.regionOperations.get":
--
--type RegionOperationsGetCall struct {
--	s         *Service
--	project   string
--	region    string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Get: Retrieves the specified region-specific operation resource.
--func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall {
--	c := &RegionOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RegionOperationsGetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"region":    c.region,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the specified region-specific operation resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.regionOperations.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/operations/{operation}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.regionOperations.list":
--
--type RegionOperationsListCall struct {
--	s       *Service
--	project string
--	region  string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of operation resources contained within the
--// specified region.
--func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall {
--	c := &RegionOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RegionOperationsListCall) Do() (*OperationList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *OperationList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of operation resources contained within the specified region.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.regionOperations.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/operations",
--	//   "response": {
--	//     "$ref": "OperationList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.regions.get":
--
--type RegionsGetCall struct {
--	s       *Service
--	project string
--	region  string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified region resource.
--func (r *RegionsService) Get(project string, region string) *RegionsGetCall {
--	c := &RegionsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RegionsGetCall) Do() (*Region, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Region
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified region resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.regions.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}",
--	//   "response": {
--	//     "$ref": "Region"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.regions.list":
--
--type RegionsListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of region resources available to the
--// specified project.
--func (r *RegionsService) List(project string) *RegionsListCall {
--	c := &RegionsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *RegionsListCall) Filter(filter string) *RegionsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RegionsListCall) Do() (*RegionList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *RegionList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of region resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.regions.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions",
--	//   "response": {
--	//     "$ref": "RegionList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.routes.delete":
--
--type RoutesDeleteCall struct {
--	s       *Service
--	project string
--	route   string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified route resource.
--func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall {
--	c := &RoutesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.route = route
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RoutesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"route":   c.route,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified route resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.routes.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "route"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "route": {
--	//       "description": "Name of the route resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/routes/{route}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.routes.get":
--
--type RoutesGetCall struct {
--	s       *Service
--	project string
--	route   string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified route resource.
--func (r *RoutesService) Get(project string, route string) *RoutesGetCall {
--	c := &RoutesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.route = route
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RoutesGetCall) Do() (*Route, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"route":   c.route,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Route
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified route resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.routes.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "route"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "route": {
--	//       "description": "Name of the route resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/routes/{route}",
--	//   "response": {
--	//     "$ref": "Route"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.routes.insert":
--
--type RoutesInsertCall struct {
--	s       *Service
--	project string
--	route   *Route
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates a route resource in the specified project using the
--// data included in the request.
--func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall {
--	c := &RoutesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.route = route
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RoutesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.route)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a route resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.routes.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/routes",
--	//   "request": {
--	//     "$ref": "Route"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.routes.list":
--
--type RoutesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of route resources available to the
--// specified project.
--func (r *RoutesService) List(project string) *RoutesListCall {
--	c := &RoutesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *RoutesListCall) Filter(filter string) *RoutesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *RoutesListCall) Do() (*RouteList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *RouteList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of route resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.routes.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/routes",
--	//   "response": {
--	//     "$ref": "RouteList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.snapshots.delete":
--
--type SnapshotsDeleteCall struct {
--	s        *Service
--	project  string
--	snapshot string
--	opt_     map[string]interface{}
--}
--
--// Delete: Deletes the specified persistent disk snapshot resource.
--func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall {
--	c := &SnapshotsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.snapshot = snapshot
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *SnapshotsDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"snapshot": c.snapshot,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified persistent disk snapshot resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.snapshots.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "snapshot"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "snapshot": {
--	//       "description": "Name of the persistent disk snapshot resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/snapshots/{snapshot}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.snapshots.get":
--
--type SnapshotsGetCall struct {
--	s        *Service
--	project  string
--	snapshot string
--	opt_     map[string]interface{}
--}
--
--// Get: Returns the specified persistent disk snapshot resource.
--func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall {
--	c := &SnapshotsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.snapshot = snapshot
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *SnapshotsGetCall) Do() (*Snapshot, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":  c.project,
--		"snapshot": c.snapshot,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Snapshot
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified persistent disk snapshot resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.snapshots.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "snapshot"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "snapshot": {
--	//       "description": "Name of the persistent disk snapshot resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/snapshots/{snapshot}",
--	//   "response": {
--	//     "$ref": "Snapshot"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.snapshots.list":
--
--type SnapshotsListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of persistent disk snapshot resources
--// contained within the specified project.
--func (r *SnapshotsService) List(project string) *SnapshotsListCall {
--	c := &SnapshotsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *SnapshotsListCall) Do() (*SnapshotList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *SnapshotList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of persistent disk snapshot resources contained within the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.snapshots.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/snapshots",
--	//   "response": {
--	//     "$ref": "SnapshotList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetHttpProxies.delete":
--
--type TargetHttpProxiesDeleteCall struct {
--	s               *Service
--	project         string
--	targetHttpProxy string
--	opt_            map[string]interface{}
--}
--
--// Delete: Deletes the specified TargetHttpProxy resource.
--func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall {
--	c := &TargetHttpProxiesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.targetHttpProxy = targetHttpProxy
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetHttpProxiesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"targetHttpProxy": c.targetHttpProxy,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified TargetHttpProxy resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.targetHttpProxies.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "targetHttpProxy"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetHttpProxy": {
--	//       "description": "Name of the TargetHttpProxy resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetHttpProxies.get":
--
--type TargetHttpProxiesGetCall struct {
--	s               *Service
--	project         string
--	targetHttpProxy string
--	opt_            map[string]interface{}
--}
--
--// Get: Returns the specified TargetHttpProxy resource.
--func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall {
--	c := &TargetHttpProxiesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.targetHttpProxy = targetHttpProxy
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetHttpProxiesGetCall) Do() (*TargetHttpProxy, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"targetHttpProxy": c.targetHttpProxy,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetHttpProxy
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified TargetHttpProxy resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetHttpProxies.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "targetHttpProxy"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetHttpProxy": {
--	//       "description": "Name of the TargetHttpProxy resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
--	//   "response": {
--	//     "$ref": "TargetHttpProxy"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetHttpProxies.insert":
--
--type TargetHttpProxiesInsertCall struct {
--	s               *Service
--	project         string
--	targethttpproxy *TargetHttpProxy
--	opt_            map[string]interface{}
--}
--
--// Insert: Creates a TargetHttpProxy resource in the specified project
--// using the data included in the request.
--func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall {
--	c := &TargetHttpProxiesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.targethttpproxy = targethttpproxy
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetHttpProxiesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetHttpProxies.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/targetHttpProxies",
--	//   "request": {
--	//     "$ref": "TargetHttpProxy"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetHttpProxies.list":
--
--type TargetHttpProxiesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of TargetHttpProxy resources available to
--// the specified project.
--func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall {
--	c := &TargetHttpProxiesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetHttpProxiesListCall) Do() (*TargetHttpProxyList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetHttpProxyList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetHttpProxies.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/targetHttpProxies",
--	//   "response": {
--	//     "$ref": "TargetHttpProxyList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetHttpProxies.setUrlMap":
--
--type TargetHttpProxiesSetUrlMapCall struct {
--	s               *Service
--	project         string
--	targetHttpProxy string
--	urlmapreference *UrlMapReference
--	opt_            map[string]interface{}
--}
--
--// SetUrlMap: Changes the URL map for TargetHttpProxy.
--func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall {
--	c := &TargetHttpProxiesSetUrlMapCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.targetHttpProxy = targetHttpProxy
--	c.urlmapreference = urlmapreference
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetHttpProxiesSetUrlMapCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":         c.project,
--		"targetHttpProxy": c.targetHttpProxy,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Changes the URL map for TargetHttpProxy.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetHttpProxies.setUrlMap",
--	//   "parameterOrder": [
--	//     "project",
--	//     "targetHttpProxy"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetHttpProxy": {
--	//       "description": "Name of the TargetHttpProxy resource whose URL map is to be set.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap",
--	//   "request": {
--	//     "$ref": "UrlMapReference"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetInstances.aggregatedList":
--
--type TargetInstancesAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of target instances grouped by
--// scope.
--func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall {
--	c := &TargetInstancesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetInstancesAggregatedListCall) Do() (*TargetInstanceAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetInstanceAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of target instances grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetInstances.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/targetInstances",
--	//   "response": {
--	//     "$ref": "TargetInstanceAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetInstances.delete":
--
--type TargetInstancesDeleteCall struct {
--	s              *Service
--	project        string
--	zone           string
--	targetInstance string
--	opt_           map[string]interface{}
--}
--
--// Delete: Deletes the specified TargetInstance resource.
--func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall {
--	c := &TargetInstancesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.targetInstance = targetInstance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetInstancesDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"zone":           c.zone,
--		"targetInstance": c.targetInstance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified TargetInstance resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.targetInstances.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "targetInstance"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetInstance": {
--	//       "description": "Name of the TargetInstance resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
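A minimal caller-side sketch of the Delete call defined above, assuming the compute.New constructor and the Service.TargetInstances field declared elsewhere in this generated file; the import path, project, zone and resource names are illustrative, not taken from this hunk:

package example

import (
	"net/http"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// deleteTargetInstance starts deletion of a zonal TargetInstance and returns
// the long-running Operation describing it.
func deleteTargetInstance(client *http.Client) (*compute.Operation, error) {
	svc, err := compute.New(client) // assumed constructor from this package
	if err != nil {
		return nil, err
	}
	// Build the call, then execute it; Do() issues the DELETE request and
	// decodes the Operation from the JSON response.
	return svc.TargetInstances.Delete("my-project", "us-central1-a", "ti-1").Do()
}

The later sketches below reuse this package, its imports and a *compute.Service obtained the same way.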
--// method id "compute.targetInstances.get":
--
--type TargetInstancesGetCall struct {
--	s              *Service
--	project        string
--	zone           string
--	targetInstance string
--	opt_           map[string]interface{}
--}
--
--// Get: Returns the specified TargetInstance resource.
--func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall {
--	c := &TargetInstancesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.targetInstance = targetInstance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetInstancesGetCall) Do() (*TargetInstance, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":        c.project,
--		"zone":           c.zone,
--		"targetInstance": c.targetInstance,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetInstance
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified TargetInstance resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetInstances.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "targetInstance"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetInstance": {
--	//       "description": "Name of the TargetInstance resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
--	//   "response": {
--	//     "$ref": "TargetInstance"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetInstances.insert":
--
--type TargetInstancesInsertCall struct {
--	s              *Service
--	project        string
--	zone           string
--	targetinstance *TargetInstance
--	opt_           map[string]interface{}
--}
--
--// Insert: Creates a TargetInstance resource in the specified project
--// and zone using the data included in the request.
--func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall {
--	c := &TargetInstancesInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.targetinstance = targetinstance
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetInstancesInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetInstances.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/targetInstances",
--	//   "request": {
--	//     "$ref": "TargetInstance"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
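A companion sketch for the Insert call above, reusing the setup from the earlier sketch; the TargetInstance field names are assumptions taken from the types defined elsewhere in this file:

// createTargetInstance creates a TargetInstance that fronts an existing VM.
func createTargetInstance(svc *compute.Service, project, zone, instanceURL string) (*compute.Operation, error) {
	ti := &compute.TargetInstance{
		Name:     "ti-1",      // assumed field: name of the new TargetInstance
		Instance: instanceURL, // assumed field: URL of the VM instance it fronts
	}
	// Insert POSTs the JSON-encoded TargetInstance and returns an Operation.
	return svc.TargetInstances.Insert(project, zone, ti).Do()
}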
--// method id "compute.targetInstances.list":
--
--type TargetInstancesListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of TargetInstance resources available to the
--// specified project and zone.
--func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall {
--	c := &TargetInstancesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetInstancesListCall) Do() (*TargetInstanceList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetInstanceList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetInstances.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/targetInstances",
--	//   "response": {
--	//     "$ref": "TargetInstanceList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
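A paging sketch for the List call above, chaining the optional Filter, MaxResults and PageToken parameters; the Items and NextPageToken fields of TargetInstanceList are assumptions (the type is defined elsewhere in this file), and the filter expression is illustrative:

// listTargetInstances collects the names of all TargetInstances in a zone,
// following page tokens until the listing is exhausted.
func listTargetInstances(svc *compute.Service, project, zone string) ([]string, error) {
	var names []string
	pageToken := ""
	for {
		call := svc.TargetInstances.List(project, zone).
			Filter("name eq ti-.*"). // optional filter expression
			MaxResults(100)          // at most 500 per page
		if pageToken != "" {
			call = call.PageToken(pageToken) // continue a truncated list
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		for _, ti := range page.Items { // assumed field
			names = append(names, ti.Name)
		}
		if page.NextPageToken == "" { // assumed field
			return names, nil
		}
		pageToken = page.NextPageToken
	}
}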
--// method id "compute.targetPools.addHealthCheck":
--
--type TargetPoolsAddHealthCheckCall struct {
--	s                                *Service
--	project                          string
--	region                           string
--	targetPool                       string
--	targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest
--	opt_                             map[string]interface{}
--}
--
--// AddHealthCheck: Adds health check URL to targetPool.
--func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall {
--	c := &TargetPoolsAddHealthCheckCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsAddHealthCheckCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Adds health check URL to targetPool.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.addHealthCheck",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to which health_check_url is to be added.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck",
--	//   "request": {
--	//     "$ref": "TargetPoolsAddHealthCheckRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
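A sketch of the AddHealthCheck call above, which POSTs a JSON request body; the HealthChecks and HealthCheckReference field names are assumptions from the request types defined elsewhere in this file:

// addPoolHealthCheck attaches an existing HTTP health check to a target pool.
func addPoolHealthCheck(svc *compute.Service, project, region, pool, healthCheckURL string) (*compute.Operation, error) {
	req := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{ // assumed fields
			{HealthCheck: healthCheckURL},
		},
	}
	return svc.TargetPools.AddHealthCheck(project, region, pool, req).Do()
}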
--// method id "compute.targetPools.addInstance":
--
--type TargetPoolsAddInstanceCall struct {
--	s                             *Service
--	project                       string
--	region                        string
--	targetPool                    string
--	targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest
--	opt_                          map[string]interface{}
--}
--
--// AddInstance: Adds instance url to targetPool.
--func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall {
--	c := &TargetPoolsAddInstanceCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsAddInstanceCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Adds instance url to targetPool.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.addInstance",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to which instance_url is to be added.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance",
--	//   "request": {
--	//     "$ref": "TargetPoolsAddInstanceRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.aggregatedList":
--
--type TargetPoolsAggregatedListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// AggregatedList: Retrieves the list of target pools grouped by scope.
--func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall {
--	c := &TargetPoolsAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsAggregatedListCall) Do() (*TargetPoolAggregatedList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetPoolAggregatedList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of target pools grouped by scope.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetPools.aggregatedList",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/aggregated/targetPools",
--	//   "response": {
--	//     "$ref": "TargetPoolAggregatedList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
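A sketch of the AggregatedList call above, which returns target pools grouped by scope; the Items and TargetPools fields are assumptions from the aggregated-list types defined elsewhere in this file:

// listAllTargetPools counts target pools per scope across the whole project.
func listAllTargetPools(svc *compute.Service, project string) (map[string]int, error) {
	agg, err := svc.TargetPools.AggregatedList(project).MaxResults(500).Do()
	if err != nil {
		return nil, err
	}
	// Count pools per scope, e.g. "regions/us-central1" -> 3.
	counts := make(map[string]int)
	for scope, scoped := range agg.Items { // assumed fields of TargetPoolAggregatedList
		counts[scope] = len(scoped.TargetPools)
	}
	return counts, nil
}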
--// method id "compute.targetPools.delete":
--
--type TargetPoolsDeleteCall struct {
--	s          *Service
--	project    string
--	region     string
--	targetPool string
--	opt_       map[string]interface{}
--}
--
--// Delete: Deletes the specified TargetPool resource.
--func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall {
--	c := &TargetPoolsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified TargetPool resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.targetPools.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.get":
--
--type TargetPoolsGetCall struct {
--	s          *Service
--	project    string
--	region     string
--	targetPool string
--	opt_       map[string]interface{}
--}
--
--// Get: Returns the specified TargetPool resource.
--func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall {
--	c := &TargetPoolsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsGetCall) Do() (*TargetPool, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetPool
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified TargetPool resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetPools.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}",
--	//   "response": {
--	//     "$ref": "TargetPool"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.getHealth":
--
--type TargetPoolsGetHealthCall struct {
--	s                 *Service
--	project           string
--	region            string
--	targetPool        string
--	instancereference *InstanceReference
--	opt_              map[string]interface{}
--}
--
--// GetHealth: Gets the most recent health check results for each IP for
--// the given instance that is referenced by given TargetPool.
--func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall {
--	c := &TargetPoolsGetHealthCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.instancereference = instancereference
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsGetHealthCall) Do() (*TargetPoolInstanceHealth, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetPoolInstanceHealth
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.getHealth",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to which the queried instance belongs.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth",
--	//   "request": {
--	//     "$ref": "InstanceReference"
--	//   },
--	//   "response": {
--	//     "$ref": "TargetPoolInstanceHealth"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
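A sketch of the GetHealth call above; the Instance field of InstanceReference is an assumption from the type defined elsewhere in this file:

// poolInstanceHealth fetches the most recent health check results for one
// instance referenced by the given target pool.
func poolInstanceHealth(svc *compute.Service, project, region, pool, instanceURL string) (*compute.TargetPoolInstanceHealth, error) {
	ref := &compute.InstanceReference{Instance: instanceURL} // assumed field
	// POSTs the reference and decodes the per-IP health results.
	return svc.TargetPools.GetHealth(project, region, pool, ref).Do()
}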
--// method id "compute.targetPools.insert":
--
--type TargetPoolsInsertCall struct {
--	s          *Service
--	project    string
--	region     string
--	targetpool *TargetPool
--	opt_       map[string]interface{}
--}
--
--// Insert: Creates a TargetPool resource in the specified project and
--// region using the data included in the request.
--func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall {
--	c := &TargetPoolsInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetpool = targetpool
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.insert",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools",
--	//   "request": {
--	//     "$ref": "TargetPool"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.list":
--
--type TargetPoolsListCall struct {
--	s       *Service
--	project string
--	region  string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of TargetPool resources available to the
--// specified project and region.
--func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall {
--	c := &TargetPoolsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsListCall) Do() (*TargetPoolList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"region":  c.region,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *TargetPoolList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of TargetPool resources available to the specified project and region.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.targetPools.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools",
--	//   "response": {
--	//     "$ref": "TargetPoolList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.removeHealthCheck":
--
--type TargetPoolsRemoveHealthCheckCall struct {
--	s                                   *Service
--	project                             string
--	region                              string
--	targetPool                          string
--	targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest
--	opt_                                map[string]interface{}
--}
--
--// RemoveHealthCheck: Removes health check URL from targetPool.
--func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall {
--	c := &TargetPoolsRemoveHealthCheckCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsRemoveHealthCheckCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Removes health check URL from targetPool.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.removeHealthCheck",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to which health_check_url is to be removed.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck",
--	//   "request": {
--	//     "$ref": "TargetPoolsRemoveHealthCheckRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.removeInstance":
--
--type TargetPoolsRemoveInstanceCall struct {
--	s                                *Service
--	project                          string
--	region                           string
--	targetPool                       string
--	targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest
--	opt_                             map[string]interface{}
--}
--
--// RemoveInstance: Removes instance URL from targetPool.
--func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall {
--	c := &TargetPoolsRemoveInstanceCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsRemoveInstanceCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Removes instance URL from targetPool.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.removeInstance",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource to which instance_url is to be removed.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance",
--	//   "request": {
--	//     "$ref": "TargetPoolsRemoveInstanceRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.targetPools.setBackup":
--
--type TargetPoolsSetBackupCall struct {
--	s               *Service
--	project         string
--	region          string
--	targetPool      string
--	targetreference *TargetReference
--	opt_            map[string]interface{}
--}
--
--// SetBackup: Changes backup pool configurations.
--func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall {
--	c := &TargetPoolsSetBackupCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.region = region
--	c.targetPool = targetPool
--	c.targetreference = targetreference
--	return c
--}
--
--// FailoverRatio sets the optional parameter "failoverRatio": New
--// failoverRatio value for the containing target pool.
--func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall {
--	c.opt_["failoverRatio"] = failoverRatio
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *TargetPoolsSetBackupCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["failoverRatio"]; ok {
--		params.Set("failoverRatio", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":    c.project,
--		"region":     c.region,
--		"targetPool": c.targetPool,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Changes backup pool configurations.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.targetPools.setBackup",
--	//   "parameterOrder": [
--	//     "project",
--	//     "region",
--	//     "targetPool"
--	//   ],
--	//   "parameters": {
--	//     "failoverRatio": {
--	//       "description": "New failoverRatio value for the containing target pool.",
--	//       "format": "float",
--	//       "location": "query",
--	//       "type": "number"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "region": {
--	//       "description": "Name of the region scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "targetPool": {
--	//       "description": "Name of the TargetPool resource for which the backup is to be set.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup",
--	//   "request": {
--	//     "$ref": "TargetReference"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
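A sketch of the SetBackup call above, combining a request body with the optional failoverRatio query parameter; the Target field of TargetReference is an assumption from the type defined elsewhere in this file:

// setBackupPool points a primary target pool at a backup pool and sets the
// ratio at which traffic fails over to it.
func setBackupPool(svc *compute.Service, project, region, primaryPool, backupPoolURL string) (*compute.Operation, error) {
	ref := &compute.TargetReference{Target: backupPoolURL} // assumed field
	return svc.TargetPools.SetBackup(project, region, primaryPool, ref).
		FailoverRatio(0.1). // optional query parameter shown above
		Do()
}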
--// method id "compute.urlMaps.delete":
--
--type UrlMapsDeleteCall struct {
--	s       *Service
--	project string
--	urlMap  string
--	opt_    map[string]interface{}
--}
--
--// Delete: Deletes the specified UrlMap resource.
--func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall {
--	c := &UrlMapsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlMap = urlMap
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"urlMap":  c.urlMap,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the specified UrlMap resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.urlMaps.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "urlMap"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "urlMap": {
--	//       "description": "Name of the UrlMap resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps/{urlMap}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.get":
--
--type UrlMapsGetCall struct {
--	s       *Service
--	project string
--	urlMap  string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified UrlMap resource.
--func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall {
--	c := &UrlMapsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlMap = urlMap
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsGetCall) Do() (*UrlMap, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"urlMap":  c.urlMap,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *UrlMap
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified UrlMap resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.urlMaps.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "urlMap"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "urlMap": {
--	//       "description": "Name of the UrlMap resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps/{urlMap}",
--	//   "response": {
--	//     "$ref": "UrlMap"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.insert":
--
--type UrlMapsInsertCall struct {
--	s       *Service
--	project string
--	urlmap  *UrlMap
--	opt_    map[string]interface{}
--}
--
--// Insert: Creates a UrlMap resource in the specified project using the
--// data included in the request.
--func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall {
--	c := &UrlMapsInsertCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlmap = urlmap
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsInsertCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a UrlMap resource in the specified project using the data included in the request.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.urlMaps.insert",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps",
--	//   "request": {
--	//     "$ref": "UrlMap"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.list":
--
--type UrlMapsListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of UrlMap resources available to the
--// specified project.
--func (r *UrlMapsService) List(project string) *UrlMapsListCall {
--	c := &UrlMapsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsListCall) Do() (*UrlMapList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *UrlMapList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of UrlMap resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.urlMaps.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps",
--	//   "response": {
--	//     "$ref": "UrlMapList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.patch":
--
--type UrlMapsPatchCall struct {
--	s       *Service
--	project string
--	urlMap  string
--	urlmap  *UrlMap
--	opt_    map[string]interface{}
--}
--
--// Patch: Update the entire content of the UrlMap resource. This method
--// supports patch semantics.
--func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall {
--	c := &UrlMapsPatchCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlMap = urlMap
--	c.urlmap = urlmap
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsPatchCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PATCH", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"urlMap":  c.urlMap,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Update the entire content of the UrlMap resource. This method supports patch semantics.",
--	//   "httpMethod": "PATCH",
--	//   "id": "compute.urlMaps.patch",
--	//   "parameterOrder": [
--	//     "project",
--	//     "urlMap"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "urlMap": {
--	//       "description": "Name of the UrlMap resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps/{urlMap}",
--	//   "request": {
--	//     "$ref": "UrlMap"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.update":
--
--type UrlMapsUpdateCall struct {
--	s       *Service
--	project string
--	urlMap  string
--	urlmap  *UrlMap
--	opt_    map[string]interface{}
--}
--
--// Update: Update the entire content of the UrlMap resource.
--func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall {
--	c := &UrlMapsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlMap = urlMap
--	c.urlmap = urlmap
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsUpdateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("PUT", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"urlMap":  c.urlMap,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Update the entire content of the UrlMap resource.",
--	//   "httpMethod": "PUT",
--	//   "id": "compute.urlMaps.update",
--	//   "parameterOrder": [
--	//     "project",
--	//     "urlMap"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "urlMap": {
--	//       "description": "Name of the UrlMap resource to update.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps/{urlMap}",
--	//   "request": {
--	//     "$ref": "UrlMap"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.urlMaps.validate":
--
--type UrlMapsValidateCall struct {
--	s                      *Service
--	project                string
--	urlMap                 string
--	urlmapsvalidaterequest *UrlMapsValidateRequest
--	opt_                   map[string]interface{}
--}
--
--// Validate: Run static validation for the UrlMap. In particular, the
--// tests of the provided UrlMap will be run. Calling this method does
--// NOT create the UrlMap.
--func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall {
--	c := &UrlMapsValidateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.urlMap = urlMap
--	c.urlmapsvalidaterequest = urlmapsvalidaterequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *UrlMapsValidateCall) Do() (*UrlMapsValidateResponse, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"urlMap":  c.urlMap,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *UrlMapsValidateResponse
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.",
--	//   "httpMethod": "POST",
--	//   "id": "compute.urlMaps.validate",
--	//   "parameterOrder": [
--	//     "project",
--	//     "urlMap"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "urlMap": {
--	//       "description": "Name of the UrlMap resource to be validated as.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/global/urlMaps/{urlMap}/validate",
--	//   "request": {
--	//     "$ref": "UrlMapsValidateRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "UrlMapsValidateResponse"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.zoneOperations.delete":
--
--type ZoneOperationsDeleteCall struct {
--	s         *Service
--	project   string
--	zone      string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Delete: Deletes the specified zone-specific operation resource.
--func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall {
--	c := &ZoneOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ZoneOperationsDeleteCall) Fields(s ...googleapi.Field) *ZoneOperationsDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ZoneOperationsDeleteCall) Do() error {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"zone":      c.zone,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return err
--	}
--	return nil
--	// {
--	//   "description": "Deletes the specified zone-specific operation resource.",
--	//   "httpMethod": "DELETE",
--	//   "id": "compute.zoneOperations.delete",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to delete.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/operations/{operation}",
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute"
--	//   ]
--	// }
--
--}
--
--// method id "compute.zoneOperations.get":
--
--type ZoneOperationsGetCall struct {
--	s         *Service
--	project   string
--	zone      string
--	operation string
--	opt_      map[string]interface{}
--}
--
--// Get: Retrieves the specified zone-specific operation resource.
--func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall {
--	c := &ZoneOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	c.operation = operation
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ZoneOperationsGetCall) Fields(s ...googleapi.Field) *ZoneOperationsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ZoneOperationsGetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project":   c.project,
--		"zone":      c.zone,
--		"operation": c.operation,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the specified zone-specific operation resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.zoneOperations.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone",
--	//     "operation"
--	//   ],
--	//   "parameters": {
--	//     "operation": {
--	//       "description": "Name of the operation resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/operations/{operation}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.zoneOperations.list":
--
--type ZoneOperationsListCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of operation resources contained within the
--// specified zone.
--func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall {
--	c := &ZoneOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ZoneOperationsListCall) Fields(s ...googleapi.Field) *ZoneOperationsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ZoneOperationsListCall) Do() (*OperationList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *OperationList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of operation resources contained within the specified zone.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.zoneOperations.list",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone scoping this request.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}/operations",
--	//   "response": {
--	//     "$ref": "OperationList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.zones.get":
--
--type ZonesGetCall struct {
--	s       *Service
--	project string
--	zone    string
--	opt_    map[string]interface{}
--}
--
--// Get: Returns the specified zone resource.
--func (r *ZonesService) Get(project string, zone string) *ZonesGetCall {
--	c := &ZonesGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	c.zone = zone
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ZonesGetCall) Fields(s ...googleapi.Field) *ZonesGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ZonesGetCall) Do() (*Zone, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--		"zone":    c.zone,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Zone
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Returns the specified zone resource.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.zones.get",
--	//   "parameterOrder": [
--	//     "project",
--	//     "zone"
--	//   ],
--	//   "parameters": {
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zone": {
--	//       "description": "Name of the zone resource to return.",
--	//       "location": "path",
--	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones/{zone}",
--	//   "response": {
--	//     "$ref": "Zone"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
--
--// method id "compute.zones.list":
--
--type ZonesListCall struct {
--	s       *Service
--	project string
--	opt_    map[string]interface{}
--}
--
--// List: Retrieves the list of zone resources available to the specified
--// project.
--func (r *ZonesService) List(project string) *ZonesListCall {
--	c := &ZonesListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.project = project
--	return c
--}
--
--// Filter sets the optional parameter "filter": Filter expression for
--// filtering listed resources.
--func (c *ZonesListCall) Filter(filter string) *ZonesListCall {
--	c.opt_["filter"] = filter
--	return c
--}
--
--// MaxResults sets the optional parameter "maxResults": Maximum count of
--// results to be returned. Maximum value is 500 and default value is
--// 500.
--func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall {
--	c.opt_["maxResults"] = maxResults
--	return c
--}
--
--// PageToken sets the optional parameter "pageToken": Tag returned by a
--// previous list request truncated by maxResults. Used to continue a
--// previous list request.
--func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall {
--	c.opt_["pageToken"] = pageToken
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ZonesListCall) Fields(s ...googleapi.Field) *ZonesListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ZonesListCall) Do() (*ZoneList, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["filter"]; ok {
--		params.Set("filter", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["maxResults"]; ok {
--		params.Set("maxResults", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["pageToken"]; ok {
--		params.Set("pageToken", fmt.Sprintf("%v", v))
--	}
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"project": c.project,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ZoneList
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Retrieves the list of zone resources available to the specified project.",
--	//   "httpMethod": "GET",
--	//   "id": "compute.zones.list",
--	//   "parameterOrder": [
--	//     "project"
--	//   ],
--	//   "parameters": {
--	//     "filter": {
--	//       "description": "Optional. Filter expression for filtering listed resources.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "maxResults": {
--	//       "default": "500",
--	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
--	//       "format": "uint32",
--	//       "location": "query",
--	//       "maximum": "500",
--	//       "minimum": "0",
--	//       "type": "integer"
--	//     },
--	//     "pageToken": {
--	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
--	//       "location": "query",
--	//       "type": "string"
--	//     },
--	//     "project": {
--	//       "description": "Name of the project scoping this request.",
--	//       "location": "path",
--	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{project}/zones",
--	//   "response": {
--	//     "$ref": "ZoneList"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/compute",
--	//     "https://www.googleapis.com/auth/compute.readonly"
--	//   ]
--	// }
--
--}
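A minimal usage sketch of the call-builder pattern in the generated compute client being removed above, for orientation only; it is not part of the patch. It assumes the vendored compute/v1 package (a sibling of the container/v1beta1 package whose path appears below) is importable from the Godeps tree and that a real run would pass an OAuth2-authorized *http.Client; the listZones helper and the project name are placeholders, and the Service.Zones, ZoneList.Items and ZoneList.NextPageToken identifiers come from the upstream generated package rather than from the excerpt above.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "code.google.com/p/google-api-go-client/compute/v1"
)

// listZones pages through all zones in a project using the fluent
// ZonesListCall builder shown in the deleted code: optional parameters
// (Filter, MaxResults, PageToken, Fields) are staged on the call and only
// serialized into the query string when Do() issues the request.
func listZones(client *http.Client, project string) error {
	svc, err := compute.New(client)
	if err != nil {
		return err
	}
	pageToken := ""
	for {
		call := svc.Zones.List(project).MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		list, err := call.Do()
		if err != nil {
			return err
		}
		for _, z := range list.Items {
			fmt.Println(z.Name)
		}
		if list.NextPageToken == "" {
			return nil
		}
		pageToken = list.NextPageToken
	}
}

func main() {
	// http.DefaultClient carries no credentials; a real caller would pass
	// an OAuth2-authorized client holding the compute scope instead.
	if err := listZones(http.DefaultClient, "my-project"); err != nil {
		log.Fatal(err)
	}
}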
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-api.json b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-api.json
-deleted file mode 100644
-index bca5a31..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-api.json
-+++ /dev/null
-@@ -1,579 +0,0 @@
--{
-- "kind": "discovery#restDescription",
-- "etag": "\"l66ggWbucbkBw9Lpos72oziyefE/ZrZBeDfQYPqAxFURJt0IhCOLUHQ\"",
-- "discoveryVersion": "v1",
-- "id": "container:v1beta1",
-- "name": "container",
-- "version": "v1beta1",
-- "revision": "20141103",
-- "title": "Google Container Engine API",
-- "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.",
-- "ownerDomain": "google.com",
-- "ownerName": "Google",
-- "icons": {
--  "x16": "http://www.google.com/images/icons/product/search-16.gif",
--  "x32": "http://www.google.com/images/icons/product/search-32.gif"
-- },
-- "protocol": "rest",
-- "baseUrl": "https://www.googleapis.com/container/v1beta1/projects/",
-- "basePath": "/container/v1beta1/projects/",
-- "rootUrl": "https://www.googleapis.com/",
-- "servicePath": "container/v1beta1/projects/",
-- "batchPath": "batch",
-- "parameters": {
--  "alt": {
--   "type": "string",
--   "description": "Data format for the response.",
--   "default": "json",
--   "enum": [
--    "json"
--   ],
--   "enumDescriptions": [
--    "Responses with Content-Type of application/json"
--   ],
--   "location": "query"
--  },
--  "fields": {
--   "type": "string",
--   "description": "Selector specifying which fields to include in a partial response.",
--   "location": "query"
--  },
--  "key": {
--   "type": "string",
--   "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
--   "location": "query"
--  },
--  "oauth_token": {
--   "type": "string",
--   "description": "OAuth 2.0 token for the current user.",
--   "location": "query"
--  },
--  "prettyPrint": {
--   "type": "boolean",
--   "description": "Returns response with indentations and line breaks.",
--   "default": "true",
--   "location": "query"
--  },
--  "quotaUser": {
--   "type": "string",
--   "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
--   "location": "query"
--  },
--  "userIp": {
--   "type": "string",
--   "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
--   "location": "query"
--  }
-- },
-- "auth": {
--  "oauth2": {
--   "scopes": {
--    "https://www.googleapis.com/auth/cloud-platform": {
--     "description": "View and manage your data across Google Cloud Platform services"
--    }
--   }
--  }
-- },
-- "schemas": {
--  "Cluster": {
--   "id": "Cluster",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.Cluster",
--   "properties": {
--    "clusterApiVersion": {
--     "type": "string",
--     "description": "The API version of the Kubernetes master and kubelets running in this cluster. Allowed value is 0.4.2, or leave blank to pick up the latest stable release."
--    },
--    "containerIpv4Cidr": {
--     "type": "string",
--     "description": "[Output only] The IP addresses of the container pods in this cluster, in  CIDR notation (e.g. 1.2.3.4/29)."
--    },
--    "creationTimestamp": {
--     "type": "string",
--     "description": "[Output only] The time the cluster was created, in RFC3339 text format."
--    },
--    "description": {
--     "type": "string",
--     "description": "An optional description of this cluster."
--    },
--    "endpoint": {
--     "type": "string",
--     "description": "[Output only] The IP address of this cluster's Kubernetes master. The endpoint can be accessed from the internet at https://username:password@endpoint/.\n\nSee the masterAuth property of this resource for username and password information."
--    },
--    "masterAuth": {
--     "$ref": "MasterAuth",
--     "description": "The HTTP basic authentication information for accessing the master. Because the master endpoint is open to the internet, you should create a strong password."
--    },
--    "name": {
--     "type": "string",
--     "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions:  \n- Lowercase letters, numbers, and hyphens only.\n- Must start with a letter.\n- Must end with a number or a letter."
--    },
--    "nodeConfig": {
--     "$ref": "NodeConfig",
--     "description": "The machine type and image to use for all nodes in this cluster. See the descriptions of the child properties of nodeConfig."
--    },
--    "nodeRoutingPrefixSize": {
--     "type": "integer",
--     "description": "[Output only] The size of the address space on each node for hosting containers.",
--     "format": "int32"
--    },
--    "numNodes": {
--     "type": "integer",
--     "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances plus one (to include the master). You must also have available firewall and routes quota.",
--     "format": "int32"
--    },
--    "servicesIpv4Cidr": {
--     "type": "string",
--     "description": "[Output only] The IP addresses of the Kubernetes services in this cluster, in  CIDR notation (e.g. 1.2.3.4/29). Service addresses are always in the 10.0.0.0/16 range."
--    },
--    "status": {
--     "type": "string",
--     "description": "[Output only] The current status of this cluster.",
--     "enum": [
--      "error",
--      "provisioning",
--      "running",
--      "stopping"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      "",
--      ""
--     ]
--    },
--    "statusMessage": {
--     "type": "string",
--     "description": "[Output only] Additional information about the current status of this cluster, if available."
--    },
--    "zone": {
--     "type": "string",
--     "description": "[Output only] The name of the Google Compute Engine zone in which the cluster resides."
--    }
--   }
--  },
--  "CreateClusterRequest": {
--   "id": "CreateClusterRequest",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.CreateClusterRequest",
--   "properties": {
--    "cluster": {
--     "$ref": "Cluster",
--     "description": "A cluster resource."
--    }
--   }
--  },
--  "ListAggregatedClustersResponse": {
--   "id": "ListAggregatedClustersResponse",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.ListAggregatedClustersResponse",
--   "properties": {
--    "clusters": {
--     "type": "array",
--     "description": "A list of clusters in the project, across all zones.",
--     "items": {
--      "$ref": "Cluster"
--     }
--    }
--   }
--  },
--  "ListAggregatedOperationsResponse": {
--   "id": "ListAggregatedOperationsResponse",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.ListAggregatedOperationsResponse",
--   "properties": {
--    "operations": {
--     "type": "array",
--     "description": "A list of operations in the project, across all zones.",
--     "items": {
--      "$ref": "Operation"
--     }
--    }
--   }
--  },
--  "ListClustersResponse": {
--   "id": "ListClustersResponse",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.ListClustersResponse",
--   "properties": {
--    "clusters": {
--     "type": "array",
--     "description": "A list of clusters in the project in the specified zone.",
--     "items": {
--      "$ref": "Cluster"
--     }
--    }
--   }
--  },
--  "ListOperationsResponse": {
--   "id": "ListOperationsResponse",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.ListOperationsResponse",
--   "properties": {
--    "operations": {
--     "type": "array",
--     "description": "A list of operations in the project in the specified zone.",
--     "items": {
--      "$ref": "Operation"
--     }
--    }
--   }
--  },
--  "MasterAuth": {
--   "id": "MasterAuth",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.MasterAuth",
--   "properties": {
--    "password": {
--     "type": "string",
--     "description": "The password to use when accessing the Kubernetes master endpoint."
--    },
--    "user": {
--     "type": "string",
--     "description": "The username to use when accessing the Kubernetes master endpoint."
--    }
--   }
--  },
--  "NodeConfig": {
--   "id": "NodeConfig",
--   "type": "object",
--   "externalTypeName": "container.v1beta1.NodeConfig",
--   "properties": {
--    "machineType": {
--     "type": "string",
--     "description": "The name of a Google Compute Engine machine type (e.g. n1-standard-1).\n\nIf unspecified, the default machine type is n1-standard-1."
--    },
--    "sourceImage": {
--     "type": "string",
--     "description": "The fully-specified name of a Google Compute Engine image. For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version date).\n\nIf specifying an image, you are responsible for ensuring its compatibility with the Debian 7 backports image. We recommend leaving this field blank to accept the default backports-debian-7-wheezy value."
--    }
--   }
--  },
--  "Operation": {
--   "id": "Operation",
--   "type": "object",
--   "description": "Defines the operation resource. All fields are output only.",
--   "externalTypeName": "container.v1beta1.Operation",
--   "properties": {
--    "errorMessage": {
--     "type": "string",
--     "description": "If an error has occurred, a textual description of the error."
--    },
--    "name": {
--     "type": "string",
--     "description": "The server-assigned ID for this operation. If the operation is fulfilled upfront, it may not have a resource name."
--    },
--    "operationType": {
--     "type": "string",
--     "description": "The operation type.",
--     "enum": [
--      "createCluster",
--      "deleteCluster"
--     ],
--     "enumDescriptions": [
--      "",
--      ""
--     ]
--    },
--    "status": {
--     "type": "string",
--     "description": "The current status of the operation.",
--     "enum": [
--      "done",
--      "pending",
--      "running"
--     ],
--     "enumDescriptions": [
--      "",
--      "",
--      ""
--     ]
--    },
--    "target": {
--     "type": "string",
--     "description": "[Optional] The URL of the cluster resource that this operation is associated with."
--    },
--    "zone": {
--     "type": "string",
--     "description": "The name of the Google Compute Engine zone in which the operation is taking place."
--    }
--   }
--  }
-- },
-- "resources": {
--  "projects": {
--   "resources": {
--    "clusters": {
--     "methods": {
--      "list": {
--       "id": "container.projects.clusters.list",
--       "path": "{projectId}/clusters",
--       "httpMethod": "GET",
--       "description": "Lists all clusters owned by a project across all zones.",
--       "parameters": {
--        "projectId": {
--         "type": "string",
--         "description": "The Google Developers Console project ID or  project number.",
--         "required": true,
--         "location": "path"
--        }
--       },
--       "parameterOrder": [
--        "projectId"
--       ],
--       "response": {
--        "$ref": "ListAggregatedClustersResponse"
--       },
--       "scopes": [
--        "https://www.googleapis.com/auth/cloud-platform"
--       ]
--      }
--     }
--    },
--    "operations": {
--     "methods": {
--      "list": {
--       "id": "container.projects.operations.list",
--       "path": "{projectId}/operations",
--       "httpMethod": "GET",
--       "description": "Lists all operations in a project, across all zones.",
--       "parameters": {
--        "projectId": {
--         "type": "string",
--         "description": "The Google Developers Console project ID or  project number.",
--         "required": true,
--         "location": "path"
--        }
--       },
--       "parameterOrder": [
--        "projectId"
--       ],
--       "response": {
--        "$ref": "ListAggregatedOperationsResponse"
--       },
--       "scopes": [
--        "https://www.googleapis.com/auth/cloud-platform"
--       ]
--      }
--     }
--    },
--    "zones": {
--     "resources": {
--      "clusters": {
--       "methods": {
--        "create": {
--         "id": "container.projects.zones.clusters.create",
--         "path": "{projectId}/zones/{zoneId}/clusters",
--         "httpMethod": "POST",
--         "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, a route named k8s-iproute-10-xx-0-0 is created to track that the cluster's 10.xx.0.0/16 CIDR has been assigned.",
--         "parameters": {
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId"
--         ],
--         "request": {
--          "$ref": "CreateClusterRequest"
--         },
--         "response": {
--          "$ref": "Operation"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        },
--        "delete": {
--         "id": "container.projects.zones.clusters.delete",
--         "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
--         "httpMethod": "DELETE",
--         "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
--         "parameters": {
--          "clusterId": {
--           "type": "string",
--           "description": "The name of the cluster to delete.",
--           "required": true,
--           "location": "path"
--          },
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId",
--          "clusterId"
--         ],
--         "response": {
--          "$ref": "Operation"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        },
--        "get": {
--         "id": "container.projects.zones.clusters.get",
--         "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
--         "httpMethod": "GET",
--         "description": "Gets a specific cluster.",
--         "parameters": {
--          "clusterId": {
--           "type": "string",
--           "description": "The name of the cluster to retrieve.",
--           "required": true,
--           "location": "path"
--          },
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId",
--          "clusterId"
--         ],
--         "response": {
--          "$ref": "Cluster"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        },
--        "list": {
--         "id": "container.projects.zones.clusters.list",
--         "path": "{projectId}/zones/{zoneId}/clusters",
--         "httpMethod": "GET",
--         "description": "Lists all clusters owned by a project in the specified zone.",
--         "parameters": {
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId"
--         ],
--         "response": {
--          "$ref": "ListClustersResponse"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        }
--       }
--      },
--      "operations": {
--       "methods": {
--        "get": {
--         "id": "container.projects.zones.operations.get",
--         "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
--         "httpMethod": "GET",
--         "description": "Gets the specified operation.",
--         "parameters": {
--          "operationId": {
--           "type": "string",
--           "description": "The server-assigned name of the operation.",
--           "required": true,
--           "location": "path"
--          },
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId",
--          "operationId"
--         ],
--         "response": {
--          "$ref": "Operation"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        },
--        "list": {
--         "id": "container.projects.zones.operations.list",
--         "path": "{projectId}/zones/{zoneId}/operations",
--         "httpMethod": "GET",
--         "description": "Lists all operations in a project in a specific zone.",
--         "parameters": {
--          "projectId": {
--           "type": "string",
--           "description": "The Google Developers Console project ID or  project number.",
--           "required": true,
--           "location": "path"
--          },
--          "zoneId": {
--           "type": "string",
--           "description": "The name of the Google Compute Engine zone to return operations for.",
--           "required": true,
--           "location": "path"
--          }
--         },
--         "parameterOrder": [
--          "projectId",
--          "zoneId"
--         ],
--         "response": {
--          "$ref": "ListOperationsResponse"
--         },
--         "scopes": [
--          "https://www.googleapis.com/auth/cloud-platform"
--         ]
--        }
--       }
--      }
--     }
--    }
--   }
--  }
-- }
--}
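
For orientation, each method entry in the discovery document above maps onto a plain HTTPS call against the container v1beta1 base path. A minimal sketch of the "container.projects.zones.clusters.get" call follows; the project, zone, and cluster IDs are made-up values, and using an unauthenticated http.Get is purely illustrative (a real call needs an OAuth client with the cloud-platform scope listed above).

package main

import (
	"fmt"
	"net/http"
)

// Sketch only: shows how the discovery entry above becomes a single HTTPS
// request. The IDs and the unauthenticated client are assumptions.
func main() {
	const basePath = "https://www.googleapis.com/container/v1beta1/projects/"
	projectId, zoneId, clusterId := "my-project", "us-central1-a", "my-cluster"

	// parameterOrder is projectId, zoneId, clusterId; all three are path parameters.
	url := basePath + projectId + "/zones/" + zoneId + "/clusters/" + clusterId + "?alt=json"

	resp, err := http.Get(url) // GET, per the method's httpMethod field
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect 401 without OAuth credentials
}
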
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-gen.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-gen.go
-deleted file mode 100644
-index c9fce64..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/container-gen.go
-+++ /dev/null
-@@ -1,1007 +0,0 @@
--// Package container provides access to the Google Container Engine API.
--//
--// Usage example:
--//
--//   import "code.google.com/p/google-api-go-client/container/v1beta1"
--//   ...
--//   containerService, err := container.New(oauthHttpClient)
--package container
--
--import (
--	"bytes"
--	"code.google.com/p/google-api-go-client/googleapi"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"net/http"
--	"net/url"
--	"strconv"
--	"strings"
--)
--
--// Always reference these packages, just in case the auto-generated code
--// below doesn't.
--var _ = bytes.NewBuffer
--var _ = strconv.Itoa
--var _ = fmt.Sprintf
--var _ = json.NewDecoder
--var _ = io.Copy
--var _ = url.Parse
--var _ = googleapi.Version
--var _ = errors.New
--var _ = strings.Replace
--
--const apiId = "container:v1beta1"
--const apiName = "container"
--const apiVersion = "v1beta1"
--const basePath = "https://www.googleapis.com/container/v1beta1/projects/"
--
--// OAuth2 scopes used by this API.
--const (
--	// View and manage your data across Google Cloud Platform services
--	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
--)
--
--func New(client *http.Client) (*Service, error) {
--	if client == nil {
--		return nil, errors.New("client is nil")
--	}
--	s := &Service{client: client, BasePath: basePath}
--	s.Projects = NewProjectsService(s)
--	return s, nil
--}
--
--type Service struct {
--	client   *http.Client
--	BasePath string // API endpoint base URL
--
--	Projects *ProjectsService
--}
--
--func NewProjectsService(s *Service) *ProjectsService {
--	rs := &ProjectsService{s: s}
--	rs.Clusters = NewProjectsClustersService(s)
--	rs.Operations = NewProjectsOperationsService(s)
--	rs.Zones = NewProjectsZonesService(s)
--	return rs
--}
--
--type ProjectsService struct {
--	s *Service
--
--	Clusters *ProjectsClustersService
--
--	Operations *ProjectsOperationsService
--
--	Zones *ProjectsZonesService
--}
--
--func NewProjectsClustersService(s *Service) *ProjectsClustersService {
--	rs := &ProjectsClustersService{s: s}
--	return rs
--}
--
--type ProjectsClustersService struct {
--	s *Service
--}
--
--func NewProjectsOperationsService(s *Service) *ProjectsOperationsService {
--	rs := &ProjectsOperationsService{s: s}
--	return rs
--}
--
--type ProjectsOperationsService struct {
--	s *Service
--}
--
--func NewProjectsZonesService(s *Service) *ProjectsZonesService {
--	rs := &ProjectsZonesService{s: s}
--	rs.Clusters = NewProjectsZonesClustersService(s)
--	rs.Operations = NewProjectsZonesOperationsService(s)
--	return rs
--}
--
--type ProjectsZonesService struct {
--	s *Service
--
--	Clusters *ProjectsZonesClustersService
--
--	Operations *ProjectsZonesOperationsService
--}
--
--func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService {
--	rs := &ProjectsZonesClustersService{s: s}
--	return rs
--}
--
--type ProjectsZonesClustersService struct {
--	s *Service
--}
--
--func NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService {
--	rs := &ProjectsZonesOperationsService{s: s}
--	return rs
--}
--
--type ProjectsZonesOperationsService struct {
--	s *Service
--}
--
--type Cluster struct {
--	// ClusterApiVersion: The API version of the Kubernetes master and
--	// kubelets running in this cluster. Allowed value is 0.4.2, or leave
--	// blank to pick up the latest stable release.
--	ClusterApiVersion string `json:"clusterApiVersion,omitempty"`
--
--	// ContainerIpv4Cidr: [Output only] The IP addresses of the container
--	// pods in this cluster, in  CIDR notation (e.g. 1.2.3.4/29).
--	ContainerIpv4Cidr string `json:"containerIpv4Cidr,omitempty"`
--
--	// CreationTimestamp: [Output only] The time the cluster was created, in
--	// RFC3339 text format.
--	CreationTimestamp string `json:"creationTimestamp,omitempty"`
--
--	// Description: An optional description of this cluster.
--	Description string `json:"description,omitempty"`
--
--	// Endpoint: [Output only] The IP address of this cluster's Kubernetes
--	// master. The endpoint can be accessed from the internet at
--	// https://username:password@endpoint/.
--	//
--	// See the masterAuth property of
--	// this resource for username and password information.
--	Endpoint string `json:"endpoint,omitempty"`
--
--	// MasterAuth: The HTTP basic authentication information for accessing
--	// the master. Because the master endpoint is open to the internet, you
--	// should create a strong password.
--	MasterAuth *MasterAuth `json:"masterAuth,omitempty"`
--
--	// Name: The name of this cluster. The name must be unique within this
--	// project and zone, and can be up to 40 characters with the following
--	// restrictions:
--	// - Lowercase letters, numbers, and hyphens only.
--	// -
--	// Must start with a letter.
--	// - Must end with a number or a letter.
--	Name string `json:"name,omitempty"`
--
--	// NodeConfig: The machine type and image to use for all nodes in this
--	// cluster. See the descriptions of the child properties of nodeConfig.
--	NodeConfig *NodeConfig `json:"nodeConfig,omitempty"`
--
--	// NodeRoutingPrefixSize: [Output only] The size of the address space on
--	// each node for hosting containers.
--	NodeRoutingPrefixSize int64 `json:"nodeRoutingPrefixSize,omitempty"`
--
--	// NumNodes: The number of nodes to create in this cluster. You must
--	// ensure that your Compute Engine resource quota is sufficient for this
--	// number of instances plus one (to include the master). You must also
--	// have available firewall and routes quota.
--	NumNodes int64 `json:"numNodes,omitempty"`
--
--	// ServicesIpv4Cidr: [Output only] The IP addresses of the Kubernetes
--	// services in this cluster, in  CIDR notation (e.g. 1.2.3.4/29).
--	// Service addresses are always in the 10.0.0.0/16 range.
--	ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"`
--
--	// Status: [Output only] The current status of this cluster.
--	Status string `json:"status,omitempty"`
--
--	// StatusMessage: [Output only] Additional information about the current
--	// status of this cluster, if available.
--	StatusMessage string `json:"statusMessage,omitempty"`
--
--	// Zone: [Output only] The name of the Google Compute Engine zone in
--	// which the cluster resides.
--	Zone string `json:"zone,omitempty"`
--}
--
--type CreateClusterRequest struct {
--	// Cluster: A cluster resource.
--	Cluster *Cluster `json:"cluster,omitempty"`
--}
--
--type ListAggregatedClustersResponse struct {
--	// Clusters: A list of clusters in the project, across all zones.
--	Clusters []*Cluster `json:"clusters,omitempty"`
--}
--
--type ListAggregatedOperationsResponse struct {
--	// Operations: A list of operations in the project, across all zones.
--	Operations []*Operation `json:"operations,omitempty"`
--}
--
--type ListClustersResponse struct {
--	// Clusters: A list of clusters in the project in the specified zone.
--	Clusters []*Cluster `json:"clusters,omitempty"`
--}
--
--type ListOperationsResponse struct {
--	// Operations: A list of operations in the project in the specified
--	// zone.
--	Operations []*Operation `json:"operations,omitempty"`
--}
--
--type MasterAuth struct {
--	// Password: The password to use when accessing the Kubernetes master
--	// endpoint.
--	Password string `json:"password,omitempty"`
--
--	// User: The username to use when accessing the Kubernetes master
--	// endpoint.
--	User string `json:"user,omitempty"`
--}
--
--type NodeConfig struct {
--	// MachineType: The name of a Google Compute Engine machine type (e.g.
--	// n1-standard-1).
--	//
--	// If unspecified, the default machine type is
--	// n1-standard-1.
--	MachineType string `json:"machineType,omitempty"`
--
--	// SourceImage: The fully-specified name of a Google Compute Engine
--	// image. For example:
--	// https://www.googleapis.com/compute/v1/projects/debian-cloud/global/ima
--	// ges/backports-debian-7-wheezy-vYYYYMMDD (where YYYYMMDD is the version
--	// date).
--	//
--	// If specifying an image, you are responsible for ensuring its
--	// compatibility with the Debian 7 backports image. We recommend leaving
--	// this field blank to accept the default backports-debian-7-wheezy
--	// value.
--	SourceImage string `json:"sourceImage,omitempty"`
--}
--
--type Operation struct {
--	// ErrorMessage: If an error has occurred, a textual description of the
--	// error.
--	ErrorMessage string `json:"errorMessage,omitempty"`
--
--	// Name: The server-assigned ID for this operation. If the operation is
--	// fulfilled upfront, it may not have a resource name.
--	Name string `json:"name,omitempty"`
--
--	// OperationType: The operation type.
--	OperationType string `json:"operationType,omitempty"`
--
--	// Status: The current status of the operation.
--	Status string `json:"status,omitempty"`
--
--	// Target: [Optional] The URL of the cluster resource that this
--	// operation is associated with.
--	Target string `json:"target,omitempty"`
--
--	// Zone: The name of the Google Compute Engine zone in which the
--	// operation is taking place.
--	Zone string `json:"zone,omitempty"`
--}
--
--// method id "container.projects.clusters.list":
--
--type ProjectsClustersListCall struct {
--	s         *Service
--	projectId string
--	opt_      map[string]interface{}
--}
--
--// List: Lists all clusters owned by a project across all zones.
--func (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCall {
--	c := &ProjectsClustersListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsClustersListCall) Fields(s ...googleapi.Field) *ProjectsClustersListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsClustersListCall) Do() (*ListAggregatedClustersResponse, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/clusters")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ListAggregatedClustersResponse
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Lists all clusters owned by a project across all zones.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.clusters.list",
--	//   "parameterOrder": [
--	//     "projectId"
--	//   ],
--	//   "parameters": {
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/clusters",
--	//   "response": {
--	//     "$ref": "ListAggregatedClustersResponse"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.operations.list":
--
--type ProjectsOperationsListCall struct {
--	s         *Service
--	projectId string
--	opt_      map[string]interface{}
--}
--
--// List: Lists all operations in a project, across all zones.
--func (r *ProjectsOperationsService) List(projectId string) *ProjectsOperationsListCall {
--	c := &ProjectsOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsOperationsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsOperationsListCall) Do() (*ListAggregatedOperationsResponse, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ListAggregatedOperationsResponse
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Lists all operations in a project, across all zones.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.operations.list",
--	//   "parameterOrder": [
--	//     "projectId"
--	//   ],
--	//   "parameters": {
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/operations",
--	//   "response": {
--	//     "$ref": "ListAggregatedOperationsResponse"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.clusters.create":
--
--type ProjectsZonesClustersCreateCall struct {
--	s                    *Service
--	projectId            string
--	zoneId               string
--	createclusterrequest *CreateClusterRequest
--	opt_                 map[string]interface{}
--}
--
--// Create: Creates a cluster, consisting of the specified number and
--// type of Google Compute Engine instances, plus a Kubernetes master
--// instance.
--//
--// The cluster is created in the project's default
--// network.
--//
--// A firewall is added that allows traffic into port 443 on
--// the master, which enables HTTPS. A firewall and a route is added for
--// each node to allow the containers on that node to communicate with
--// all other instances in the cluster.
--//
--// Finally, a route named
--// k8s-iproute-10-xx-0-0 is created to track that the cluster's
--// 10.xx.0.0/16 CIDR has been assigned.
--func (r *ProjectsZonesClustersService) Create(projectId string, zoneId string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall {
--	c := &ProjectsZonesClustersCreateCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	c.createclusterrequest = createclusterrequest
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesClustersCreateCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest)
--	if err != nil {
--		return nil, err
--	}
--	ctype := "application/json"
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("POST", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--		"zoneId":    c.zoneId,
--	})
--	req.Header.Set("Content-Type", ctype)
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, a route named k8s-iproute-10-xx-0-0 is created to track that the cluster's 10.xx.0.0/16 CIDR has been assigned.",
--	//   "httpMethod": "POST",
--	//   "id": "container.projects.zones.clusters.create",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId"
--	//   ],
--	//   "parameters": {
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/clusters",
--	//   "request": {
--	//     "$ref": "CreateClusterRequest"
--	//   },
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.clusters.delete":
--
--type ProjectsZonesClustersDeleteCall struct {
--	s         *Service
--	projectId string
--	zoneId    string
--	clusterId string
--	opt_      map[string]interface{}
--}
--
--// Delete: Deletes the cluster, including the Kubernetes master and all
--// worker nodes.
--//
--// Firewalls and routes that were configured at cluster
--// creation are also deleted.
--func (r *ProjectsZonesClustersService) Delete(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersDeleteCall {
--	c := &ProjectsZonesClustersDeleteCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	c.clusterId = clusterId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesClustersDeleteCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("DELETE", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--		"zoneId":    c.zoneId,
--		"clusterId": c.clusterId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
--	//   "httpMethod": "DELETE",
--	//   "id": "container.projects.zones.clusters.delete",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId",
--	//     "clusterId"
--	//   ],
--	//   "parameters": {
--	//     "clusterId": {
--	//       "description": "The name of the cluster to delete.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.clusters.get":
--
--type ProjectsZonesClustersGetCall struct {
--	s         *Service
--	projectId string
--	zoneId    string
--	clusterId string
--	opt_      map[string]interface{}
--}
--
--// Get: Gets a specific cluster.
--func (r *ProjectsZonesClustersService) Get(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersGetCall {
--	c := &ProjectsZonesClustersGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	c.clusterId = clusterId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesClustersGetCall) Do() (*Cluster, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--		"zoneId":    c.zoneId,
--		"clusterId": c.clusterId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Cluster
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Gets a specific cluster.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.zones.clusters.get",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId",
--	//     "clusterId"
--	//   ],
--	//   "parameters": {
--	//     "clusterId": {
--	//       "description": "The name of the cluster to retrieve.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
--	//   "response": {
--	//     "$ref": "Cluster"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.clusters.list":
--
--type ProjectsZonesClustersListCall struct {
--	s         *Service
--	projectId string
--	zoneId    string
--	opt_      map[string]interface{}
--}
--
--// List: Lists all clusters owned by a project in the specified zone.
--func (r *ProjectsZonesClustersService) List(projectId string, zoneId string) *ProjectsZonesClustersListCall {
--	c := &ProjectsZonesClustersListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesClustersListCall) Do() (*ListClustersResponse, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--		"zoneId":    c.zoneId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ListClustersResponse
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Lists all clusters owned by a project in the specified zone.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.zones.clusters.list",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId"
--	//   ],
--	//   "parameters": {
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/clusters",
--	//   "response": {
--	//     "$ref": "ListClustersResponse"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.operations.get":
--
--type ProjectsZonesOperationsGetCall struct {
--	s           *Service
--	projectId   string
--	zoneId      string
--	operationId string
--	opt_        map[string]interface{}
--}
--
--// Get: Gets the specified operation.
--func (r *ProjectsZonesOperationsService) Get(projectId string, zoneId string, operationId string) *ProjectsZonesOperationsGetCall {
--	c := &ProjectsZonesOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	c.operationId = operationId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesOperationsGetCall) Do() (*Operation, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations/{operationId}")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId":   c.projectId,
--		"zoneId":      c.zoneId,
--		"operationId": c.operationId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *Operation
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Gets the specified operation.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.zones.operations.get",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId",
--	//     "operationId"
--	//   ],
--	//   "parameters": {
--	//     "operationId": {
--	//       "description": "The server-assigned name of the operation.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
--	//   "response": {
--	//     "$ref": "Operation"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
--
--// method id "container.projects.zones.operations.list":
--
--type ProjectsZonesOperationsListCall struct {
--	s         *Service
--	projectId string
--	zoneId    string
--	opt_      map[string]interface{}
--}
--
--// List: Lists all operations in a project in a specific zone.
--func (r *ProjectsZonesOperationsService) List(projectId string, zoneId string) *ProjectsZonesOperationsListCall {
--	c := &ProjectsZonesOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
--	c.projectId = projectId
--	c.zoneId = zoneId
--	return c
--}
--
--// Fields allows partial responses to be retrieved.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--// for more information.
--func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall {
--	c.opt_["fields"] = googleapi.CombineFields(s)
--	return c
--}
--
--func (c *ProjectsZonesOperationsListCall) Do() (*ListOperationsResponse, error) {
--	var body io.Reader = nil
--	params := make(url.Values)
--	params.Set("alt", "json")
--	if v, ok := c.opt_["fields"]; ok {
--		params.Set("fields", fmt.Sprintf("%v", v))
--	}
--	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations")
--	urls += "?" + params.Encode()
--	req, _ := http.NewRequest("GET", urls, body)
--	googleapi.Expand(req.URL, map[string]string{
--		"projectId": c.projectId,
--		"zoneId":    c.zoneId,
--	})
--	req.Header.Set("User-Agent", "google-api-go-client/0.5")
--	res, err := c.s.client.Do(req)
--	if err != nil {
--		return nil, err
--	}
--	defer googleapi.CloseBody(res)
--	if err := googleapi.CheckResponse(res); err != nil {
--		return nil, err
--	}
--	var ret *ListOperationsResponse
--	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
--		return nil, err
--	}
--	return ret, nil
--	// {
--	//   "description": "Lists all operations in a project in a specific zone.",
--	//   "httpMethod": "GET",
--	//   "id": "container.projects.zones.operations.list",
--	//   "parameterOrder": [
--	//     "projectId",
--	//     "zoneId"
--	//   ],
--	//   "parameters": {
--	//     "projectId": {
--	//       "description": "The Google Developers Console project ID or  project number.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     },
--	//     "zoneId": {
--	//       "description": "The name of the Google Compute Engine zone to return operations for.",
--	//       "location": "path",
--	//       "required": true,
--	//       "type": "string"
--	//     }
--	//   },
--	//   "path": "{projectId}/zones/{zoneId}/operations",
--	//   "response": {
--	//     "$ref": "ListOperationsResponse"
--	//   },
--	//   "scopes": [
--	//     "https://www.googleapis.com/auth/cloud-platform"
--	//   ]
--	// }
--
--}
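
Taken together, the generated wrappers above reduce the discovery document to a few method chains. A minimal sketch of how a caller used this vendored package before its removal; passing http.DefaultClient is an assumption for brevity, since real callers supply an OAuth2-authorized *http.Client with the cloud-platform scope.

package main

import (
	"fmt"
	"log"
	"net/http"

	"code.google.com/p/google-api-go-client/container/v1beta1"
)

// Sketch only: exercises the generated client as its package comment suggests.
func main() {
	svc, err := container.New(http.DefaultClient) // assumption: unauthenticated client
	if err != nil {
		log.Fatal(err)
	}

	// List clusters in one zone; projectId and zoneId follow the declared parameterOrder.
	resp, err := svc.Projects.Zones.Clusters.List("my-project", "us-central1-a").Do()
	if err != nil {
		log.Fatal(err) // a *googleapi.Error for non-2xx responses
	}
	for _, c := range resp.Clusters {
		fmt.Println(c.Name, c.Status)
	}
}
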
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi.go
-deleted file mode 100644
-index d6e5cd8..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi.go
-+++ /dev/null
-@@ -1,401 +0,0 @@
--// Copyright 2011 Google Inc. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package googleapi contains the common code shared by all Google API
--// libraries.
--package googleapi
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"mime/multipart"
--	"net/http"
--	"net/textproto"
--	"net/url"
--	"os"
--	"strings"
--
--	"code.google.com/p/google-api-go-client/googleapi/internal/uritemplates"
--)
--
--// ContentTyper is an interface for Readers which know (or would like
--// to override) their Content-Type. If a media body doesn't implement
--// ContentTyper, the type is sniffed from the content using
--// http.DetectContentType.
--type ContentTyper interface {
--	ContentType() string
--}
--
--const Version = "0.5"
--
--// Error contains an error response from the server.
--type Error struct {
--	// Code is the HTTP response status code and will always be populated.
--	Code int `json:"code"`
--	// Message is the server response message and is only populated when
--	// explicitly referenced by the JSON server response.
--	Message string `json:"message"`
--	// Body is the raw response returned by the server.
--	// It is often but not always JSON, depending on how the request fails.
--	Body string
--
--	Errors []ErrorItem
--}
--
--// ErrorItem is a detailed error code & message from the Google API frontend.
--type ErrorItem struct {
--	// Reason is the typed error code. For example: "some_example".
--	Reason string `json:"reason"`
--	// Message is the human-readable description of the error.
--	Message string `json:"message"`
--}
--
--func (e *Error) Error() string {
--	if len(e.Errors) == 0 && e.Message == "" {
--		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
--	}
--	var buf bytes.Buffer
--	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
--	if e.Message != "" {
--		fmt.Fprintf(&buf, "%s", e.Message)
--	}
--	if len(e.Errors) == 0 {
--		return strings.TrimSpace(buf.String())
--	}
--	if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
--		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
--		return buf.String()
--	}
--	fmt.Fprintln(&buf, "\nMore details:")
--	for _, v := range e.Errors {
--		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
--	}
--	return buf.String()
--}
--
--type errorReply struct {
--	Error *Error `json:"error"`
--}
--
--// CheckResponse returns an error (of type *Error) if the response
--// status code is not 2xx.
--func CheckResponse(res *http.Response) error {
--	if res.StatusCode >= 200 && res.StatusCode <= 299 {
--		return nil
--	}
--	slurp, err := ioutil.ReadAll(res.Body)
--	if err == nil {
--		jerr := new(errorReply)
--		err = json.Unmarshal(slurp, jerr)
--		if err == nil && jerr.Error != nil {
--			if jerr.Error.Code == 0 {
--				jerr.Error.Code = res.StatusCode
--			}
--			jerr.Error.Body = string(slurp)
--			return jerr.Error
--		}
--	}
--	return &Error{
--		Code: res.StatusCode,
--		Body: string(slurp),
--	}
--}
--
--type MarshalStyle bool
--
--var WithDataWrapper = MarshalStyle(true)
--var WithoutDataWrapper = MarshalStyle(false)
--
--func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
--	buf := new(bytes.Buffer)
--	if wrap {
--		buf.Write([]byte(`{"data": `))
--	}
--	err := json.NewEncoder(buf).Encode(v)
--	if err != nil {
--		return nil, err
--	}
--	if wrap {
--		buf.Write([]byte(`}`))
--	}
--	return buf, nil
--}
--
--func getMediaType(media io.Reader) (io.Reader, string) {
--	if typer, ok := media.(ContentTyper); ok {
--		return media, typer.ContentType()
--	}
--
--	typ := "application/octet-stream"
--	buf := make([]byte, 1024)
--	n, err := media.Read(buf)
--	buf = buf[:n]
--	if err == nil {
--		typ = http.DetectContentType(buf)
--	}
--	return io.MultiReader(bytes.NewBuffer(buf), media), typ
--}
--
--type Lengther interface {
--	Len() int
--}
--
--// endingWithErrorReader reads from r until it returns an error.  If the
--// final error from r is io.EOF and e is non-nil, e is used instead.
--type endingWithErrorReader struct {
--	r io.Reader
--	e error
--}
--
--func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
--	n, err = er.r.Read(p)
--	if err == io.EOF && er.e != nil {
--		err = er.e
--	}
--	return
--}
--
--func getReaderSize(r io.Reader) (io.Reader, int64) {
--	// Ideal case, the reader knows its own size.
--	if lr, ok := r.(Lengther); ok {
--		return r, int64(lr.Len())
--	}
--
--	// But maybe it's a seeker and we can seek to the end to find its size.
--	if s, ok := r.(io.Seeker); ok {
--		pos0, err := s.Seek(0, os.SEEK_CUR)
--		if err == nil {
--			posend, err := s.Seek(0, os.SEEK_END)
--			if err == nil {
--				_, err = s.Seek(pos0, os.SEEK_SET)
--				if err == nil {
--					return r, posend - pos0
--				} else {
--					// We moved it forward but can't restore it.
--					// Seems unlikely, but can't really restore now.
--					return endingWithErrorReader{strings.NewReader(""), err}, posend - pos0
--				}
--			}
--		}
--	}
--
--	// Otherwise we have to make a copy to calculate how big the reader is.
--	buf := new(bytes.Buffer)
--	// TODO(bradfitz): put a cap on this copy? spill to disk after
--	// a certain point?
--	_, err := io.Copy(buf, r)
--	return endingWithErrorReader{buf, err}, int64(buf.Len())
--}
--
--func typeHeader(contentType string) textproto.MIMEHeader {
--	h := make(textproto.MIMEHeader)
--	h.Set("Content-Type", contentType)
--	return h
--}
--
--// countingWriter counts the number of bytes it receives to write, but
--// discards them.
--type countingWriter struct {
--	n *int64
--}
--
--func (w countingWriter) Write(p []byte) (int, error) {
--	*w.n += int64(len(p))
--	return len(p), nil
--}
--
--// ConditionallyIncludeMedia does nothing if media is nil.
--//
--// bodyp is an in/out parameter.  It should initially point to the
--// reader of the application/json (or whatever) payload to send in the
--// API request.  It's updated to point to the multipart body reader.
--//
--// ctypep is an in/out parameter.  It should initially point to the
--// content type of the bodyp, usually "application/json".  It's updated
--// to the "multipart/related" content type, with random boundary.
--//
--// The return value is the content-length of the entire multipart body.
--func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (totalContentLength int64, ok bool) {
--	if media == nil {
--		return
--	}
--	// Get the media type and size. The type check might return a
--	// different reader instance, so do the size check first,
--	// which looks at the specific type of the io.Reader.
--	var mediaType string
--	if typer, ok := media.(ContentTyper); ok {
--		mediaType = typer.ContentType()
--	}
--	media, mediaSize := getReaderSize(media)
--	if mediaType == "" {
--		media, mediaType = getMediaType(media)
--	}
--	body, bodyType := *bodyp, *ctypep
--	body, bodySize := getReaderSize(body)
--
--	// Calculate how big the multipart will be.
--	{
--		totalContentLength = bodySize + mediaSize
--		mpw := multipart.NewWriter(countingWriter{&totalContentLength})
--		mpw.CreatePart(typeHeader(bodyType))
--		mpw.CreatePart(typeHeader(mediaType))
--		mpw.Close()
--	}
--
--	pr, pw := io.Pipe()
--	mpw := multipart.NewWriter(pw)
--	*bodyp = pr
--	*ctypep = "multipart/related; boundary=" + mpw.Boundary()
--	go func() {
--		defer pw.Close()
--		defer mpw.Close()
--
--		w, err := mpw.CreatePart(typeHeader(bodyType))
--		if err != nil {
--			return
--		}
--		_, err = io.Copy(w, body)
--		if err != nil {
--			return
--		}
--
--		w, err = mpw.CreatePart(typeHeader(mediaType))
--		if err != nil {
--			return
--		}
--		_, err = io.Copy(w, media)
--		if err != nil {
--			return
--		}
--	}()
--	return totalContentLength, true
--}
--
--func ResolveRelative(basestr, relstr string) string {
--	u, _ := url.Parse(basestr)
--	rel, _ := url.Parse(relstr)
--	u = u.ResolveReference(rel)
--	us := u.String()
--	us = strings.Replace(us, "%7B", "{", -1)
--	us = strings.Replace(us, "%7D", "}", -1)
--	return us
--}
--
--// has4860Fix is whether this Go environment contains the fix for
--// http://golang.org/issue/4860
--var has4860Fix bool
--
--// init initializes has4860Fix by checking the behavior of the net/http package.
--func init() {
--	r := http.Request{
--		URL: &url.URL{
--			Scheme: "http",
--			Opaque: "//opaque",
--		},
--	}
--	b := &bytes.Buffer{}
--	r.Write(b)
--	has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
--}
--
--// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
--// don't alter any hex-escaped characters in u.Path.
--func SetOpaque(u *url.URL) {
--	u.Opaque = "//" + u.Host + u.Path
--	if !has4860Fix {
--		u.Opaque = u.Scheme + ":" + u.Opaque
--	}
--}
--
--// Expand substitutes any {encoded} strings in the URL passed in using
--// the map supplied.
--//
--// This calls SetOpaque to avoid encoding of the parameters in the URL path.
--func Expand(u *url.URL, expansions map[string]string) {
--	expanded, err := uritemplates.Expand(u.Path, expansions)
--	if err == nil {
--		u.Path = expanded
--		SetOpaque(u)
--	}
--}
--
--// CloseBody is used to close res.Body.
--// Prior to calling Close, it also tries to Read a small amount to see an EOF.
--// Not seeing an EOF can prevent HTTP Transports from reusing connections.
--func CloseBody(res *http.Response) {
--	if res == nil || res.Body == nil {
--		return
--	}
--	// Justification for 3 byte reads: two for up to "\r\n" after
--	// a JSON/XML document, and then 1 to see EOF if we haven't yet.
--	// TODO(bradfitz): detect Go 1.3+ and skip these reads.
--	// See https://codereview.appspot.com/58240043
--	// and https://codereview.appspot.com/49570044
--	buf := make([]byte, 1)
--	for i := 0; i < 3; i++ {
--		_, err := res.Body.Read(buf)
--		if err != nil {
--			break
--		}
--	}
--	res.Body.Close()
--
--}
--
--// VariantType returns the type name of the given variant.
--// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned.
--// This is used to support "variant" APIs that can return one of a number of different types.
--func VariantType(t map[string]interface{}) string {
--	s, _ := t["type"].(string)
--	return s
--}
--
--// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
--// This is used to support "variant" APIs that can return one of a number of different types.
--// It reports whether the conversion was successful.
--func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
--	var buf bytes.Buffer
--	err := json.NewEncoder(&buf).Encode(v)
--	if err != nil {
--		return false
--	}
--	return json.Unmarshal(buf.Bytes(), dst) == nil
--}
--
--// A Field names a field to be retrieved with a partial response.
--// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
--//
--// Partial responses can dramatically reduce the amount of data that must be sent to your application.
--// In order to request partial responses, you can specify the full list of fields
--// that your application needs by adding the Fields option to your request.
--//
--// Field strings use camelCase with leading lower-case characters to identify fields within the response.
--//
--// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
--// you could request just those fields like this:
--//
--//     svc.Events.List().Fields("nextPageToken", "items/id").Do()
--//
--// or if you were also interested in each Item's "Updated" field, you can combine them like this:
--//
--//     svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
--//
--// More information about field formatting can be found here:
--// https://developers.google.com/+/api/#fields-syntax
--//
--// Another way to find field names is through the Google API explorer:
--// https://developers.google.com/apis-explorer/#p/
--type Field string
--
--// CombineFields combines fields into a single string.
--func CombineFields(s []Field) string {
--	r := make([]string, len(s))
--	for i, v := range s {
--		r[i] = string(v)
--	}
--	return strings.Join(r, ",")
--}
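
Every generated Do() method above leans on two of these helpers: ResolveRelative keeps the {braced} placeholders intact while joining base and relative paths, and Expand then substitutes and escapes the path parameters. A small sketch with made-up parameter values; the vendored import path is the one this commit deletes.

package main

import (
	"fmt"
	"net/url"

	"code.google.com/p/google-api-go-client/googleapi"
)

// Sketch only: builds the same URL a generated clusters.list call would issue.
func main() {
	base := "https://www.googleapis.com/container/v1beta1/projects/"
	full := googleapi.ResolveRelative(base, "{projectId}/zones/{zoneId}/clusters")

	u, err := url.Parse(full + "?alt=json")
	if err != nil {
		panic(err)
	}
	googleapi.Expand(u, map[string]string{
		"projectId": "my-project",
		"zoneId":    "us-central1-a",
	})
	fmt.Println(u.String())
	// Expected shape: .../projects/my-project/zones/us-central1-a/clusters?alt=json
}
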
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi_test.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi_test.go
-deleted file mode 100644
-index abc5185..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/googleapi_test.go
-+++ /dev/null
-@@ -1,361 +0,0 @@
--// Copyright 2011 Google Inc. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package googleapi
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"reflect"
--	"strings"
--	"testing"
--)
--
--type SetOpaqueTest struct {
--	in             *url.URL
--	wantRequestURI string
--}
--
--var setOpaqueTests = []SetOpaqueTest{
--	// no path
--	{
--		&url.URL{
--			Scheme: "http",
--			Host:   "www.golang.org",
--		},
--		"http://www.golang.org",
--	},
--	// path
--	{
--		&url.URL{
--			Scheme: "http",
--			Host:   "www.golang.org",
--			Path:   "/",
--		},
--		"http://www.golang.org/",
--	},
--	// file with hex escaping
--	{
--		&url.URL{
--			Scheme: "https",
--			Host:   "www.golang.org",
--			Path:   "/file%20one&two",
--		},
--		"https://www.golang.org/file%20one&two",
--	},
--	// query
--	{
--		&url.URL{
--			Scheme:   "http",
--			Host:     "www.golang.org",
--			Path:     "/",
--			RawQuery: "q=go+language",
--		},
--		"http://www.golang.org/?q=go+language",
--	},
--	// file with hex escaping in path plus query
--	{
--		&url.URL{
--			Scheme:   "https",
--			Host:     "www.golang.org",
--			Path:     "/file%20one&two",
--			RawQuery: "q=go+language",
--		},
--		"https://www.golang.org/file%20one&two?q=go+language",
--	},
--	// query with hex escaping
--	{
--		&url.URL{
--			Scheme:   "http",
--			Host:     "www.golang.org",
--			Path:     "/",
--			RawQuery: "q=go%20language",
--		},
--		"http://www.golang.org/?q=go%20language",
--	},
--}
--
--// prefixTmpl is a template for the expected prefix of the output of writing
--// an HTTP request.
--const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n"
--
--func TestSetOpaque(t *testing.T) {
--	for _, test := range setOpaqueTests {
--		u := *test.in
--		SetOpaque(&u)
--
--		w := &bytes.Buffer{}
--		r := &http.Request{URL: &u}
--		if err := r.Write(w); err != nil {
--			t.Errorf("write request: %v", err)
--			continue
--		}
--
--		prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host)
--		if got := string(w.Bytes()); !strings.HasPrefix(got, prefix) {
--			t.Errorf("got %q expected prefix %q", got, prefix)
--		}
--	}
--}
--
--type ExpandTest struct {
--	in         string
--	expansions map[string]string
--	want       string
--}
--
--var expandTests = []ExpandTest{
--	// no expansions
--	{
--		"http://www.golang.org/",
--		map[string]string{},
--		"http://www.golang.org/",
--	},
--	// one expansion, no escaping
--	{
--		"http://www.golang.org/{bucket}/delete",
--		map[string]string{
--			"bucket": "red",
--		},
--		"http://www.golang.org/red/delete",
--	},
--	// one expansion, with hex escapes
--	{
--		"http://www.golang.org/{bucket}/delete",
--		map[string]string{
--			"bucket": "red/blue",
--		},
--		"http://www.golang.org/red%2Fblue/delete",
--	},
--	// one expansion, with space
--	{
--		"http://www.golang.org/{bucket}/delete",
--		map[string]string{
--			"bucket": "red or blue",
--		},
--		"http://www.golang.org/red%20or%20blue/delete",
--	},
--	// expansion not found
--	{
--		"http://www.golang.org/{object}/delete",
--		map[string]string{
--			"bucket": "red or blue",
--		},
--		"http://www.golang.org//delete",
--	},
--	// multiple expansions
--	{
--		"http://www.golang.org/{one}/{two}/{three}/get",
--		map[string]string{
--			"one":   "ONE",
--			"two":   "TWO",
--			"three": "THREE",
--		},
--		"http://www.golang.org/ONE/TWO/THREE/get",
--	},
--	// utf-8 characters
--	{
--		"http://www.golang.org/{bucket}/get",
--		map[string]string{
--			"bucket": "£100",
--		},
--		"http://www.golang.org/%C2%A3100/get",
--	},
--	// punctuations
--	{
--		"http://www.golang.org/{bucket}/get",
--		map[string]string{
--			"bucket": `/\@:,.`,
--		},
--		"http://www.golang.org/%2F%5C%40%3A%2C./get",
--	},
--	// mis-matched brackets
--	{
--		"http://www.golang.org/{bucket/get",
--		map[string]string{
--			"bucket": "red",
--		},
--		"http://www.golang.org/{bucket/get",
--	},
--	// "+" prefix for suppressing escape
--	// See also: http://tools.ietf.org/html/rfc6570#section-3.2.3
--	{
--		"http://www.golang.org/{+topic}",
--		map[string]string{
--			"topic": "/topics/myproject/mytopic",
--		},
--		// The double slashes here look weird, but it's intentional
--		"http://www.golang.org//topics/myproject/mytopic",
--	},
--}
--
--func TestExpand(t *testing.T) {
--	for i, test := range expandTests {
--		u := url.URL{
--			Path: test.in,
--		}
--		Expand(&u, test.expansions)
--		got := u.Path
--		if got != test.want {
--			t.Errorf("got %q expected %q in test %d", got, test.want, i+1)
--		}
--	}
--}
--
--type CheckResponseTest struct {
--	in       *http.Response
--	bodyText string
--	want     error
--	errText  string
--}
--
--var checkResponseTests = []CheckResponseTest{
--	{
--		&http.Response{
--			StatusCode: http.StatusOK,
--		},
--		"",
--		nil,
--		"",
--	},
--	{
--		&http.Response{
--			StatusCode: http.StatusInternalServerError,
--		},
--		`{"error":{}}`,
--		&Error{
--			Code: http.StatusInternalServerError,
--			Body: `{"error":{}}`,
--		},
--		`googleapi: got HTTP response code 500 with body: {"error":{}}`,
--	},
--	{
--		&http.Response{
--			StatusCode: http.StatusNotFound,
--		},
--		`{"error":{"message":"Error message for StatusNotFound."}}`,
--		&Error{
--			Code:    http.StatusNotFound,
--			Message: "Error message for StatusNotFound.",
--			Body:    `{"error":{"message":"Error message for StatusNotFound."}}`,
--		},
--		"googleapi: Error 404: Error message for StatusNotFound.",
--	},
--	{
--		&http.Response{
--			StatusCode: http.StatusBadRequest,
--		},
--		`{"error":"invalid_token","error_description":"Invalid Value"}`,
--		&Error{
--			Code: http.StatusBadRequest,
--			Body: `{"error":"invalid_token","error_description":"Invalid Value"}`,
--		},
--		`googleapi: got HTTP response code 400 with body: {"error":"invalid_token","error_description":"Invalid Value"}`,
--	},
--	{
--		&http.Response{
--			StatusCode: http.StatusBadRequest,
--		},
--		`{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`,
--		&Error{
--			Code: http.StatusBadRequest,
--			Errors: []ErrorItem{
--				{
--					Reason:  "keyInvalid",
--					Message: "Bad Request",
--				},
--			},
--			Body:    `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`,
--			Message: "Bad Request",
--		},
--		"googleapi: Error 400: Bad Request, keyInvalid",
--	},
--}
--
--func TestCheckResponse(t *testing.T) {
--	for _, test := range checkResponseTests {
--		res := test.in
--		if test.bodyText != "" {
--			res.Body = ioutil.NopCloser(strings.NewReader(test.bodyText))
--		}
--		g := CheckResponse(res)
--		if !reflect.DeepEqual(g, test.want) {
--			t.Errorf("CheckResponse: got %v, want %v", g, test.want)
--			gotJson, err := json.Marshal(g)
--			if err != nil {
--				t.Error(err)
--			}
--			wantJson, err := json.Marshal(test.want)
--			if err != nil {
--				t.Error(err)
--			}
--			t.Errorf("json(got):  %q\njson(want): %q", string(gotJson), string(wantJson))
--		}
--		if g != nil && g.Error() != test.errText {
--			t.Errorf("CheckResponse: unexpected error message.\nGot:  %q\nwant: %q", g, test.errText)
--		}
--	}
--}
--
--type VariantPoint struct {
--	Type        string
--	Coordinates []float64
--}
--
--type VariantTest struct {
--	in     map[string]interface{}
--	result bool
--	want   VariantPoint
--}
--
--var coords = []interface{}{1.0, 2.0}
--
--var variantTests = []VariantTest{
--	{
--		in: map[string]interface{}{
--			"type":        "Point",
--			"coordinates": coords,
--		},
--		result: true,
--		want: VariantPoint{
--			Type:        "Point",
--			Coordinates: []float64{1.0, 2.0},
--		},
--	},
--	{
--		in: map[string]interface{}{
--			"type":  "Point",
--			"bogus": coords,
--		},
--		result: true,
--		want: VariantPoint{
--			Type: "Point",
--		},
--	},
--}
--
--func TestVariantType(t *testing.T) {
--	for _, test := range variantTests {
--		if g := VariantType(test.in); g != test.want.Type {
--			t.Errorf("VariantType(%v): got %v, want %v", test.in, g, test.want.Type)
--		}
--	}
--}
--
--func TestConvertVariant(t *testing.T) {
--	for _, test := range variantTests {
--		g := VariantPoint{}
--		r := ConvertVariant(test.in, &g)
--		if r != test.result {
--			t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, r, test.result)
--		}
--		if !reflect.DeepEqual(g, test.want) {
--			t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, g, test.want)
--		}
--	}
--}
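
For context on the API exercised by the table-driven tests above: CheckResponse returns nil for 2xx responses and a *Error carrying Code, Message and the raw Body otherwise. A minimal caller-side sketch, assuming the google.golang.org/api/googleapi import path that this update migrates to (the URL is only a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

func main() {
	// Placeholder endpoint; any JSON API fronted by googleapi behaves the same way.
	res, err := http.Get("https://www.googleapis.com/discovery/v1/apis")
	if err != nil {
		fmt.Println("transport error:", err)
		return
	}
	defer res.Body.Close()

	// CheckResponse returns nil for 2xx status codes and a *googleapi.Error
	// (Code, Message, raw Body) otherwise, as the tests above expect.
	if err := googleapi.CheckResponse(res); err != nil {
		if apiErr, ok := err.(*googleapi.Error); ok {
			fmt.Printf("API error %d: %s\n", apiErr.Code, apiErr.Message)
			return
		}
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("OK:", res.Status)
}
```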
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/LICENSE b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/LICENSE
-deleted file mode 100644
-index de9c88c..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/LICENSE
-+++ /dev/null
-@@ -1,18 +0,0 @@
--Copyright (c) 2013 Joshua Tacoma
--
--Permission is hereby granted, free of charge, to any person obtaining a copy of
--this software and associated documentation files (the "Software"), to deal in
--the Software without restriction, including without limitation the rights to
--use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
--the Software, and to permit persons to whom the Software is furnished to do so,
--subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in all
--copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
--FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
--COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
--IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
--CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/uritemplates.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/uritemplates.go
-deleted file mode 100644
-index 8a84813..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/uritemplates.go
-+++ /dev/null
-@@ -1,359 +0,0 @@
--// Copyright 2013 Joshua Tacoma. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package uritemplates is a level 4 implementation of RFC 6570 (URI
--// Template, http://tools.ietf.org/html/rfc6570).
--//
--// To use uritemplates, parse a template string and expand it with a value
--// map:
--//
--//	template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
--//	values := make(map[string]interface{})
--//	values["user"] = "jtacoma"
--//	values["repo"] = "uritemplates"
--//	expanded, _ := template.ExpandString(values)
--//	fmt.Printf(expanded)
--//
--package uritemplates
--
--import (
--	"bytes"
--	"errors"
--	"fmt"
--	"reflect"
--	"regexp"
--	"strconv"
--	"strings"
--)
--
--var (
--	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
--	reserved   = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
--	validname  = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
--	hex        = []byte("0123456789ABCDEF")
--)
--
--func pctEncode(src []byte) []byte {
--	dst := make([]byte, len(src)*3)
--	for i, b := range src {
--		buf := dst[i*3 : i*3+3]
--		buf[0] = 0x25
--		buf[1] = hex[b/16]
--		buf[2] = hex[b%16]
--	}
--	return dst
--}
--
--func escape(s string, allowReserved bool) (escaped string) {
--	if allowReserved {
--		escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
--	} else {
--		escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
--	}
--	return escaped
--}
--
--// A UriTemplate is a parsed representation of a URI template.
--type UriTemplate struct {
--	raw   string
--	parts []templatePart
--}
--
--// Parse parses a URI template string into a UriTemplate object.
--func Parse(rawtemplate string) (template *UriTemplate, err error) {
--	template = new(UriTemplate)
--	template.raw = rawtemplate
--	split := strings.Split(rawtemplate, "{")
--	template.parts = make([]templatePart, len(split)*2-1)
--	for i, s := range split {
--		if i == 0 {
--			if strings.Contains(s, "}") {
--				err = errors.New("unexpected }")
--				break
--			}
--			template.parts[i].raw = s
--		} else {
--			subsplit := strings.Split(s, "}")
--			if len(subsplit) != 2 {
--				err = errors.New("malformed template")
--				break
--			}
--			expression := subsplit[0]
--			template.parts[i*2-1], err = parseExpression(expression)
--			if err != nil {
--				break
--			}
--			template.parts[i*2].raw = subsplit[1]
--		}
--	}
--	if err != nil {
--		template = nil
--	}
--	return template, err
--}
--
--type templatePart struct {
--	raw           string
--	terms         []templateTerm
--	first         string
--	sep           string
--	named         bool
--	ifemp         string
--	allowReserved bool
--}
--
--type templateTerm struct {
--	name     string
--	explode  bool
--	truncate int
--}
--
--func parseExpression(expression string) (result templatePart, err error) {
--	switch expression[0] {
--	case '+':
--		result.sep = ","
--		result.allowReserved = true
--		expression = expression[1:]
--	case '.':
--		result.first = "."
--		result.sep = "."
--		expression = expression[1:]
--	case '/':
--		result.first = "/"
--		result.sep = "/"
--		expression = expression[1:]
--	case ';':
--		result.first = ";"
--		result.sep = ";"
--		result.named = true
--		expression = expression[1:]
--	case '?':
--		result.first = "?"
--		result.sep = "&"
--		result.named = true
--		result.ifemp = "="
--		expression = expression[1:]
--	case '&':
--		result.first = "&"
--		result.sep = "&"
--		result.named = true
--		result.ifemp = "="
--		expression = expression[1:]
--	case '#':
--		result.first = "#"
--		result.sep = ","
--		result.allowReserved = true
--		expression = expression[1:]
--	default:
--		result.sep = ","
--	}
--	rawterms := strings.Split(expression, ",")
--	result.terms = make([]templateTerm, len(rawterms))
--	for i, raw := range rawterms {
--		result.terms[i], err = parseTerm(raw)
--		if err != nil {
--			break
--		}
--	}
--	return result, err
--}
--
--func parseTerm(term string) (result templateTerm, err error) {
--	if strings.HasSuffix(term, "*") {
--		result.explode = true
--		term = term[:len(term)-1]
--	}
--	split := strings.Split(term, ":")
--	if len(split) == 1 {
--		result.name = term
--	} else if len(split) == 2 {
--		result.name = split[0]
--		var parsed int64
--		parsed, err = strconv.ParseInt(split[1], 10, 0)
--		result.truncate = int(parsed)
--	} else {
--		err = errors.New("multiple colons in same term")
--	}
--	if !validname.MatchString(result.name) {
--		err = errors.New("not a valid name: " + result.name)
--	}
--	if result.explode && result.truncate > 0 {
--		err = errors.New("both explode and prefix modifers on same term")
--	}
--	return result, err
--}
--
--// Expand expands a URI template with a set of values to produce a string.
--func (self *UriTemplate) Expand(value interface{}) (string, error) {
--	values, ismap := value.(map[string]interface{})
--	if !ismap {
--		if m, ismap := struct2map(value); !ismap {
--			return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
--		} else {
--			return self.Expand(m)
--		}
--	}
--	var buf bytes.Buffer
--	for _, p := range self.parts {
--		err := p.expand(&buf, values)
--		if err != nil {
--			return "", err
--		}
--	}
--	return buf.String(), nil
--}
--
--func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
--	if len(self.raw) > 0 {
--		buf.WriteString(self.raw)
--		return nil
--	}
--	var zeroLen = buf.Len()
--	buf.WriteString(self.first)
--	var firstLen = buf.Len()
--	for _, term := range self.terms {
--		value, exists := values[term.name]
--		if !exists {
--			continue
--		}
--		if buf.Len() != firstLen {
--			buf.WriteString(self.sep)
--		}
--		switch v := value.(type) {
--		case string:
--			self.expandString(buf, term, v)
--		case []interface{}:
--			self.expandArray(buf, term, v)
--		case map[string]interface{}:
--			if term.truncate > 0 {
--				return errors.New("cannot truncate a map expansion")
--			}
--			self.expandMap(buf, term, v)
--		default:
--			if m, ismap := struct2map(value); ismap {
--				if term.truncate > 0 {
--					return errors.New("cannot truncate a map expansion")
--				}
--				self.expandMap(buf, term, m)
--			} else {
--				str := fmt.Sprintf("%v", value)
--				self.expandString(buf, term, str)
--			}
--		}
--	}
--	if buf.Len() == firstLen {
--		original := buf.Bytes()[:zeroLen]
--		buf.Reset()
--		buf.Write(original)
--	}
--	return nil
--}
--
--func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
--	if self.named {
--		buf.WriteString(name)
--		if empty {
--			buf.WriteString(self.ifemp)
--		} else {
--			buf.WriteString("=")
--		}
--	}
--}
--
--func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
--	if len(s) > t.truncate && t.truncate > 0 {
--		s = s[:t.truncate]
--	}
--	self.expandName(buf, t.name, len(s) == 0)
--	buf.WriteString(escape(s, self.allowReserved))
--}
--
--func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
--	if len(a) == 0 {
--		return
--	} else if !t.explode {
--		self.expandName(buf, t.name, false)
--	}
--	for i, value := range a {
--		if t.explode && i > 0 {
--			buf.WriteString(self.sep)
--		} else if i > 0 {
--			buf.WriteString(",")
--		}
--		var s string
--		switch v := value.(type) {
--		case string:
--			s = v
--		default:
--			s = fmt.Sprintf("%v", v)
--		}
--		if len(s) > t.truncate && t.truncate > 0 {
--			s = s[:t.truncate]
--		}
--		if self.named && t.explode {
--			self.expandName(buf, t.name, len(s) == 0)
--		}
--		buf.WriteString(escape(s, self.allowReserved))
--	}
--}
--
--func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
--	if len(m) == 0 {
--		return
--	}
--	if !t.explode {
--		self.expandName(buf, t.name, len(m) == 0)
--	}
--	var firstLen = buf.Len()
--	for k, value := range m {
--		if firstLen != buf.Len() {
--			if t.explode {
--				buf.WriteString(self.sep)
--			} else {
--				buf.WriteString(",")
--			}
--		}
--		var s string
--		switch v := value.(type) {
--		case string:
--			s = v
--		default:
--			s = fmt.Sprintf("%v", v)
--		}
--		if t.explode {
--			buf.WriteString(escape(k, self.allowReserved))
--			buf.WriteRune('=')
--			buf.WriteString(escape(s, self.allowReserved))
--		} else {
--			buf.WriteString(escape(k, self.allowReserved))
--			buf.WriteRune(',')
--			buf.WriteString(escape(s, self.allowReserved))
--		}
--	}
--}
--
--func struct2map(v interface{}) (map[string]interface{}, bool) {
--	value := reflect.ValueOf(v)
--	switch value.Type().Kind() {
--	case reflect.Ptr:
--		return struct2map(value.Elem().Interface())
--	case reflect.Struct:
--		m := make(map[string]interface{})
--		for i := 0; i < value.NumField(); i++ {
--			tag := value.Type().Field(i).Tag
--			var name string
--			if strings.Contains(string(tag), ":") {
--				name = tag.Get("uri")
--			} else {
--				name = strings.TrimSpace(string(tag))
--			}
--			if len(name) == 0 {
--				name = value.Type().Field(i).Name
--			}
--			m[name] = value.Field(i).Interface()
--		}
--		return m, true
--	}
--	return nil, false
--}
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/utils.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/utils.go
-deleted file mode 100644
-index 399ef46..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/internal/uritemplates/utils.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package uritemplates
--
--func Expand(path string, expansions map[string]string) (string, error) {
--	template, err := Parse(path)
--	if err != nil {
--		return "", err
--	}
--	values := make(map[string]interface{})
--	for k, v := range expansions {
--		values[k] = v
--	}
--	return template.Expand(values)
--}
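
The Expand helper above is a one-shot wrapper around Parse and UriTemplate.Expand. A hedged sketch of its behaviour, written as if it lived inside the package, since googleapi/internal/uritemplates is not importable from outside google-api-go-client; ExampleExpand is a hypothetical name:

```go
package uritemplates

import "fmt"

// ExampleExpand (hypothetical) shows the one-shot helper from utils.go:
// parse an RFC 6570 template and substitute string values in a single call.
func ExampleExpand() {
	expanded, err := Expand(
		"https://api.github.com/repos{/user,repo}",
		map[string]string{"user": "jtacoma", "repo": "uritemplates"},
	)
	if err != nil {
		fmt.Println("template error:", err)
		return
	}
	fmt.Println(expanded)
	// Expected output: https://api.github.com/repos/jtacoma/uritemplates
}
```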
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/transport/apikey.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/transport/apikey.go
-deleted file mode 100644
-index eca1ea2..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/transport/apikey.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--// Copyright 2012 Google Inc. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package transport contains HTTP transports used to make
--// authenticated API requests.
--package transport
--
--import (
--	"errors"
--	"net/http"
--)
--
--// APIKey is an HTTP Transport which wraps an underlying transport and
--// appends an API Key "key" parameter to the URL of outgoing requests.
--type APIKey struct {
--	// Key is the API Key to set on requests.
--	Key string
--
--	// Transport is the underlying HTTP transport.
--	// If nil, http.DefaultTransport is used.
--	Transport http.RoundTripper
--}
--
--func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
--	rt := t.Transport
--	if rt == nil {
--		rt = http.DefaultTransport
--		if rt == nil {
--			return nil, errors.New("googleapi/transport: no Transport specified or available")
--		}
--	}
--	newReq := *req
--	args := newReq.URL.Query()
--	args.Set("key", t.Key)
--	newReq.URL.RawQuery = args.Encode()
--	return rt.RoundTrip(&newReq)
--}
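
The APIKey RoundTripper above clones each outgoing request and appends key=&lt;API key&gt; to its query string, falling back to http.DefaultTransport when Transport is nil. A minimal usage sketch, assuming the package keeps this shape under google.golang.org/api/googleapi/transport after the import-path migration:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi/transport"
)

func main() {
	// Every request sent through this client gets ?key=... appended by the
	// APIKey RoundTripper; the wrapped Transport defaults to
	// http.DefaultTransport when left nil.
	client := &http.Client{
		Transport: &transport.APIKey{Key: "YOUR_API_KEY"},
	}

	res, err := client.Get("https://www.googleapis.com/discovery/v1/apis")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```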
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types.go
-deleted file mode 100644
-index 7ed7dd9..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types.go
-+++ /dev/null
-@@ -1,150 +0,0 @@
--// Copyright 2013 Google Inc. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package googleapi
--
--import (
--	"encoding/json"
--	"strconv"
--)
--
--// Int64s is a slice of int64s that marshal as quoted strings in JSON.
--type Int64s []int64
--
--func (q *Int64s) UnmarshalJSON(raw []byte) error {
--	*q = (*q)[:0]
--	var ss []string
--	if err := json.Unmarshal(raw, &ss); err != nil {
--		return err
--	}
--	for _, s := range ss {
--		v, err := strconv.ParseInt(s, 10, 64)
--		if err != nil {
--			return err
--		}
--		*q = append(*q, int64(v))
--	}
--	return nil
--}
--
--// Int32s is a slice of int32s that marshal as quoted strings in JSON.
--type Int32s []int32
--
--func (q *Int32s) UnmarshalJSON(raw []byte) error {
--	*q = (*q)[:0]
--	var ss []string
--	if err := json.Unmarshal(raw, &ss); err != nil {
--		return err
--	}
--	for _, s := range ss {
--		v, err := strconv.ParseInt(s, 10, 32)
--		if err != nil {
--			return err
--		}
--		*q = append(*q, int32(v))
--	}
--	return nil
--}
--
--// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
--type Uint64s []uint64
--
--func (q *Uint64s) UnmarshalJSON(raw []byte) error {
--	*q = (*q)[:0]
--	var ss []string
--	if err := json.Unmarshal(raw, &ss); err != nil {
--		return err
--	}
--	for _, s := range ss {
--		v, err := strconv.ParseUint(s, 10, 64)
--		if err != nil {
--			return err
--		}
--		*q = append(*q, uint64(v))
--	}
--	return nil
--}
--
--// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
--type Uint32s []uint32
--
--func (q *Uint32s) UnmarshalJSON(raw []byte) error {
--	*q = (*q)[:0]
--	var ss []string
--	if err := json.Unmarshal(raw, &ss); err != nil {
--		return err
--	}
--	for _, s := range ss {
--		v, err := strconv.ParseUint(s, 10, 32)
--		if err != nil {
--			return err
--		}
--		*q = append(*q, uint32(v))
--	}
--	return nil
--}
--
--// Float64s is a slice of float64s that marshal as quoted strings in JSON.
--type Float64s []float64
--
--func (q *Float64s) UnmarshalJSON(raw []byte) error {
--	*q = (*q)[:0]
--	var ss []string
--	if err := json.Unmarshal(raw, &ss); err != nil {
--		return err
--	}
--	for _, s := range ss {
--		v, err := strconv.ParseFloat(s, 64)
--		if err != nil {
--			return err
--		}
--		*q = append(*q, float64(v))
--	}
--	return nil
--}
--
--func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
--	dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
--	dst = append(dst, '[')
--	for i := 0; i < n; i++ {
--		if i > 0 {
--			dst = append(dst, ',')
--		}
--		dst = append(dst, '"')
--		dst = fn(dst, i)
--		dst = append(dst, '"')
--	}
--	dst = append(dst, ']')
--	return dst, nil
--}
--
--func (s Int64s) MarshalJSON() ([]byte, error) {
--	return quotedList(len(s), func(dst []byte, i int) []byte {
--		return strconv.AppendInt(dst, s[i], 10)
--	})
--}
--
--func (s Int32s) MarshalJSON() ([]byte, error) {
--	return quotedList(len(s), func(dst []byte, i int) []byte {
--		return strconv.AppendInt(dst, int64(s[i]), 10)
--	})
--}
--
--func (s Uint64s) MarshalJSON() ([]byte, error) {
--	return quotedList(len(s), func(dst []byte, i int) []byte {
--		return strconv.AppendUint(dst, s[i], 10)
--	})
--}
--
--func (s Uint32s) MarshalJSON() ([]byte, error) {
--	return quotedList(len(s), func(dst []byte, i int) []byte {
--		return strconv.AppendUint(dst, uint64(s[i]), 10)
--	})
--}
--
--func (s Float64s) MarshalJSON() ([]byte, error) {
--	return quotedList(len(s), func(dst []byte, i int) []byte {
--		return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
--	})
--}
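
The doc comments above describe the quoted-string wire format for the integer and floating-point slice types; the test file that follows round-trips all five. As a trimmed-down illustration, assuming these types carry over unchanged to google.golang.org/api/googleapi:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// Int64s marshals as an array of quoted strings so that 64-bit values
	// survive JSON consumers that treat every number as a float64.
	ids := googleapi.Int64s{1, 2, 1 << 33}
	out, err := json.Marshal(ids)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // ["1","2","8589934592"]
}
```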
-diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types_test.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types_test.go
-deleted file mode 100644
-index a6b2045..0000000
---- a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/googleapi/types_test.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--// Copyright 2013 Google Inc. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package googleapi
--
--import (
--	"encoding/json"
--	"reflect"
--	"testing"
--)
--
--func TestTypes(t *testing.T) {
--	type T struct {
--		I32 Int32s
--		I64 Int64s
--		U32 Uint32s
--		U64 Uint64s
--		F64 Float64s
--	}
--	v := &T{
--		I32: Int32s{-1, 2, 3},
--		I64: Int64s{-1, 2, 1 << 33},
--		U32: Uint32s{1, 2},
--		U64: Uint64s{1, 2, 1 << 33},
--		F64: Float64s{1.5, 3.33},
--	}
--	got, err := json.Marshal(v)
--	if err != nil {
--		t.Fatal(err)
--	}
--	want := `{"I32":["-1","2","3"],"I64":["-1","2","8589934592"],"U32":["1","2"],"U64":["1","2","8589934592"],"F64":["1.5","3.33"]}`
--	if string(got) != want {
--		t.Fatalf("Marshal mismatch.\n got: %s\nwant: %s\n", got, want)
--	}
--
--	v2 := new(T)
--	if err := json.Unmarshal(got, v2); err != nil {
--		t.Fatalf("Unmarshal: %v", err)
--	}
--	if !reflect.DeepEqual(v, v2) {
--		t.Fatalf("Unmarshal didn't produce same results.\n got: %#v\nwant: %#v\n", v, v2)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
-deleted file mode 100644
-index 66be63a..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore
-+++ /dev/null
-@@ -1 +0,0 @@
--logrus
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
-deleted file mode 100644
-index c3af3ce..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
-+++ /dev/null
-@@ -1,10 +0,0 @@
--language: go
--go:
--  - 1.2
--  - 1.3
--  - tip
--install:
--  - go get github.com/stretchr/testify
--  - go get github.com/stvp/go-udp-testing
--  - go get github.com/tobi/airbrake-go
--  - go get github.com/getsentry/raven-go
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
-deleted file mode 100644
-index f090cb4..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE
-+++ /dev/null
-@@ -1,21 +0,0 @@
--The MIT License (MIT)
--
--Copyright (c) 2014 Simon Eskildsen
--
--Permission is hereby granted, free of charge, to any person obtaining a copy
--of this software and associated documentation files (the "Software"), to deal
--in the Software without restriction, including without limitation the rights
--to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
--copies of the Software, and to permit persons to whom the Software is
--furnished to do so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in
--all copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
--THE SOFTWARE.
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
-deleted file mode 100644
-index b6aa84c..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
-+++ /dev/null
-@@ -1,352 +0,0 @@
--# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)
--
--Logrus is a structured logger for Go (golang), completely API compatible with
--the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
--yet stable (pre 1.0), the core API is unlikely to change much but please version
--control your Logrus to make sure you aren't fetching latest `master` on every
--build.**
--
--Nicely color-coded in development (when a TTY is attached, otherwise just
--plain text):
--
--![Colored](http://i.imgur.com/PY7qMwd.png)
--
--With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
--or Splunk:
--
--```json
--{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
--ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
--
--{"level":"warning","msg":"The group's number increased tremendously!",
--"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
--
--{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
--"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
--
--{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
--"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
--
--{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
--"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
--```
--
--With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
--attached, the output is compatible with the
--[l2met](http://r.32k.io/l2met-introduction) format:
--
--```text
--time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
--time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
--time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
--time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
--time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
--```
--
--#### Example
--
--The simplest way to use Logrus is simply the package-level exported logger:
--
--```go
--package main
--
--import (
--  log "github.com/Sirupsen/logrus"
--)
--
--func main() {
--  log.WithFields(log.Fields{
--    "animal": "walrus",
--  }).Info("A walrus appears")
--}
--```
--
--Note that it's completely api-compatible with the stdlib logger, so you can
--replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
--and you'll now have the flexibility of Logrus. You can customize it all you
--want:
--
--```go
--package main
--
--import (
--  "os"
--  log "github.com/Sirupsen/logrus"
--  "github.com/Sirupsen/logrus/hooks/airbrake"
--)
--
--func init() {
--  // Log as JSON instead of the default ASCII formatter.
--  log.SetFormatter(&log.JSONFormatter{})
--
--  // Use the Airbrake hook to report errors that have Error severity or above to
--  // an exception tracker. You can create custom hooks, see the Hooks section.
--  log.AddHook(&logrus_airbrake.AirbrakeHook{})
--
--  // Output to stderr instead of stdout, could also be a file.
--  log.SetOutput(os.Stderr)
--
--  // Only log the warning severity or above.
--  log.SetLevel(log.WarnLevel)
--}
--
--func main() {
--  log.WithFields(log.Fields{
--    "animal": "walrus",
--    "size":   10,
--  }).Info("A group of walrus emerges from the ocean")
--
--  log.WithFields(log.Fields{
--    "omg":    true,
--    "number": 122,
--  }).Warn("The group's number increased tremendously!")
--
--  log.WithFields(log.Fields{
--    "omg":    true,
--    "number": 100,
--  }).Fatal("The ice breaks!")
--}
--```
--
--For more advanced usage such as logging to multiple locations from the same
--application, you can also create an instance of the `logrus` Logger:
--
--```go
--package main
--
--import (
--  "github.com/Sirupsen/logrus"
--)
--
--// Create a new instance of the logger. You can have any number of instances.
--var log = logrus.New()
--
--func main() {
--  // The API for setting attributes is a little different than the package level
--  // exported logger. See Godoc.
--  log.Out = os.Stderr
--
--  log.WithFields(logrus.Fields{
--    "animal": "walrus",
--    "size":   10,
--  }).Info("A group of walrus emerges from the ocean")
--}
--```
--
--#### Fields
--
--Logrus encourages careful, structured logging through logging fields instead of
--long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
--to send event %s to topic %s with key %d")`, you should log the much more
--discoverable:
--
--```go
--log.WithFields(log.Fields{
--  "event": event,
--  "topic": topic,
--  "key": key,
--}).Fatal("Failed to send event")
--```
--
--We've found this API forces you to think about logging in a way that produces
--much more useful logging messages. We've been in countless situations where just
--a single added field to a log statement that was already there would've saved us
--hours. The `WithFields` call is optional.
--
--In general, with Logrus using any of the `printf`-family functions should be
--seen as a hint you should add a field, however, you can still use the
--`printf`-family functions with Logrus.
--
--#### Hooks
--
--You can add hooks for logging levels. For example to send errors to an exception
--tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
--multiple places simultaneously, e.g. syslog.
--
--```go
--// Not the real implementation of the Airbrake hook. Just a simple sample.
--import (
--  log "github.com/Sirupsen/logrus"
--)
--
--func init() {
--  log.AddHook(new(AirbrakeHook))
--}
--
--type AirbrakeHook struct{}
--
--// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
--// the fields for the entry. See the Fields section of the README.
--func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
--  err := airbrake.Notify(entry.Data["error"].(error))
--  if err != nil {
--    log.WithFields(log.Fields{
--      "source":   "airbrake",
--      "endpoint": airbrake.Endpoint,
--    }).Info("Failed to send error to Airbrake")
--  }
--
--  return nil
--}
--
--// `Levels()` returns a slice of `Levels` the hook is fired for.
--func (hook *AirbrakeHook) Levels() []log.Level {
--  return []log.Level{
--    log.ErrorLevel,
--    log.FatalLevel,
--    log.PanicLevel,
--  }
--}
--```
--
--Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
--
--```go
--import (
--  log "github.com/Sirupsen/logrus"
--  "github.com/Sirupsen/logrus/hooks/airbrake"
--  "github.com/Sirupsen/logrus/hooks/syslog"
--  "log/syslog"
--)
--
--func init() {
--  log.AddHook(new(logrus_airbrake.AirbrakeHook))
--
--  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
--  if err != nil {
--    log.Error("Unable to connect to local syslog daemon")
--  } else {
--    log.AddHook(hook)
--  }
--}
--```
--
--* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
--  Send errors to an exception tracking service compatible with the Airbrake API.
--  Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
--
--* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
--  Send errors to the Papertrail hosted logging service via UDP.
--
--* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
--  Send errors to remote syslog server.
--  Uses standard library `log/syslog` behind the scenes.
--
--* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
--  Send errors to a channel in hipchat.
--
--* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
--  Send logs to Loggly (https://www.loggly.com/)
--
--#### Level logging
--
--Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
--
--```go
--log.Debug("Useful debugging information.")
--log.Info("Something noteworthy happened!")
--log.Warn("You should probably take a look at this.")
--log.Error("Something failed but I'm not quitting.")
--// Calls os.Exit(1) after logging
--log.Fatal("Bye.")
--// Calls panic() after logging
--log.Panic("I'm bailing.")
--```
--
--You can set the logging level on a `Logger`, then it will only log entries with
--that severity or anything above it:
--
--```go
--// Will log anything that is info or above (warn, error, fatal, panic). Default.
--log.SetLevel(log.InfoLevel)
--```
--
--It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
--environment if your application has that.
--
--#### Entries
--
--Besides the fields added with `WithField` or `WithFields` some fields are
--automatically added to all logging events:
--
--1. `time`. The timestamp when the entry was created.
--2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
--   the `AddFields` call. E.g. `Failed to send event.`
--3. `level`. The logging level. E.g. `info`.
--
--#### Environments
--
--Logrus has no notion of environment.
--
--If you wish for hooks and formatters to only be used in specific environments,
--you should handle that yourself. For example, if your application has a global
--variable `Environment`, which is a string representation of the environment you
--could do:
--
--```go
--import (
--  log "github.com/Sirupsen/logrus"
--)
--
--init() {
--  // do something here to set environment depending on an environment variable
--  // or command-line flag
--  if Environment == "production" {
--    log.SetFormatter(logrus.JSONFormatter)
--  } else {
--    // The TextFormatter is default, you don't actually have to do this.
--    log.SetFormatter(logrus.TextFormatter)
--  }
--}
--```
--
--This configuration is how `logrus` was intended to be used, but JSON in
--production is mostly only useful if you do log aggregation with tools like
--Splunk or Logstash.
--
--#### Formatters
--
--The built-in logging formatters are:
--
--* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
--  without colors.
--  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
--    field to `true`.  To force no colored output even if there is a TTY  set the
--    `DisableColors` field to `true`
--* `logrus.JSONFormatter`. Logs fields as JSON.
--
--Third party logging formatters:
--
--* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
--
--You can define your formatter by implementing the `Formatter` interface,
--requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
--`Fields` type (`map[string]interface{}`) with all your fields as well as the
--default ones (see Entries section above):
--
--```go
--type MyJSONFormatter struct {
--}
--
--log.SetFormatter(new(MyJSONFormatter))
--
--func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
--  // Note this doesn't include Time, Level and Message which are available on
--  // the Entry. Consult `godoc` on information about those fields or read the
--  // source of the official loggers.
--  serialized, err := json.Marshal(entry.Data)
--    if err != nil {
--      return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
--    }
--  return append(serialized, '\n'), nil
--}
--```
--
--#### Rotation
--
--Log rotation is not provided with Logrus. Log rotation should be done by an
--external program (like `logrotated(8)`) that can compress and delete old log
--entries. It should not be a feature of the application-level logger.
--
--
--[godoc]: https://godoc.org/github.com/Sirupsen/logrus
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
-deleted file mode 100644
-index e164eec..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
-+++ /dev/null
-@@ -1,248 +0,0 @@
--package logrus
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"os"
--	"time"
--)
--
--// An entry is the final or intermediate Logrus logging entry. It contains all
--// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
--// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
--// passed around as much as you wish to avoid field duplication.
--type Entry struct {
--	Logger *Logger
--
--	// Contains all the fields set by the user.
--	Data Fields
--
--	// Time at which the log entry was created
--	Time time.Time
--
--	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
--	Level Level
--
--	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
--	Message string
--}
--
--func NewEntry(logger *Logger) *Entry {
--	return &Entry{
--		Logger: logger,
--		// Default is three fields, give a little extra room
--		Data: make(Fields, 5),
--	}
--}
--
--// Returns a reader for the entry, which is a proxy to the formatter.
--func (entry *Entry) Reader() (*bytes.Buffer, error) {
--	serialized, err := entry.Logger.Formatter.Format(entry)
--	return bytes.NewBuffer(serialized), err
--}
--
--// Returns the string representation from the reader and ultimately the
--// formatter.
--func (entry *Entry) String() (string, error) {
--	reader, err := entry.Reader()
--	if err != nil {
--		return "", err
--	}
--
--	return reader.String(), err
--}
--
--// Add a single field to the Entry.
--func (entry *Entry) WithField(key string, value interface{}) *Entry {
--	return entry.WithFields(Fields{key: value})
--}
--
--// Add a map of fields to the Entry.
--func (entry *Entry) WithFields(fields Fields) *Entry {
--	data := Fields{}
--	for k, v := range entry.Data {
--		data[k] = v
--	}
--	for k, v := range fields {
--		data[k] = v
--	}
--	return &Entry{Logger: entry.Logger, Data: data}
--}
--
--func (entry *Entry) log(level Level, msg string) {
--	entry.Time = time.Now()
--	entry.Level = level
--	entry.Message = msg
--
--	if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
--		entry.Logger.mu.Lock()
--		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
--		entry.Logger.mu.Unlock()
--	}
--
--	reader, err := entry.Reader()
--	if err != nil {
--		entry.Logger.mu.Lock()
--		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
--		entry.Logger.mu.Unlock()
--	}
--
--	entry.Logger.mu.Lock()
--	defer entry.Logger.mu.Unlock()
--
--	_, err = io.Copy(entry.Logger.Out, reader)
--	if err != nil {
--		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
--	}
--
--	// To avoid Entry#log() returning a value that only would make sense for
--	// panic() to use in Entry#Panic(), we avoid the allocation by checking
--	// directly here.
--	if level <= PanicLevel {
--		panic(entry)
--	}
--}
--
--func (entry *Entry) Debug(args ...interface{}) {
--	if entry.Logger.Level >= DebugLevel {
--		entry.log(DebugLevel, fmt.Sprint(args...))
--	}
--}
--
--func (entry *Entry) Print(args ...interface{}) {
--	entry.Info(args...)
--}
--
--func (entry *Entry) Info(args ...interface{}) {
--	if entry.Logger.Level >= InfoLevel {
--		entry.log(InfoLevel, fmt.Sprint(args...))
--	}
--}
--
--func (entry *Entry) Warn(args ...interface{}) {
--	if entry.Logger.Level >= WarnLevel {
--		entry.log(WarnLevel, fmt.Sprint(args...))
--	}
--}
--
--func (entry *Entry) Error(args ...interface{}) {
--	if entry.Logger.Level >= ErrorLevel {
--		entry.log(ErrorLevel, fmt.Sprint(args...))
--	}
--}
--
--func (entry *Entry) Fatal(args ...interface{}) {
--	if entry.Logger.Level >= FatalLevel {
--		entry.log(FatalLevel, fmt.Sprint(args...))
--	}
--	os.Exit(1)
--}
--
--func (entry *Entry) Panic(args ...interface{}) {
--	if entry.Logger.Level >= PanicLevel {
--		entry.log(PanicLevel, fmt.Sprint(args...))
--	}
--	panic(fmt.Sprint(args...))
--}
--
--// Entry Printf family functions
--
--func (entry *Entry) Debugf(format string, args ...interface{}) {
--	if entry.Logger.Level >= DebugLevel {
--		entry.Debug(fmt.Sprintf(format, args...))
--	}
--}
--
--func (entry *Entry) Infof(format string, args ...interface{}) {
--	if entry.Logger.Level >= InfoLevel {
--		entry.Info(fmt.Sprintf(format, args...))
--	}
--}
--
--func (entry *Entry) Printf(format string, args ...interface{}) {
--	entry.Infof(format, args...)
--}
--
--func (entry *Entry) Warnf(format string, args ...interface{}) {
--	if entry.Logger.Level >= WarnLevel {
--		entry.Warn(fmt.Sprintf(format, args...))
--	}
--}
--
--func (entry *Entry) Warningf(format string, args ...interface{}) {
--	entry.Warnf(format, args...)
--}
--
--func (entry *Entry) Errorf(format string, args ...interface{}) {
--	if entry.Logger.Level >= ErrorLevel {
--		entry.Error(fmt.Sprintf(format, args...))
--	}
--}
--
--func (entry *Entry) Fatalf(format string, args ...interface{}) {
--	if entry.Logger.Level >= FatalLevel {
--		entry.Fatal(fmt.Sprintf(format, args...))
--	}
--}
--
--func (entry *Entry) Panicf(format string, args ...interface{}) {
--	if entry.Logger.Level >= PanicLevel {
--		entry.Panic(fmt.Sprintf(format, args...))
--	}
--}
--
--// Entry Println family functions
--
--func (entry *Entry) Debugln(args ...interface{}) {
--	if entry.Logger.Level >= DebugLevel {
--		entry.Debug(entry.sprintlnn(args...))
--	}
--}
--
--func (entry *Entry) Infoln(args ...interface{}) {
--	if entry.Logger.Level >= InfoLevel {
--		entry.Info(entry.sprintlnn(args...))
--	}
--}
--
--func (entry *Entry) Println(args ...interface{}) {
--	entry.Infoln(args...)
--}
--
--func (entry *Entry) Warnln(args ...interface{}) {
--	if entry.Logger.Level >= WarnLevel {
--		entry.Warn(entry.sprintlnn(args...))
--	}
--}
--
--func (entry *Entry) Warningln(args ...interface{}) {
--	entry.Warnln(args...)
--}
--
--func (entry *Entry) Errorln(args ...interface{}) {
--	if entry.Logger.Level >= ErrorLevel {
--		entry.Error(entry.sprintlnn(args...))
--	}
--}
--
--func (entry *Entry) Fatalln(args ...interface{}) {
--	if entry.Logger.Level >= FatalLevel {
--		entry.Fatal(entry.sprintlnn(args...))
--	}
--}
--
--func (entry *Entry) Panicln(args ...interface{}) {
--	if entry.Logger.Level >= PanicLevel {
--		entry.Panic(entry.sprintlnn(args...))
--	}
--}
--
--// Sprintlnn => Sprint no newline. This is to get the behavior of how
--// fmt.Sprintln where spaces are always added between operands, regardless of
--// their type. Instead of vendoring the Sprintln implementation to spare a
--// string allocation, we do the simplest thing.
--func (entry *Entry) sprintlnn(args ...interface{}) string {
--	msg := fmt.Sprintln(args...)
--	return msg[:len(msg)-1]
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
-deleted file mode 100644
-index 98717df..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--package logrus
--
--import (
--	"bytes"
--	"fmt"
--	"testing"
--
--	"github.com/stretchr/testify/assert"
--)
--
--func TestEntryPanicln(t *testing.T) {
--	errBoom := fmt.Errorf("boom time")
--
--	defer func() {
--		p := recover()
--		assert.NotNil(t, p)
--
--		switch pVal := p.(type) {
--		case *Entry:
--			assert.Equal(t, "kaboom", pVal.Message)
--			assert.Equal(t, errBoom, pVal.Data["err"])
--		default:
--			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
--		}
--	}()
--
--	logger := New()
--	logger.Out = &bytes.Buffer{}
--	entry := NewEntry(logger)
--	entry.WithField("err", errBoom).Panicln("kaboom")
--}
--
--func TestEntryPanicf(t *testing.T) {
--	errBoom := fmt.Errorf("boom again")
--
--	defer func() {
--		p := recover()
--		assert.NotNil(t, p)
--
--		switch pVal := p.(type) {
--		case *Entry:
--			assert.Equal(t, "kaboom true", pVal.Message)
--			assert.Equal(t, errBoom, pVal.Data["err"])
--		default:
--			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
--		}
--	}()
--
--	logger := New()
--	logger.Out = &bytes.Buffer{}
--	entry := NewEntry(logger)
--	entry.WithField("err", errBoom).Panicf("kaboom %v", true)
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
-deleted file mode 100644
-index a62ba45..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--package main
--
--import (
--	"github.com/Sirupsen/logrus"
--)
--
--var log = logrus.New()
--
--func init() {
--	log.Formatter = new(logrus.JSONFormatter)
--	log.Formatter = new(logrus.TextFormatter) // default
--}
--
--func main() {
--	defer func() {
--		err := recover()
--		if err != nil {
--			log.WithFields(logrus.Fields{
--				"omg":    true,
--				"err":    err,
--				"number": 100,
--			}).Fatal("The ice breaks!")
--		}
--	}()
--
--	log.WithFields(logrus.Fields{
--		"animal": "walrus",
--		"size":   10,
--	}).Info("A group of walrus emerges from the ocean")
--
--	log.WithFields(logrus.Fields{
--		"omg":    true,
--		"number": 122,
--	}).Warn("The group's number increased tremendously!")
--
--	log.WithFields(logrus.Fields{
--		"animal": "orca",
--		"size":   9009,
--	}).Panic("It's over 9000!")
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
-deleted file mode 100644
-index 42e7a4c..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
-+++ /dev/null
-@@ -1,35 +0,0 @@
--package main
--
--import (
--	"github.com/Sirupsen/logrus"
--	"github.com/Sirupsen/logrus/hooks/airbrake"
--	"github.com/tobi/airbrake-go"
--)
--
--var log = logrus.New()
--
--func init() {
--	log.Formatter = new(logrus.TextFormatter) // default
--	log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
--}
--
--func main() {
--	airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
--	airbrake.ApiKey = "whatever"
--	airbrake.Environment = "production"
--
--	log.WithFields(logrus.Fields{
--		"animal": "walrus",
--		"size":   10,
--	}).Info("A group of walrus emerges from the ocean")
--
--	log.WithFields(logrus.Fields{
--		"omg":    true,
--		"number": 122,
--	}).Warn("The group's number increased tremendously!")
--
--	log.WithFields(logrus.Fields{
--		"omg":    true,
--		"number": 100,
--	}).Fatal("The ice breaks!")
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
-deleted file mode 100644
-index d087124..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
-+++ /dev/null
-@@ -1,182 +0,0 @@
--package logrus
--
--import (
--	"io"
--)
--
--var (
--	// std is the name of the standard logger in stdlib `log`
--	std = New()
--)
--
--// SetOutput sets the standard logger output.
--func SetOutput(out io.Writer) {
--	std.mu.Lock()
--	defer std.mu.Unlock()
--	std.Out = out
--}
--
--// SetFormatter sets the standard logger formatter.
--func SetFormatter(formatter Formatter) {
--	std.mu.Lock()
--	defer std.mu.Unlock()
--	std.Formatter = formatter
--}
--
--// SetLevel sets the standard logger level.
--func SetLevel(level Level) {
--	std.mu.Lock()
--	defer std.mu.Unlock()
--	std.Level = level
--}
--
--// GetLevel returns the standard logger level.
--func GetLevel() Level {
--	return std.Level
--}
--
--// AddHook adds a hook to the standard logger hooks.
--func AddHook(hook Hook) {
--	std.mu.Lock()
--	defer std.mu.Unlock()
--	std.Hooks.Add(hook)
--}
--
--// WithField creates an entry from the standard logger and adds a field to
--// it. If you want multiple fields, use `WithFields`.
--//
--// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
--// or Panic on the Entry it returns.
--func WithField(key string, value interface{}) *Entry {
--	return std.WithField(key, value)
--}
--
--// WithFields creates an entry from the standard logger and adds multiple
--// fields to it. This is simply a helper for `WithField`, invoking it
--// once for each field.
--//
--// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
--// or Panic on the Entry it returns.
--func WithFields(fields Fields) *Entry {
--	return std.WithFields(fields)
--}
--
--// Debug logs a message at level Debug on the standard logger.
--func Debug(args ...interface{}) {
--	std.Debug(args...)
--}
--
--// Print logs a message at level Info on the standard logger.
--func Print(args ...interface{}) {
--	std.Print(args...)
--}
--
--// Info logs a message at level Info on the standard logger.
--func Info(args ...interface{}) {
--	std.Info(args...)
--}
--
--// Warn logs a message at level Warn on the standard logger.
--func Warn(args ...interface{}) {
--	std.Warn(args...)
--}
--
--// Warning logs a message at level Warn on the standard logger.
--func Warning(args ...interface{}) {
--	std.Warning(args...)
--}
--
--// Error logs a message at level Error on the standard logger.
--func Error(args ...interface{}) {
--	std.Error(args...)
--}
--
--// Panic logs a message at level Panic on the standard logger.
--func Panic(args ...interface{}) {
--	std.Panic(args...)
--}
--
--// Fatal logs a message at level Fatal on the standard logger.
--func Fatal(args ...interface{}) {
--	std.Fatal(args...)
--}
--
--// Debugf logs a message at level Debug on the standard logger.
--func Debugf(format string, args ...interface{}) {
--	std.Debugf(format, args...)
--}
--
--// Printf logs a message at level Info on the standard logger.
--func Printf(format string, args ...interface{}) {
--	std.Printf(format, args...)
--}
--
--// Infof logs a message at level Info on the standard logger.
--func Infof(format string, args ...interface{}) {
--	std.Infof(format, args...)
--}
--
--// Warnf logs a message at level Warn on the standard logger.
--func Warnf(format string, args ...interface{}) {
--	std.Warnf(format, args...)
--}
--
--// Warningf logs a message at level Warn on the standard logger.
--func Warningf(format string, args ...interface{}) {
--	std.Warningf(format, args...)
--}
--
--// Errorf logs a message at level Error on the standard logger.
--func Errorf(format string, args ...interface{}) {
--	std.Errorf(format, args...)
--}
--
--// Panicf logs a message at level Panic on the standard logger.
--func Panicf(format string, args ...interface{}) {
--	std.Panicf(format, args...)
--}
--
--// Fatalf logs a message at level Fatal on the standard logger.
--func Fatalf(format string, args ...interface{}) {
--	std.Fatalf(format, args...)
--}
--
--// Debugln logs a message at level Debug on the standard logger.
--func Debugln(args ...interface{}) {
--	std.Debugln(args...)
--}
--
--// Println logs a message at level Info on the standard logger.
--func Println(args ...interface{}) {
--	std.Println(args...)
--}
--
--// Infoln logs a message at level Info on the standard logger.
--func Infoln(args ...interface{}) {
--	std.Infoln(args...)
--}
--
--// Warnln logs a message at level Warn on the standard logger.
--func Warnln(args ...interface{}) {
--	std.Warnln(args...)
--}
--
--// Warningln logs a message at level Warn on the standard logger.
--func Warningln(args ...interface{}) {
--	std.Warningln(args...)
--}
--
--// Errorln logs a message at level Error on the standard logger.
--func Errorln(args ...interface{}) {
--	std.Errorln(args...)
--}
--
--// Panicln logs a message at level Panic on the standard logger.
--func Panicln(args ...interface{}) {
--	std.Panicln(args...)
--}
--
--// Fatalln logs a message at level Fatal on the standard logger.
--func Fatalln(args ...interface{}) {
--	std.Fatalln(args...)
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
-deleted file mode 100644
-index 038ce9f..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--package logrus
--
--// The Formatter interface is used to implement a custom Formatter. It takes an
--// `Entry`. It exposes all the fields, including the default ones:
--//
--// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
--// * `entry.Data["time"]`. The timestamp.
--// * `entry.Data["level"]. The level the entry was logged at.
--//
--// Any additional fields added with `WithField` or `WithFields` are also in
--// `entry.Data`. Format is expected to return an array of bytes which are then
--// logged to `logger.Out`.
--type Formatter interface {
--	Format(*Entry) ([]byte, error)
--}
--
--// This is to not silently overwrite `time`, `msg` and `level` fields when
--// dumping it. If this code wasn't there doing:
--//
--//  logrus.WithField("level", 1).Info("hello")
--//
--// Would just silently drop the user provided level. Instead with this code
--// it'll logged as:
--//
--//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
--//
--// It's not exported because it's still using Data in an opinionated way. It's to
--// avoid code duplication between the two default formatters.
--func prefixFieldClashes(data Fields) {
--	_, ok := data["time"]
--	if ok {
--		data["fields.time"] = data["time"]
--	}
--
--	_, ok = data["msg"]
--	if ok {
--		data["fields.msg"] = data["msg"]
--	}
--
--	_, ok = data["level"]
--	if ok {
--		data["fields.level"] = data["level"]
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
-deleted file mode 100644
-index 77989da..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
-+++ /dev/null
-@@ -1,88 +0,0 @@
--package logrus
--
--import (
--	"testing"
--	"time"
--)
--
--// smallFields is a small size data set for benchmarking
--var smallFields = Fields{
--	"foo":   "bar",
--	"baz":   "qux",
--	"one":   "two",
--	"three": "four",
--}
--
--// largeFields is a large size data set for benchmarking
--var largeFields = Fields{
--	"foo":       "bar",
--	"baz":       "qux",
--	"one":       "two",
--	"three":     "four",
--	"five":      "six",
--	"seven":     "eight",
--	"nine":      "ten",
--	"eleven":    "twelve",
--	"thirteen":  "fourteen",
--	"fifteen":   "sixteen",
--	"seventeen": "eighteen",
--	"nineteen":  "twenty",
--	"a":         "b",
--	"c":         "d",
--	"e":         "f",
--	"g":         "h",
--	"i":         "j",
--	"k":         "l",
--	"m":         "n",
--	"o":         "p",
--	"q":         "r",
--	"s":         "t",
--	"u":         "v",
--	"w":         "x",
--	"y":         "z",
--	"this":      "will",
--	"make":      "thirty",
--	"entries":   "yeah",
--}
--
--func BenchmarkSmallTextFormatter(b *testing.B) {
--	doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
--}
--
--func BenchmarkLargeTextFormatter(b *testing.B) {
--	doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
--}
--
--func BenchmarkSmallColoredTextFormatter(b *testing.B) {
--	doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
--}
--
--func BenchmarkLargeColoredTextFormatter(b *testing.B) {
--	doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
--}
--
--func BenchmarkSmallJSONFormatter(b *testing.B) {
--	doBenchmark(b, &JSONFormatter{}, smallFields)
--}
--
--func BenchmarkLargeJSONFormatter(b *testing.B) {
--	doBenchmark(b, &JSONFormatter{}, largeFields)
--}
--
--func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
--	entry := &Entry{
--		Time:    time.Time{},
--		Level:   InfoLevel,
--		Message: "message",
--		Data:    fields,
--	}
--	var d []byte
--	var err error
--	for i := 0; i < b.N; i++ {
--		d, err = formatter.Format(entry)
--		if err != nil {
--			b.Fatal(err)
--		}
--		b.SetBytes(int64(len(d)))
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
-deleted file mode 100644
-index 13f34cb..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
-+++ /dev/null
-@@ -1,122 +0,0 @@
--package logrus
--
--import (
--	"testing"
--
--	"github.com/stretchr/testify/assert"
--)
--
--type TestHook struct {
--	Fired bool
--}
--
--func (hook *TestHook) Fire(entry *Entry) error {
--	hook.Fired = true
--	return nil
--}
--
--func (hook *TestHook) Levels() []Level {
--	return []Level{
--		DebugLevel,
--		InfoLevel,
--		WarnLevel,
--		ErrorLevel,
--		FatalLevel,
--		PanicLevel,
--	}
--}
--
--func TestHookFires(t *testing.T) {
--	hook := new(TestHook)
--
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Hooks.Add(hook)
--		assert.Equal(t, hook.Fired, false)
--
--		log.Print("test")
--	}, func(fields Fields) {
--		assert.Equal(t, hook.Fired, true)
--	})
--}
--
--type ModifyHook struct {
--}
--
--func (hook *ModifyHook) Fire(entry *Entry) error {
--	entry.Data["wow"] = "whale"
--	return nil
--}
--
--func (hook *ModifyHook) Levels() []Level {
--	return []Level{
--		DebugLevel,
--		InfoLevel,
--		WarnLevel,
--		ErrorLevel,
--		FatalLevel,
--		PanicLevel,
--	}
--}
--
--func TestHookCanModifyEntry(t *testing.T) {
--	hook := new(ModifyHook)
--
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Hooks.Add(hook)
--		log.WithField("wow", "elephant").Print("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["wow"], "whale")
--	})
--}
--
--func TestCanFireMultipleHooks(t *testing.T) {
--	hook1 := new(ModifyHook)
--	hook2 := new(TestHook)
--
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Hooks.Add(hook1)
--		log.Hooks.Add(hook2)
--
--		log.WithField("wow", "elephant").Print("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["wow"], "whale")
--		assert.Equal(t, hook2.Fired, true)
--	})
--}
--
--type ErrorHook struct {
--	Fired bool
--}
--
--func (hook *ErrorHook) Fire(entry *Entry) error {
--	hook.Fired = true
--	return nil
--}
--
--func (hook *ErrorHook) Levels() []Level {
--	return []Level{
--		ErrorLevel,
--	}
--}
--
--func TestErrorHookShouldntFireOnInfo(t *testing.T) {
--	hook := new(ErrorHook)
--
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Hooks.Add(hook)
--		log.Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, hook.Fired, false)
--	})
--}
--
--func TestErrorHookShouldFireOnError(t *testing.T) {
--	hook := new(ErrorHook)
--
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Hooks.Add(hook)
--		log.Error("test")
--	}, func(fields Fields) {
--		assert.Equal(t, hook.Fired, true)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
-deleted file mode 100644
-index 0da2b36..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package logrus
--
--// A hook to be fired when logging on the logging levels returned from
--// `Levels()` on your implementation of the interface. Note that this is not
--// fired in a goroutine or a channel with workers; you should handle such
--// functionality yourself if your call is non-blocking and you don't wish for
--// the logging calls for levels returned from `Levels()` to block.
--type Hook interface {
--	Levels() []Level
--	Fire(*Entry) error
--}
--
--// Internal type for storing the hooks on a logger instance.
--type levelHooks map[Level][]Hook
--
--// Add a hook to an instance of logger. This is called with
--// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
--func (hooks levelHooks) Add(hook Hook) {
--	for _, level := range hook.Levels() {
--		hooks[level] = append(hooks[level], hook)
--	}
--}
--
--// Fire all the hooks for the passed level. Used by `entry.log` to fire
--// appropriate hooks for a log entry.
--func (hooks levelHooks) Fire(level Level, entry *Entry) error {
--	for _, hook := range hooks[level] {
--		if err := hook.Fire(entry); err != nil {
--			return err
--		}
--	}
--
--	return nil
--}
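
The `levelHooks` map above is what `Logger.Hooks` exposes, so any type that implements `Levels()` and `Fire()` can be registered. A minimal sketch of a custom hook, assuming the vendored Sirupsen/logrus import path and a hypothetical `countingHook` type:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

// countingHook counts the entries it sees at the levels it registers for.
type countingHook struct {
	fired int
}

// Fire is invoked by the logger for every entry at a matching level.
func (h *countingHook) Fire(entry *logrus.Entry) error {
	h.fired++
	return nil
}

// Levels limits the hook to error-and-above entries.
func (h *countingHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func main() {
	log := logrus.New()
	hook := &countingHook{}
	log.Hooks.Add(hook) // appends the hook under each level it returns

	log.Error("something went wrong")
	log.Info("this level is not hooked")
	fmt.Println("hook fired", hook.fired, "time(s)") // hook fired 1 time(s)
}
```
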
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
-deleted file mode 100644
-index 880d21e..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--package logrus_airbrake
--
--import (
--	"github.com/Sirupsen/logrus"
--	"github.com/tobi/airbrake-go"
--)
--
--// AirbrakeHook to send exceptions to an exception-tracking service compatible
--// with the Airbrake API. You must set:
--// * airbrake.Endpoint
--// * airbrake.ApiKey
--// * airbrake.Environment (only sends exceptions when set to "production")
--//
--// Before using this hook to send an error, note that entries that trigger an
--// Error, Fatal or Panic should include an "error" field to send to Airbrake.
--type AirbrakeHook struct{}
--
--func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
--	if entry.Data["error"] == nil {
--		entry.Logger.WithFields(logrus.Fields{
--			"source":   "airbrake",
--			"endpoint": airbrake.Endpoint,
--		}).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
--		return nil
--	}
--
--	err, ok := entry.Data["error"].(error)
--	if !ok {
--		entry.Logger.WithFields(logrus.Fields{
--			"source":   "airbrake",
--			"endpoint": airbrake.Endpoint,
--		}).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
--		return nil
--	}
--
--	airErr := airbrake.Notify(err)
--	if airErr != nil {
--		entry.Logger.WithFields(logrus.Fields{
--			"source":   "airbrake",
--			"endpoint": airbrake.Endpoint,
--			"error":    airErr,
--		}).Warn("Failed to send error to Airbrake")
--	}
--
--	return nil
--}
--
--func (hook *AirbrakeHook) Levels() []logrus.Level {
--	return []logrus.Level{
--		logrus.ErrorLevel,
--		logrus.FatalLevel,
--		logrus.PanicLevel,
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
-deleted file mode 100644
-index ae61e92..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
-+++ /dev/null
-@@ -1,28 +0,0 @@
--# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
--
--[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
--
--In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
--
--## Usage
--
--You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
--
--For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
--
--```go
--import (
--  "log/syslog"
--  "github.com/Sirupsen/logrus"
--  "github.com/Sirupsen/logrus/hooks/papertrail"
--)
--
--func main() {
--  log       := logrus.New()
--  hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
--
--  if err == nil {
--    log.Hooks.Add(hook)
--  }
--}
--```
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
-deleted file mode 100644
-index 12c56f2..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--package logrus_papertrail
--
--import (
--	"fmt"
--	"net"
--	"os"
--	"time"
--
--	"github.com/Sirupsen/logrus"
--)
--
--const (
--	format = "Jan 2 15:04:05"
--)
--
--// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
--type PapertrailHook struct {
--	Host    string
--	Port    int
--	AppName string
--	UDPConn net.Conn
--}
--
--// NewPapertrailHook creates a hook to be added to an instance of logger.
--func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
--	conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
--	return &PapertrailHook{host, port, appName, conn}, err
--}
--
--// Fire is called when a log event is fired.
--func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
--	date := time.Now().Format(format)
--	payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Level, entry.Message)
--
--	bytesWritten, err := hook.UDPConn.Write([]byte(payload))
--	if err != nil {
--		fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
--		return err
--	}
--
--	return nil
--}
--
--// Levels returns the available logging levels.
--func (hook *PapertrailHook) Levels() []logrus.Level {
--	return []logrus.Level{
--		logrus.PanicLevel,
--		logrus.FatalLevel,
--		logrus.ErrorLevel,
--		logrus.WarnLevel,
--		logrus.InfoLevel,
--		logrus.DebugLevel,
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
-deleted file mode 100644
-index 96318d0..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package logrus_papertrail
--
--import (
--	"fmt"
--	"testing"
--
--	"github.com/Sirupsen/logrus"
--	"github.com/stvp/go-udp-testing"
--)
--
--func TestWritingToUDP(t *testing.T) {
--	port := 16661
--	udp.SetAddr(fmt.Sprintf(":%d", port))
--
--	hook, err := NewPapertrailHook("localhost", port, "test")
--	if err != nil {
--		t.Errorf("Unable to connect to local UDP server.")
--	}
--
--	log := logrus.New()
--	log.Hooks.Add(hook)
--
--	udp.ShouldReceive(t, "foo", func() {
--		log.Info("foo")
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
-deleted file mode 100644
-index a409f3b..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
-+++ /dev/null
-@@ -1,61 +0,0 @@
--# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
--
--[Sentry](https://getsentry.com) provides both self-hosted and hosted
--solutions for exception tracking.
--Both client and server are
--[open source](https://github.com/getsentry/sentry).
--
--## Usage
--
--Every sentry application defined on the server gets a different
--[DSN](https://www.getsentry.com/docs/). In the example below replace
--`YOUR_DSN` with the one created for your application.
--
--```go
--import (
--  "github.com/Sirupsen/logrus"
--  "github.com/Sirupsen/logrus/hooks/sentry"
--)
--
--func main() {
--  log       := logrus.New()
--  hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
--    logrus.PanicLevel,
--    logrus.FatalLevel,
--    logrus.ErrorLevel,
--  })
--
--  if err == nil {
--    log.Hooks.Add(hook)
--  }
--}
--```
--
--## Special fields
--
--Some logrus fields have a special meaning in this hook;
--these are server_name and logger.
--When logs are sent to sentry these fields are treated differently.
--- server_name (also known as hostname) is the name of the server which
--is logging the event (hostname.example.com)
--- logger is the part of the application which is logging the event.
--In go this usually means setting it to the name of the package.
--
--## Timeout
--
--`Timeout` is the time the sentry hook will wait for a response
--from the sentry server.
--
--If this time elapses with no response from
--the server an error will be returned.
--
--If `Timeout` is set to 0 the SentryHook will not wait for a reply
--and will assume a correct delivery.
--
--The SentryHook has a default timeout of `100 milliseconds` when created
--with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
--
--```go
--hook, _ := logrus_sentry.NewSentryHook(...)
--hook.Timeout = 20 * time.Second
--```
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
-deleted file mode 100644
-index 379f281..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
-+++ /dev/null
-@@ -1,100 +0,0 @@
--package logrus_sentry
--
--import (
--	"fmt"
--	"time"
--
--	"github.com/Sirupsen/logrus"
--	"github.com/getsentry/raven-go"
--)
--
--var (
--	severityMap = map[logrus.Level]raven.Severity{
--		logrus.DebugLevel: raven.DEBUG,
--		logrus.InfoLevel:  raven.INFO,
--		logrus.WarnLevel:  raven.WARNING,
--		logrus.ErrorLevel: raven.ERROR,
--		logrus.FatalLevel: raven.FATAL,
--		logrus.PanicLevel: raven.FATAL,
--	}
--)
--
--func getAndDel(d logrus.Fields, key string) (string, bool) {
--	var (
--		ok  bool
--		v   interface{}
--		val string
--	)
--	if v, ok = d[key]; !ok {
--		return "", false
--	}
--
--	if val, ok = v.(string); !ok {
--		return "", false
--	}
--	delete(d, key)
--	return val, true
--}
--
--// SentryHook delivers logs to a sentry server.
--type SentryHook struct {
--	// Timeout sets the time to wait for a delivery error from the sentry server.
--	// If this is set to zero the hook will not wait for any response and will
--	// consider the message correctly sent
--	Timeout time.Duration
--
--	client *raven.Client
--	levels []logrus.Level
--}
--
--// NewSentryHook creates a hook to be added to an instance of logger
--// and initializes the raven client.
--// This method sets the timeout to 100 milliseconds.
--func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
--	client, err := raven.NewClient(DSN, nil)
--	if err != nil {
--		return nil, err
--	}
--	return &SentryHook{100 * time.Millisecond, client, levels}, nil
--}
--
--// Called when an event should be sent to sentry
--// Special fields that sentry uses to give more information to the server
--// are extracted from entry.Data (if they are found)
--// These fields are: logger and server_name
--func (hook *SentryHook) Fire(entry *logrus.Entry) error {
--	packet := &raven.Packet{
--		Message:   entry.Message,
--		Timestamp: raven.Timestamp(entry.Time),
--		Level:     severityMap[entry.Level],
--		Platform:  "go",
--	}
--
--	d := entry.Data
--
--	if logger, ok := getAndDel(d, "logger"); ok {
--		packet.Logger = logger
--	}
--	if serverName, ok := getAndDel(d, "server_name"); ok {
--		packet.ServerName = serverName
--	}
--	packet.Extra = map[string]interface{}(d)
--
--	_, errCh := hook.client.Capture(packet, nil)
--	timeout := hook.Timeout
--	if timeout != 0 {
--		timeoutCh := time.After(timeout)
--		select {
--		case err := <-errCh:
--			return err
--		case <-timeoutCh:
--			return fmt.Errorf("no response from sentry server in %s", timeout)
--		}
--	}
--	return nil
--}
--
--// Levels returns the available logging levels.
--func (hook *SentryHook) Levels() []logrus.Level {
--	return hook.levels
--}
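
The interesting part of the Fire path above is how `getAndDel` promotes the `logger` and `server_name` fields out of the entry data before the remainder becomes the packet's `Extra` map. A standalone sketch of that extraction, with purely illustrative field names and values:

```go
package main

import "fmt"

// getAndDel mirrors the helper above: it pops a string-valued key from the
// field map and reports whether it was present.
func getAndDel(d map[string]interface{}, key string) (string, bool) {
	v, ok := d[key]
	if !ok {
		return "", false
	}
	s, ok := v.(string)
	if !ok {
		return "", false
	}
	delete(d, key)
	return s, true
}

func main() {
	fields := map[string]interface{}{
		"logger":      "test.logger",
		"server_name": "testserver.internal",
		"attempt":     3,
	}

	loggerName, _ := getAndDel(fields, "logger")
	serverName, _ := getAndDel(fields, "server_name")

	// The two special fields become dedicated packet attributes; whatever is
	// left over is what the hook would send as Extra data.
	fmt.Println("Logger:", loggerName)
	fmt.Println("ServerName:", serverName)
	fmt.Println("Extra:", fields)
}
```
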
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
-deleted file mode 100644
-index 45f18d1..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--package logrus_sentry
--
--import (
--	"encoding/json"
--	"fmt"
--	"io/ioutil"
--	"net/http"
--	"net/http/httptest"
--	"strings"
--	"testing"
--
--	"github.com/Sirupsen/logrus"
--	"github.com/getsentry/raven-go"
--)
--
--const (
--	message     = "error message"
--	server_name = "testserver.internal"
--	logger_name = "test.logger"
--)
--
--func getTestLogger() *logrus.Logger {
--	l := logrus.New()
--	l.Out = ioutil.Discard
--	return l
--}
--
--func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
--	pch := make(chan *raven.Packet, 1)
--	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
--		defer req.Body.Close()
--		d := json.NewDecoder(req.Body)
--		p := &raven.Packet{}
--		err := d.Decode(p)
--		if err != nil {
--			t.Fatal(err.Error())
--		}
--
--		pch <- p
--	}))
--	defer s.Close()
--
--	fragments := strings.SplitN(s.URL, "://", 2)
--	dsn := fmt.Sprintf(
--		"%s://public:secret@%s/sentry/project-id",
--		fragments[0],
--		fragments[1],
--	)
--	tf(dsn, pch)
--}
--
--func TestSpecialFields(t *testing.T) {
--	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
--		logger := getTestLogger()
--
--		hook, err := NewSentryHook(dsn, []logrus.Level{
--			logrus.ErrorLevel,
--		})
--
--		if err != nil {
--			t.Fatal(err.Error())
--		}
--		logger.Hooks.Add(hook)
--		logger.WithFields(logrus.Fields{
--			"server_name": server_name,
--			"logger":      logger_name,
--		}).Error(message)
--
--		packet := <-pch
--		if packet.Logger != logger_name {
--			t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
--		}
--
--		if packet.ServerName != server_name {
--			t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
--		}
--	})
--}
--
--func TestSentryHandler(t *testing.T) {
--	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
--		logger := getTestLogger()
--		hook, err := NewSentryHook(dsn, []logrus.Level{
--			logrus.ErrorLevel,
--		})
--		if err != nil {
--			t.Fatal(err.Error())
--		}
--		logger.Hooks.Add(hook)
--
--		logger.Error(message)
--		packet := <-pch
--		if packet.Message != message {
--			t.Errorf("message should have been %s, was %s", message, packet.Message)
--		}
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
-deleted file mode 100644
-index 4dbb8e7..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
-+++ /dev/null
-@@ -1,20 +0,0 @@
--# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
--
--## Usage
--
--```go
--import (
--  "log/syslog"
--  "github.com/Sirupsen/logrus"
--  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
--)
--
--func main() {
--  log       := logrus.New()
--  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
--
--  if err == nil {
--    log.Hooks.Add(hook)
--  }
--}
--```
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
-deleted file mode 100644
-index b6fa374..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
-+++ /dev/null
-@@ -1,59 +0,0 @@
--package logrus_syslog
--
--import (
--	"fmt"
--	"github.com/Sirupsen/logrus"
--	"log/syslog"
--	"os"
--)
--
--// SyslogHook to send logs via syslog.
--type SyslogHook struct {
--	Writer        *syslog.Writer
--	SyslogNetwork string
--	SyslogRaddr   string
--}
--
--// Creates a hook to be added to an instance of logger. This is called with
--// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
--// `if err == nil { log.Hooks.Add(hook) }`
--func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
--	w, err := syslog.Dial(network, raddr, priority, tag)
--	return &SyslogHook{w, network, raddr}, err
--}
--
--func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
--	line, err := entry.String()
--	if err != nil {
--		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
--		return err
--	}
--
--	switch entry.Level {
--	case logrus.PanicLevel:
--		return hook.Writer.Crit(line)
--	case logrus.FatalLevel:
--		return hook.Writer.Crit(line)
--	case logrus.ErrorLevel:
--		return hook.Writer.Err(line)
--	case logrus.WarnLevel:
--		return hook.Writer.Warning(line)
--	case logrus.InfoLevel:
--		return hook.Writer.Info(line)
--	case logrus.DebugLevel:
--		return hook.Writer.Debug(line)
--	default:
--		return nil
--	}
--}
--
--func (hook *SyslogHook) Levels() []logrus.Level {
--	return []logrus.Level{
--		logrus.PanicLevel,
--		logrus.FatalLevel,
--		logrus.ErrorLevel,
--		logrus.WarnLevel,
--		logrus.InfoLevel,
--		logrus.DebugLevel,
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
-deleted file mode 100644
-index 42762dc..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package logrus_syslog
--
--import (
--	"github.com/Sirupsen/logrus"
--	"log/syslog"
--	"testing"
--)
--
--func TestLocalhostAddAndPrint(t *testing.T) {
--	log := logrus.New()
--	hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
--
--	if err != nil {
--		t.Errorf("Unable to connect to local syslog.")
--	}
--
--	log.Hooks.Add(hook)
--
--	for _, level := range hook.Levels() {
--		if len(log.Hooks[level]) != 1 {
--			t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
--		}
--	}
--
--	log.Info("Congratulations!")
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
-deleted file mode 100644
-index b09227c..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package logrus
--
--import (
--	"encoding/json"
--	"fmt"
--	"time"
--)
--
--type JSONFormatter struct{}
--
--func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
--	data := make(Fields, len(entry.Data)+3)
--	for k, v := range entry.Data {
--		data[k] = v
--	}
--	prefixFieldClashes(data)
--	data["time"] = entry.Time.Format(time.RFC3339)
--	data["msg"] = entry.Message
--	data["level"] = entry.Level.String()
--
--	serialized, err := json.Marshal(data)
--	if err != nil {
--		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
--	}
--	return append(serialized, '\n'), nil
--}
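
JSONFormatter merges the entry's data with the reserved time/msg/level keys and emits one JSON object per line. A quick sketch using the exported Entry fields that the benchmark code earlier in this patch also relies on; the timestamp, message, and field values here are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	entry := &logrus.Entry{
		Time:    time.Date(2015, 1, 28, 18, 43, 21, 0, time.UTC),
		Level:   logrus.InfoLevel,
		Message: "hello from the JSON formatter",
		Data:    logrus.Fields{"component": "example"},
	}

	out, err := (&logrus.JSONFormatter{}).Format(entry)
	if err != nil {
		panic(err)
	}
	// Prints one JSON object per line, e.g.
	// {"component":"example","level":"info","msg":"hello from the JSON formatter","time":"2015-01-28T18:43:21Z"}
	fmt.Print(string(out))
}
```
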
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
-deleted file mode 100644
-index b392e54..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
-+++ /dev/null
-@@ -1,161 +0,0 @@
--package logrus
--
--import (
--	"io"
--	"os"
--	"sync"
--)
--
--type Logger struct {
--	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
--	// file, or leave it default which is `os.Stdout`. You can also set this to
--	// something more adventurous, such as logging to Kafka.
--	Out io.Writer
--	// Hooks for the logger instance. These allow firing events based on logging
--	// levels and log entries. For example, to send errors to an error tracking
--	// service, log to StatsD or dump the core on fatal errors.
--	Hooks levelHooks
--	// All log entries pass through the formatter before logged to Out. The
--	// included formatters are `TextFormatter` and `JSONFormatter` for which
--	// TextFormatter is the default. In development (when a TTY is attached) it
--	// logs with colors, but to a file it wouldn't. You can easily implement your
--	// own that implements the `Formatter` interface, see the `README` or included
--	// formatters for examples.
--	Formatter Formatter
--	// The logging level the logger should log at. This is typically (and defaults
--	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
--	// logged. `logrus.Debug` is useful in development and when tracing verbose output.
--	Level Level
--	// Used to sync writing to the log.
--	mu sync.Mutex
--}
--
--// Creates a new logger. Configuration should be set by changing `Formatter`,
--// `Out` and `Hooks` directly on the default logger instance. You can also just
--// instantiate your own:
--//
--//    var log = &Logger{
--//      Out: os.Stderr,
--//      Formatter: new(JSONFormatter),
--//      Hooks: make(levelHooks),
--//      Level: logrus.DebugLevel,
--//    }
--//
--// It's recommended to make this a global instance called `log`.
--func New() *Logger {
--	return &Logger{
--		Out:       os.Stdout,
--		Formatter: new(TextFormatter),
--		Hooks:     make(levelHooks),
--		Level:     InfoLevel,
--	}
--}
--
--// Adds a field to the log entry; note that it doesn't log until you call
--// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
--// If you want multiple fields, use `WithFields`.
--func (logger *Logger) WithField(key string, value interface{}) *Entry {
--	return NewEntry(logger).WithField(key, value)
--}
--
--// Adds a struct of fields to the log entry. All it does is call `WithField` for
--// each `Field`.
--func (logger *Logger) WithFields(fields Fields) *Entry {
--	return NewEntry(logger).WithFields(fields)
--}
--
--func (logger *Logger) Debugf(format string, args ...interface{}) {
--	NewEntry(logger).Debugf(format, args...)
--}
--
--func (logger *Logger) Infof(format string, args ...interface{}) {
--	NewEntry(logger).Infof(format, args...)
--}
--
--func (logger *Logger) Printf(format string, args ...interface{}) {
--	NewEntry(logger).Printf(format, args...)
--}
--
--func (logger *Logger) Warnf(format string, args ...interface{}) {
--	NewEntry(logger).Warnf(format, args...)
--}
--
--func (logger *Logger) Warningf(format string, args ...interface{}) {
--	NewEntry(logger).Warnf(format, args...)
--}
--
--func (logger *Logger) Errorf(format string, args ...interface{}) {
--	NewEntry(logger).Errorf(format, args...)
--}
--
--func (logger *Logger) Fatalf(format string, args ...interface{}) {
--	NewEntry(logger).Fatalf(format, args...)
--}
--
--func (logger *Logger) Panicf(format string, args ...interface{}) {
--	NewEntry(logger).Panicf(format, args...)
--}
--
--func (logger *Logger) Debug(args ...interface{}) {
--	NewEntry(logger).Debug(args...)
--}
--
--func (logger *Logger) Info(args ...interface{}) {
--	NewEntry(logger).Info(args...)
--}
--
--func (logger *Logger) Print(args ...interface{}) {
--	NewEntry(logger).Info(args...)
--}
--
--func (logger *Logger) Warn(args ...interface{}) {
--	NewEntry(logger).Warn(args...)
--}
--
--func (logger *Logger) Warning(args ...interface{}) {
--	NewEntry(logger).Warn(args...)
--}
--
--func (logger *Logger) Error(args ...interface{}) {
--	NewEntry(logger).Error(args...)
--}
--
--func (logger *Logger) Fatal(args ...interface{}) {
--	NewEntry(logger).Fatal(args...)
--}
--
--func (logger *Logger) Panic(args ...interface{}) {
--	NewEntry(logger).Panic(args...)
--}
--
--func (logger *Logger) Debugln(args ...interface{}) {
--	NewEntry(logger).Debugln(args...)
--}
--
--func (logger *Logger) Infoln(args ...interface{}) {
--	NewEntry(logger).Infoln(args...)
--}
--
--func (logger *Logger) Println(args ...interface{}) {
--	NewEntry(logger).Println(args...)
--}
--
--func (logger *Logger) Warnln(args ...interface{}) {
--	NewEntry(logger).Warnln(args...)
--}
--
--func (logger *Logger) Warningln(args ...interface{}) {
--	NewEntry(logger).Warnln(args...)
--}
--
--func (logger *Logger) Errorln(args ...interface{}) {
--	NewEntry(logger).Errorln(args...)
--}
--
--func (logger *Logger) Fatalln(args ...interface{}) {
--	NewEntry(logger).Fatalln(args...)
--}
--
--func (logger *Logger) Panicln(args ...interface{}) {
--	NewEntry(logger).Panicln(args...)
--}
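
A minimal sketch of the Logger API described above: construct a logger, adjust its output and level, then attach fields before emitting an entry. The field names and messages are illustrative only:

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stderr
	log.Level = logrus.DebugLevel

	// WithField/WithFields only build an Entry; nothing is written until one
	// of the level methods (Debug, Info, Warn, ...) is called on it.
	log.WithFields(logrus.Fields{
		"component": "example",
		"attempt":   1,
	}).Info("starting up")

	log.WithField("component", "example").Debug("verbose detail")
}
```
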
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
-deleted file mode 100644
-index 43ee12e..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
-+++ /dev/null
-@@ -1,94 +0,0 @@
--package logrus
--
--import (
--	"fmt"
--	"log"
--)
--
--// Fields type, used to pass to `WithFields`.
--type Fields map[string]interface{}
--
--// Level type
--type Level uint8
--
--// Convert the Level to a string. E.g. PanicLevel becomes "panic".
--func (level Level) String() string {
--	switch level {
--	case DebugLevel:
--		return "debug"
--	case InfoLevel:
--		return "info"
--	case WarnLevel:
--		return "warning"
--	case ErrorLevel:
--		return "error"
--	case FatalLevel:
--		return "fatal"
--	case PanicLevel:
--		return "panic"
--	}
--
--	return "unknown"
--}
--
--// ParseLevel takes a string level and returns the Logrus log level constant.
--func ParseLevel(lvl string) (Level, error) {
--	switch lvl {
--	case "panic":
--		return PanicLevel, nil
--	case "fatal":
--		return FatalLevel, nil
--	case "error":
--		return ErrorLevel, nil
--	case "warn", "warning":
--		return WarnLevel, nil
--	case "info":
--		return InfoLevel, nil
--	case "debug":
--		return DebugLevel, nil
--	}
--
--	var l Level
--	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
--}
--
--// These are the different logging levels. You can set the logging level to log
--// on your instance of logger, obtained with `logrus.New()`.
--const (
--	// PanicLevel level, highest level of severity. Logs and then calls panic with the
--	// message passed to Debug, Info, ...
--	PanicLevel Level = iota
--	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
--	// logging level is set to Panic.
--	FatalLevel
--	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
--	// Commonly used for hooks to send errors to an error tracking service.
--	ErrorLevel
--	// WarnLevel level. Non-critical entries that deserve eyes.
--	WarnLevel
--	// InfoLevel level. General operational entries about what's going on inside the
--	// application.
--	InfoLevel
--	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
--	DebugLevel
--)
--
--// Won't compile if StdLogger can't be realized by a log.Logger
--var _ StdLogger = &log.Logger{}
--
--// StdLogger is what your logrus-enabled library should take, that way
--// it'll accept a stdlib logger and a logrus logger. There's no standard
--// interface, this is the closest we get, unfortunately.
--type StdLogger interface {
--	Print(...interface{})
--	Printf(string, ...interface{})
--	Println(...interface{})
--
--	Fatal(...interface{})
--	Fatalf(string, ...interface{})
--	Fatalln(...interface{})
--
--	Panic(...interface{})
--	Panicf(string, ...interface{})
--	Panicln(...interface{})
--}
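
A tiny sketch of the Level helpers defined above: `ParseLevel` maps names to the constants and `String()` maps them back, so the two round-trip (with "warn" printing back as "warning"). The level names iterated here are the ones the switch statements above accept:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

func main() {
	for _, name := range []string{"panic", "fatal", "error", "warn", "info", "debug"} {
		lvl, err := logrus.ParseLevel(name)
		if err != nil {
			panic(err)
		}
		// "warn" parses to WarnLevel, which prints back as "warning".
		fmt.Printf("%-7s -> %d (%s)\n", name, lvl, lvl)
	}

	if _, err := logrus.ParseLevel("verbose"); err != nil {
		fmt.Println("unknown levels are rejected:", err)
	}
}
```
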
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
-deleted file mode 100644
-index 5302542..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
-+++ /dev/null
-@@ -1,283 +0,0 @@
--package logrus
--
--import (
--	"bytes"
--	"encoding/json"
--	"strconv"
--	"strings"
--	"testing"
--
--	"github.com/stretchr/testify/assert"
--)
--
--func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
--	var buffer bytes.Buffer
--	var fields Fields
--
--	logger := New()
--	logger.Out = &buffer
--	logger.Formatter = new(JSONFormatter)
--
--	log(logger)
--
--	err := json.Unmarshal(buffer.Bytes(), &fields)
--	assert.Nil(t, err)
--
--	assertions(fields)
--}
--
--func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
--	var buffer bytes.Buffer
--
--	logger := New()
--	logger.Out = &buffer
--	logger.Formatter = &TextFormatter{
--		DisableColors: true,
--	}
--
--	log(logger)
--
--	fields := make(map[string]string)
--	for _, kv := range strings.Split(buffer.String(), " ") {
--		if !strings.Contains(kv, "=") {
--			continue
--		}
--		kvArr := strings.Split(kv, "=")
--		key := strings.TrimSpace(kvArr[0])
--		val := kvArr[1]
--		if kvArr[1][0] == '"' {
--			var err error
--			val, err = strconv.Unquote(val)
--			assert.NoError(t, err)
--		}
--		fields[key] = val
--	}
--	assertions(fields)
--}
--
--func TestPrint(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Print("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test")
--		assert.Equal(t, fields["level"], "info")
--	})
--}
--
--func TestInfo(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test")
--		assert.Equal(t, fields["level"], "info")
--	})
--}
--
--func TestWarn(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Warn("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test")
--		assert.Equal(t, fields["level"], "warning")
--	})
--}
--
--func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Infoln("test", "test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test test")
--	})
--}
--
--func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Infoln("test", 10)
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test 10")
--	})
--}
--
--func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Infoln(10, 10)
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "10 10")
--	})
--}
--
--func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Infoln(10, 10)
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "10 10")
--	})
--}
--
--func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Info("test", 10)
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test10")
--	})
--}
--
--func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.Info("test", "test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "testtest")
--	})
--}
--
--func TestWithFieldsShouldAllowAssignments(t *testing.T) {
--	var buffer bytes.Buffer
--	var fields Fields
--
--	logger := New()
--	logger.Out = &buffer
--	logger.Formatter = new(JSONFormatter)
--
--	localLog := logger.WithFields(Fields{
--		"key1": "value1",
--	})
--
--	localLog.WithField("key2", "value2").Info("test")
--	err := json.Unmarshal(buffer.Bytes(), &fields)
--	assert.Nil(t, err)
--
--	assert.Equal(t, "value2", fields["key2"])
--	assert.Equal(t, "value1", fields["key1"])
--
--	buffer = bytes.Buffer{}
--	fields = Fields{}
--	localLog.Info("test")
--	err = json.Unmarshal(buffer.Bytes(), &fields)
--	assert.Nil(t, err)
--
--	_, ok := fields["key2"]
--	assert.Equal(t, false, ok)
--	assert.Equal(t, "value1", fields["key1"])
--}
--
--func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.WithField("msg", "hello").Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test")
--	})
--}
--
--func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.WithField("msg", "hello").Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["msg"], "test")
--		assert.Equal(t, fields["fields.msg"], "hello")
--	})
--}
--
--func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.WithField("time", "hello").Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["fields.time"], "hello")
--	})
--}
--
--func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
--	LogAndAssertJSON(t, func(log *Logger) {
--		log.WithField("level", 1).Info("test")
--	}, func(fields Fields) {
--		assert.Equal(t, fields["level"], "info")
--		assert.Equal(t, fields["fields.level"], 1)
--	})
--}
--
--func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
--	LogAndAssertText(t, func(log *Logger) {
--		ll := log.WithField("herp", "derp")
--		ll.Info("hello")
--		ll.Info("bye")
--	}, func(fields map[string]string) {
--		for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
--			if _, ok := fields[fieldName]; ok {
--				t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
--			}
--		}
--	})
--}
--
--func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
--
--	var buffer bytes.Buffer
--	var fields Fields
--
--	logger := New()
--	logger.Out = &buffer
--	logger.Formatter = new(JSONFormatter)
--
--	llog := logger.WithField("context", "eating raw fish")
--
--	llog.Info("looks delicious")
--
--	err := json.Unmarshal(buffer.Bytes(), &fields)
--	assert.NoError(t, err, "should have decoded first message")
--	assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
--	assert.Equal(t, fields["msg"], "looks delicious")
--	assert.Equal(t, fields["context"], "eating raw fish")
--
--	buffer.Reset()
--
--	llog.Warn("omg it is!")
--
--	err = json.Unmarshal(buffer.Bytes(), &fields)
--	assert.NoError(t, err, "should have decoded second message")
--	assert.Len(t, fields, 4, "should only have msg/time/level/context fields")
--	assert.Equal(t, fields["msg"], "omg it is!")
--	assert.Equal(t, fields["context"], "eating raw fish")
--	assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
--
--}
--
--func TestConvertLevelToString(t *testing.T) {
--	assert.Equal(t, "debug", DebugLevel.String())
--	assert.Equal(t, "info", InfoLevel.String())
--	assert.Equal(t, "warning", WarnLevel.String())
--	assert.Equal(t, "error", ErrorLevel.String())
--	assert.Equal(t, "fatal", FatalLevel.String())
--	assert.Equal(t, "panic", PanicLevel.String())
--}
--
--func TestParseLevel(t *testing.T) {
--	l, err := ParseLevel("panic")
--	assert.Nil(t, err)
--	assert.Equal(t, PanicLevel, l)
--
--	l, err = ParseLevel("fatal")
--	assert.Nil(t, err)
--	assert.Equal(t, FatalLevel, l)
--
--	l, err = ParseLevel("error")
--	assert.Nil(t, err)
--	assert.Equal(t, ErrorLevel, l)
--
--	l, err = ParseLevel("warn")
--	assert.Nil(t, err)
--	assert.Equal(t, WarnLevel, l)
--
--	l, err = ParseLevel("warning")
--	assert.Nil(t, err)
--	assert.Equal(t, WarnLevel, l)
--
--	l, err = ParseLevel("info")
--	assert.Nil(t, err)
--	assert.Equal(t, InfoLevel, l)
--
--	l, err = ParseLevel("debug")
--	assert.Nil(t, err)
--	assert.Equal(t, DebugLevel, l)
--
--	l, err = ParseLevel("invalid")
--	assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
-deleted file mode 100644
-index 8fe02a4..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--// Based on ssh/terminal:
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package logrus
--
--import "syscall"
--
--const ioctlReadTermios = syscall.TIOCGETA
--
--type Termios syscall.Termios
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
-deleted file mode 100644
-index 0428ee5..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--/*
--  Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
--*/
--package logrus
--
--import (
--	"syscall"
--)
--
--const ioctlReadTermios = syscall.TIOCGETA
--
--type Termios struct {
--	Iflag  uint32
--	Oflag  uint32
--	Cflag  uint32
--	Lflag  uint32
--	Cc     [20]uint8
--	Ispeed uint32
--	Ospeed uint32
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
-deleted file mode 100644
-index a2c0b40..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--// Based on ssh/terminal:
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package logrus
--
--import "syscall"
--
--const ioctlReadTermios = syscall.TCGETS
--
--type Termios syscall.Termios
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
-deleted file mode 100644
-index 276447b..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--// Based on ssh/terminal:
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build linux,!appengine darwin freebsd
--
--package logrus
--
--import (
--	"syscall"
--	"unsafe"
--)
--
--// IsTerminal returns true if the given file descriptor is a terminal.
--func IsTerminal() bool {
--	fd := syscall.Stdout
--	var termios Termios
--	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
--	return err == 0
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
-deleted file mode 100644
-index 2e09f6f..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--// Based on ssh/terminal:
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build windows
--
--package logrus
--
--import (
--	"syscall"
--	"unsafe"
--)
--
--var kernel32 = syscall.NewLazyDLL("kernel32.dll")
--
--var (
--	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
--)
--
--// IsTerminal returns true if the given file descriptor is a terminal.
--func IsTerminal() bool {
--	fd := syscall.Stdout
--	var st uint32
--	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
--	return r != 0 && e == 0
--}
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
-deleted file mode 100644
-index 78e7889..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
-+++ /dev/null
-@@ -1,124 +0,0 @@
--package logrus
--
--import (
--	"bytes"
--	"fmt"
--	"regexp"
--	"sort"
--	"strings"
--	"time"
--)
--
--const (
--	nocolor = 0
--	red     = 31
--	green   = 32
--	yellow  = 33
--	blue    = 34
--)
--
--var (
--	baseTimestamp time.Time
--	isTerminal    bool
--	noQuoteNeeded *regexp.Regexp
--)
--
--func init() {
--	baseTimestamp = time.Now()
--	isTerminal = IsTerminal()
--}
--
--func miniTS() int {
--	return int(time.Since(baseTimestamp) / time.Second)
--}
--
--type TextFormatter struct {
--	// Set to true to bypass checking for a TTY before outputting colors.
--	ForceColors   bool
--	DisableColors bool
--	// Set to true to disable timestamp logging (useful when the output
--	// is redirected to a logging system already adding a timestamp)
--	DisableTimestamp bool
--}
--
--func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
--
--	var keys []string
--	for k := range entry.Data {
--		keys = append(keys, k)
--	}
--	sort.Strings(keys)
--
--	b := &bytes.Buffer{}
--
--	prefixFieldClashes(entry.Data)
--
--	isColored := (f.ForceColors || isTerminal) && !f.DisableColors
--
--	if isColored {
--		printColored(b, entry, keys)
--	} else {
--		if !f.DisableTimestamp {
--			f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
--		}
--		f.appendKeyValue(b, "level", entry.Level.String())
--		f.appendKeyValue(b, "msg", entry.Message)
--		for _, key := range keys {
--			f.appendKeyValue(b, key, entry.Data[key])
--		}
--	}
--
--	b.WriteByte('\n')
--	return b.Bytes(), nil
--}
--
--func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
--	var levelColor int
--	switch entry.Level {
--	case WarnLevel:
--		levelColor = yellow
--	case ErrorLevel, FatalLevel, PanicLevel:
--		levelColor = red
--	default:
--		levelColor = blue
--	}
--
--	levelText := strings.ToUpper(entry.Level.String())[0:4]
--
--	fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
--	for _, k := range keys {
--		v := entry.Data[k]
--		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
--	}
--}
--
--func needsQuoting(text string) bool {
--	for _, ch := range text {
--		if !((ch >= 'a' && ch <= 'z') ||
--			(ch >= 'A' && ch <= 'Z') ||
--			(ch >= '0' && ch < '9') ||
--			ch == '-' || ch == '.') {
--			return false
--		}
--	}
--	return true
--}
--
--func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
--	switch value.(type) {
--	case string:
--		if needsQuoting(value.(string)) {
--			fmt.Fprintf(b, "%v=%s ", key, value)
--		} else {
--			fmt.Fprintf(b, "%v=%q ", key, value)
--		}
--	case error:
--		if needsQuoting(value.(error).Error()) {
--			fmt.Fprintf(b, "%v=%s ", key, value)
--		} else {
--			fmt.Fprintf(b, "%v=%q ", key, value)
--		}
--	default:
--		fmt.Fprintf(b, "%v=%v ", key, value)
--	}
--}
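
With colors disabled, the formatter above emits key=value pairs and quotes any value containing characters outside its safe set. A short sketch reusing the exported Entry fields from the benchmark code at the top of this patch; the timestamp, message, and fields are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	f := &logrus.TextFormatter{DisableColors: true}

	entry := &logrus.Entry{
		Time:    time.Date(2015, 1, 28, 18, 43, 21, 0, time.UTC),
		Level:   logrus.WarnLevel,
		Message: "disk space low",
		Data:    logrus.Fields{"free_gb": 2, "path": "/var"},
	}

	out, err := f.Format(entry)
	if err != nil {
		panic(err)
	}
	// Values with spaces, slashes, or colons get quoted, e.g.:
	// time="2015-01-28T18:43:21Z" level=warning msg="disk space low" free_gb=2 path="/var"
	fmt.Print(string(out))
}
```
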
-diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
-deleted file mode 100644
-index f604f1b..0000000
---- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package logrus
--
--import (
--	"bytes"
--	"errors"
--
--	"testing"
--)
--
--func TestQuoting(t *testing.T) {
--	tf := &TextFormatter{DisableColors: true}
--
--	checkQuoting := func(q bool, value interface{}) {
--		b, _ := tf.Format(WithField("test", value))
--		idx := bytes.Index(b, ([]byte)("test="))
--		cont := bytes.Contains(b[idx+5:], []byte{'"'})
--		if cont != q {
--			if q {
--				t.Errorf("quoting expected for: %#v", value)
--			} else {
--				t.Errorf("quoting not expected for: %#v", value)
--			}
--		}
--	}
--
--	checkQuoting(false, "abcd")
--	checkQuoting(false, "v1.0")
--	checkQuoting(true, "/foobar")
--	checkQuoting(true, "x y")
--	checkQuoting(true, "x,y")
--	checkQuoting(false, errors.New("invalid"))
--	checkQuoting(true, errors.New("invalid argument"))
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go
-deleted file mode 100644
-index 7122be0..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package etcd
--
--// Add a new directory with a random etcd-generated key under the given path.
--func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
--	raw, err := c.post(key, "", ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// Add a new file with a random etcd-generated key under the given path.
--func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
--	raw, err := c.post(key, value, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
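
AddChild and AddChildDir store nodes under etcd-generated, in-order keys beneath the given directory. A rough usage sketch, assuming a local etcd reachable at the client's default 127.0.0.1:4001 and hypothetical key names ("queue", "job-a", "job-b"):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil) // nil falls back to the default machine list

	if _, err := c.CreateDir("queue", 60); err != nil {
		fmt.Println("create dir:", err)
		return
	}

	// Each AddChild call stores its value under an etcd-generated key, and
	// earlier children receive lower keys than later ones.
	for _, v := range []string{"job-a", "job-b"} {
		if _, err := c.AddChild("queue", v, 60); err != nil {
			fmt.Println("add child:", err)
			return
		}
	}

	resp, err := c.Get("queue", true, false)
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	for _, n := range resp.Node.Nodes {
		fmt.Println("child value:", n.Value)
	}
}
```
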
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
-deleted file mode 100644
-index 26223ff..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
-+++ /dev/null
-@@ -1,73 +0,0 @@
--package etcd
--
--import "testing"
--
--func TestAddChild(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("fooDir", true)
--		c.Delete("nonexistentDir", true)
--	}()
--
--	c.CreateDir("fooDir", 5)
--
--	_, err := c.AddChild("fooDir", "v0", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	_, err = c.AddChild("fooDir", "v1", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	resp, err := c.Get("fooDir", true, false)
--	// The child with v0 should precede the child with v1 because it's added
--	// earlier, so it should have a lower key.
--	if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
--		t.Fatalf("AddChild 1 failed.  There should be two chlidren whose values are v0 and v1, respectively."+
--			"  The response was: %#v", resp)
--	}
--
--	// Creating a child under a nonexistent directory should succeed.
--	// The directory should be created.
--	resp, err = c.AddChild("nonexistentDir", "foo", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestAddChildDir(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("fooDir", true)
--		c.Delete("nonexistentDir", true)
--	}()
--
--	c.CreateDir("fooDir", 5)
--
--	_, err := c.AddChildDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	_, err = c.AddChildDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	resp, err := c.Get("fooDir", true, false)
--	// The child dir added first should precede the one added later because it
--	// is added earlier, so it should have a lower key.
--	if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
--		t.Fatalf("AddChildDir 1 failed.  There should be two chlidren whose values are v0 and v1, respectively."+
--			"  The response was: %#v", resp)
--	}
--
--	// Creating a child under a nonexistent directory should succeed.
--	// The directory should be created.
--	resp, err = c.AddChildDir("nonexistentDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go
-deleted file mode 100644
-index f6ae548..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go
-+++ /dev/null
-@@ -1,435 +0,0 @@
--package etcd
--
--import (
--	"crypto/tls"
--	"crypto/x509"
--	"encoding/json"
--	"errors"
--	"io"
--	"io/ioutil"
--	"net"
--	"net/http"
--	"net/url"
--	"os"
--	"path"
--	"time"
--)
--
--// See SetConsistency for how to use these constants.
--const (
--	// Using strings rather than iota because the consistency level
--	// could be persisted to disk, so it'd be better to use
--	// human-readable values.
--	STRONG_CONSISTENCY = "STRONG"
--	WEAK_CONSISTENCY   = "WEAK"
--)
--
--const (
--	defaultBufferSize = 10
--)
--
--type Config struct {
--	CertFile    string        `json:"certFile"`
--	KeyFile     string        `json:"keyFile"`
--	CaCertFile  []string      `json:"caCertFiles"`
--	DialTimeout time.Duration `json:"timeout"`
--	Consistency string        `json:"consistency"`
--}
--
--type Client struct {
--	config      Config   `json:"config"`
--	cluster     *Cluster `json:"cluster"`
--	httpClient  *http.Client
--	persistence io.Writer
--	cURLch      chan string
--	// CheckRetry can be used to control the policy for failed requests
--	// and modify the cluster if needed.
--	// The client calls it before sending requests again, and
--	// stops retrying if CheckRetry returns some error. The cases that
--	// this function needs to handle include no response and unexpected
--	// http status code of response.
--	// If CheckRetry is nil, client will call the default one
--	// `DefaultCheckRetry`.
--	// Argument cluster is the etcd.Cluster object that these requests have been made on.
--	// Argument numReqs is the number of http.Requests that have been made so far.
--	// Argument lastResp is the http.Responses from the last request.
--	// Argument err is the reason of the failure.
--	CheckRetry func(cluster *Cluster, numReqs int,
--		lastResp http.Response, err error) error
--}
--
--// NewClient creates a basic client that is configured to be used
--// with the given machine list.
--func NewClient(machines []string) *Client {
--	config := Config{
--		// default timeout is one second
--		DialTimeout: time.Second,
--		// default consistency level is STRONG
--		Consistency: STRONG_CONSISTENCY,
--	}
--
--	client := &Client{
--		cluster: NewCluster(machines),
--		config:  config,
--	}
--
--	client.initHTTPClient()
--	client.saveConfig()
--
--	return client
--}
--
--// NewTLSClient creates a basic client with a TLS configuration
--func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
--	// overwrite the default machine to use https
--	if len(machines) == 0 {
--		machines = []string{"https://127.0.0.1:4001"}
--	}
--
--	config := Config{
--		// default timeout is one second
--		DialTimeout: time.Second,
--		// default consistency level is STRONG
--		Consistency: STRONG_CONSISTENCY,
--		CertFile:    cert,
--		KeyFile:     key,
--		CaCertFile:  make([]string, 0),
--	}
--
--	client := &Client{
--		cluster: NewCluster(machines),
--		config:  config,
--	}
--
--	err := client.initHTTPSClient(cert, key)
--	if err != nil {
--		return nil, err
--	}
--
--	err = client.AddRootCA(caCert)
--
--	client.saveConfig()
--
--	return client, nil
--}
--
--// NewClientFromFile creates a client from a given file path.
--// The given file is expected to use the JSON format.
--func NewClientFromFile(fpath string) (*Client, error) {
--	fi, err := os.Open(fpath)
--	if err != nil {
--		return nil, err
--	}
--
--	defer func() {
--		if err := fi.Close(); err != nil {
--			panic(err)
--		}
--	}()
--
--	return NewClientFromReader(fi)
--}
--
--// NewClientFromReader creates a Client configured from a given reader.
--// The configuration is expected to use the JSON format.
--func NewClientFromReader(reader io.Reader) (*Client, error) {
--	c := new(Client)
--
--	b, err := ioutil.ReadAll(reader)
--	if err != nil {
--		return nil, err
--	}
--
--	err = json.Unmarshal(b, c)
--	if err != nil {
--		return nil, err
--	}
--	if c.config.CertFile == "" {
--		c.initHTTPClient()
--	} else {
--		err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
--	}
--
--	if err != nil {
--		return nil, err
--	}
--
--	for _, caCert := range c.config.CaCertFile {
--		if err := c.AddRootCA(caCert); err != nil {
--			return nil, err
--		}
--	}
--
--	return c, nil
--}
--
--// Override the Client's HTTP Transport object
--func (c *Client) SetTransport(tr *http.Transport) {
--	c.httpClient.Transport = tr
--}
--
--// initHTTPClient initializes an HTTP client for the etcd client
--func (c *Client) initHTTPClient() {
--	tr := &http.Transport{
--		Dial: c.dial,
--		TLSClientConfig: &tls.Config{
--			InsecureSkipVerify: true,
--		},
--	}
--	c.httpClient = &http.Client{Transport: tr}
--}
--
--// initHTTPSClient initializes an HTTPS client for the etcd client
--func (c *Client) initHTTPSClient(cert, key string) error {
--	if cert == "" || key == "" {
--		return errors.New("Require both cert and key path")
--	}
--
--	tlsCert, err := tls.LoadX509KeyPair(cert, key)
--	if err != nil {
--		return err
--	}
--
--	tlsConfig := &tls.Config{
--		Certificates:       []tls.Certificate{tlsCert},
--		InsecureSkipVerify: true,
--	}
--
--	tr := &http.Transport{
--		TLSClientConfig: tlsConfig,
--		Dial:            c.dial,
--	}
--
--	c.httpClient = &http.Client{Transport: tr}
--	return nil
--}
--
--// SetPersistence sets a writer to which the config will be
--// written every time it's changed.
--func (c *Client) SetPersistence(writer io.Writer) {
--	c.persistence = writer
--}
--
--// SetConsistency changes the consistency level of the client.
--//
--// When consistency is set to STRONG_CONSISTENCY, all requests,
--// including GET, are sent to the leader.  This means that, assuming
--// the absence of leader failures, GET requests are guaranteed to see
--// the changes made by previous requests.
--//
--// When consistency is set to WEAK_CONSISTENCY, other requests
--// are still sent to the leader, but GET requests are sent to a
--// random server from the server pool.  This reduces the read
--// load on the leader, but it's not guaranteed that the GET requests
--// will see changes made by previous requests (they might have not
--// yet been committed on non-leader servers).
--func (c *Client) SetConsistency(consistency string) error {
--	if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
--		return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
--	}
--	c.config.Consistency = consistency
--	return nil
--}
--
--// Sets the DialTimeout value
--func (c *Client) SetDialTimeout(d time.Duration) {
--	c.config.DialTimeout = d
--}
--
--// AddRootCA adds a root CA cert for the etcd client
--func (c *Client) AddRootCA(caCert string) error {
--	if c.httpClient == nil {
--		return errors.New("Client has not been initialized yet!")
--	}
--
--	certBytes, err := ioutil.ReadFile(caCert)
--	if err != nil {
--		return err
--	}
--
--	tr, ok := c.httpClient.Transport.(*http.Transport)
--
--	if !ok {
--		panic("AddRootCA(): Transport type assert should not fail")
--	}
--
--	if tr.TLSClientConfig.RootCAs == nil {
--		caCertPool := x509.NewCertPool()
--		ok = caCertPool.AppendCertsFromPEM(certBytes)
--		if ok {
--			tr.TLSClientConfig.RootCAs = caCertPool
--		}
--		tr.TLSClientConfig.InsecureSkipVerify = false
--	} else {
--		ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
--	}
--
--	if !ok {
--		err = errors.New("Unable to load caCert")
--	}
--
--	c.config.CaCertFile = append(c.config.CaCertFile, caCert)
--	c.saveConfig()
--
--	return err
--}
--
--// SetCluster updates cluster information using the given machine list.
--func (c *Client) SetCluster(machines []string) bool {
--	success := c.internalSyncCluster(machines)
--	return success
--}
--
--func (c *Client) GetCluster() []string {
--	return c.cluster.Machines
--}
--
--// SyncCluster updates the cluster information using the internal machine list.
--func (c *Client) SyncCluster() bool {
--	return c.internalSyncCluster(c.cluster.Machines)
--}
--
--// internalSyncCluster syncs cluster information using the given machine list.
--func (c *Client) internalSyncCluster(machines []string) bool {
--	for _, machine := range machines {
--		httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
--		resp, err := c.httpClient.Get(httpPath)
--		if err != nil {
--			// try another machine in the cluster
--			continue
--		} else {
--			b, err := ioutil.ReadAll(resp.Body)
--			resp.Body.Close()
--			if err != nil {
--				// try another machine in the cluster
--				continue
--			}
--
--			// update Machines List
--			c.cluster.updateFromStr(string(b))
--
--			// update leader
--			// the first one in the machine list is the leader
--			c.cluster.switchLeader(0)
--
--			logger.Debug("sync.machines ", c.cluster.Machines)
--			c.saveConfig()
--			return true
--		}
--	}
--	return false
--}
--
--// createHttpPath creates a complete HTTP URL.
--// serverName should contain both the host name and a port number, if any.
--func (c *Client) createHttpPath(serverName string, _path string) string {
--	u, err := url.Parse(serverName)
--	if err != nil {
--		panic(err)
--	}
--
--	u.Path = path.Join(u.Path, _path)
--
--	if u.Scheme == "" {
--		u.Scheme = "http"
--	}
--	return u.String()
--}
--
--// dial attempts to open a TCP connection to the provided address, explicitly
--// enabling keep-alives with a one-second interval.
--func (c *Client) dial(network, addr string) (net.Conn, error) {
--	conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
--	if err != nil {
--		return nil, err
--	}
--
--	tcpConn, ok := conn.(*net.TCPConn)
--	if !ok {
--		return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
--	}
--
--	// Keep TCP alive to check whether or not the remote machine is down
--	if err = tcpConn.SetKeepAlive(true); err != nil {
--		return nil, err
--	}
--
--	if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
--		return nil, err
--	}
--
--	return tcpConn, nil
--}
--
--func (c *Client) OpenCURL() {
--	c.cURLch = make(chan string, defaultBufferSize)
--}
--
--func (c *Client) CloseCURL() {
--	c.cURLch = nil
--}
--
--func (c *Client) sendCURL(command string) {
--	go func() {
--		select {
--		case c.cURLch <- command:
--		default:
--		}
--	}()
--}
--
--func (c *Client) RecvCURL() string {
--	return <-c.cURLch
--}
--
--// saveConfig saves the current config using c.persistence.
--func (c *Client) saveConfig() error {
--	if c.persistence != nil {
--		b, err := json.Marshal(c)
--		if err != nil {
--			return err
--		}
--
--		_, err = c.persistence.Write(b)
--		if err != nil {
--			return err
--		}
--	}
--
--	return nil
--}
--
--// MarshalJSON implements the Marshaller interface
--// as defined by the standard JSON package.
--func (c *Client) MarshalJSON() ([]byte, error) {
--	b, err := json.Marshal(struct {
--		Config  Config   `json:"config"`
--		Cluster *Cluster `json:"cluster"`
--	}{
--		Config:  c.config,
--		Cluster: c.cluster,
--	})
--
--	if err != nil {
--		return nil, err
--	}
--
--	return b, nil
--}
--
--// UnmarshalJSON implements the Unmarshaller interface
--// as defined by the standard JSON package.
--func (c *Client) UnmarshalJSON(b []byte) error {
--	temp := struct {
--		Config  Config   `json:"config"`
--		Cluster *Cluster `json:"cluster"`
--	}{}
--	err := json.Unmarshal(b, &temp)
--	if err != nil {
--		return err
--	}
--
--	c.cluster = temp.Cluster
--	c.config = temp.Config
--	return nil
--}
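
For orientation, the Client API deleted above (consistency levels, cluster sync) is consumed roughly as follows. This is a minimal illustrative sketch, not part of the patch; the endpoint http://127.0.0.1:4001 is an assumption.

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        // Assumes an etcd server reachable at this address.
        c := etcd.NewClient([]string{"http://127.0.0.1:4001"})

        // With WEAK_CONSISTENCY, GETs may be answered by any machine in the
        // pool; writes still go through the leader.
        if err := c.SetConsistency(etcd.WEAK_CONSISTENCY); err != nil {
            log.Fatal(err)
        }

        // Refresh the machine list from the cluster itself.
        if !c.SyncCluster() {
            log.Fatal("could not sync cluster machines")
        }
        fmt.Println("machines:", c.GetCluster())
    }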
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
-deleted file mode 100644
-index c245e47..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
-+++ /dev/null
-@@ -1,96 +0,0 @@
--package etcd
--
--import (
--	"encoding/json"
--	"fmt"
--	"net"
--	"net/url"
--	"os"
--	"testing"
--)
--
--// To pass this test, we need to create a cluster of 3 machines
--// The server should be listening on 127.0.0.1:4001, 4002, 4003
--func TestSync(t *testing.T) {
--	fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
--
--	// Explicit trailing slash to ensure this doesn't reproduce:
--	// https://github.com/coreos/go-etcd/issues/82
--	c := NewClient([]string{"http://127.0.0.1:4001/"})
--
--	success := c.SyncCluster()
--	if !success {
--		t.Fatal("cannot sync machines")
--	}
--
--	for _, m := range c.GetCluster() {
--		u, err := url.Parse(m)
--		if err != nil {
--			t.Fatal(err)
--		}
--		if u.Scheme != "http" {
--			t.Fatal("scheme must be http")
--		}
--
--		host, _, err := net.SplitHostPort(u.Host)
--		if err != nil {
--			t.Fatal(err)
--		}
--		if host != "127.0.0.1" {
--			t.Fatal("Host must be 127.0.0.1")
--		}
--	}
--
--	badMachines := []string{"abc", "edef"}
--
--	success = c.SetCluster(badMachines)
--
--	if success {
--		t.Fatal("should not sync on bad machines")
--	}
--
--	goodMachines := []string{"127.0.0.1:4002"}
--
--	success = c.SetCluster(goodMachines)
--
--	if !success {
--		t.Fatal("cannot sync machines")
--	} else {
--		fmt.Println(c.cluster.Machines)
--	}
--
--}
--
--func TestPersistence(t *testing.T) {
--	c := NewClient(nil)
--	c.SyncCluster()
--
--	fo, err := os.Create("config.json")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer func() {
--		if err := fo.Close(); err != nil {
--			panic(err)
--		}
--	}()
--
--	c.SetPersistence(fo)
--	err = c.saveConfig()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	c2, err := NewClientFromFile("config.json")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// Verify that the two clients have the same config
--	b1, _ := json.Marshal(c)
--	b2, _ := json.Marshal(c2)
--
--	if string(b1) != string(b2) {
--		t.Fatalf("The two configs should be equal!")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go
-deleted file mode 100644
-index aaa2054..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package etcd
--
--import (
--	"net/url"
--	"strings"
--)
--
--type Cluster struct {
--	Leader   string   `json:"leader"`
--	Machines []string `json:"machines"`
--}
--
--func NewCluster(machines []string) *Cluster {
--	// if an empty slice was sent in then just assume HTTP 4001 on localhost
--	if len(machines) == 0 {
--		machines = []string{"http://127.0.0.1:4001"}
--	}
--
--	// default leader and machines
--	return &Cluster{
--		Leader:   machines[0],
--		Machines: machines,
--	}
--}
--
--// switchLeader switches the current leader to machines[num]
--func (cl *Cluster) switchLeader(num int) {
--	logger.Debugf("switch.leader[from %v to %v]",
--		cl.Leader, cl.Machines[num])
--
--	cl.Leader = cl.Machines[num]
--}
--
--func (cl *Cluster) updateFromStr(machines string) {
--	cl.Machines = strings.Split(machines, ", ")
--}
--
--func (cl *Cluster) updateLeader(leader string) {
--	logger.Debugf("update.leader[%s,%s]", cl.Leader, leader)
--	cl.Leader = leader
--}
--
--func (cl *Cluster) updateLeaderFromURL(u *url.URL) {
--	var leader string
--	if u.Scheme == "" {
--		leader = "http://" + u.Host
--	} else {
--		leader = u.Scheme + "://" + u.Host
--	}
--	cl.updateLeader(leader)
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go
-deleted file mode 100644
-index 11131bb..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package etcd
--
--import "fmt"
--
--func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
--	raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
--	if prevValue == "" && prevIndex == 0 {
--		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
--	}
--
--	options := Options{}
--	if prevValue != "" {
--		options["prevValue"] = prevValue
--	}
--	if prevIndex != 0 {
--		options["prevIndex"] = prevIndex
--	}
--
--	raw, err := c.delete(key, options)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw, err
--}
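
A quick sketch of how the compare-and-delete call above is used, assuming a local etcd at the default http://127.0.0.1:4001 (the key name is illustrative); not part of the patch itself:

    package main

    import (
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // nil machine list defaults to http://127.0.0.1:4001

        if _, err := c.Set("foo", "bar", 0); err != nil {
            log.Fatal(err)
        }

        // Delete "foo" only if its current value is still "bar".
        resp, err := c.CompareAndDelete("foo", "bar", 0)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("deleted %s, previous value %q", resp.PrevNode.Key, resp.PrevNode.Value)
    }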
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
-deleted file mode 100644
-index 223e50f..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--package etcd
--
--import (
--	"testing"
--)
--
--func TestCompareAndDelete(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	c.Set("foo", "bar", 5)
--
--	// This should succeed with a correct prevValue
--	resp, err := c.CompareAndDelete("foo", "bar", 0)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
--		t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
--	}
--
--	resp, _ = c.Set("foo", "bar", 5)
--	// This should fail because it gives an incorrect prevValue
--	_, err = c.CompareAndDelete("foo", "xxx", 0)
--	if err == nil {
--		t.Fatalf("CompareAndDelete 2 should have failed.  The response is: %#v", resp)
--	}
--
--	// This should succeed because it gives a correct prevIndex
--	resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
--		t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
--	}
--
--	c.Set("foo", "bar", 5)
--	// This should fail because it gives an incorrect prevIndex
--	resp, err = c.CompareAndDelete("foo", "", 29817514)
--	if err == nil {
--		t.Fatalf("CompareAndDelete 4 should have failed.  The response is: %#v", resp)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go
-deleted file mode 100644
-index bb4f906..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--package etcd
--
--import "fmt"
--
--func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
--	prevValue string, prevIndex uint64) (*Response, error) {
--	raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
--	prevValue string, prevIndex uint64) (*RawResponse, error) {
--	if prevValue == "" && prevIndex == 0 {
--		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
--	}
--
--	options := Options{}
--	if prevValue != "" {
--		options["prevValue"] = prevValue
--	}
--	if prevIndex != 0 {
--		options["prevIndex"] = prevIndex
--	}
--
--	raw, err := c.put(key, value, ttl, options)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw, err
--}
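
The compare-and-swap variant follows the same pattern; a minimal sketch using prevIndex as the guard (key name and local endpoint are illustrative assumptions):

    package main

    import (
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // assumes a local etcd at http://127.0.0.1:4001

        if _, err := c.Set("counter", "1", 0); err != nil {
            log.Fatal(err)
        }

        // Read the node, then swap only if nobody has modified it in between.
        get, err := c.Get("counter", false, false)
        if err != nil {
            log.Fatal(err)
        }
        resp, err := c.CompareAndSwap("counter", "2", 0, "", get.Node.ModifiedIndex)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("swapped %s: %q -> %q", resp.Node.Key, resp.PrevNode.Value, resp.Node.Value)
    }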
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
-deleted file mode 100644
-index 14a1b00..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--package etcd
--
--import (
--	"testing"
--)
--
--func TestCompareAndSwap(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	c.Set("foo", "bar", 5)
--
--	// This should succeed
--	resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
--		t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
--	}
--
--	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
--		t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
--	}
--
--	// This should fail because it gives an incorrect prevValue
--	resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
--	if err == nil {
--		t.Fatalf("CompareAndSwap 2 should have failed.  The response is: %#v", resp)
--	}
--
--	resp, err = c.Set("foo", "bar", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// This should succeed
--	resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
--		t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
--	}
--
--	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
--		t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
--	}
--
--	// This should fail because it gives an incorrect prevIndex
--	resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
--	if err == nil {
--		t.Fatalf("CompareAndSwap 4 should have failed.  The response is: %#v", resp)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go
-deleted file mode 100644
-index 0f77788..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go
-+++ /dev/null
-@@ -1,55 +0,0 @@
--package etcd
--
--import (
--	"fmt"
--	"io/ioutil"
--	"log"
--	"strings"
--)
--
--var logger *etcdLogger
--
--func SetLogger(l *log.Logger) {
--	logger = &etcdLogger{l}
--}
--
--func GetLogger() *log.Logger {
--	return logger.log
--}
--
--type etcdLogger struct {
--	log *log.Logger
--}
--
--func (p *etcdLogger) Debug(args ...interface{}) {
--	msg := "DEBUG: " + fmt.Sprint(args...)
--	p.log.Println(msg)
--}
--
--func (p *etcdLogger) Debugf(f string, args ...interface{}) {
--	msg := "DEBUG: " + fmt.Sprintf(f, args...)
--	// Append newline if necessary
--	if !strings.HasSuffix(msg, "\n") {
--		msg = msg + "\n"
--	}
--	p.log.Print(msg)
--}
--
--func (p *etcdLogger) Warning(args ...interface{}) {
--	msg := "WARNING: " + fmt.Sprint(args...)
--	p.log.Println(msg)
--}
--
--func (p *etcdLogger) Warningf(f string, args ...interface{}) {
--	msg := "WARNING: " + fmt.Sprintf(f, args...)
--	// Append newline if necessary
--	if !strings.HasSuffix(msg, "\n") {
--		msg = msg + "\n"
--	}
--	p.log.Print(msg)
--}
--
--func init() {
--	// Default logger uses the go default log.
--	SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
-deleted file mode 100644
-index 97f6d11..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--package etcd
--
--import (
--	"testing"
--)
--
--type Foo struct{}
--type Bar struct {
--	one string
--	two int
--}
--
--// Tests that logs don't panic with arbitrary interfaces
--func TestDebug(t *testing.T) {
--	f := &Foo{}
--	b := &Bar{"asfd", 3}
--	for _, test := range []interface{}{
--		1234,
--		"asdf",
--		f,
--		b,
--	} {
--		logger.Debug(test)
--		logger.Debugf("something, %s", test)
--		logger.Warning(test)
--		logger.Warningf("something, %s", test)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go
-deleted file mode 100644
-index b37accd..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--package etcd
--
--// Delete deletes the given key.
--//
--// When recursive is set to false, if the key points to a
--// directory, the method will fail.
--//
--// When recursive is set to true, if the key points to a file,
--// the file will be deleted; if the key points to a directory,
--// then everything under the directory (including all child directories)
--// will be deleted.
--func (c *Client) Delete(key string, recursive bool) (*Response, error) {
--	raw, err := c.RawDelete(key, recursive, false)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// DeleteDir deletes an empty directory or a key value pair
--func (c *Client) DeleteDir(key string) (*Response, error) {
--	raw, err := c.RawDelete(key, false, true)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
--	ops := Options{
--		"recursive": recursive,
--		"dir":       dir,
--	}
--
--	return c.delete(key, ops)
--}
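
A short sketch contrasting DeleteDir with a recursive Delete, assuming a local etcd at the default endpoint (key names are illustrative); not part of the patch:

    package main

    import (
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // assumes a local etcd at http://127.0.0.1:4001

        if _, err := c.Set("demo/key", "value", 0); err != nil {
            log.Fatal(err)
        }

        // DeleteDir refuses to remove a non-empty directory.
        if _, err := c.DeleteDir("demo"); err != nil {
            log.Println("expected failure:", err)
        }

        // A recursive Delete removes the directory and everything under it.
        if _, err := c.Delete("demo", true); err != nil {
            log.Fatal(err)
        }
    }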
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
-deleted file mode 100644
-index 5904971..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
-+++ /dev/null
-@@ -1,81 +0,0 @@
--package etcd
--
--import (
--	"testing"
--)
--
--func TestDelete(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	c.Set("foo", "bar", 5)
--	resp, err := c.Delete("foo", false)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Node.Value == "") {
--		t.Fatalf("Delete failed with %s", resp.Node.Value)
--	}
--
--	if !(resp.PrevNode.Value == "bar") {
--		t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
--	}
--
--	resp, err = c.Delete("foo", false)
--	if err == nil {
--		t.Fatalf("Delete should have failed because the key foo did not exist.  "+
--			"The response was: %v", resp)
--	}
--}
--
--func TestDeleteAll(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--		c.Delete("fooDir", true)
--	}()
--
--	c.SetDir("foo", 5)
--	// test delete an empty dir
--	resp, err := c.DeleteDir("foo")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Node.Value == "") {
--		t.Fatalf("DeleteAll 1 failed: %#v", resp)
--	}
--
--	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
--		t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
--	}
--
--	c.CreateDir("fooDir", 5)
--	c.Set("fooDir/foo", "bar", 5)
--	_, err = c.DeleteDir("fooDir")
--	if err == nil {
--		t.Fatal("should not able to delete a non-empty dir with deletedir")
--	}
--
--	resp, err = c.Delete("fooDir", true)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Node.Value == "") {
--		t.Fatalf("DeleteAll 2 failed: %#v", resp)
--	}
--
--	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
--		t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
--	}
--
--	resp, err = c.Delete("foo", true)
--	if err == nil {
--		t.Fatalf("DeleteAll should have failed because the key foo did not exist.  "+
--			"The response was: %v", resp)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go
-deleted file mode 100644
-index 7e69287..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go
-+++ /dev/null
-@@ -1,48 +0,0 @@
--package etcd
--
--import (
--	"encoding/json"
--	"fmt"
--)
--
--const (
--	ErrCodeEtcdNotReachable = 501
--)
--
--var (
--	errorMap = map[int]string{
--		ErrCodeEtcdNotReachable: "All the given peers are not reachable",
--	}
--)
--
--type EtcdError struct {
--	ErrorCode int    `json:"errorCode"`
--	Message   string `json:"message"`
--	Cause     string `json:"cause,omitempty"`
--	Index     uint64 `json:"index"`
--}
--
--func (e EtcdError) Error() string {
--	return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
--}
--
--func newError(errorCode int, cause string, index uint64) *EtcdError {
--	return &EtcdError{
--		ErrorCode: errorCode,
--		Message:   errorMap[errorCode],
--		Cause:     cause,
--		Index:     index,
--	}
--}
--
--func handleError(b []byte) error {
--	etcdErr := new(EtcdError)
--
--	err := json.Unmarshal(b, etcdErr)
--	if err != nil {
--		logger.Warningf("cannot unmarshal etcd error: %v", err)
--		return err
--	}
--
--	return etcdErr
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go
-deleted file mode 100644
-index 976bf07..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--package etcd
--
--// Get gets the file or directory associated with the given key.
--// If the key points to a directory, files and directories under
--// it will be returned in sorted or unsorted order, depending on
--// the sort flag.
--// If recursive is set to false, contents under child directories
--// will not be returned.
--// If recursive is set to true, all the contents will be returned.
--func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
--	raw, err := c.RawGet(key, sort, recursive)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
--	ops := Options{
--		"recursive": recursive,
--		"sorted":    sort,
--	}
--
--	return c.get(key, ops)
--}
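
A minimal sketch of a sorted directory read with Get, assuming a local etcd at the default endpoint (key names are illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // assumes a local etcd at http://127.0.0.1:4001

        c.Set("app/config/a", "1", 0)
        c.Set("app/config/b", "2", 0)

        // Sorted, non-recursive listing of the directory.
        resp, err := c.Get("app/config", true, false)
        if err != nil {
            log.Fatal(err)
        }
        for _, n := range resp.Node.Nodes {
            fmt.Println(n.Key, "=", n.Value)
        }
    }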
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
-deleted file mode 100644
-index 279c4e2..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
-+++ /dev/null
-@@ -1,131 +0,0 @@
--package etcd
--
--import (
--	"reflect"
--	"testing"
--)
--
--// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
--func cleanNode(n *Node) {
--	n.Expiration = nil
--	n.ModifiedIndex = 0
--	n.CreatedIndex = 0
--}
--
--// cleanResult scrubs a result object two levels deep of Expiration,
--// ModifiedIndex and CreatedIndex.
--func cleanResult(result *Response) {
--	//  TODO(philips): make this recursive.
--	cleanNode(result.Node)
--	for i, _ := range result.Node.Nodes {
--		cleanNode(result.Node.Nodes[i])
--		for j, _ := range result.Node.Nodes[i].Nodes {
--			cleanNode(result.Node.Nodes[i].Nodes[j])
--		}
--	}
--}
--
--func TestGet(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	c.Set("foo", "bar", 5)
--
--	result, err := c.Get("foo", false, false)
--
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if result.Node.Key != "/foo" || result.Node.Value != "bar" {
--		t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
--	}
--
--	result, err = c.Get("goo", false, false)
--	if err == nil {
--		t.Fatalf("should not be able to get non-exist key")
--	}
--}
--
--func TestGetAll(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("fooDir", true)
--	}()
--
--	c.CreateDir("fooDir", 5)
--	c.Set("fooDir/k0", "v0", 5)
--	c.Set("fooDir/k1", "v1", 5)
--
--	// Return kv-pairs in sorted order
--	result, err := c.Get("fooDir", true, false)
--
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	expected := Nodes{
--		&Node{
--			Key:   "/fooDir/k0",
--			Value: "v0",
--			TTL:   5,
--		},
--		&Node{
--			Key:   "/fooDir/k1",
--			Value: "v1",
--			TTL:   5,
--		},
--	}
--
--	cleanResult(result)
--
--	if !reflect.DeepEqual(result.Node.Nodes, expected) {
--		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
--	}
--
--	// Test the `recursive` option
--	c.CreateDir("fooDir/childDir", 5)
--	c.Set("fooDir/childDir/k2", "v2", 5)
--
--	// Return kv-pairs in sorted order
--	result, err = c.Get("fooDir", true, true)
--
--	cleanResult(result)
--
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	expected = Nodes{
--		&Node{
--			Key: "/fooDir/childDir",
--			Dir: true,
--			Nodes: Nodes{
--				&Node{
--					Key:   "/fooDir/childDir/k2",
--					Value: "v2",
--					TTL:   5,
--				},
--			},
--			TTL: 5,
--		},
--		&Node{
--			Key:   "/fooDir/k0",
--			Value: "v0",
--			TTL:   5,
--		},
--		&Node{
--			Key:   "/fooDir/k1",
--			Value: "v1",
--			TTL:   5,
--		},
--	}
--
--	cleanResult(result)
--
--	if !reflect.DeepEqual(result.Node.Nodes, expected) {
--		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go
-deleted file mode 100644
-index 701c9b3..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package etcd
--
--import (
--	"fmt"
--	"net/url"
--	"reflect"
--)
--
--type Options map[string]interface{}
--
--// An internally-used data structure that represents a mapping
--// between valid options and their kinds
--type validOptions map[string]reflect.Kind
--
--// Valid options for GET, PUT, POST, DELETE
--// Using CAPITALIZED_UNDERSCORE to emphasize that these
--// values are meant to be used as constants.
--var (
--	VALID_GET_OPTIONS = validOptions{
--		"recursive":  reflect.Bool,
--		"consistent": reflect.Bool,
--		"sorted":     reflect.Bool,
--		"wait":       reflect.Bool,
--		"waitIndex":  reflect.Uint64,
--	}
--
--	VALID_PUT_OPTIONS = validOptions{
--		"prevValue": reflect.String,
--		"prevIndex": reflect.Uint64,
--		"prevExist": reflect.Bool,
--		"dir":       reflect.Bool,
--	}
--
--	VALID_POST_OPTIONS = validOptions{}
--
--	VALID_DELETE_OPTIONS = validOptions{
--		"recursive": reflect.Bool,
--		"dir":       reflect.Bool,
--		"prevValue": reflect.String,
--		"prevIndex": reflect.Uint64,
--	}
--)
--
--// Convert options to a string of HTTP query parameters
--func (ops Options) toParameters(validOps validOptions) (string, error) {
--	p := "?"
--	values := url.Values{}
--
--	if ops == nil {
--		return "", nil
--	}
--
--	for k, v := range ops {
--		// Check if the given option is valid (that it exists)
--		kind := validOps[k]
--		if kind == reflect.Invalid {
--			return "", fmt.Errorf("Invalid option: %v", k)
--		}
--
--		// Check if the given option is of the valid type
--		t := reflect.TypeOf(v)
--		if kind != t.Kind() {
--			return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
--				k, kind, t.Kind())
--		}
--
--		values.Set(k, fmt.Sprintf("%v", v))
--	}
--
--	p += values.Encode()
--	return p, nil
--}
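
The Options map above ultimately encodes into a plain URL query string; a standalone, standard-library-only sketch of the equivalent encoding (toParameters itself is unexported, so this only mirrors its output):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Equivalent of Options{"recursive": true, "sorted": true} on a GET.
        values := url.Values{}
        values.Set("recursive", fmt.Sprintf("%v", true))
        values.Set("sorted", fmt.Sprintf("%v", true))

        // Prints "?recursive=true&sorted=true".
        fmt.Println("?" + values.Encode())
    }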
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go
-deleted file mode 100644
-index 5d8b45a..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go
-+++ /dev/null
-@@ -1,377 +0,0 @@
--package etcd
--
--import (
--	"errors"
--	"fmt"
--	"io/ioutil"
--	"math/rand"
--	"net/http"
--	"net/url"
--	"path"
--	"strings"
--	"sync"
--	"time"
--)
--
--// Errors introduced by handling requests
--var (
--	ErrRequestCancelled = errors.New("sending request is cancelled")
--)
--
--type RawRequest struct {
--	Method       string
--	RelativePath string
--	Values       url.Values
--	Cancel       <-chan bool
--}
--
--// NewRawRequest returns a new RawRequest
--func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
--	return &RawRequest{
--		Method:       method,
--		RelativePath: relativePath,
--		Values:       values,
--		Cancel:       cancel,
--	}
--}
--
--// getCancelable issues a cancelable GET request
--func (c *Client) getCancelable(key string, options Options,
--	cancel <-chan bool) (*RawResponse, error) {
--	logger.Debugf("get %s [%s]", key, c.cluster.Leader)
--	p := keyToPath(key)
--
--	// If consistency level is set to STRONG, append
--	// the `consistent` query string.
--	if c.config.Consistency == STRONG_CONSISTENCY {
--		options["consistent"] = true
--	}
--
--	str, err := options.toParameters(VALID_GET_OPTIONS)
--	if err != nil {
--		return nil, err
--	}
--	p += str
--
--	req := NewRawRequest("GET", p, nil, cancel)
--	resp, err := c.SendRequest(req)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// get issues a GET request
--func (c *Client) get(key string, options Options) (*RawResponse, error) {
--	return c.getCancelable(key, options, nil)
--}
--
--// put issues a PUT request
--func (c *Client) put(key string, value string, ttl uint64,
--	options Options) (*RawResponse, error) {
--
--	logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
--	p := keyToPath(key)
--
--	str, err := options.toParameters(VALID_PUT_OPTIONS)
--	if err != nil {
--		return nil, err
--	}
--	p += str
--
--	req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
--	resp, err := c.SendRequest(req)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// post issues a POST request
--func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
--	logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
--	p := keyToPath(key)
--
--	req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
--	resp, err := c.SendRequest(req)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// delete issues a DELETE request
--func (c *Client) delete(key string, options Options) (*RawResponse, error) {
--	logger.Debugf("delete %s [%s]", key, c.cluster.Leader)
--	p := keyToPath(key)
--
--	str, err := options.toParameters(VALID_DELETE_OPTIONS)
--	if err != nil {
--		return nil, err
--	}
--	p += str
--
--	req := NewRawRequest("DELETE", p, nil, nil)
--	resp, err := c.SendRequest(req)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// SendRequest sends a HTTP request and returns a Response as defined by etcd
--func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
--
--	var req *http.Request
--	var resp *http.Response
--	var httpPath string
--	var err error
--	var respBody []byte
--
--	var numReqs = 1
--
--	checkRetry := c.CheckRetry
--	if checkRetry == nil {
--		checkRetry = DefaultCheckRetry
--	}
--
--	cancelled := make(chan bool, 1)
--	reqLock := new(sync.Mutex)
--
--	if rr.Cancel != nil {
--		cancelRoutine := make(chan bool)
--		defer close(cancelRoutine)
--
--		go func() {
--			select {
--			case <-rr.Cancel:
--				cancelled <- true
--				logger.Debug("send.request is cancelled")
--			case <-cancelRoutine:
--				return
--			}
--
--			// Keep cancelling the request until this goroutine is stopped,
--			// because we have no way of knowing whether the cancellation succeeded.
--			for {
--				reqLock.Lock()
--				c.httpClient.Transport.(*http.Transport).CancelRequest(req)
--				reqLock.Unlock()
--
--				select {
--				case <-time.After(100 * time.Millisecond):
--				case <-cancelRoutine:
--					return
--				}
--			}
--		}()
--	}
--
--	// If we connect to a follower and consistency is required, retry until
--	// we connect to a leader
--	sleep := 25 * time.Millisecond
--	maxSleep := time.Second
--	for attempt := 0; ; attempt++ {
--		if attempt > 0 {
--			select {
--			case <-cancelled:
--				return nil, ErrRequestCancelled
--			case <-time.After(sleep):
--				sleep = sleep * 2
--				if sleep > maxSleep {
--					sleep = maxSleep
--				}
--			}
--		}
--
--		logger.Debug("Connecting to etcd: attempt", attempt+1, "for", rr.RelativePath)
--
--		if rr.Method == "GET" && c.config.Consistency == WEAK_CONSISTENCY {
--			// If it's a GET and consistency level is set to WEAK,
--			// then use a random machine.
--			httpPath = c.getHttpPath(true, rr.RelativePath)
--		} else {
--			// Else use the leader.
--			httpPath = c.getHttpPath(false, rr.RelativePath)
--		}
--
--		// Return a cURL command if curlChan is set
--		if c.cURLch != nil {
--			command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
--			for key, value := range rr.Values {
--				command += fmt.Sprintf(" -d %s=%s", key, value[0])
--			}
--			c.sendCURL(command)
--		}
--
--		logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
--
--		reqLock.Lock()
--		if rr.Values == nil {
--			if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
--				return nil, err
--			}
--		} else {
--			body := strings.NewReader(rr.Values.Encode())
--			if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
--				return nil, err
--			}
--
--			req.Header.Set("Content-Type",
--				"application/x-www-form-urlencoded; param=value")
--		}
--		reqLock.Unlock()
--
--		resp, err = c.httpClient.Do(req)
--		defer func() {
--			if resp != nil {
--				resp.Body.Close()
--			}
--		}()
--
--		// If the request was cancelled, return ErrRequestCancelled directly
--		select {
--		case <-cancelled:
--			return nil, ErrRequestCancelled
--		default:
--		}
--
--		numReqs++
--
--		// network error, change a machine!
--		if err != nil {
--			logger.Debug("network error:", err.Error())
--			lastResp := http.Response{}
--			if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
--				return nil, checkErr
--			}
--
--			c.cluster.switchLeader(attempt % len(c.cluster.Machines))
--			continue
--		}
--
--		// if there is no error, it should receive response
--		logger.Debug("recv.response.from", httpPath)
--
--		if validHttpStatusCode[resp.StatusCode] {
--			// try to read byte code and break the loop
--			respBody, err = ioutil.ReadAll(resp.Body)
--			if err == nil {
--				logger.Debug("recv.success.", httpPath)
--				break
--			}
--			// A ReadAll error may be caused by a cancelled request
--			select {
--			case <-cancelled:
--				return nil, ErrRequestCancelled
--			default:
--			}
--		}
--
--		// if resp is TemporaryRedirect, set the new leader and retry
--		if resp.StatusCode == http.StatusTemporaryRedirect {
--			u, err := resp.Location()
--
--			if err != nil {
--				logger.Warning(err)
--			} else {
--				// Update cluster leader based on redirect location
--				// because it should point to the leader address
--				c.cluster.updateLeaderFromURL(u)
--				logger.Debug("recv.response.relocate", u.String())
--			}
--			resp.Body.Close()
--			continue
--		}
--
--		if checkErr := checkRetry(c.cluster, numReqs, *resp,
--			errors.New("Unexpected HTTP status code")); checkErr != nil {
--			return nil, checkErr
--		}
--		resp.Body.Close()
--	}
--
--	r := &RawResponse{
--		StatusCode: resp.StatusCode,
--		Body:       respBody,
--		Header:     resp.Header,
--	}
--
--	return r, nil
--}
--
--// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests
--// If we have retried twice the number of machines, stop retrying.
--// If status code is InternalServerError, sleep for 200ms.
--func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
--	err error) error {
--
--	if numReqs >= 2*len(cluster.Machines) {
--		return newError(ErrCodeEtcdNotReachable,
--			"Tried to connect to each peer twice and failed", 0)
--	}
--
--	code := lastResp.StatusCode
--	if code == http.StatusInternalServerError {
--		time.Sleep(time.Millisecond * 200)
--
--	}
--
--	logger.Warning("bad response status code", code)
--	return nil
--}
--
--func (c *Client) getHttpPath(random bool, s ...string) string {
--	var machine string
--	if random {
--		machine = c.cluster.Machines[rand.Intn(len(c.cluster.Machines))]
--	} else {
--		machine = c.cluster.Leader
--	}
--
--	fullPath := machine + "/" + version
--	for _, seg := range s {
--		fullPath = fullPath + "/" + seg
--	}
--
--	return fullPath
--}
--
--// buildValues builds a url.Values map according to the given value and ttl
--func buildValues(value string, ttl uint64) url.Values {
--	v := url.Values{}
--
--	if value != "" {
--		v.Set("value", value)
--	}
--
--	if ttl > 0 {
--		v.Set("ttl", fmt.Sprintf("%v", ttl))
--	}
--
--	return v
--}
--
--// convert key string to http path, excluding the version prefix
--// for example: key[foo] -> path[keys/foo]
--// key[/] -> path[keys/]
--func keyToPath(key string) string {
--	p := path.Join("keys", key)
--
--	// corner case: if key is "/" or "//" etc.,
--	// path.Join will strip the trailing "/";
--	// we need to add it back
--	if p == "keys" {
--		p = "keys/"
--	}
--
--	return p
--}
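
The keyToPath corner case is easy to reproduce with path.Join directly; a standalone, standard-library-only sketch:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        fmt.Println(path.Join("keys", "foo")) // prints "keys/foo"

        // path.Join cleans its result, so the trailing slash is lost;
        // this is why keyToPath has to add it back for the root key.
        fmt.Println(path.Join("keys", "/")) // prints "keys", not "keys/"
    }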
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go
-deleted file mode 100644
-index 1fe9b4e..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--package etcd
--
--import (
--	"encoding/json"
--	"net/http"
--	"strconv"
--	"time"
--)
--
--const (
--	rawResponse = iota
--	normalResponse
--)
--
--type responseType int
--
--type RawResponse struct {
--	StatusCode int
--	Body       []byte
--	Header     http.Header
--}
--
--var (
--	validHttpStatusCode = map[int]bool{
--		http.StatusCreated:            true,
--		http.StatusOK:                 true,
--		http.StatusBadRequest:         true,
--		http.StatusNotFound:           true,
--		http.StatusPreconditionFailed: true,
--		http.StatusForbidden:          true,
--	}
--)
--
--// Unmarshal parses RawResponse and stores the result in Response
--func (rr *RawResponse) Unmarshal() (*Response, error) {
--	if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
--		return nil, handleError(rr.Body)
--	}
--
--	resp := new(Response)
--
--	err := json.Unmarshal(rr.Body, resp)
--
--	if err != nil {
--		return nil, err
--	}
--
--	// attach index and term to response
--	resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
--	resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
--	resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
--
--	return resp, nil
--}
--
--type Response struct {
--	Action    string `json:"action"`
--	Node      *Node  `json:"node"`
--	PrevNode  *Node  `json:"prevNode,omitempty"`
--	EtcdIndex uint64 `json:"etcdIndex"`
--	RaftIndex uint64 `json:"raftIndex"`
--	RaftTerm  uint64 `json:"raftTerm"`
--}
--
--type Node struct {
--	Key           string     `json:"key,omitempty"`
--	Value         string     `json:"value,omitempty"`
--	Dir           bool       `json:"dir,omitempty"`
--	Expiration    *time.Time `json:"expiration,omitempty"`
--	TTL           int64      `json:"ttl,omitempty"`
--	Nodes         Nodes      `json:"nodes,omitempty"`
--	ModifiedIndex uint64     `json:"modifiedIndex,omitempty"`
--	CreatedIndex  uint64     `json:"createdIndex,omitempty"`
--}
--
--type Nodes []*Node
--
--// interfaces for sorting
--func (ns Nodes) Len() int {
--	return len(ns)
--}
--
--func (ns Nodes) Less(i, j int) bool {
--	return ns[i].Key < ns[j].Key
--}
--
--func (ns Nodes) Swap(i, j int) {
--	ns[i], ns[j] = ns[j], ns[i]
--}
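
Since Nodes implements sort.Interface, a listing can also be ordered client-side; a small sketch (the values are illustrative):

    package main

    import (
        "fmt"
        "sort"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        nodes := etcd.Nodes{
            &etcd.Node{Key: "/dir/b", Value: "2"},
            &etcd.Node{Key: "/dir/a", Value: "1"},
        }

        // Nodes sorts by key, per the Less method above.
        sort.Sort(nodes)

        for _, n := range nodes {
            fmt.Println(n.Key, "=", n.Value)
        }
    }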
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
-deleted file mode 100644
-index 756e317..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
-+++ /dev/null
-@@ -1,42 +0,0 @@
--package etcd
--
--import (
--	"fmt"
--	"testing"
--)
--
--func TestSetCurlChan(t *testing.T) {
--	c := NewClient(nil)
--	c.OpenCURL()
--
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	_, err := c.Set("foo", "bar", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
--		c.cluster.Leader)
--	actual := c.RecvCURL()
--	if expected != actual {
--		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
--			actual, expected)
--	}
--
--	c.SetConsistency(STRONG_CONSISTENCY)
--	_, err = c.Get("foo", false, false)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?consistent=true&recursive=false&sorted=false",
--		c.cluster.Leader)
--	actual = c.RecvCURL()
--	if expected != actual {
--		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
--			actual, expected)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go
-deleted file mode 100644
-index cb0d567..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go
-+++ /dev/null
-@@ -1,137 +0,0 @@
--package etcd
--
--// Set sets the given key to the given value.
--// It will create a new key value pair or replace the old one.
--// It will not replace an existing directory.
--func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
--	raw, err := c.RawSet(key, value, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// SetDir sets the given key to a directory.
--// It will create a new directory or replace the old key-value pair with a directory.
--// It will not replace an existing directory.
--func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
--	raw, err := c.RawSetDir(key, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// CreateDir creates a directory. It succeeds only if
--// the given key does not yet exist.
--func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
--	raw, err := c.RawCreateDir(key, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// UpdateDir updates the given directory. It succeeds only if the
--// given key already exists.
--func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
--	raw, err := c.RawUpdateDir(key, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// Create creates a file with the given value under the given key.  It succeeds
--// only if the given key does not yet exist.
--func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
--	raw, err := c.RawCreate(key, value, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// CreateInOrder creates a file with a key that's guaranteed to be higher than other
--// keys in the given directory. It is useful for creating queues.
--func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
--	raw, err := c.RawCreateInOrder(dir, value, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--// Update updates the given key to the given value.  It succeeds only if the
--// given key already exists.
--func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
--	raw, err := c.RawUpdate(key, value, ttl)
--
--	if err != nil {
--		return nil, err
--	}
--
--	return raw.Unmarshal()
--}
--
--func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
--	ops := Options{
--		"prevExist": true,
--		"dir":       true,
--	}
--
--	return c.put(key, "", ttl, ops)
--}
--
--func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
--	ops := Options{
--		"prevExist": false,
--		"dir":       true,
--	}
--
--	return c.put(key, "", ttl, ops)
--}
--
--func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
--	return c.put(key, value, ttl, nil)
--}
--
--func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
--	ops := Options{
--		"dir": true,
--	}
--
--	return c.put(key, "", ttl, ops)
--}
--
--func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
--	ops := Options{
--		"prevExist": true,
--	}
--
--	return c.put(key, value, ttl, ops)
--}
--
--func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
--	ops := Options{
--		"prevExist": false,
--	}
--
--	return c.put(key, value, ttl, ops)
--}
--
--func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
--	return c.post(dir, value, ttl)
--}
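
A compact sketch of the prevExist semantics behind Create and Update above; the lock key, TTL, and local endpoint are illustrative assumptions:

    package main

    import (
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // assumes a local etcd at http://127.0.0.1:4001

        // Create succeeds only if the key does not exist yet (prevExist=false).
        if _, err := c.Create("job/lock", "owner-1", 30); err != nil {
            log.Fatalf("create failed (lock may already be held): %v", err)
        }

        // Update succeeds only if the key already exists (prevExist=true).
        if _, err := c.Update("job/lock", "owner-1", 30); err != nil {
            log.Fatal(err)
        }
    }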
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
-deleted file mode 100644
-index ced0f06..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
-+++ /dev/null
-@@ -1,241 +0,0 @@
--package etcd
--
--import (
--	"testing"
--)
--
--func TestSet(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--	}()
--
--	resp, err := c.Set("foo", "bar", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
--		t.Fatalf("Set 1 failed: %#v", resp)
--	}
--	if resp.PrevNode != nil {
--		t.Fatalf("Set 1 PrevNode failed: %#v", resp)
--	}
--
--	resp, err = c.Set("foo", "bar2", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
--		t.Fatalf("Set 2 failed: %#v", resp)
--	}
--	if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
--		t.Fatalf("Set 2 PrevNode failed: %#v", resp)
--	}
--}
--
--func TestUpdate(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--		c.Delete("nonexistent", true)
--	}()
--
--	resp, err := c.Set("foo", "bar", 5)
--
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// This should succeed.
--	resp, err = c.Update("foo", "wakawaka", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
--		t.Fatalf("Update 1 failed: %#v", resp)
--	}
--	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
--		t.Fatalf("Update 1 prevValue failed: %#v", resp)
--	}
--
--	// This should fail because the key does not exist.
--	resp, err = c.Update("nonexistent", "whatever", 5)
--	if err == nil {
--		t.Fatalf("The key %v did not exist, so the update should have failed."+
--			"The response was: %#v", resp.Node.Key, resp)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("newKey", true)
--	}()
--
--	newKey := "/newKey"
--	newValue := "/newValue"
--
--	// This should succeed
--	resp, err := c.Create(newKey, newValue, 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "create" && resp.Node.Key == newKey &&
--		resp.Node.Value == newValue && resp.Node.TTL == 5) {
--		t.Fatalf("Create 1 failed: %#v", resp)
--	}
--	if resp.PrevNode != nil {
--		t.Fatalf("Create 1 PrevNode failed: %#v", resp)
--	}
--
--	// This should fail, because the key is already there
--	resp, err = c.Create(newKey, newValue, 5)
--	if err == nil {
--		t.Fatalf("The key %v did exist, so the creation should have failed."+
--			"The response was: %#v", resp.Node.Key, resp)
--	}
--}
--
--func TestCreateInOrder(t *testing.T) {
--	c := NewClient(nil)
--	dir := "/queue"
--	defer func() {
--		c.DeleteDir(dir)
--	}()
--
--	var firstKey, secondKey string
--
--	resp, err := c.CreateInOrder(dir, "1", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
--		t.Fatalf("Create 1 failed: %#v", resp)
--	}
--
--	firstKey = resp.Node.Key
--
--	resp, err = c.CreateInOrder(dir, "2", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
--		t.Fatalf("Create 2 failed: %#v", resp)
--	}
--
--	secondKey = resp.Node.Key
--
--	if firstKey >= secondKey {
--		t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
--			firstKey, secondKey)
--	}
--}
--
--func TestSetDir(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("foo", true)
--		c.Delete("fooDir", true)
--	}()
--
--	resp, err := c.CreateDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
--		t.Fatalf("SetDir 1 failed: %#v", resp)
--	}
--	if resp.PrevNode != nil {
--		t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
--	}
--
--	// This should fail because /fooDir already points to a directory
--	resp, err = c.CreateDir("/fooDir", 5)
--	if err == nil {
--		t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
--			"The response was: %#v", resp)
--	}
--
--	_, err = c.Set("foo", "bar", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// This should succeed
--	// It should replace the key
--	resp, err = c.SetDir("foo", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
--		t.Fatalf("SetDir 2 failed: %#v", resp)
--	}
--	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
--		t.Fatalf("SetDir 2 failed: %#v", resp)
--	}
--}
--
--func TestUpdateDir(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("fooDir", true)
--	}()
--
--	resp, err := c.CreateDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// This should succeed.
--	resp, err = c.UpdateDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
--		resp.Node.Value == "" && resp.Node.TTL == 5) {
--		t.Fatalf("UpdateDir 1 failed: %#v", resp)
--	}
--	if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
--		t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
--	}
--
--	// This should fail because the key does not exist.
--	resp, err = c.UpdateDir("nonexistentDir", 5)
--	if err == nil {
--		t.Fatalf("The key %v did not exist, so the update should have failed."+
--			"The response was: %#v", resp.Node.Key, resp)
--	}
--}
--
--func TestCreateDir(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("fooDir", true)
--	}()
--
--	// This should succeed
--	resp, err := c.CreateDir("fooDir", 5)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
--		resp.Node.Value == "" && resp.Node.TTL == 5) {
--		t.Fatalf("CreateDir 1 failed: %#v", resp)
--	}
--	if resp.PrevNode != nil {
--		t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
--	}
--
--	// This should fail, because the key is already there
--	resp, err = c.CreateDir("fooDir", 5)
--	if err == nil {
--		t.Fatalf("The key %v did exist, so the creation should have failed."+
--			"The response was: %#v", resp.Node.Key, resp)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go
-deleted file mode 100644
-index b3d05df..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--package etcd
--
--const version = "v2"
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go
-deleted file mode 100644
-index aa8d3df..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go
-+++ /dev/null
-@@ -1,103 +0,0 @@
--package etcd
--
--import (
--	"errors"
--)
--
--// Errors introduced by the Watch command.
--var (
--	ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
--)
--
--// If recursive is set to true the watch returns the first change under the given
--// prefix since the given index.
--//
--// If recursive is set to false the watch returns the first change to the given key
--// since the given index.
--//
--// To watch for the latest change, set waitIndex = 0.
--//
--// If a receiver channel is given, it will be a long-term watch. Watch will block on the
--// channel. After someone receives from the channel, it will go on to watch that
--// prefix.  If a stop channel is given, the client can close the long-term watch using
--// the stop channel.
--func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
--	receiver chan *Response, stop chan bool) (*Response, error) {
--	logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
--	if receiver == nil {
--		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
--
--		if err != nil {
--			return nil, err
--		}
--
--		return raw.Unmarshal()
--	}
--	defer close(receiver)
--
--	for {
--		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
--
--		if err != nil {
--			return nil, err
--		}
--
--		resp, err := raw.Unmarshal()
--
--		if err != nil {
--			return nil, err
--		}
--
--		waitIndex = resp.Node.ModifiedIndex + 1
--		receiver <- resp
--	}
--}
--
--func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
--	receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
--
--	logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
--	if receiver == nil {
--		return c.watchOnce(prefix, waitIndex, recursive, stop)
--	}
--
--	for {
--		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
--
--		if err != nil {
--			return nil, err
--		}
--
--		resp, err := raw.Unmarshal()
--
--		if err != nil {
--			return nil, err
--		}
--
--		waitIndex = resp.Node.ModifiedIndex + 1
--		receiver <- raw
--	}
--}
--
--// helper func
--// return when there is change under the given prefix
--func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
--
--	options := Options{
--		"wait": true,
--	}
--	if waitIndex > 0 {
--		options["waitIndex"] = waitIndex
--	}
--	if recursive {
--		options["recursive"] = true
--	}
--
--	resp, err := c.getCancelable(key, options, stop)
--
--	if err == ErrRequestCancelled {
--		return nil, ErrWatchStoppedByUser
--	}
--
--	return resp, err
--}
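
A minimal sketch of a long-term Watch with a receiver and a stop channel, assuming a local etcd at the default endpoint; the watched prefix and the ten-second cutoff are illustrative:

    package main

    import (
        "log"
        "time"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // assumes a local etcd at http://127.0.0.1:4001

        receiver := make(chan *etcd.Response, 10)
        stop := make(chan bool)

        // Stop the long-term watch after ten seconds.
        go func() {
            time.Sleep(10 * time.Second)
            stop <- true
        }()

        // Print every change pushed onto the receiver channel.
        go func() {
            for resp := range receiver {
                log.Println(resp.Action, resp.Node.Key, resp.Node.Value)
            }
        }()

        // Blocks until the stop channel fires; changes under "config" are
        // delivered through the receiver channel.
        _, err := c.Watch("config", 0, true, receiver, stop)
        if err != nil && err != etcd.ErrWatchStoppedByUser {
            log.Fatal(err)
        }
    }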
-diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
-deleted file mode 100644
-index 43e1dfe..0000000
---- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
-+++ /dev/null
-@@ -1,119 +0,0 @@
--package etcd
--
--import (
--	"fmt"
--	"runtime"
--	"testing"
--	"time"
--)
--
--func TestWatch(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("watch_foo", true)
--	}()
--
--	go setHelper("watch_foo", "bar", c)
--
--	resp, err := c.Watch("watch_foo", 0, false, nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
--		t.Fatalf("Watch 1 failed: %#v", resp)
--	}
--
--	go setHelper("watch_foo", "bar", c)
--
--	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
--		t.Fatalf("Watch 2 failed: %#v", resp)
--	}
--
--	routineNum := runtime.NumGoroutine()
--
--	ch := make(chan *Response, 10)
--	stop := make(chan bool, 1)
--
--	go setLoop("watch_foo", "bar", c)
--
--	go receiver(ch, stop)
--
--	_, err = c.Watch("watch_foo", 0, false, ch, stop)
--	if err != ErrWatchStoppedByUser {
--		t.Fatalf("Watch returned a non-user stop error")
--	}
--
--	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
--		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
--	}
--}
--
--func TestWatchAll(t *testing.T) {
--	c := NewClient(nil)
--	defer func() {
--		c.Delete("watch_foo", true)
--	}()
--
--	go setHelper("watch_foo/foo", "bar", c)
--
--	resp, err := c.Watch("watch_foo", 0, true, nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
--		t.Fatalf("WatchAll 1 failed: %#v", resp)
--	}
--
--	go setHelper("watch_foo/foo", "bar", c)
--
--	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
--		t.Fatalf("WatchAll 2 failed: %#v", resp)
--	}
--
--	ch := make(chan *Response, 10)
--	stop := make(chan bool, 1)
--
--	routineNum := runtime.NumGoroutine()
--
--	go setLoop("watch_foo/foo", "bar", c)
--
--	go receiver(ch, stop)
--
--	_, err = c.Watch("watch_foo", 0, true, ch, stop)
--	if err != ErrWatchStoppedByUser {
--		t.Fatalf("Watch returned a non-user stop error")
--	}
--
--	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
--		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
--	}
--}
--
--func setHelper(key, value string, c *Client) {
--	time.Sleep(time.Second)
--	c.Set(key, value, 100)
--}
--
--func setLoop(key, value string, c *Client) {
--	time.Sleep(time.Second)
--	for i := 0; i < 10; i++ {
--		newValue := fmt.Sprintf("%s_%v", value, i)
--		c.Set(key, newValue, 100)
--		time.Sleep(time.Second / 10)
--	}
--}
--
--func receiver(c chan *Response, stop chan bool) {
--	for i := 0; i < 10; i++ {
--		<-c
--	}
--	stop <- true
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go
-deleted file mode 100644
-index 0ce0df1..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go
-+++ /dev/null
-@@ -1,371 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew
--
--import (
--	"fmt"
--	"io"
--	"reflect"
--	"sort"
--	"strconv"
--	"unsafe"
--)
--
--const (
--	// ptrSize is the size of a pointer on the current arch.
--	ptrSize = unsafe.Sizeof((*byte)(nil))
--)
--
--var (
--	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
--	// internal reflect.Value fields.  These values are valid before golang
--	// commit ecccf07e7f9d which changed the format.  They are also valid
--	// after commit 82f48826c6c7 which changed the format again to mirror
--	// the original format.  Code in the init function updates these offsets
--	// as necessary.
--	offsetPtr    = uintptr(ptrSize)
--	offsetScalar = uintptr(0)
--	offsetFlag   = uintptr(ptrSize * 2)
--
--	// flagKindWidth and flagKindShift indicate various bits that the
--	// reflect package uses internally to track kind information.
--	//
--	// flagRO indicates whether or not the value field of a reflect.Value is
--	// read-only.
--	//
--	// flagIndir indicates whether the value field of a reflect.Value is
--	// the actual data or a pointer to the data.
--	//
--	// These values are valid before golang commit 90a7c3c86944 which
--	// changed their positions.  Code in the init function updates these
--	// flags as necessary.
--	flagKindWidth = uintptr(5)
--	flagKindShift = uintptr(flagKindWidth - 1)
--	flagRO        = uintptr(1 << 0)
--	flagIndir     = uintptr(1 << 1)
--)
--
--func init() {
--	// Older versions of reflect.Value stored small integers directly in the
--	// ptr field (which is named val in the older versions).  Versions
--	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
--	// scalar for this purpose which unfortunately came before the flag
--	// field, so the offset of the flag field is different for those
--	// versions.
--	//
--	// This code constructs a new reflect.Value from a known small integer
--	// and checks if the size of the reflect.Value struct indicates it has
--	// the scalar field. When it does, the offsets are updated accordingly.
--	vv := reflect.ValueOf(0xf00)
--	if unsafe.Sizeof(vv) == (ptrSize * 4) {
--		offsetScalar = ptrSize * 2
--		offsetFlag = ptrSize * 3
--	}
--
--	// Commit 90a7c3c86944 changed the flag positions such that the low
--	// order bits are the kind.  This code extracts the kind from the flags
--	// field and ensures it's the correct type.  When it's not, the flag
--	// order has been changed to the newer format, so the flags are updated
--	// accordingly.
--	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
--	upfv := *(*uintptr)(upf)
--	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
--	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
--		flagKindShift = 0
--		flagRO = 1 << 5
--		flagIndir = 1 << 6
--	}
--}
--
--// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
--// the typical safety restrictions preventing access to unaddressable and
--// unexported data.  It works by digging the raw pointer to the underlying
--// value out of the protected value and generating a new unprotected (unsafe)
--// reflect.Value to it.
--//
--// This allows us to check for implementations of the Stringer and error
--// interfaces to be used for pretty printing ordinarily unaddressable and
--// inaccessible values such as unexported struct fields.
--func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
--	indirects := 1
--	vt := v.Type()
--	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
--	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
--	if rvf&flagIndir != 0 {
--		vt = reflect.PtrTo(v.Type())
--		indirects++
--	} else if offsetScalar != 0 {
--		// The value is in the scalar field when it's not one of the
--		// reference types.
--		switch vt.Kind() {
--		case reflect.Uintptr:
--		case reflect.Chan:
--		case reflect.Func:
--		case reflect.Map:
--		case reflect.Ptr:
--		case reflect.UnsafePointer:
--		default:
--			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
--				offsetScalar)
--		}
--	}
--
--	pv := reflect.NewAt(vt, upv)
--	rv = pv
--	for i := 0; i < indirects; i++ {
--		rv = rv.Elem()
--	}
--	return rv
--}
--
--// Some constants in the form of bytes to avoid string overhead.  This mirrors
--// the technique used in the fmt package.
--var (
--	panicBytes            = []byte("(PANIC=")
--	plusBytes             = []byte("+")
--	iBytes                = []byte("i")
--	trueBytes             = []byte("true")
--	falseBytes            = []byte("false")
--	interfaceBytes        = []byte("(interface {})")
--	commaNewlineBytes     = []byte(",\n")
--	newlineBytes          = []byte("\n")
--	openBraceBytes        = []byte("{")
--	openBraceNewlineBytes = []byte("{\n")
--	closeBraceBytes       = []byte("}")
--	asteriskBytes         = []byte("*")
--	colonBytes            = []byte(":")
--	colonSpaceBytes       = []byte(": ")
--	openParenBytes        = []byte("(")
--	closeParenBytes       = []byte(")")
--	spaceBytes            = []byte(" ")
--	pointerChainBytes     = []byte("->")
--	nilAngleBytes         = []byte("<nil>")
--	maxNewlineBytes       = []byte("<max depth reached>\n")
--	maxShortBytes         = []byte("<max>")
--	circularBytes         = []byte("<already shown>")
--	circularShortBytes    = []byte("<shown>")
--	invalidAngleBytes     = []byte("<invalid>")
--	openBracketBytes      = []byte("[")
--	closeBracketBytes     = []byte("]")
--	percentBytes          = []byte("%")
--	precisionBytes        = []byte(".")
--	openAngleBytes        = []byte("<")
--	closeAngleBytes       = []byte(">")
--	openMapBytes          = []byte("map[")
--	closeMapBytes         = []byte("]")
--	lenEqualsBytes        = []byte("len=")
--	capEqualsBytes        = []byte("cap=")
--)
--
--// hexDigits is used to map a decimal value to a hex digit.
--var hexDigits = "0123456789abcdef"
--
--// catchPanic handles any panics that might occur during the handleMethods
--// calls.
--func catchPanic(w io.Writer, v reflect.Value) {
--	if err := recover(); err != nil {
--		w.Write(panicBytes)
--		fmt.Fprintf(w, "%v", err)
--		w.Write(closeParenBytes)
--	}
--}
--
--// handleMethods attempts to call the Error and String methods on the underlying
--// type the passed reflect.Value represents and outputs the result to Writer w.
--//
--// It handles panics in any called methods by catching and displaying the error
--// as the formatted value.
--func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
--	// We need an interface to check if the type implements the error or
--	// Stringer interface.  However, the reflect package won't give us an
--	// interface on certain things like unexported struct fields in order
--	// to enforce visibility rules.  We use unsafe to bypass these restrictions
--	// since this package does not mutate the values.
--	if !v.CanInterface() {
--		v = unsafeReflectValue(v)
--	}
--
--	// Choose whether or not to do error and Stringer interface lookups against
--	// the base type or a pointer to the base type depending on settings.
--	// Technically calling one of these methods with a pointer receiver can
--	// mutate the value, however, types which choose to satisfy an error or
--	// Stringer interface with a pointer receiver should not be mutating their
--	// state inside these interface methods.
--	var viface interface{}
--	if !cs.DisablePointerMethods {
--		if !v.CanAddr() {
--			v = unsafeReflectValue(v)
--		}
--		viface = v.Addr().Interface()
--	} else {
--		if v.CanAddr() {
--			v = v.Addr()
--		}
--		viface = v.Interface()
--	}
--
--	// Is it an error or Stringer?
--	switch iface := viface.(type) {
--	case error:
--		defer catchPanic(w, v)
--		if cs.ContinueOnMethod {
--			w.Write(openParenBytes)
--			w.Write([]byte(iface.Error()))
--			w.Write(closeParenBytes)
--			w.Write(spaceBytes)
--			return false
--		}
--
--		w.Write([]byte(iface.Error()))
--		return true
--
--	case fmt.Stringer:
--		defer catchPanic(w, v)
--		if cs.ContinueOnMethod {
--			w.Write(openParenBytes)
--			w.Write([]byte(iface.String()))
--			w.Write(closeParenBytes)
--			w.Write(spaceBytes)
--			return false
--		}
--		w.Write([]byte(iface.String()))
--		return true
--	}
--	return false
--}
--
--// printBool outputs a boolean value as true or false to Writer w.
--func printBool(w io.Writer, val bool) {
--	if val {
--		w.Write(trueBytes)
--	} else {
--		w.Write(falseBytes)
--	}
--}
--
--// printInt outputs a signed integer value to Writer w.
--func printInt(w io.Writer, val int64, base int) {
--	w.Write([]byte(strconv.FormatInt(val, base)))
--}
--
--// printUint outputs an unsigned integer value to Writer w.
--func printUint(w io.Writer, val uint64, base int) {
--	w.Write([]byte(strconv.FormatUint(val, base)))
--}
--
--// printFloat outputs a floating point value using the specified precision,
--// which is expected to be 32 or 64 bit, to Writer w.
--func printFloat(w io.Writer, val float64, precision int) {
--	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
--}
--
--// printComplex outputs a complex value using the specified float precision
--// for the real and imaginary parts to Writer w.
--func printComplex(w io.Writer, c complex128, floatPrecision int) {
--	r := real(c)
--	w.Write(openParenBytes)
--	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
--	i := imag(c)
--	if i >= 0 {
--		w.Write(plusBytes)
--	}
--	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
--	w.Write(iBytes)
--	w.Write(closeParenBytes)
--}
--
--// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
--// prefix to Writer w.
--func printHexPtr(w io.Writer, p uintptr) {
--	// Null pointer.
--	num := uint64(p)
--	if num == 0 {
--		w.Write(nilAngleBytes)
--		return
--	}
--
--	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
--	buf := make([]byte, 18)
--
--	// It's simpler to construct the hex string right to left.
--	base := uint64(16)
--	i := len(buf) - 1
--	for num >= base {
--		buf[i] = hexDigits[num%base]
--		num /= base
--		i--
--	}
--	buf[i] = hexDigits[num]
--
--	// Add '0x' prefix.
--	i--
--	buf[i] = 'x'
--	i--
--	buf[i] = '0'
--
--	// Strip unused leading bytes.
--	buf = buf[i:]
--	w.Write(buf)
--}
--
--// valuesSorter implements sort.Interface to allow a slice of reflect.Value
--// elements to be sorted.
--type valuesSorter struct {
--	values []reflect.Value
--}
--
--// Len returns the number of values in the slice.  It is part of the
--// sort.Interface implementation.
--func (s *valuesSorter) Len() int {
--	return len(s.values)
--}
--
--// Swap swaps the values at the passed indices.  It is part of the
--// sort.Interface implementation.
--func (s *valuesSorter) Swap(i, j int) {
--	s.values[i], s.values[j] = s.values[j], s.values[i]
--}
--
--// Less returns whether the value at index i should sort before the
--// value at index j.  It is part of the sort.Interface implementation.
--func (s *valuesSorter) Less(i, j int) bool {
--	switch s.values[i].Kind() {
--	case reflect.Bool:
--		return !s.values[i].Bool() && s.values[j].Bool()
--	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
--		return s.values[i].Int() < s.values[j].Int()
--	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
--		return s.values[i].Uint() < s.values[j].Uint()
--	case reflect.Float32, reflect.Float64:
--		return s.values[i].Float() < s.values[j].Float()
--	case reflect.String:
--		return s.values[i].String() < s.values[j].String()
--	case reflect.Uintptr:
--		return s.values[i].Uint() < s.values[j].Uint()
--	}
--	return s.values[i].String() < s.values[j].String()
--}
--
--// sortValues is a generic sort function for native types: int, uint, bool,
--// string and uintptr.  Other inputs are sorted according to their
--// Value.String() value to ensure display stability.
--func sortValues(values []reflect.Value) {
--	if len(values) == 0 {
--		return
--	}
--	sort.Sort(&valuesSorter{values})
--}
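
The unsafeReflectValue/handleMethods code above is what lets spew invoke custom Stringer and error implementations, even on values it reaches through unexported fields. A minimal sketch of that behaviour from the caller's side, not part of the patch and assuming the usual github.com/davecgh/go-spew/spew import path:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// temperature satisfies fmt.Stringer, so spew prints the String() result
// instead of recursing into the underlying float64.
type temperature float64

func (t temperature) String() string {
	return fmt.Sprintf("%.1f degC", float64(t))
}

func main() {
	// Expected to print something along the lines of:
	//   (main.temperature) 21.5 degC
	spew.Dump(temperature(21.5))
}
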
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common_test.go
-deleted file mode 100644
-index 3bea81f..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common_test.go
-+++ /dev/null
-@@ -1,192 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew_test
--
--import (
--	"fmt"
--	"github.com/davecgh/go-spew/spew"
--	"reflect"
--	"testing"
--)
--
--// custom type to test Stringer interface on non-pointer receiver.
--type stringer string
--
--// String implements the Stringer interface for testing invocation of custom
--// stringers on types with non-pointer receivers.
--func (s stringer) String() string {
--	return "stringer " + string(s)
--}
--
--// custom type to test Stringer interface on pointer receiver.
--type pstringer string
--
--// String implements the Stringer interface for testing invocation of custom
--// stringers on types with only pointer receivers.
--func (s *pstringer) String() string {
--	return "stringer " + string(*s)
--}
--
--// xref1 and xref2 are cross referencing structs for testing circular reference
--// detection.
--type xref1 struct {
--	ps2 *xref2
--}
--type xref2 struct {
--	ps1 *xref1
--}
--
--// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
--// reference for testing detection.
--type indirCir1 struct {
--	ps2 *indirCir2
--}
--type indirCir2 struct {
--	ps3 *indirCir3
--}
--type indirCir3 struct {
--	ps1 *indirCir1
--}
--
--// embed is used to test embedded structures.
--type embed struct {
--	a string
--}
--
--// embedwrap is used to test embedded structures.
--type embedwrap struct {
--	*embed
--	e *embed
--}
--
--// panicer is used to intentionally cause a panic for testing that spew
--// properly handles them.
--type panicer int
--
--func (p panicer) String() string {
--	panic("test panic")
--}
--
--// customError is used to test custom error interface invocation.
--type customError int
--
--func (e customError) Error() string {
--	return fmt.Sprintf("error: %d", int(e))
--}
--
--// stringizeWants converts a slice of wanted test output into a format suitable
--// for a test error message.
--func stringizeWants(wants []string) string {
--	s := ""
--	for i, want := range wants {
--		if i > 0 {
--			s += fmt.Sprintf("want%d: %s", i+1, want)
--		} else {
--			s += "want: " + want
--		}
--	}
--	return s
--}
--
--// testFailed returns whether or not a test failed by checking if the result
--// of the test is in the slice of wanted strings.
--func testFailed(result string, wants []string) bool {
--	for _, want := range wants {
--		if result == want {
--			return false
--		}
--	}
--	return true
--}
--
--// TestSortValues ensures the sort functionality for reflect.Value based sorting
--// works as intended.
--func TestSortValues(t *testing.T) {
--	getInterfaces := func(values []reflect.Value) []interface{} {
--		interfaces := []interface{}{}
--		for _, v := range values {
--			interfaces = append(interfaces, v.Interface())
--		}
--		return interfaces
--	}
--
--	v := reflect.ValueOf
--
--	a := v("a")
--	b := v("b")
--	c := v("c")
--	embedA := v(embed{"a"})
--	embedB := v(embed{"b"})
--	embedC := v(embed{"c"})
--	tests := []struct {
--		input    []reflect.Value
--		expected []reflect.Value
--	}{
--		// No values.
--		{
--			[]reflect.Value{},
--			[]reflect.Value{},
--		},
--		// Bools.
--		{
--			[]reflect.Value{v(false), v(true), v(false)},
--			[]reflect.Value{v(false), v(false), v(true)},
--		},
--		// Ints.
--		{
--			[]reflect.Value{v(2), v(1), v(3)},
--			[]reflect.Value{v(1), v(2), v(3)},
--		},
--		// Uints.
--		{
--			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
--			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
--		},
--		// Floats.
--		{
--			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
--			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
--		},
--		// Strings.
--		{
--			[]reflect.Value{b, a, c},
--			[]reflect.Value{a, b, c},
--		},
--		// Uintptrs.
--		{
--			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
--			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
--		},
--		// Invalid.
--		{
--			[]reflect.Value{embedB, embedA, embedC},
--			[]reflect.Value{embedB, embedA, embedC},
--		},
--	}
--	for _, test := range tests {
--		spew.SortValues(test.input)
--		// reflect.DeepEqual cannot really make sense of reflect.Value,
--		// probably because of all the pointer tricks. For instance,
--		// v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
--		// instead.
--		input := getInterfaces(test.input)
--		expected := getInterfaces(test.expected)
--		if !reflect.DeepEqual(input, expected) {
--			t.Errorf("Sort mismatch:\n %v != %v", input, expected)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go
-deleted file mode 100644
-index e516675..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go
-+++ /dev/null
-@@ -1,288 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"os"
--)
--
--// ConfigState houses the configuration options used by spew to format and
--// display values.  There is a global instance, Config, that is used to control
--// all top-level Formatter and Dump functionality.  Each ConfigState instance
--// provides methods equivalent to the top-level functions.
--//
--// The zero value for ConfigState provides no indentation.  You would typically
--// want to set it to a space or a tab.
--//
--// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
--// with default settings.  See the documentation of NewDefaultConfig for default
--// values.
--type ConfigState struct {
--	// Indent specifies the string to use for each indentation level.  The
--	// global config instance that all top-level functions use set this to a
--	// single space by default.  If you would like more indentation, you might
--	// set this to a tab with "\t" or perhaps two spaces with "  ".
--	Indent string
--
--	// MaxDepth controls the maximum number of levels to descend into nested
--	// data structures.  The default, 0, means there is no limit.
--	//
--	// NOTE: Circular data structures are properly detected, so it is not
--	// necessary to set this value unless you specifically want to limit deeply
--	// nested data structures.
--	MaxDepth int
--
--	// DisableMethods specifies whether or not error and Stringer interfaces are
--	// invoked for types that implement them.
--	DisableMethods bool
--
--	// DisablePointerMethods specifies whether or not to check for and invoke
--	// error and Stringer interfaces on types which only accept a pointer
--	// receiver when the current type is not a pointer.
--	//
--	// NOTE: This might be an unsafe action since calling one of these methods
--	// with a pointer receiver could technically mutate the value, however,
--	// in practice, types which choose to satisfy an error or Stringer
--	// interface with a pointer receiver should not be mutating their state
--	// inside these interface methods.
--	DisablePointerMethods bool
--
--	// ContinueOnMethod specifies whether or not recursion should continue once
--	// a custom error or Stringer interface is invoked.  The default, false,
--	// means it will print the results of invoking the custom error or Stringer
--	// interface and return immediately instead of continuing to recurse into
--	// the internals of the data type.
--	//
--	// NOTE: This flag does not have any effect if method invocation is disabled
--	// via the DisableMethods or DisablePointerMethods options.
--	ContinueOnMethod bool
--
--	// SortKeys specifies map keys should be sorted before being printed. Use
--	// this to have a more deterministic, diffable output.  Note that only
--	// native types (bool, int, uint, floats, uintptr and string) are supported,
--	// with other types sorted according to the reflect.Value.String() output
--	// which guarantees display stability.
--	SortKeys bool
--}
--
--// Config is the active configuration of the top-level functions.
--// The configuration can be changed by modifying the contents of spew.Config.
--var Config = ConfigState{Indent: " "}
--
--// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the formatted string as a value that satisfies error.  See NewFormatter
--// for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
--	return fmt.Errorf(format, c.convertArgs(a)...)
--}
--
--// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
--	return fmt.Fprint(w, c.convertArgs(a)...)
--}
--
--// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
--	return fmt.Fprintf(w, format, c.convertArgs(a)...)
--}
--
--// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
--// were passed with a Formatter interface returned by c.NewFormatter.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
--	return fmt.Fprintln(w, c.convertArgs(a)...)
--}
--
--// Print is a wrapper for fmt.Print that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
--	return fmt.Print(c.convertArgs(a)...)
--}
--
--// Printf is a wrapper for fmt.Printf that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
--	return fmt.Printf(format, c.convertArgs(a)...)
--}
--
--// Println is a wrapper for fmt.Println that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
--	return fmt.Println(c.convertArgs(a)...)
--}
--
--// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Sprint(a ...interface{}) string {
--	return fmt.Sprint(c.convertArgs(a)...)
--}
--
--// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
--// passed with a Formatter interface returned by c.NewFormatter.  It returns
--// the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
--	return fmt.Sprintf(format, c.convertArgs(a)...)
--}
--
--// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
--// were passed with a Formatter interface returned by c.NewFormatter.  It
--// returns the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
--func (c *ConfigState) Sprintln(a ...interface{}) string {
--	return fmt.Sprintln(c.convertArgs(a)...)
--}
--
--/*
--NewFormatter returns a custom formatter that satisfies the fmt.Formatter
--interface.  As a result, it integrates cleanly with standard fmt package
--printing functions.  The formatter is useful for inline printing of smaller data
--types similar to the standard %v format specifier.
--
--The custom formatter only responds to the %v (most compact), %+v (adds pointer
--addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
--combinations.  Any other verbs such as %x and %q will be sent to the
--standard fmt package for formatting.  In addition, the custom formatter ignores
--the width and precision arguments (however they will still work on the format
--specifiers not handled by the custom formatter).
--
--Typically this function shouldn't be called directly.  It is much easier to make
--use of the custom formatter by calling one of the convenience functions such as
--c.Printf, c.Println, or c.Printf.
--*/
--func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
--	return newFormatter(c, v)
--}
--
--// Fdump formats and displays the passed arguments to io.Writer w.  It formats
--// exactly the same as Dump.
--func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
--	fdump(c, w, a...)
--}
--
--/*
--Dump displays the passed parameters to standard out with newlines, customizable
--indentation, and additional debug information such as complete types and all
--pointer addresses used to indirect to the final value.  It provides the
--following features over the built-in printing facilities provided by the fmt
--package:
--
--	* Pointers are dereferenced and followed
--	* Circular data structures are detected and handled properly
--	* Custom Stringer/error interfaces are optionally invoked, including
--	  on unexported types
--	* Custom types which only implement the Stringer/error interfaces via
--	  a pointer receiver are optionally invoked when passing non-pointer
--	  variables
--	* Byte arrays and slices are dumped like the hexdump -C command which
--	  includes offsets, byte values in hex, and ASCII output
--
--The configuration options are controlled by modifying the public members
--of c.  See ConfigState for options documentation.
--
--See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
--get the formatted result as a string.
--*/
--func (c *ConfigState) Dump(a ...interface{}) {
--	fdump(c, os.Stdout, a...)
--}
--
--// Sdump returns a string with the passed arguments formatted exactly the same
--// as Dump.
--func (c *ConfigState) Sdump(a ...interface{}) string {
--	var buf bytes.Buffer
--	fdump(c, &buf, a...)
--	return buf.String()
--}
--
--// convertArgs accepts a slice of arguments and returns a slice of the same
--// length with each argument converted to a spew Formatter interface using
--// the ConfigState associated with s.
--func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
--	formatters = make([]interface{}, len(args))
--	for index, arg := range args {
--		formatters[index] = newFormatter(c, arg)
--	}
--	return formatters
--}
--
--// NewDefaultConfig returns a ConfigState with the following default settings.
--//
--// 	Indent: " "
--// 	MaxDepth: 0
--// 	DisableMethods: false
--// 	DisablePointerMethods: false
--// 	ContinueOnMethod: false
--// 	SortKeys: false
--func NewDefaultConfig() *ConfigState {
--	return &ConfigState{Indent: " "}
--}
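
The ConfigState fields documented above can be used as an independent configuration instead of the global spew.Config. A minimal sketch, not part of the patch, using only the fields and methods defined in this file:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A private configuration, independent of spew.Config: tab indentation,
	// sorted map keys for deterministic output, and a depth limit of two.
	cfg := spew.ConfigState{Indent: "\t", SortKeys: true, MaxDepth: 2}

	data := map[string]interface{}{
		"b": []int{3, 2, 1},
		"a": map[string]int{"x": 1},
	}
	cfg.Dump(data) // same formatting as spew.Dump, but with cfg's settings
}
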
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go
-deleted file mode 100644
-index a0d73ac..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go
-+++ /dev/null
-@@ -1,196 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--/*
--Package spew implements a deep pretty printer for Go data structures to aid in
--debugging.
--
--A quick overview of the additional features spew provides over the built-in
--printing facilities for Go data types are as follows:
--
--	* Pointers are dereferenced and followed
--	* Circular data structures are detected and handled properly
--	* Custom Stringer/error interfaces are optionally invoked, including
--	  on unexported types
--	* Custom types which only implement the Stringer/error interfaces via
--	  a pointer receiver are optionally invoked when passing non-pointer
--	  variables
--	* Byte arrays and slices are dumped like the hexdump -C command which
--	  includes offsets, byte values in hex, and ASCII output (only when using
--	  Dump style)
--
--There are two different approaches spew allows for dumping Go data structures:
--
--	* Dump style which prints with newlines, customizable indentation,
--	  and additional debug information such as types and all pointer addresses
--	  used to indirect to the final value
--	* A custom Formatter interface that integrates cleanly with the standard fmt
--	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
--	  similar to the default %v while providing the additional functionality
--	  outlined above and passing unsupported format verbs such as %x and %q
--	  along to fmt
--
--Quick Start
--
--This section demonstrates how to quickly get started with spew.  See the
--sections below for further details on formatting and configuration options.
--
--To dump a variable with full newlines, indentation, type, and pointer
--information use Dump, Fdump, or Sdump:
--	spew.Dump(myVar1, myVar2, ...)
--	spew.Fdump(someWriter, myVar1, myVar2, ...)
--	str := spew.Sdump(myVar1, myVar2, ...)
--
--Alternatively, if you would prefer to use format strings with a compacted inline
--printing style, use the convenience wrappers Printf, Fprintf, etc with
--%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
--%#+v (adds types and pointer addresses):
--	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
--	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
--	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
--	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
--
--Configuration Options
--
--Configuration of spew is handled by fields in the ConfigState type.  For
--convenience, all of the top-level functions use a global state available
--via the spew.Config global.
--
--It is also possible to create a ConfigState instance that provides methods
--equivalent to the top-level functions.  This allows concurrent configuration
--options.  See the ConfigState documentation for more details.
--
--The following configuration options are available:
--	* Indent
--		String to use for each indentation level for Dump functions.
--		It is a single space by default.  A popular alternative is "\t".
--
--	* MaxDepth
--		Maximum number of levels to descend into nested data structures.
--		There is no limit by default.
--
--	* DisableMethods
--		Disables invocation of error and Stringer interface methods.
--		Method invocation is enabled by default.
--
--	* DisablePointerMethods
--		Disables invocation of error and Stringer interface methods on types
--		which only accept pointer receivers from non-pointer variables.
--		Pointer method invocation is enabled by default.
--
--	* ContinueOnMethod
--		Enables recursion into types after invoking error and Stringer interface
--		methods. Recursion after method invocation is disabled by default.
--
--	* SortKeys
--		Specifies map keys should be sorted before being printed. Use
--		this to have a more deterministic, diffable output.  Note that
--		only native types (bool, int, uint, floats, uintptr and string)
--		are supported, with other types sorted according to the
--		reflect.Value.String() output which guarantees display stability.
--		Natural map order is used by default.
--
--Dump Usage
--
--Simply call spew.Dump with a list of variables you want to dump:
--
--	spew.Dump(myVar1, myVar2, ...)
--
--You may also call spew.Fdump if you would prefer to output to an arbitrary
--io.Writer.  For example, to dump to standard error:
--
--	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
--
--A third option is to call spew.Sdump to get the formatted output as a string:
--
--	str := spew.Sdump(myVar1, myVar2, ...)
--
--Sample Dump Output
--
--See the Dump example for details on the setup of the types and variables being
--shown here.
--
--	(main.Foo) {
--	 unexportedField: (*main.Bar)(0xf84002e210)({
--	  flag: (main.Flag) flagTwo,
--	  data: (uintptr) <nil>
--	 }),
--	 ExportedField: (map[interface {}]interface {}) (len=1) {
--	  (string) (len=3) "one": (bool) true
--	 }
--	}
--
--Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
--command as shown.
--	([]uint8) (len=32 cap=32) {
--	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
--	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
--	 00000020  31 32                                             |12|
--	}
--
--Custom Formatter
--
--Spew provides a custom formatter that implements the fmt.Formatter interface
--so that it integrates cleanly with standard fmt package printing functions. The
--formatter is useful for inline printing of smaller data types similar to the
--standard %v format specifier.
--
--The custom formatter only responds to the %v (most compact), %+v (adds pointer
--addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
--combinations.  Any other verbs such as %x and %q will be sent to the
--standard fmt package for formatting.  In addition, the custom formatter ignores
--the width and precision arguments (however they will still work on the format
--specifiers not handled by the custom formatter).
--
--Custom Formatter Usage
--
--The simplest way to make use of the spew custom formatter is to call one of the
--convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
--functions have syntax you are most likely already familiar with:
--
--	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
--	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
--	spew.Println(myVar, myVar2)
--	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
--	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
--
--See the Index for the full list of convenience functions.
--
--Sample Formatter Output
--
--Double pointer to a uint8:
--	  %v: <**>5
--	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
--	 %#v: (**uint8)5
--	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
--
--Pointer to circular struct with a uint8 field and a pointer to itself:
--	  %v: <*>{1 <*><shown>}
--	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
--	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
--	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
--
--See the Printf example for details on the setup of variables being shown
--here.
--
--Errors
--
--Since it is possible for custom Stringer/error interfaces to panic, spew
--detects them and handles them internally by printing the panic information
--inline with the output.  Since spew is intended to provide deep pretty printing
--capabilities on structures, it intentionally does not return any errors.
--*/
--package spew
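
The verb combinations described in the package documentation above look like this in practice; a short sketch, not part of the patch, using only the convenience wrappers documented here:

package main

import "github.com/davecgh/go-spew/spew"

type point struct{ X, Y int }

func main() {
	p := &point{X: 1, Y: 2}

	// Each verb adds detail: %+v adds pointer addresses, %#v adds types,
	// %#+v adds both; other verbs fall through to the standard fmt package.
	spew.Printf("compact:   %v\n", p)
	spew.Printf("addresses: %+v\n", p)
	spew.Printf("types:     %#v\n", p)
	spew.Printf("both:      %#+v\n", p)
}
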
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
-deleted file mode 100644
-index 02d4c9d..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
-+++ /dev/null
-@@ -1,500 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew
--
--import (
--	"bytes"
--	"encoding/hex"
--	"fmt"
--	"io"
--	"os"
--	"reflect"
--	"regexp"
--	"strconv"
--	"strings"
--)
--
--var (
--	// uint8Type is a reflect.Type representing a uint8.  It is used to
--	// convert cgo types to uint8 slices for hexdumping.
--	uint8Type = reflect.TypeOf(uint8(0))
--
--	// cCharRE is a regular expression that matches a cgo char.
--	// It is used to detect character arrays to hexdump them.
--	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
--
--	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
--	// char.  It is used to detect unsigned character arrays to hexdump
--	// them.
--	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
--
--	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
--	// It is used to detect uint8_t arrays to hexdump them.
--	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
--)
--
--// dumpState contains information about the state of a dump operation.
--type dumpState struct {
--	w                io.Writer
--	depth            int
--	pointers         map[uintptr]int
--	ignoreNextType   bool
--	ignoreNextIndent bool
--	cs               *ConfigState
--}
--
--// indent performs indentation according to the depth level and cs.Indent
--// option.
--func (d *dumpState) indent() {
--	if d.ignoreNextIndent {
--		d.ignoreNextIndent = false
--		return
--	}
--	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
--}
--
--// unpackValue returns values inside of non-nil interfaces when possible.
--// This is useful for data types like structs, arrays, slices, and maps which
--// can contain varying types packed inside an interface.
--func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
--	if v.Kind() == reflect.Interface && !v.IsNil() {
--		v = v.Elem()
--	}
--	return v
--}
--
--// dumpPtr handles formatting of pointers by indirecting them as necessary.
--func (d *dumpState) dumpPtr(v reflect.Value) {
--	// Remove pointers at or below the current depth from map used to detect
--	// circular refs.
--	for k, depth := range d.pointers {
--		if depth >= d.depth {
--			delete(d.pointers, k)
--		}
--	}
--
--	// Keep list of all dereferenced pointers to show later.
--	pointerChain := make([]uintptr, 0)
--
--	// Figure out how many levels of indirection there are by dereferencing
--	// pointers and unpacking interfaces down the chain while detecting circular
--	// references.
--	nilFound := false
--	cycleFound := false
--	indirects := 0
--	ve := v
--	for ve.Kind() == reflect.Ptr {
--		if ve.IsNil() {
--			nilFound = true
--			break
--		}
--		indirects++
--		addr := ve.Pointer()
--		pointerChain = append(pointerChain, addr)
--		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
--			cycleFound = true
--			indirects--
--			break
--		}
--		d.pointers[addr] = d.depth
--
--		ve = ve.Elem()
--		if ve.Kind() == reflect.Interface {
--			if ve.IsNil() {
--				nilFound = true
--				break
--			}
--			ve = ve.Elem()
--		}
--	}
--
--	// Display type information.
--	d.w.Write(openParenBytes)
--	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
--	d.w.Write([]byte(ve.Type().String()))
--	d.w.Write(closeParenBytes)
--
--	// Display pointer information.
--	if len(pointerChain) > 0 {
--		d.w.Write(openParenBytes)
--		for i, addr := range pointerChain {
--			if i > 0 {
--				d.w.Write(pointerChainBytes)
--			}
--			printHexPtr(d.w, addr)
--		}
--		d.w.Write(closeParenBytes)
--	}
--
--	// Display dereferenced value.
--	d.w.Write(openParenBytes)
--	switch {
--	case nilFound == true:
--		d.w.Write(nilAngleBytes)
--
--	case cycleFound == true:
--		d.w.Write(circularBytes)
--
--	default:
--		d.ignoreNextType = true
--		d.dump(ve)
--	}
--	d.w.Write(closeParenBytes)
--}
--
--// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
--// reflection) arrays and slices are dumped in hexdump -C fashion.
--func (d *dumpState) dumpSlice(v reflect.Value) {
--	// Determine whether this type should be hex dumped or not.  Also,
--	// for types which should be hexdumped, try to use the underlying data
--	// first, then fall back to trying to convert them to a uint8 slice.
--	var buf []uint8
--	doConvert := false
--	doHexDump := false
--	numEntries := v.Len()
--	if numEntries > 0 {
--		vt := v.Index(0).Type()
--		vts := vt.String()
--		switch {
--		// C types that need to be converted.
--		case cCharRE.MatchString(vts):
--			fallthrough
--		case cUnsignedCharRE.MatchString(vts):
--			fallthrough
--		case cUint8tCharRE.MatchString(vts):
--			doConvert = true
--
--		// Try to use existing uint8 slices and fall back to converting
--		// and copying if that fails.
--		case vt.Kind() == reflect.Uint8:
--			// We need an addressable interface to convert the type back
--			// into a byte slice.  However, the reflect package won't give
--			// us an interface on certain things like unexported struct
--			// fields in order to enforce visibility rules.  We use unsafe
--			// to bypass these restrictions since this package does not
--			// mutate the values.
--			vs := v
--			if !vs.CanInterface() || !vs.CanAddr() {
--				vs = unsafeReflectValue(vs)
--			}
--			vs = vs.Slice(0, numEntries)
--
--			// Use the existing uint8 slice if it can be type
--			// asserted.
--			iface := vs.Interface()
--			if slice, ok := iface.([]uint8); ok {
--				buf = slice
--				doHexDump = true
--				break
--			}
--
--			// The underlying data needs to be converted if it can't
--			// be type asserted to a uint8 slice.
--			doConvert = true
--		}
--
--		// Copy and convert the underlying type if needed.
--		if doConvert && vt.ConvertibleTo(uint8Type) {
--			// Convert and copy each element into a uint8 byte
--			// slice.
--			buf = make([]uint8, numEntries)
--			for i := 0; i < numEntries; i++ {
--				vv := v.Index(i)
--				buf[i] = uint8(vv.Convert(uint8Type).Uint())
--			}
--			doHexDump = true
--		}
--	}
--
--	// Hexdump the entire slice as needed.
--	if doHexDump {
--		indent := strings.Repeat(d.cs.Indent, d.depth)
--		str := indent + hex.Dump(buf)
--		str = strings.Replace(str, "\n", "\n"+indent, -1)
--		str = strings.TrimRight(str, d.cs.Indent)
--		d.w.Write([]byte(str))
--		return
--	}
--
--	// Recursively call dump for each item.
--	for i := 0; i < numEntries; i++ {
--		d.dump(d.unpackValue(v.Index(i)))
--		if i < (numEntries - 1) {
--			d.w.Write(commaNewlineBytes)
--		} else {
--			d.w.Write(newlineBytes)
--		}
--	}
--}
--
--// dump is the main workhorse for dumping a value.  It uses the passed reflect
--// value to figure out what kind of object we are dealing with and formats it
--// appropriately.  It is a recursive function, however circular data structures
--// are detected and handled properly.
--func (d *dumpState) dump(v reflect.Value) {
--	// Handle invalid reflect values immediately.
--	kind := v.Kind()
--	if kind == reflect.Invalid {
--		d.w.Write(invalidAngleBytes)
--		return
--	}
--
--	// Handle pointers specially.
--	if kind == reflect.Ptr {
--		d.indent()
--		d.dumpPtr(v)
--		return
--	}
--
--	// Print type information unless already handled elsewhere.
--	if !d.ignoreNextType {
--		d.indent()
--		d.w.Write(openParenBytes)
--		d.w.Write([]byte(v.Type().String()))
--		d.w.Write(closeParenBytes)
--		d.w.Write(spaceBytes)
--	}
--	d.ignoreNextType = false
--
--	// Display length and capacity if the built-in len and cap functions
--	// work with the value's kind and the len/cap itself is non-zero.
--	valueLen, valueCap := 0, 0
--	switch v.Kind() {
--	case reflect.Array, reflect.Slice, reflect.Chan:
--		valueLen, valueCap = v.Len(), v.Cap()
--	case reflect.Map, reflect.String:
--		valueLen = v.Len()
--	}
--	if valueLen != 0 || valueCap != 0 {
--		d.w.Write(openParenBytes)
--		if valueLen != 0 {
--			d.w.Write(lenEqualsBytes)
--			printInt(d.w, int64(valueLen), 10)
--		}
--		if valueCap != 0 {
--			if valueLen != 0 {
--				d.w.Write(spaceBytes)
--			}
--			d.w.Write(capEqualsBytes)
--			printInt(d.w, int64(valueCap), 10)
--		}
--		d.w.Write(closeParenBytes)
--		d.w.Write(spaceBytes)
--	}
--
--	// Call Stringer/error interfaces if they exist and the handle methods flag
--	// is enabled
--	if !d.cs.DisableMethods {
--		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
--			if handled := handleMethods(d.cs, d.w, v); handled {
--				return
--			}
--		}
--	}
--
--	switch kind {
--	case reflect.Invalid:
--		// Do nothing.  We should never get here since invalid has already
--		// been handled above.
--
--	case reflect.Bool:
--		printBool(d.w, v.Bool())
--
--	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
--		printInt(d.w, v.Int(), 10)
--
--	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
--		printUint(d.w, v.Uint(), 10)
--
--	case reflect.Float32:
--		printFloat(d.w, v.Float(), 32)
--
--	case reflect.Float64:
--		printFloat(d.w, v.Float(), 64)
--
--	case reflect.Complex64:
--		printComplex(d.w, v.Complex(), 32)
--
--	case reflect.Complex128:
--		printComplex(d.w, v.Complex(), 64)
--
--	case reflect.Slice:
--		if v.IsNil() {
--			d.w.Write(nilAngleBytes)
--			break
--		}
--		fallthrough
--
--	case reflect.Array:
--		d.w.Write(openBraceNewlineBytes)
--		d.depth++
--		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
--			d.indent()
--			d.w.Write(maxNewlineBytes)
--		} else {
--			d.dumpSlice(v)
--		}
--		d.depth--
--		d.indent()
--		d.w.Write(closeBraceBytes)
--
--	case reflect.String:
--		d.w.Write([]byte(strconv.Quote(v.String())))
--
--	case reflect.Interface:
--		// The only time we should get here is for nil interfaces due to
--		// unpackValue calls.
--		if v.IsNil() {
--			d.w.Write(nilAngleBytes)
--		}
--
--	case reflect.Ptr:
--		// Do nothing.  We should never get here since pointers have already
--		// been handled above.
--
--	case reflect.Map:
--		d.w.Write(openBraceNewlineBytes)
--		d.depth++
--		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
--			d.indent()
--			d.w.Write(maxNewlineBytes)
--		} else {
--			numEntries := v.Len()
--			keys := v.MapKeys()
--			if d.cs.SortKeys {
--				sortValues(keys)
--			}
--			for i, key := range keys {
--				d.dump(d.unpackValue(key))
--				d.w.Write(colonSpaceBytes)
--				d.ignoreNextIndent = true
--				d.dump(d.unpackValue(v.MapIndex(key)))
--				if i < (numEntries - 1) {
--					d.w.Write(commaNewlineBytes)
--				} else {
--					d.w.Write(newlineBytes)
--				}
--			}
--		}
--		d.depth--
--		d.indent()
--		d.w.Write(closeBraceBytes)
--
--	case reflect.Struct:
--		d.w.Write(openBraceNewlineBytes)
--		d.depth++
--		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
--			d.indent()
--			d.w.Write(maxNewlineBytes)
--		} else {
--			vt := v.Type()
--			numFields := v.NumField()
--			for i := 0; i < numFields; i++ {
--				d.indent()
--				vtf := vt.Field(i)
--				d.w.Write([]byte(vtf.Name))
--				d.w.Write(colonSpaceBytes)
--				d.ignoreNextIndent = true
--				d.dump(d.unpackValue(v.Field(i)))
--				if i < (numFields - 1) {
--					d.w.Write(commaNewlineBytes)
--				} else {
--					d.w.Write(newlineBytes)
--				}
--			}
--		}
--		d.depth--
--		d.indent()
--		d.w.Write(closeBraceBytes)
--
--	case reflect.Uintptr:
--		printHexPtr(d.w, uintptr(v.Uint()))
--
--	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
--		printHexPtr(d.w, v.Pointer())
--
--	// There were not any other types at the time this code was written, but
--	// fall back to letting the default fmt package handle it in case any new
--	// types are added.
--	default:
--		if v.CanInterface() {
--			fmt.Fprintf(d.w, "%v", v.Interface())
--		} else {
--			fmt.Fprintf(d.w, "%v", v.String())
--		}
--	}
--}
--
--// fdump is a helper function to consolidate the logic from the various public
--// methods which take varying writers and config states.
--func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
--	for _, arg := range a {
--		if arg == nil {
--			w.Write(interfaceBytes)
--			w.Write(spaceBytes)
--			w.Write(nilAngleBytes)
--			w.Write(newlineBytes)
--			continue
--		}
--
--		d := dumpState{w: w, cs: cs}
--		d.pointers = make(map[uintptr]int)
--		d.dump(reflect.ValueOf(arg))
--		d.w.Write(newlineBytes)
--	}
--}
--
--// Fdump formats and displays the passed arguments to io.Writer w.  It formats
--// exactly the same as Dump.
--func Fdump(w io.Writer, a ...interface{}) {
--	fdump(&Config, w, a...)
--}
--
--// Sdump returns a string with the passed arguments formatted exactly the same
--// as Dump.
--func Sdump(a ...interface{}) string {
--	var buf bytes.Buffer
--	fdump(&Config, &buf, a...)
--	return buf.String()
--}
--
--/*
--Dump displays the passed parameters to standard out with newlines, customizable
--indentation, and additional debug information such as complete types and all
--pointer addresses used to indirect to the final value.  It provides the
--following features over the built-in printing facilities provided by the fmt
--package:
--
--	* Pointers are dereferenced and followed
--	* Circular data structures are detected and handled properly
--	* Custom Stringer/error interfaces are optionally invoked, including
--	  on unexported types
--	* Custom types which only implement the Stringer/error interfaces via
--	  a pointer receiver are optionally invoked when passing non-pointer
--	  variables
--	* Byte arrays and slices are dumped like the hexdump -C command which
--	  includes offsets, byte values in hex, and ASCII output
--
--The configuration options are controlled by an exported package global,
--spew.Config.  See ConfigState for options documentation.
--
--See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
--get the formatted result as a string.
--*/
--func Dump(a ...interface{}) {
--	fdump(&Config, os.Stdout, a...)
--}
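
Dump, Fdump and Sdump as defined above differ only in where the rendering goes; a minimal sketch, not part of the patch:

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	payload := []byte("hello, spew")

	// Dump writes to stdout; byte slices come out hexdump -C style.
	spew.Dump(payload)

	// Fdump writes the same rendering to any io.Writer.
	var buf bytes.Buffer
	spew.Fdump(&buf, payload)

	// Sdump returns it as a string, handy for test failure messages.
	s := spew.Sdump(payload)
	fmt.Println(buf.String() == s) // true: all three produce the same text
}
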
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
-deleted file mode 100644
-index f1a5644..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
-+++ /dev/null
-@@ -1,978 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--/*
--Test Summary:
--NOTE: For each test, a nil pointer, a single pointer and double pointer to the
--base test element are also tested to ensure proper indirection across all types.
--
--- Max int8, int16, int32, int64, int
--- Max uint8, uint16, uint32, uint64, uint
--- Boolean true and false
--- Standard complex64 and complex128
--- Array containing standard ints
--- Array containing type with custom formatter on pointer receiver only
--- Array containing interfaces
--- Array containing bytes
--- Slice containing standard float32 values
--- Slice containing type with custom formatter on pointer receiver only
--- Slice containing interfaces
--- Slice containing bytes
--- Nil slice
--- Standard string
--- Nil interface
--- Sub-interface
--- Map with string keys and int vals
--- Map with custom formatter type on pointer receiver only keys and vals
--- Map with interface keys and values
--- Map with nil interface value
--- Struct with primitives
--- Struct that contains another struct
--- Struct that contains custom type with Stringer pointer interface via both
--  exported and unexported fields
--- Struct that contains embedded struct and field to same struct
--- Uintptr to 0 (null pointer)
--- Uintptr address of real variable
--- Unsafe.Pointer to 0 (null pointer)
--- Unsafe.Pointer to address of real variable
--- Nil channel
--- Standard int channel
--- Function with no params and no returns
--- Function with param and no returns
--- Function with multiple params and multiple returns
--- Struct that is circular through self referencing
--- Structs that are circular through cross referencing
--- Structs that are indirectly circular
--- Type that panics in its Stringer interface
--*/
--
--package spew_test
--
--import (
--	"bytes"
--	"fmt"
--	"github.com/davecgh/go-spew/spew"
--	"testing"
--	"unsafe"
--)
--
--// dumpTest is used to describe a test to be performed against the Dump method.
--type dumpTest struct {
--	in    interface{}
--	wants []string
--}
--
--// dumpTests houses all of the tests to be performed against the Dump method.
--var dumpTests = make([]dumpTest, 0)
--
--// addDumpTest is a helper method to append the passed input and desired result
--// to dumpTests
--func addDumpTest(in interface{}, wants ...string) {
--	test := dumpTest{in, wants}
--	dumpTests = append(dumpTests, test)
--}
--
--func addIntDumpTests() {
--	// Max int8.
--	v := int8(127)
--	nv := (*int8)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "int8"
--	vs := "127"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Max int16.
--	v2 := int16(32767)
--	nv2 := (*int16)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "int16"
--	v2s := "32767"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--
--	// Max int32.
--	v3 := int32(2147483647)
--	nv3 := (*int32)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "int32"
--	v3s := "2147483647"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--
--	// Max int64.
--	v4 := int64(9223372036854775807)
--	nv4 := (*int64)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "int64"
--	v4s := "9223372036854775807"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
--	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
--	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
--
--	// Max int.
--	v5 := int(2147483647)
--	nv5 := (*int)(nil)
--	pv5 := &v5
--	v5Addr := fmt.Sprintf("%p", pv5)
--	pv5Addr := fmt.Sprintf("%p", &pv5)
--	v5t := "int"
--	v5s := "2147483647"
--	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
--	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
--	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
--	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
--}
--
--func addUintDumpTests() {
--	// Max uint8.
--	v := uint8(255)
--	nv := (*uint8)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "uint8"
--	vs := "255"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Max uint16.
--	v2 := uint16(65535)
--	nv2 := (*uint16)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uint16"
--	v2s := "65535"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--
--	// Max uint32.
--	v3 := uint32(4294967295)
--	nv3 := (*uint32)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "uint32"
--	v3s := "4294967295"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--
--	// Max uint64.
--	v4 := uint64(18446744073709551615)
--	nv4 := (*uint64)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "uint64"
--	v4s := "18446744073709551615"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
--	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
--	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
--
--	// Max uint.
--	v5 := uint(4294967295)
--	nv5 := (*uint)(nil)
--	pv5 := &v5
--	v5Addr := fmt.Sprintf("%p", pv5)
--	pv5Addr := fmt.Sprintf("%p", &pv5)
--	v5t := "uint"
--	v5s := "4294967295"
--	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
--	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
--	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
--	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
--}
--
--func addBoolDumpTests() {
--	// Boolean true.
--	v := bool(true)
--	nv := (*bool)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "bool"
--	vs := "true"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Boolean false.
--	v2 := bool(false)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "bool"
--	v2s := "false"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--}
--
--func addFloatDumpTests() {
--	// Standard float32.
--	v := float32(3.1415)
--	nv := (*float32)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "float32"
--	vs := "3.1415"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Standard float64.
--	v2 := float64(3.1415926)
--	nv2 := (*float64)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "float64"
--	v2s := "3.1415926"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--}
--
--func addComplexDumpTests() {
--	// Standard complex64.
--	v := complex(float32(6), -2)
--	nv := (*complex64)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "complex64"
--	vs := "(6-2i)"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Standard complex128.
--	v2 := complex(float64(-6), 2)
--	nv2 := (*complex128)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "complex128"
--	v2s := "(-6+2i)"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--}
--
--func addArrayDumpTests() {
--	// Array containing standard ints.
--	v := [3]int{1, 2, 3}
--	vLen := fmt.Sprintf("%d", len(v))
--	vCap := fmt.Sprintf("%d", cap(v))
--	nv := (*[3]int)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "int"
--	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
--		vt + ") 2,\n (" + vt + ") 3\n}"
--	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*[3]"+vt+")(<nil>)\n")
--
--	// Array containing type with custom formatter on pointer receiver only.
--	v2i0 := pstringer("1")
--	v2i1 := pstringer("2")
--	v2i2 := pstringer("3")
--	v2 := [3]pstringer{v2i0, v2i1, v2i2}
--	v2i0Len := fmt.Sprintf("%d", len(v2i0))
--	v2i1Len := fmt.Sprintf("%d", len(v2i1))
--	v2i2Len := fmt.Sprintf("%d", len(v2i2))
--	v2Len := fmt.Sprintf("%d", len(v2))
--	v2Cap := fmt.Sprintf("%d", cap(v2))
--	nv2 := (*[3]pstringer)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.pstringer"
--	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
--		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
--		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
--		"stringer 3\n}"
--	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*[3]"+v2t+")(<nil>)\n")
--
--	// Array containing interfaces.
--	v3i0 := "one"
--	v3 := [3]interface{}{v3i0, int(2), uint(3)}
--	v3i0Len := fmt.Sprintf("%d", len(v3i0))
--	v3Len := fmt.Sprintf("%d", len(v3))
--	v3Cap := fmt.Sprintf("%d", cap(v3))
--	nv3 := (*[3]interface{})(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "[3]interface {}"
--	v3t2 := "string"
--	v3t3 := "int"
--	v3t4 := "uint"
--	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
--		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
--		v3t4 + ") 3\n}"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--
--	// Array containing bytes.
--	v4 := [34]byte{
--		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
--		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
--		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
--		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
--		0x31, 0x32,
--	}
--	v4Len := fmt.Sprintf("%d", len(v4))
--	v4Cap := fmt.Sprintf("%d", cap(v4))
--	nv4 := (*[34]byte)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "[34]uint8"
--	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
--		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
--		"  |............... |\n" +
--		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
--		"  |!\"#$%&'()*+,-./0|\n" +
--		" 00000020  31 32                                           " +
--		"  |12|\n}"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
--	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
--	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
--}
--
--func addSliceDumpTests() {
--	// Slice containing standard float32 values.
--	v := []float32{3.14, 6.28, 12.56}
--	vLen := fmt.Sprintf("%d", len(v))
--	vCap := fmt.Sprintf("%d", cap(v))
--	nv := (*[]float32)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "float32"
--	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
--		vt + ") 6.28,\n (" + vt + ") 12.56\n}"
--	addDumpTest(v, "([]"+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*[]"+vt+")(<nil>)\n")
--
--	// Slice containing type with custom formatter on pointer receiver only.
--	v2i0 := pstringer("1")
--	v2i1 := pstringer("2")
--	v2i2 := pstringer("3")
--	v2 := []pstringer{v2i0, v2i1, v2i2}
--	v2i0Len := fmt.Sprintf("%d", len(v2i0))
--	v2i1Len := fmt.Sprintf("%d", len(v2i1))
--	v2i2Len := fmt.Sprintf("%d", len(v2i2))
--	v2Len := fmt.Sprintf("%d", len(v2))
--	v2Cap := fmt.Sprintf("%d", cap(v2))
--	nv2 := (*[]pstringer)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.pstringer"
--	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
--		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
--		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
--		"stringer 3\n}"
--	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*[]"+v2t+")(<nil>)\n")
--
--	// Slice containing interfaces.
--	v3i0 := "one"
--	v3 := []interface{}{v3i0, int(2), uint(3), nil}
--	v3i0Len := fmt.Sprintf("%d", len(v3i0))
--	v3Len := fmt.Sprintf("%d", len(v3))
--	v3Cap := fmt.Sprintf("%d", cap(v3))
--	nv3 := (*[]interface{})(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "[]interface {}"
--	v3t2 := "string"
--	v3t3 := "int"
--	v3t4 := "uint"
--	v3t5 := "interface {}"
--	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
--		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
--		v3t4 + ") 3,\n (" + v3t5 + ") <nil>\n}"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--
--	// Slice containing bytes.
--	v4 := []byte{
--		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
--		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
--		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
--		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
--		0x31, 0x32,
--	}
--	v4Len := fmt.Sprintf("%d", len(v4))
--	v4Cap := fmt.Sprintf("%d", cap(v4))
--	nv4 := (*[]byte)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "[]uint8"
--	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
--		"{\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20" +
--		"  |............... |\n" +
--		" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30" +
--		"  |!\"#$%&'()*+,-./0|\n" +
--		" 00000020  31 32                                           " +
--		"  |12|\n}"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
--	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
--	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
--
--	// Nil slice.
--	v5 := []int(nil)
--	nv5 := (*[]int)(nil)
--	pv5 := &v5
--	v5Addr := fmt.Sprintf("%p", pv5)
--	pv5Addr := fmt.Sprintf("%p", &pv5)
--	v5t := "[]int"
--	v5s := "<nil>"
--	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
--	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
--	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
--	addDumpTest(nv5, "(*"+v5t+")(<nil>)\n")
--}
--
--func addStringDumpTests() {
--	// Standard string.
--	v := "test"
--	vLen := fmt.Sprintf("%d", len(v))
--	nv := (*string)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "string"
--	vs := "(len=" + vLen + ") \"test\""
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--}
--
--func addInterfaceDumpTests() {
--	// Nil interface.
--	var v interface{}
--	nv := (*interface{})(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "interface {}"
--	vs := "<nil>"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Sub-interface.
--	v2 := interface{}(uint16(65535))
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uint16"
--	v2s := "65535"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--}
--
--func addMapDumpTests() {
--	// Map with string keys and int vals.
--	k := "one"
--	kk := "two"
--	m := map[string]int{k: 1, kk: 2}
--	klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
--	kkLen := fmt.Sprintf("%d", len(kk))
--	mLen := fmt.Sprintf("%d", len(m))
--	nm := (*map[string]int)(nil)
--	pm := &m
--	mAddr := fmt.Sprintf("%p", pm)
--	pmAddr := fmt.Sprintf("%p", &pm)
--	mt := "map[string]int"
--	mt1 := "string"
--	mt2 := "int"
--	ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
--		"\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
--		") \"two\": (" + mt2 + ") 2\n}"
--	ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
--		"\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
--		") \"one\": (" + mt2 + ") 1\n}"
--	addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
--	addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
--		"(*"+mt+")("+mAddr+")("+ms2+")\n")
--	addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
--		"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
--	addDumpTest(nm, "(*"+mt+")(<nil>)\n")
--
--	// Map with custom formatter type on pointer receiver only keys and vals.
--	k2 := pstringer("one")
--	v2 := pstringer("1")
--	m2 := map[pstringer]pstringer{k2: v2}
--	k2Len := fmt.Sprintf("%d", len(k2))
--	v2Len := fmt.Sprintf("%d", len(v2))
--	m2Len := fmt.Sprintf("%d", len(m2))
--	nm2 := (*map[pstringer]pstringer)(nil)
--	pm2 := &m2
--	m2Addr := fmt.Sprintf("%p", pm2)
--	pm2Addr := fmt.Sprintf("%p", &pm2)
--	m2t := "map[spew_test.pstringer]spew_test.pstringer"
--	m2t1 := "spew_test.pstringer"
--	m2t2 := "spew_test.pstringer"
--	m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
--		"stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
--	addDumpTest(m2, "("+m2t+") "+m2s+"\n")
--	addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
--	addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
--	addDumpTest(nm2, "(*"+m2t+")(<nil>)\n")
--
--	// Map with interface keys and values.
--	k3 := "one"
--	k3Len := fmt.Sprintf("%d", len(k3))
--	m3 := map[interface{}]interface{}{k3: 1}
--	m3Len := fmt.Sprintf("%d", len(m3))
--	nm3 := (*map[interface{}]interface{})(nil)
--	pm3 := &m3
--	m3Addr := fmt.Sprintf("%p", pm3)
--	pm3Addr := fmt.Sprintf("%p", &pm3)
--	m3t := "map[interface {}]interface {}"
--	m3t1 := "string"
--	m3t2 := "int"
--	m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
--		"\"one\": (" + m3t2 + ") 1\n}"
--	addDumpTest(m3, "("+m3t+") "+m3s+"\n")
--	addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
--	addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
--	addDumpTest(nm3, "(*"+m3t+")(<nil>)\n")
--
--	// Map with nil interface value.
--	k4 := "nil"
--	k4Len := fmt.Sprintf("%d", len(k4))
--	m4 := map[string]interface{}{k4: nil}
--	m4Len := fmt.Sprintf("%d", len(m4))
--	nm4 := (*map[string]interface{})(nil)
--	pm4 := &m4
--	m4Addr := fmt.Sprintf("%p", pm4)
--	pm4Addr := fmt.Sprintf("%p", &pm4)
--	m4t := "map[string]interface {}"
--	m4t1 := "string"
--	m4t2 := "interface {}"
--	m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
--		" \"nil\": (" + m4t2 + ") <nil>\n}"
--	addDumpTest(m4, "("+m4t+") "+m4s+"\n")
--	addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
--	addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
--	addDumpTest(nm4, "(*"+m4t+")(<nil>)\n")
--}
--
--func addStructDumpTests() {
--	// Struct with primitives.
--	type s1 struct {
--		a int8
--		b uint8
--	}
--	v := s1{127, 255}
--	nv := (*s1)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.s1"
--	vt2 := "int8"
--	vt3 := "uint8"
--	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Struct that contains another struct.
--	type s2 struct {
--		s1 s1
--		b  bool
--	}
--	v2 := s2{s1{127, 255}, true}
--	nv2 := (*s2)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.s2"
--	v2t2 := "spew_test.s1"
--	v2t3 := "int8"
--	v2t4 := "uint8"
--	v2t5 := "bool"
--	v2s := "{\n s1: (" + v2t2 + ") {\n  a: (" + v2t3 + ") 127,\n  b: (" +
--		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--
--	// Struct that contains custom type with Stringer pointer interface via both
--	// exported and unexported fields.
--	type s3 struct {
--		s pstringer
--		S pstringer
--	}
--	v3 := s3{"test", "test2"}
--	nv3 := (*s3)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "spew_test.s3"
--	v3t2 := "spew_test.pstringer"
--	v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
--		") (len=5) stringer test2\n}"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--
--	// Struct that contains embedded struct and field to same struct.
--	e := embed{"embedstr"}
--	eLen := fmt.Sprintf("%d", len("embedstr"))
--	v4 := embedwrap{embed: &e, e: &e}
--	nv4 := (*embedwrap)(nil)
--	pv4 := &v4
--	eAddr := fmt.Sprintf("%p", &e)
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "spew_test.embedwrap"
--	v4t2 := "spew_test.embed"
--	v4t3 := "string"
--	v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n  a: (" + v4t3 +
--		") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
--		")(" + eAddr + ")({\n  a: (" + v4t3 + ") (len=" + eLen + ")" +
--		" \"embedstr\"\n })\n}"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
--	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
--	addDumpTest(nv4, "(*"+v4t+")(<nil>)\n")
--}
--
--func addUintptrDumpTests() {
--	// Null pointer.
--	v := uintptr(0)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "uintptr"
--	vs := "<nil>"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--
--	// Address of real variable.
--	i := 1
--	v2 := uintptr(unsafe.Pointer(&i))
--	nv2 := (*uintptr)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uintptr"
--	v2s := fmt.Sprintf("%p", &i)
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--}
--
--func addUnsafePointerDumpTests() {
--	// Null pointer.
--	v := unsafe.Pointer(uintptr(0))
--	nv := (*unsafe.Pointer)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "unsafe.Pointer"
--	vs := "<nil>"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Address of real variable.
--	i := 1
--	v2 := unsafe.Pointer(&i)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "unsafe.Pointer"
--	v2s := fmt.Sprintf("%p", &i)
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--}
--
--func addChanDumpTests() {
--	// Nil channel.
--	var v chan int
--	pv := &v
--	nv := (*chan int)(nil)
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "chan int"
--	vs := "<nil>"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Real channel.
--	v2 := make(chan int)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "chan int"
--	v2s := fmt.Sprintf("%p", v2)
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--}
--
--func addFuncDumpTests() {
--	// Function with no params and no returns.
--	v := addIntDumpTests
--	nv := (*func())(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "func()"
--	vs := fmt.Sprintf("%p", v)
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--
--	// Function with param and no returns.
--	v2 := TestDump
--	nv2 := (*func(*testing.T))(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "func(*testing.T)"
--	v2s := fmt.Sprintf("%p", v2)
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
--	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")
--
--	// Function with multiple params and multiple returns.
--	var v3 = func(i int, s string) (b bool, err error) {
--		return true, nil
--	}
--	nv3 := (*func(int, string) (bool, error))(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "func(int, string) (bool, error)"
--	v3s := fmt.Sprintf("%p", v3)
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
--	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
--}
--
--func addCircularDumpTests() {
--	// Struct that is circular through self referencing.
--	type circular struct {
--		c *circular
--	}
--	v := circular{nil}
--	v.c = &v
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.circular"
--	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n  c: (*" + vt + ")(" +
--		vAddr + ")(<already shown>)\n })\n}"
--	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")(<already shown>)\n}"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
--
--	// Structs that are circular through cross referencing.
--	v2 := xref1{nil}
--	ts2 := xref2{&v2}
--	v2.ps2 = &ts2
--	pv2 := &v2
--	ts2Addr := fmt.Sprintf("%p", &ts2)
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.xref1"
--	v2t2 := "spew_test.xref2"
--	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
--		")(" + v2Addr + ")({\n   ps2: (*" + v2t2 + ")(" + ts2Addr +
--		")(<already shown>)\n  })\n })\n}"
--	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n  ps1: (*" + v2t +
--		")(" + v2Addr + ")(<already shown>)\n })\n}"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
--	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
--
--	// Structs that are indirectly circular.
--	v3 := indirCir1{nil}
--	tic2 := indirCir2{nil}
--	tic3 := indirCir3{&v3}
--	tic2.ps3 = &tic3
--	v3.ps2 = &tic2
--	pv3 := &v3
--	tic2Addr := fmt.Sprintf("%p", &tic2)
--	tic3Addr := fmt.Sprintf("%p", &tic3)
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "spew_test.indirCir1"
--	v3t2 := "spew_test.indirCir2"
--	v3t3 := "spew_test.indirCir3"
--	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
--		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
--		")({\n    ps2: (*" + v3t2 + ")(" + tic2Addr +
--		")(<already shown>)\n   })\n  })\n })\n}"
--	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n  ps3: (*" + v3t3 +
--		")(" + tic3Addr + ")({\n   ps1: (*" + v3t + ")(" + v3Addr +
--		")(<already shown>)\n  })\n })\n}"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
--	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
--}
--
--func addPanicDumpTests() {
--	// Type that panics in its Stringer interface.
--	v := panicer(127)
--	nv := (*panicer)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.panicer"
--	vs := "(PANIC=test panic)127"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--}
--
--func addErrorDumpTests() {
--	// Type that has a custom Error interface.
--	v := customError(127)
--	nv := (*customError)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.customError"
--	vs := "error: 127"
--	addDumpTest(v, "("+vt+") "+vs+"\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
--	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
--}
--
--// TestDump executes all of the tests described by dumpTests.
--func TestDump(t *testing.T) {
--	// Setup tests.
--	addIntDumpTests()
--	addUintDumpTests()
--	addBoolDumpTests()
--	addFloatDumpTests()
--	addComplexDumpTests()
--	addArrayDumpTests()
--	addSliceDumpTests()
--	addStringDumpTests()
--	addInterfaceDumpTests()
--	addMapDumpTests()
--	addStructDumpTests()
--	addUintptrDumpTests()
--	addUnsafePointerDumpTests()
--	addChanDumpTests()
--	addFuncDumpTests()
--	addCircularDumpTests()
--	addPanicDumpTests()
--	addErrorDumpTests()
--	addCgoDumpTests()
--
--	t.Logf("Running %d tests", len(dumpTests))
--	for i, test := range dumpTests {
--		buf := new(bytes.Buffer)
--		spew.Fdump(buf, test.in)
--		s := buf.String()
--		if testFailed(s, test.wants) {
--			t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
--			continue
--		}
--	}
--}
--
--func TestDumpSortedKeys(t *testing.T) {
--	cfg := spew.ConfigState{SortKeys: true}
--	s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
--	expected := `(map[int]string) (len=3) {
--(int) 1: (string) (len=1) "1",
--(int) 2: (string) (len=1) "2",
--(int) 3: (string) (len=1) "3"
--}
--`
--	if s != expected {
--		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpcgo_test.go
-deleted file mode 100644
-index 9b8a358..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpcgo_test.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--// Copyright (c) 2013 Dave Collins <dave at davec.name>
--//
--// Permission to use, copy, modify, and distribute this software for any
--// purpose with or without fee is hereby granted, provided that the above
--// copyright notice and this permission notice appear in all copies.
--//
--// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
--// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
--// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
--// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
--// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
--// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
--// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--
--// NOTE: Due to the following build constraints, this file will only be compiled
--// when both cgo is supported and "-tags testcgo" is added to the go test
--// command line.  This means the cgo tests are only added (and hence run) when
--// specifically requested.  This configuration is used because spew itself
--// does not require cgo to run even though it does handle certain cgo types
--// specially.  Rather than forcing all clients to require cgo and an external
--// C compiler just to run the tests, this scheme makes them optional.
--// +build cgo,testcgo
--
--package spew_test
--
--import (
--	"fmt"
--	"github.com/davecgh/go-spew/spew/testdata"
--)
--
--func addCgoDumpTests() {
--	// C char pointer.
--	v := testdata.GetCgoCharPointer()
--	nv := testdata.GetCgoNullCharPointer()
--	pv := &v
--	vcAddr := fmt.Sprintf("%p", v)
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "*testdata._Ctype_char"
--	vs := "116"
--	addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
--	addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
--	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
--	addDumpTest(nv, "("+vt+")(<nil>)\n")
--
--	// C char array.
--	v2, v2l, v2c := testdata.GetCgoCharArray()
--	v2Len := fmt.Sprintf("%d", v2l)
--	v2Cap := fmt.Sprintf("%d", v2c)
--	v2t := "[6]testdata._Ctype_char"
--	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
--		"{\n 00000000  74 65 73 74 32 00                               " +
--		"  |test2.|\n}"
--	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
--
--	// C unsigned char array.
--	v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
--	v3Len := fmt.Sprintf("%d", v3l)
--	v3Cap := fmt.Sprintf("%d", v3c)
--	v3t := "[6]testdata._Ctype_unsignedchar"
--	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
--		"{\n 00000000  74 65 73 74 33 00                               " +
--		"  |test3.|\n}"
--	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
--
--	// C signed char array.
--	v4, v4l, v4c := testdata.GetCgoSignedCharArray()
--	v4Len := fmt.Sprintf("%d", v4l)
--	v4Cap := fmt.Sprintf("%d", v4c)
--	v4t := "[6]testdata._Ctype_schar"
--	v4t2 := "testdata._Ctype_schar"
--	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
--		"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
--		") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
--		") 0\n}"
--	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
--
--	// C uint8_t array.
--	v5, v5l, v5c := testdata.GetCgoUint8tArray()
--	v5Len := fmt.Sprintf("%d", v5l)
--	v5Cap := fmt.Sprintf("%d", v5c)
--	v5t := "[6]testdata._Ctype_uint8_t"
--	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
--		"{\n 00000000  74 65 73 74 35 00                               " +
--		"  |test5.|\n}"
--	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
--
--	// C typedefed unsigned char array.
--	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
--	v6Len := fmt.Sprintf("%d", v6l)
--	v6Cap := fmt.Sprintf("%d", v6c)
--	v6t := "[6]testdata._Ctype_custom_uchar_t"
--	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
--		"{\n 00000000  74 65 73 74 36 00                               " +
--		"  |test6.|\n}"
--	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
-deleted file mode 100644
-index 52a0971..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--// Copyright (c) 2013 Dave Collins <dave at davec.name>
--//
--// Permission to use, copy, modify, and distribute this software for any
--// purpose with or without fee is hereby granted, provided that the above
--// copyright notice and this permission notice appear in all copies.
--//
--// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
--// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
--// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
--// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
--// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
--// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
--// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--
--// NOTE: Due to the following build constraints, this file will only be compiled
--// when either cgo is not supported or "-tags testcgo" is not added to the go
--// test command line.  This file intentionally does not setup any cgo tests in
--// this scenario.
--// +build !cgo !testcgo
--
--package spew_test
--
--func addCgoDumpTests() {
--	// Don't add any tests for cgo since this file is only compiled when
--	// there should not be any cgo tests.
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/example_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/example_test.go
-deleted file mode 100644
-index a7acd14..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/example_test.go
-+++ /dev/null
-@@ -1,230 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew_test
--
--import (
--	"fmt"
--	"github.com/davecgh/go-spew/spew"
--)
--
--type Flag int
--
--const (
--	flagOne Flag = iota
--	flagTwo
--)
--
--var flagStrings = map[Flag]string{
--	flagOne: "flagOne",
--	flagTwo: "flagTwo",
--}
--
--func (f Flag) String() string {
--	if s, ok := flagStrings[f]; ok {
--		return s
--	}
--	return fmt.Sprintf("Unknown flag (%d)", int(f))
--}
--
--type Bar struct {
--	flag Flag
--	data uintptr
--}
--
--type Foo struct {
--	unexportedField Bar
--	ExportedField   map[interface{}]interface{}
--}
--
--// This example demonstrates how to use Dump to dump variables to stdout.
--func ExampleDump() {
--	// The following package level declarations are assumed for this example:
--	/*
--		type Flag int
--
--		const (
--			flagOne Flag = iota
--			flagTwo
--		)
--
--		var flagStrings = map[Flag]string{
--			flagOne: "flagOne",
--			flagTwo: "flagTwo",
--		}
--
--		func (f Flag) String() string {
--			if s, ok := flagStrings[f]; ok {
--				return s
--			}
--			return fmt.Sprintf("Unknown flag (%d)", int(f))
--		}
--
--		type Bar struct {
--			flag Flag
--			data uintptr
--		}
--
--		type Foo struct {
--			unexportedField Bar
--			ExportedField   map[interface{}]interface{}
--		}
--	*/
--
--	// Setup some sample data structures for the example.
--	bar := Bar{Flag(flagTwo), uintptr(0)}
--	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
--	f := Flag(5)
--	b := []byte{
--		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
--		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
--		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
--		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
--		0x31, 0x32,
--	}
--
--	// Dump!
--	spew.Dump(s1, f, b)
--
--	// Output:
--	// (spew_test.Foo) {
--	//  unexportedField: (spew_test.Bar) {
--	//   flag: (spew_test.Flag) flagTwo,
--	//   data: (uintptr) <nil>
--	//  },
--	//  ExportedField: (map[interface {}]interface {}) (len=1) {
--	//   (string) (len=3) "one": (bool) true
--	//  }
--	// }
--	// (spew_test.Flag) Unknown flag (5)
--	// ([]uint8) (len=34 cap=34) {
--	//  00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
--	//  00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
--	//  00000020  31 32                                             |12|
--	// }
--	//
--}
--
--// This example demonstrates how to use Printf to display a variable with a
--// format string and inline formatting.
--func ExamplePrintf() {
--	// Create a double pointer to a uint8.
--	ui8 := uint8(5)
--	pui8 := &ui8
--	ppui8 := &pui8
--
--	// Create a circular data type.
--	type circular struct {
--		ui8 uint8
--		c   *circular
--	}
--	c := circular{ui8: 1}
--	c.c = &c
--
--	// Print!
--	spew.Printf("ppui8: %v\n", ppui8)
--	spew.Printf("circular: %v\n", c)
--
--	// Output:
--	// ppui8: <**>5
--	// circular: {1 <*>{1 <*><shown>}}
--}
--
--// This example demonstrates how to use a ConfigState.
--func ExampleConfigState() {
--	// Modify the indent level of the ConfigState only.  The global
--	// configuration is not modified.
--	scs := spew.ConfigState{Indent: "\t"}
--
--	// Output using the ConfigState instance.
--	v := map[string]int{"one": 1}
--	scs.Printf("v: %v\n", v)
--	scs.Dump(v)
--
--	// Output:
--	// v: map[one:1]
--	// (map[string]int) (len=1) {
--	// 	(string) (len=3) "one": (int) 1
--	// }
--}
--
--// This example demonstrates how to use ConfigState.Dump to dump variables to
--// stdout
--func ExampleConfigState_Dump() {
--	// See the top-level Dump example for details on the types used in this
--	// example.
--
--	// Create two ConfigState instances with different indentation.
--	scs := spew.ConfigState{Indent: "\t"}
--	scs2 := spew.ConfigState{Indent: " "}
--
--	// Setup some sample data structures for the example.
--	bar := Bar{Flag(flagTwo), uintptr(0)}
--	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
--
--	// Dump using the ConfigState instances.
--	scs.Dump(s1)
--	scs2.Dump(s1)
--
--	// Output:
--	// (spew_test.Foo) {
--	// 	unexportedField: (spew_test.Bar) {
--	// 		flag: (spew_test.Flag) flagTwo,
--	// 		data: (uintptr) <nil>
--	// 	},
--	// 	ExportedField: (map[interface {}]interface {}) (len=1) {
--	//		(string) (len=3) "one": (bool) true
--	// 	}
--	// }
--	// (spew_test.Foo) {
--	//  unexportedField: (spew_test.Bar) {
--	//   flag: (spew_test.Flag) flagTwo,
--	//   data: (uintptr) <nil>
--	//  },
--	//  ExportedField: (map[interface {}]interface {}) (len=1) {
--	//   (string) (len=3) "one": (bool) true
--	//  }
--	// }
--	//
--}
--
--// This example demonstrates how to use ConfigState.Printf to display a variable
--// with a format string and inline formatting.
--func ExampleConfigState_Printf() {
--	// See the top-level Dump example for details on the types used in this
--	// example.
--
--	// Create two ConfigState instances and modify the method handling of the
--	// first ConfigState only.
--	scs := spew.NewDefaultConfig()
--	scs2 := spew.NewDefaultConfig()
--	scs.DisableMethods = true
--
--	// Alternatively
--	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
--	// scs2 := spew.ConfigState{Indent: " "}
--
--	// This is of type Flag which implements a Stringer and has raw value 1.
--	f := flagTwo
--
--	// Dump using the ConfigState instances.
--	scs.Printf("f: %v\n", f)
--	scs2.Printf("f: %v\n", f)
--
--	// Output:
--	// f: 1
--	// f: flagTwo
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
-deleted file mode 100644
-index b6b1fb0..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
-+++ /dev/null
-@@ -1,413 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew
--
--import (
--	"bytes"
--	"fmt"
--	"reflect"
--	"strconv"
--	"strings"
--)
--
--// supportedFlags is a list of all the character flags supported by fmt package.
--const supportedFlags = "0-+# "
--
--// formatState implements the fmt.Formatter interface and contains information
--// about the state of a formatting operation.  The NewFormatter function can
--// be used to get a new Formatter which can be used directly as arguments
--// in standard fmt package printing calls.
--type formatState struct {
--	value          interface{}
--	fs             fmt.State
--	depth          int
--	pointers       map[uintptr]int
--	ignoreNextType bool
--	cs             *ConfigState
--}
--
--// buildDefaultFormat recreates the original format string without precision
--// and width information to pass in to fmt.Sprintf in the case of an
--// unrecognized type.  Unless new types are added to the language, this
--// function won't ever be called.
--func (f *formatState) buildDefaultFormat() (format string) {
--	buf := bytes.NewBuffer(percentBytes)
--
--	for _, flag := range supportedFlags {
--		if f.fs.Flag(int(flag)) {
--			buf.WriteRune(flag)
--		}
--	}
--
--	buf.WriteRune('v')
--
--	format = buf.String()
--	return format
--}
--
--// constructOrigFormat recreates the original format string including precision
--// and width information to pass along to the standard fmt package.  This allows
--// automatic deferral of all format strings this package doesn't support.
--func (f *formatState) constructOrigFormat(verb rune) (format string) {
--	buf := bytes.NewBuffer(percentBytes)
--
--	for _, flag := range supportedFlags {
--		if f.fs.Flag(int(flag)) {
--			buf.WriteRune(flag)
--		}
--	}
--
--	if width, ok := f.fs.Width(); ok {
--		buf.WriteString(strconv.Itoa(width))
--	}
--
--	if precision, ok := f.fs.Precision(); ok {
--		buf.Write(precisionBytes)
--		buf.WriteString(strconv.Itoa(precision))
--	}
--
--	buf.WriteRune(verb)
--
--	format = buf.String()
--	return format
--}
--
--// unpackValue returns values inside of non-nil interfaces when possible and
--// ensures that types for values which have been unpacked from an interface
--// are displayed when the show types flag is also set.
--// This is useful for data types like structs, arrays, slices, and maps which
--// can contain varying types packed inside an interface.
--func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
--	if v.Kind() == reflect.Interface {
--		f.ignoreNextType = false
--		if !v.IsNil() {
--			v = v.Elem()
--		}
--	}
--	return v
--}
--
--// formatPtr handles formatting of pointers by indirecting them as necessary.
--func (f *formatState) formatPtr(v reflect.Value) {
--	// Display nil if top level pointer is nil.
--	showTypes := f.fs.Flag('#')
--	if v.IsNil() && (!showTypes || f.ignoreNextType) {
--		f.fs.Write(nilAngleBytes)
--		return
--	}
--
--	// Remove pointers at or below the current depth from map used to detect
--	// circular refs.
--	for k, depth := range f.pointers {
--		if depth >= f.depth {
--			delete(f.pointers, k)
--		}
--	}
--
--	// Keep list of all dereferenced pointers to possibly show later.
--	pointerChain := make([]uintptr, 0)
--
--	// Figure out how many levels of indirection there are by dereferencing
--	// pointers and unpacking interfaces down the chain while detecting circular
--	// references.
--	nilFound := false
--	cycleFound := false
--	indirects := 0
--	ve := v
--	for ve.Kind() == reflect.Ptr {
--		if ve.IsNil() {
--			nilFound = true
--			break
--		}
--		indirects++
--		addr := ve.Pointer()
--		pointerChain = append(pointerChain, addr)
--		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
--			cycleFound = true
--			indirects--
--			break
--		}
--		f.pointers[addr] = f.depth
--
--		ve = ve.Elem()
--		if ve.Kind() == reflect.Interface {
--			if ve.IsNil() {
--				nilFound = true
--				break
--			}
--			ve = ve.Elem()
--		}
--	}
--
--	// Display type or indirection level depending on flags.
--	if showTypes && !f.ignoreNextType {
--		f.fs.Write(openParenBytes)
--		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
--		f.fs.Write([]byte(ve.Type().String()))
--		f.fs.Write(closeParenBytes)
--	} else {
--		if nilFound || cycleFound {
--			indirects += strings.Count(ve.Type().String(), "*")
--		}
--		f.fs.Write(openAngleBytes)
--		f.fs.Write([]byte(strings.Repeat("*", indirects)))
--		f.fs.Write(closeAngleBytes)
--	}
--
--	// Display pointer information depending on flags.
--	if f.fs.Flag('+') && (len(pointerChain) > 0) {
--		f.fs.Write(openParenBytes)
--		for i, addr := range pointerChain {
--			if i > 0 {
--				f.fs.Write(pointerChainBytes)
--			}
--			printHexPtr(f.fs, addr)
--		}
--		f.fs.Write(closeParenBytes)
--	}
--
--	// Display dereferenced value.
--	switch {
--	case nilFound == true:
--		f.fs.Write(nilAngleBytes)
--
--	case cycleFound == true:
--		f.fs.Write(circularShortBytes)
--
--	default:
--		f.ignoreNextType = true
--		f.format(ve)
--	}
--}
--
--// format is the main workhorse for providing the Formatter interface.  It
--// uses the passed reflect value to figure out what kind of object we are
--// dealing with and formats it appropriately.  It is a recursive function,
--// however circular data structures are detected and handled properly.
--func (f *formatState) format(v reflect.Value) {
--	// Handle invalid reflect values immediately.
--	kind := v.Kind()
--	if kind == reflect.Invalid {
--		f.fs.Write(invalidAngleBytes)
--		return
--	}
--
--	// Handle pointers specially.
--	if kind == reflect.Ptr {
--		f.formatPtr(v)
--		return
--	}
--
--	// Print type information unless already handled elsewhere.
--	if !f.ignoreNextType && f.fs.Flag('#') {
--		f.fs.Write(openParenBytes)
--		f.fs.Write([]byte(v.Type().String()))
--		f.fs.Write(closeParenBytes)
--	}
--	f.ignoreNextType = false
--
--	// Call Stringer/error interfaces if they exist and the handle methods
--	// flag is enabled.
--	if !f.cs.DisableMethods {
--		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
--			if handled := handleMethods(f.cs, f.fs, v); handled {
--				return
--			}
--		}
--	}
--
--	switch kind {
--	case reflect.Invalid:
--		// Do nothing.  We should never get here since invalid has already
--		// been handled above.
--
--	case reflect.Bool:
--		printBool(f.fs, v.Bool())
--
--	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
--		printInt(f.fs, v.Int(), 10)
--
--	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
--		printUint(f.fs, v.Uint(), 10)
--
--	case reflect.Float32:
--		printFloat(f.fs, v.Float(), 32)
--
--	case reflect.Float64:
--		printFloat(f.fs, v.Float(), 64)
--
--	case reflect.Complex64:
--		printComplex(f.fs, v.Complex(), 32)
--
--	case reflect.Complex128:
--		printComplex(f.fs, v.Complex(), 64)
--
--	case reflect.Slice:
--		if v.IsNil() {
--			f.fs.Write(nilAngleBytes)
--			break
--		}
--		fallthrough
--
--	case reflect.Array:
--		f.fs.Write(openBracketBytes)
--		f.depth++
--		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
--			f.fs.Write(maxShortBytes)
--		} else {
--			numEntries := v.Len()
--			for i := 0; i < numEntries; i++ {
--				if i > 0 {
--					f.fs.Write(spaceBytes)
--				}
--				f.ignoreNextType = true
--				f.format(f.unpackValue(v.Index(i)))
--			}
--		}
--		f.depth--
--		f.fs.Write(closeBracketBytes)
--
--	case reflect.String:
--		f.fs.Write([]byte(v.String()))
--
--	case reflect.Interface:
--		// The only time we should get here is for nil interfaces due to
--		// unpackValue calls.
--		if v.IsNil() {
--			f.fs.Write(nilAngleBytes)
--		}
--
--	case reflect.Ptr:
--		// Do nothing.  We should never get here since pointers have already
--		// been handled above.
--
--	case reflect.Map:
--		f.fs.Write(openMapBytes)
--		f.depth++
--		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
--			f.fs.Write(maxShortBytes)
--		} else {
--			keys := v.MapKeys()
--			if f.cs.SortKeys {
--				sortValues(keys)
--			}
--			for i, key := range keys {
--				if i > 0 {
--					f.fs.Write(spaceBytes)
--				}
--				f.ignoreNextType = true
--				f.format(f.unpackValue(key))
--				f.fs.Write(colonBytes)
--				f.ignoreNextType = true
--				f.format(f.unpackValue(v.MapIndex(key)))
--			}
--		}
--		f.depth--
--		f.fs.Write(closeMapBytes)
--
--	case reflect.Struct:
--		numFields := v.NumField()
--		f.fs.Write(openBraceBytes)
--		f.depth++
--		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
--			f.fs.Write(maxShortBytes)
--		} else {
--			vt := v.Type()
--			for i := 0; i < numFields; i++ {
--				if i > 0 {
--					f.fs.Write(spaceBytes)
--				}
--				vtf := vt.Field(i)
--				if f.fs.Flag('+') || f.fs.Flag('#') {
--					f.fs.Write([]byte(vtf.Name))
--					f.fs.Write(colonBytes)
--				}
--				f.format(f.unpackValue(v.Field(i)))
--			}
--		}
--		f.depth--
--		f.fs.Write(closeBraceBytes)
--
--	case reflect.Uintptr:
--		printHexPtr(f.fs, uintptr(v.Uint()))
--
--	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
--		printHexPtr(f.fs, v.Pointer())
--
--	// There were not any other types at the time this code was written, but
--	// fall back to letting the default fmt package handle it if any get added.
--	default:
--		format := f.buildDefaultFormat()
--		if v.CanInterface() {
--			fmt.Fprintf(f.fs, format, v.Interface())
--		} else {
--			fmt.Fprintf(f.fs, format, v.String())
--		}
--	}
--}
--
--// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
--// details.
--func (f *formatState) Format(fs fmt.State, verb rune) {
--	f.fs = fs
--
--	// Use standard formatting for verbs that are not v.
--	if verb != 'v' {
--		format := f.constructOrigFormat(verb)
--		fmt.Fprintf(fs, format, f.value)
--		return
--	}
--
--	if f.value == nil {
--		if fs.Flag('#') {
--			fs.Write(interfaceBytes)
--		}
--		fs.Write(nilAngleBytes)
--		return
--	}
--
--	f.format(reflect.ValueOf(f.value))
--}
--
--// newFormatter is a helper function to consolidate the logic from the various
--// public methods which take varying config states.
--func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
--	fs := &formatState{value: v, cs: cs}
--	fs.pointers = make(map[uintptr]int)
--	return fs
--}
--
--/*
--NewFormatter returns a custom formatter that satisfies the fmt.Formatter
--interface.  As a result, it integrates cleanly with standard fmt package
--printing functions.  The formatter is useful for inline printing of smaller data
--types similar to the standard %v format specifier.
--
--The custom formatter only responds to the %v (most compact), %+v (adds pointer
--addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
--combinations.  Any other verbs such as %x and %q will be sent to the
--standard fmt package for formatting.  In addition, the custom formatter ignores
--the width and precision arguments (however they will still work on the format
--specifiers not handled by the custom formatter).
--
--Typically this function shouldn't be called directly.  It is much easier to make
--use of the custom formatter by calling one of the convenience functions such as
--Printf, Println, or Fprintf.
--*/
--func NewFormatter(v interface{}) fmt.Formatter {
--	return newFormatter(&Config, v)
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
-deleted file mode 100644
-index 80c5ef9..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
-+++ /dev/null
-@@ -1,1483 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--/*
--Test Summary:
--NOTE: For each test, a nil pointer, a single pointer and double pointer to the
--base test element are also tested to ensure proper indirection across all types.
--
--- Max int8, int16, int32, int64, int
--- Max uint8, uint16, uint32, uint64, uint
--- Boolean true and false
--- Standard complex64 and complex128
--- Array containing standard ints
--- Array containing type with custom formatter on pointer receiver only
--- Array containing interfaces
--- Slice containing standard float32 values
--- Slice containing type with custom formatter on pointer receiver only
--- Slice containing interfaces
--- Nil slice
--- Standard string
--- Nil interface
--- Sub-interface
--- Map with string keys and int vals
--- Map with custom formatter type on pointer receiver only keys and vals
--- Map with interface keys and values
--- Map with nil interface value
--- Struct with primitives
--- Struct that contains another struct
--- Struct that contains custom type with Stringer pointer interface via both
--  exported and unexported fields
--- Struct that contains embedded struct and field to same struct
--- Uintptr to 0 (null pointer)
--- Uintptr address of real variable
--- Unsafe.Pointer to 0 (null pointer)
--- Unsafe.Pointer to address of real variable
--- Nil channel
--- Standard int channel
--- Function with no params and no returns
--- Function with param and no returns
--- Function with multiple params and multiple returns
--- Struct that is circular through self referencing
--- Structs that are circular through cross referencing
--- Structs that are indirectly circular
--- Type that panics in its Stringer interface
--- Type that has a custom Error interface
--- %x passthrough with uint
--- %#x passthrough with uint
--- %f passthrough with precision
--- %f passthrough with width and precision
--- %d passthrough with width
--- %q passthrough with string
--*/
--
--package spew_test
--
--import (
--	"bytes"
--	"fmt"
--	"github.com/davecgh/go-spew/spew"
--	"testing"
--	"unsafe"
--)
--
--// formatterTest is used to describe a test to be performed against NewFormatter.
--type formatterTest struct {
--	format string
--	in     interface{}
--	wants  []string
--}
--
--// formatterTests houses all of the tests to be performed against NewFormatter.
--var formatterTests = make([]formatterTest, 0)
--
--// addFormatterTest is a helper method to append the passed input and desired
--// result to formatterTests.
--func addFormatterTest(format string, in interface{}, wants ...string) {
--	test := formatterTest{format, in, wants}
--	formatterTests = append(formatterTests, test)
--}
--
--func addIntFormatterTests() {
--	// Max int8.
--	v := int8(127)
--	nv := (*int8)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "int8"
--	vs := "127"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Max int16.
--	v2 := int16(32767)
--	nv2 := (*int16)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "int16"
--	v2s := "32767"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Max int32.
--	v3 := int32(2147483647)
--	nv3 := (*int32)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "int32"
--	v3s := "2147483647"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--
--	// Max int64.
--	v4 := int64(9223372036854775807)
--	nv4 := (*int64)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "int64"
--	v4s := "9223372036854775807"
--	addFormatterTest("%v", v4, v4s)
--	addFormatterTest("%v", pv4, "<*>"+v4s)
--	addFormatterTest("%v", &pv4, "<**>"+v4s)
--	addFormatterTest("%v", nv4, "<nil>")
--	addFormatterTest("%+v", v4, v4s)
--	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
--	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
--	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
--	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
--	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
--
--	// Max int.
--	v5 := int(2147483647)
--	nv5 := (*int)(nil)
--	pv5 := &v5
--	v5Addr := fmt.Sprintf("%p", pv5)
--	pv5Addr := fmt.Sprintf("%p", &pv5)
--	v5t := "int"
--	v5s := "2147483647"
--	addFormatterTest("%v", v5, v5s)
--	addFormatterTest("%v", pv5, "<*>"+v5s)
--	addFormatterTest("%v", &pv5, "<**>"+v5s)
--	addFormatterTest("%v", nv5, "<nil>")
--	addFormatterTest("%+v", v5, v5s)
--	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
--	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
--	addFormatterTest("%+v", nv5, "<nil>")
--	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
--	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
--	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
--	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
--	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
--	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
--	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
--	addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
--}
--
--func addUintFormatterTests() {
--	// Max uint8.
--	v := uint8(255)
--	nv := (*uint8)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "uint8"
--	vs := "255"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Max uint16.
--	v2 := uint16(65535)
--	nv2 := (*uint16)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uint16"
--	v2s := "65535"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Max uint32.
--	v3 := uint32(4294967295)
--	nv3 := (*uint32)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "uint32"
--	v3s := "4294967295"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--
--	// Max uint64.
--	v4 := uint64(18446744073709551615)
--	nv4 := (*uint64)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "uint64"
--	v4s := "18446744073709551615"
--	addFormatterTest("%v", v4, v4s)
--	addFormatterTest("%v", pv4, "<*>"+v4s)
--	addFormatterTest("%v", &pv4, "<**>"+v4s)
--	addFormatterTest("%v", nv4, "<nil>")
--	addFormatterTest("%+v", v4, v4s)
--	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
--	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
--	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
--	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
--	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
--
--	// Max uint.
--	v5 := uint(4294967295)
--	nv5 := (*uint)(nil)
--	pv5 := &v5
--	v5Addr := fmt.Sprintf("%p", pv5)
--	pv5Addr := fmt.Sprintf("%p", &pv5)
--	v5t := "uint"
--	v5s := "4294967295"
--	addFormatterTest("%v", v5, v5s)
--	addFormatterTest("%v", pv5, "<*>"+v5s)
--	addFormatterTest("%v", &pv5, "<**>"+v5s)
--	addFormatterTest("%v", nv5, "<nil>")
--	addFormatterTest("%+v", v5, v5s)
--	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
--	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
--	addFormatterTest("%+v", nv5, "<nil>")
--	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
--	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
--	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
--	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
--	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
--	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
--	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
--	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
--}
--
--func addBoolFormatterTests() {
--	// Boolean true.
--	v := bool(true)
--	nv := (*bool)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "bool"
--	vs := "true"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Boolean false.
--	v2 := bool(false)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "bool"
--	v2s := "false"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--}
--
--func addFloatFormatterTests() {
--	// Standard float32.
--	v := float32(3.1415)
--	nv := (*float32)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "float32"
--	vs := "3.1415"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Standard float64.
--	v2 := float64(3.1415926)
--	nv2 := (*float64)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "float64"
--	v2s := "3.1415926"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--}
--
--func addComplexFormatterTests() {
--	// Standard complex64.
--	v := complex(float32(6), -2)
--	nv := (*complex64)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "complex64"
--	vs := "(6-2i)"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Standard complex128.
--	v2 := complex(float64(-6), 2)
--	nv2 := (*complex128)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "complex128"
--	v2s := "(-6+2i)"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--}
--
--func addArrayFormatterTests() {
--	// Array containing standard ints.
--	v := [3]int{1, 2, 3}
--	nv := (*[3]int)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "[3]int"
--	vs := "[1 2 3]"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Array containing type with custom formatter on pointer receiver only.
--	v2 := [3]pstringer{"1", "2", "3"}
--	nv2 := (*[3]pstringer)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "[3]spew_test.pstringer"
--	v2s := "[stringer 1 stringer 2 stringer 3]"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Array containing interfaces.
--	v3 := [3]interface{}{"one", int(2), uint(3)}
--	nv3 := (*[3]interface{})(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "[3]interface {}"
--	v3t2 := "string"
--	v3t3 := "int"
--	v3t4 := "uint"
--	v3s := "[one 2 3]"
--	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
--}
--
--func addSliceFormatterTests() {
--	// Slice containing standard float32 values.
--	v := []float32{3.14, 6.28, 12.56}
--	nv := (*[]float32)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "[]float32"
--	vs := "[3.14 6.28 12.56]"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Slice containing type with custom formatter on pointer receiver only.
--	v2 := []pstringer{"1", "2", "3"}
--	nv2 := (*[]pstringer)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "[]spew_test.pstringer"
--	v2s := "[stringer 1 stringer 2 stringer 3]"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Slice containing interfaces.
--	v3 := []interface{}{"one", int(2), uint(3), nil}
--	nv3 := (*[]interface{})(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "[]interface {}"
--	v3t2 := "string"
--	v3t3 := "int"
--	v3t4 := "uint"
--	v3t5 := "interface {}"
--	v3s := "[one 2 3 <nil>]"
--	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
--		")<nil>]"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
--
--	// Nil slice.
--	var v4 []int
--	nv4 := (*[]int)(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "[]int"
--	v4s := "<nil>"
--	addFormatterTest("%v", v4, v4s)
--	addFormatterTest("%v", pv4, "<*>"+v4s)
--	addFormatterTest("%v", &pv4, "<**>"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%+v", v4, v4s)
--	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
--	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
--	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
--	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
--	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
--	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
--}
--
--func addStringFormatterTests() {
--	// Standard string.
--	v := "test"
--	nv := (*string)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "string"
--	vs := "test"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--}
--
--func addInterfaceFormatterTests() {
--	// Nil interface.
--	var v interface{}
--	nv := (*interface{})(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "interface {}"
--	vs := "<nil>"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Sub-interface.
--	v2 := interface{}(uint16(65535))
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uint16"
--	v2s := "65535"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--}
--
--func addMapFormatterTests() {
--	// Map with string keys and int vals.
--	v := map[string]int{"one": 1, "two": 2}
--	nv := (*map[string]int)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "map[string]int"
--	vs := "map[one:1 two:2]"
--	vs2 := "map[two:2 one:1]"
--	addFormatterTest("%v", v, vs, vs2)
--	addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
--	addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs, vs2)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
--		"<**>("+pvAddr+"->"+vAddr+")"+vs2)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
--		"(*"+vt+")("+vAddr+")"+vs2)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
--		"(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Map with custom formatter type on pointer receiver only keys and vals.
--	v2 := map[pstringer]pstringer{"one": "1"}
--	nv2 := (*map[pstringer]pstringer)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "map[spew_test.pstringer]spew_test.pstringer"
--	v2s := "map[stringer one:stringer 1]"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Map with interface keys and values.
--	v3 := map[interface{}]interface{}{"one": 1}
--	nv3 := (*map[interface{}]interface{})(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "map[interface {}]interface {}"
--	v3t1 := "string"
--	v3t2 := "int"
--	v3s := "map[one:1]"
--	v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
--	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
--
--	// Map with nil interface value
--	v4 := map[string]interface{}{"nil": nil}
--	nv4 := (*map[string]interface{})(nil)
--	pv4 := &v4
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "map[string]interface {}"
--	v4t1 := "interface {}"
--	v4s := "map[nil:<nil>]"
--	v4s2 := "map[nil:(" + v4t1 + ")<nil>]"
--	addFormatterTest("%v", v4, v4s)
--	addFormatterTest("%v", pv4, "<*>"+v4s)
--	addFormatterTest("%v", &pv4, "<**>"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%+v", v4, v4s)
--	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
--	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
--	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
--	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
--	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
--	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
--	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
--	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
--	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
--}
--
--func addStructFormatterTests() {
--	// Struct with primitives.
--	type s1 struct {
--		a int8
--		b uint8
--	}
--	v := s1{127, 255}
--	nv := (*s1)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.s1"
--	vt2 := "int8"
--	vt3 := "uint8"
--	vs := "{127 255}"
--	vs2 := "{a:127 b:255}"
--	vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs2)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs3)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs3)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Struct that contains another struct.
--	type s2 struct {
--		s1 s1
--		b  bool
--	}
--	v2 := s2{s1{127, 255}, true}
--	nv2 := (*s2)(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.s2"
--	v2t2 := "spew_test.s1"
--	v2t3 := "int8"
--	v2t4 := "uint8"
--	v2t5 := "bool"
--	v2s := "{{127 255} true}"
--	v2s2 := "{s1:{a:127 b:255} b:true}"
--	v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
--		v2t5 + ")true}"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s2)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Struct that contains custom type with Stringer pointer interface via both
--	// exported and unexported fields.
--	type s3 struct {
--		s pstringer
--		S pstringer
--	}
--	v3 := s3{"test", "test2"}
--	nv3 := (*s3)(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "spew_test.s3"
--	v3t2 := "spew_test.pstringer"
--	v3s := "{stringer test stringer test2}"
--	v3s2 := "{s:stringer test S:stringer test2}"
--	v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s2)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s3)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3)
--	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
--
--	// Struct that contains embedded struct and field to same struct.
--	e := embed{"embedstr"}
--	v4 := embedwrap{embed: &e, e: &e}
--	nv4 := (*embedwrap)(nil)
--	pv4 := &v4
--	eAddr := fmt.Sprintf("%p", &e)
--	v4Addr := fmt.Sprintf("%p", pv4)
--	pv4Addr := fmt.Sprintf("%p", &pv4)
--	v4t := "spew_test.embedwrap"
--	v4t2 := "spew_test.embed"
--	v4t3 := "string"
--	v4s := "{<*>{embedstr} <*>{embedstr}}"
--	v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr +
--		"){a:embedstr}}"
--	v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 +
--		"){a:(" + v4t3 + ")embedstr}}"
--	v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 +
--		")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}"
--	addFormatterTest("%v", v4, v4s)
--	addFormatterTest("%v", pv4, "<*>"+v4s)
--	addFormatterTest("%v", &pv4, "<**>"+v4s)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%+v", v4, v4s2)
--	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2)
--	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2)
--	addFormatterTest("%+v", nv4, "<nil>")
--	addFormatterTest("%#v", v4, "("+v4t+")"+v4s3)
--	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3)
--	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3)
--	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
--	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4)
--	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4)
--	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4)
--	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
--}
--
--func addUintptrFormatterTests() {
--	// Null pointer.
--	v := uintptr(0)
--	nv := (*uintptr)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "uintptr"
--	vs := "<nil>"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Address of real variable.
--	i := 1
--	v2 := uintptr(unsafe.Pointer(&i))
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "uintptr"
--	v2s := fmt.Sprintf("%p", &i)
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--}
--
--func addUnsafePointerFormatterTests() {
--	// Null pointer.
--	v := unsafe.Pointer(uintptr(0))
--	nv := (*unsafe.Pointer)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "unsafe.Pointer"
--	vs := "<nil>"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Address of real variable.
--	i := 1
--	v2 := unsafe.Pointer(&i)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "unsafe.Pointer"
--	v2s := fmt.Sprintf("%p", &i)
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--}
--
--func addChanFormatterTests() {
--	// Nil channel.
--	var v chan int
--	pv := &v
--	nv := (*chan int)(nil)
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "chan int"
--	vs := "<nil>"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Real channel.
--	v2 := make(chan int)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "chan int"
--	v2s := fmt.Sprintf("%p", v2)
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--}
--
--func addFuncFormatterTests() {
--	// Function with no params and no returns.
--	v := addIntFormatterTests
--	nv := (*func())(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "func()"
--	vs := fmt.Sprintf("%p", v)
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--
--	// Function with param and no returns.
--	v2 := TestFormatter
--	nv2 := (*func(*testing.T))(nil)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "func(*testing.T)"
--	v2s := fmt.Sprintf("%p", v2)
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s)
--	addFormatterTest("%v", &pv2, "<**>"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%+v", v2, v2s)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%+v", nv2, "<nil>")
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
--	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
--	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
--
--	// Function with multiple params and multiple returns.
--	var v3 = func(i int, s string) (b bool, err error) {
--		return true, nil
--	}
--	nv3 := (*func(int, string) (bool, error))(nil)
--	pv3 := &v3
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "func(int, string) (bool, error)"
--	v3s := fmt.Sprintf("%p", v3)
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s)
--	addFormatterTest("%v", &pv3, "<**>"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%+v", v3, v3s)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%+v", nv3, "<nil>")
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
--	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
--	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
--}
--
--func addCircularFormatterTests() {
--	// Struct that is circular through self referencing.
--	type circular struct {
--		c *circular
--	}
--	v := circular{nil}
--	v.c = &v
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.circular"
--	vs := "{<*>{<*><shown>}}"
--	vs2 := "{<*><shown>}"
--	vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")<shown>}}"
--	vs4 := "{c:<*>(" + vAddr + ")<shown>}"
--	vs5 := "{c:(*" + vt + "){c:(*" + vt + ")<shown>}}"
--	vs6 := "{c:(*" + vt + ")<shown>}"
--	vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr +
--		")<shown>}}"
--	vs8 := "{c:(*" + vt + ")(" + vAddr + ")<shown>}"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs2)
--	addFormatterTest("%v", &pv, "<**>"+vs2)
--	addFormatterTest("%+v", v, vs3)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4)
--	addFormatterTest("%#v", v, "("+vt+")"+vs5)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs6)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6)
--	addFormatterTest("%#+v", v, "("+vt+")"+vs7)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8)
--
--	// Structs that are circular through cross referencing.
--	v2 := xref1{nil}
--	ts2 := xref2{&v2}
--	v2.ps2 = &ts2
--	pv2 := &v2
--	ts2Addr := fmt.Sprintf("%p", &ts2)
--	v2Addr := fmt.Sprintf("%p", pv2)
--	pv2Addr := fmt.Sprintf("%p", &pv2)
--	v2t := "spew_test.xref1"
--	v2t2 := "spew_test.xref2"
--	v2s := "{<*>{<*>{<*><shown>}}}"
--	v2s2 := "{<*>{<*><shown>}}"
--	v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" +
--		ts2Addr + ")<shown>}}}"
--	v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")<shown>}}"
--	v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 +
--		")<shown>}}}"
--	v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")<shown>}}"
--	v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
--		")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr +
--		")<shown>}}}"
--	v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t +
--		")(" + v2Addr + ")<shown>}}"
--	addFormatterTest("%v", v2, v2s)
--	addFormatterTest("%v", pv2, "<*>"+v2s2)
--	addFormatterTest("%v", &pv2, "<**>"+v2s2)
--	addFormatterTest("%+v", v2, v2s3)
--	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4)
--	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4)
--	addFormatterTest("%#v", v2, "("+v2t+")"+v2s5)
--	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6)
--	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6)
--	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7)
--	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8)
--	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8)
--
--	// Structs that are indirectly circular.
--	v3 := indirCir1{nil}
--	tic2 := indirCir2{nil}
--	tic3 := indirCir3{&v3}
--	tic2.ps3 = &tic3
--	v3.ps2 = &tic2
--	pv3 := &v3
--	tic2Addr := fmt.Sprintf("%p", &tic2)
--	tic3Addr := fmt.Sprintf("%p", &tic3)
--	v3Addr := fmt.Sprintf("%p", pv3)
--	pv3Addr := fmt.Sprintf("%p", &pv3)
--	v3t := "spew_test.indirCir1"
--	v3t2 := "spew_test.indirCir2"
--	v3t3 := "spew_test.indirCir3"
--	v3s := "{<*>{<*>{<*>{<*><shown>}}}}"
--	v3s2 := "{<*>{<*>{<*><shown>}}}"
--	v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
--		v3Addr + "){ps2:<*>(" + tic2Addr + ")<shown>}}}}"
--	v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
--		v3Addr + ")<shown>}}}"
--	v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
--		"){ps2:(*" + v3t2 + ")<shown>}}}}"
--	v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
--		")<shown>}}}"
--	v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
--		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 +
--		")(" + tic2Addr + ")<shown>}}}}"
--	v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
--		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")<shown>}}}"
--	addFormatterTest("%v", v3, v3s)
--	addFormatterTest("%v", pv3, "<*>"+v3s2)
--	addFormatterTest("%v", &pv3, "<**>"+v3s2)
--	addFormatterTest("%+v", v3, v3s3)
--	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4)
--	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4)
--	addFormatterTest("%#v", v3, "("+v3t+")"+v3s5)
--	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6)
--	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6)
--	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7)
--	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8)
--	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8)
--}
--
--func addPanicFormatterTests() {
--	// Type that panics in its Stringer interface.
--	v := panicer(127)
--	nv := (*panicer)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.panicer"
--	vs := "(PANIC=test panic)127"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--}
--
--func addErrorFormatterTests() {
--	// Type that has a custom Error interface.
--	v := customError(127)
--	nv := (*customError)(nil)
--	pv := &v
--	vAddr := fmt.Sprintf("%p", pv)
--	pvAddr := fmt.Sprintf("%p", &pv)
--	vt := "spew_test.customError"
--	vs := "error: 127"
--	addFormatterTest("%v", v, vs)
--	addFormatterTest("%v", pv, "<*>"+vs)
--	addFormatterTest("%v", &pv, "<**>"+vs)
--	addFormatterTest("%v", nv, "<nil>")
--	addFormatterTest("%+v", v, vs)
--	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
--	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%+v", nv, "<nil>")
--	addFormatterTest("%#v", v, "("+vt+")"+vs)
--	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
--	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
--	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
--	addFormatterTest("%#+v", v, "("+vt+")"+vs)
--	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
--	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
--	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
--}
--
--func addPassthroughFormatterTests() {
--	// %x passthrough with uint.
--	v := uint(4294967295)
--	pv := &v
--	vAddr := fmt.Sprintf("%x", pv)
--	pvAddr := fmt.Sprintf("%x", &pv)
--	vs := "ffffffff"
--	addFormatterTest("%x", v, vs)
--	addFormatterTest("%x", pv, vAddr)
--	addFormatterTest("%x", &pv, pvAddr)
--
--	// %#x passthrough with uint.
--	v2 := int(2147483647)
--	pv2 := &v2
--	v2Addr := fmt.Sprintf("%#x", pv2)
--	pv2Addr := fmt.Sprintf("%#x", &pv2)
--	v2s := "0x7fffffff"
--	addFormatterTest("%#x", v2, v2s)
--	addFormatterTest("%#x", pv2, v2Addr)
--	addFormatterTest("%#x", &pv2, pv2Addr)
--
--	// %f passthrough with precision.
--	addFormatterTest("%.2f", 3.1415, "3.14")
--	addFormatterTest("%.3f", 3.1415, "3.142")
--	addFormatterTest("%.4f", 3.1415, "3.1415")
--
--	// %f passthrough with width and precision.
--	addFormatterTest("%5.2f", 3.1415, " 3.14")
--	addFormatterTest("%6.3f", 3.1415, " 3.142")
--	addFormatterTest("%7.4f", 3.1415, " 3.1415")
--
--	// %d passthrough with width.
--	addFormatterTest("%3d", 127, "127")
--	addFormatterTest("%4d", 127, " 127")
--	addFormatterTest("%5d", 127, "  127")
--
--	// %q passthrough with string.
--	addFormatterTest("%q", "test", "\"test\"")
--}
--
--// TestFormatter executes all of the tests described by formatterTests.
--func TestFormatter(t *testing.T) {
--	// Setup tests.
--	addIntFormatterTests()
--	addUintFormatterTests()
--	addBoolFormatterTests()
--	addFloatFormatterTests()
--	addComplexFormatterTests()
--	addArrayFormatterTests()
--	addSliceFormatterTests()
--	addStringFormatterTests()
--	addInterfaceFormatterTests()
--	addMapFormatterTests()
--	addStructFormatterTests()
--	addUintptrFormatterTests()
--	addUnsafePointerFormatterTests()
--	addChanFormatterTests()
--	addFuncFormatterTests()
--	addCircularFormatterTests()
--	addPanicFormatterTests()
--	addErrorFormatterTests()
--	addPassthroughFormatterTests()
--
--	t.Logf("Running %d tests", len(formatterTests))
--	for i, test := range formatterTests {
--		buf := new(bytes.Buffer)
--		spew.Fprintf(buf, test.format, test.in)
--		s := buf.String()
--		if testFailed(s, test.wants) {
--			t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s,
--				stringizeWants(test.wants))
--			continue
--		}
--	}
--}
--
--func TestPrintSortedKeys(t *testing.T) {
--	cfg := spew.ConfigState{SortKeys: true}
--	s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"})
--	expected := "map[1:1 2:2 3:3]"
--	if s != expected {
--		t.Errorf("Sorted keys mismatch:\n  %v %v", s, expected)
--	}
--}
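-
(Editorial note, not part of the patch: TestPrintSortedKeys above exercises spew.ConfigState with SortKeys enabled, which is how the library sidesteps Go's unspecified map iteration order, the same reason several map formatter tests above accept two alternative want strings. A short sketch of that configuration against the upstream package, for illustration only.)

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	m := map[int]string{1: "1", 3: "3", 2: "2"}

	// Without SortKeys the key order follows Go's unspecified map
	// iteration order, so output can vary between runs.
	fmt.Println(spew.Sprint(m))

	// With SortKeys the keys are sorted before printing, giving
	// deterministic output: map[1:1 2:2 3:3]
	cfg := spew.ConfigState{SortKeys: true}
	fmt.Println(cfg.Sprint(m))
}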
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/internal_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/internal_test.go
-deleted file mode 100644
-index 10dc0b1..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/internal_test.go
-+++ /dev/null
-@@ -1,156 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--/*
--This test file is part of the spew package rather than the spew_test
--package because it needs access to internals to properly test certain cases
--which are not possible via the public interface since they should never happen.
--*/
--
--package spew
--
--import (
--	"bytes"
--	"reflect"
--	"testing"
--	"unsafe"
--)
--
--// dummyFmtState implements a fake fmt.State to use for testing invalid
--// reflect.Value handling.  This is necessary because the fmt package catches
--// invalid values before invoking the formatter on them.
--type dummyFmtState struct {
--	bytes.Buffer
--}
--
--func (dfs *dummyFmtState) Flag(f int) bool {
--	if f == int('+') {
--		return true
--	}
--	return false
--}
--
--func (dfs *dummyFmtState) Precision() (int, bool) {
--	return 0, false
--}
--
--func (dfs *dummyFmtState) Width() (int, bool) {
--	return 0, false
--}
--
--// TestInvalidReflectValue ensures the dump and formatter code handles an
--// invalid reflect value properly.  This needs access to internal state since it
--// should never happen in real code and therefore can't be tested via the public
--// API.
--func TestInvalidReflectValue(t *testing.T) {
--	i := 1
--
--	// Dump invalid reflect value.
--	v := new(reflect.Value)
--	buf := new(bytes.Buffer)
--	d := dumpState{w: buf, cs: &Config}
--	d.dump(*v)
--	s := buf.String()
--	want := "<invalid>"
--	if s != want {
--		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
--	}
--	i++
--
--	// Formatter invalid reflect value.
--	buf2 := new(dummyFmtState)
--	f := formatState{value: *v, cs: &Config, fs: buf2}
--	f.format(*v)
--	s = buf2.String()
--	want = "<invalid>"
--	if s != want {
--		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
--	}
--}
--
--// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
--// the maximum kind value which does not exist.  This is needed to test the
--// fallback code which punts to the standard fmt library for new types that
--// might get added to the language.
--func changeKind(v *reflect.Value, readOnly bool) {
--	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
--	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
--	if readOnly {
--		*rvf |= flagRO
--	} else {
--		*rvf &= ^uintptr(flagRO)
--	}
--}
--
--// TestAddedReflectValue tests functionality of the dump and formatter code which
--// falls back to the standard fmt library for new types that might get added to
--// the language.
--func TestAddedReflectValue(t *testing.T) {
--	i := 1
--
--	// Dump using a reflect.Value that is exported.
--	v := reflect.ValueOf(int8(5))
--	changeKind(&v, false)
--	buf := new(bytes.Buffer)
--	d := dumpState{w: buf, cs: &Config}
--	d.dump(v)
--	s := buf.String()
--	want := "(int8) 5"
--	if s != want {
--		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
--	}
--	i++
--
--	// Dump using a reflect.Value that is not exported.
--	changeKind(&v, true)
--	buf.Reset()
--	d.dump(v)
--	s = buf.String()
--	want = "(int8) <int8 Value>"
--	if s != want {
--		t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
--	}
--	i++
--
--	// Formatter using a reflect.Value that is exported.
--	changeKind(&v, false)
--	buf2 := new(dummyFmtState)
--	f := formatState{value: v, cs: &Config, fs: buf2}
--	f.format(v)
--	s = buf2.String()
--	want = "5"
--	if s != want {
--		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
--	}
--	i++
--
--	// Formatter using a reflect.Value that is not exported.
--	changeKind(&v, true)
--	buf2.Reset()
--	f = formatState{value: v, cs: &Config, fs: buf2}
--	f.format(v)
--	s = buf2.String()
--	want = "<int8 Value>"
--	if s != want {
--		t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
--	}
--}
--
--// SortValues makes the internal sortValues function available to the test
--// package.
--func SortValues(values []reflect.Value) {
--	sortValues(values)
--}
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go
-deleted file mode 100644
-index d8233f5..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go
-+++ /dev/null
-@@ -1,148 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew
--
--import (
--	"fmt"
--	"io"
--)
--
--// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the formatted string as a value that satisfies error.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
--func Errorf(format string, a ...interface{}) (err error) {
--	return fmt.Errorf(format, convertArgs(a)...)
--}
--
--// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
--func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
--	return fmt.Fprint(w, convertArgs(a)...)
--}
--
--// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
--func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
--	return fmt.Fprintf(w, format, convertArgs(a)...)
--}
--
--// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
--func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
--	return fmt.Fprintln(w, convertArgs(a)...)
--}
--
--// Print is a wrapper for fmt.Print that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
--func Print(a ...interface{}) (n int, err error) {
--	return fmt.Print(convertArgs(a)...)
--}
--
--// Printf is a wrapper for fmt.Printf that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
--func Printf(format string, a ...interface{}) (n int, err error) {
--	return fmt.Printf(format, convertArgs(a)...)
--}
--
--// Println is a wrapper for fmt.Println that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the number of bytes written and any write error encountered.  See
--// NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
--func Println(a ...interface{}) (n int, err error) {
--	return fmt.Println(convertArgs(a)...)
--}
--
--// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
--func Sprint(a ...interface{}) string {
--	return fmt.Sprint(convertArgs(a)...)
--}
--
--// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
--// passed with a default Formatter interface returned by NewFormatter.  It
--// returns the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
--func Sprintf(format string, a ...interface{}) string {
--	return fmt.Sprintf(format, convertArgs(a)...)
--}
--
--// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
--// were passed with a default Formatter interface returned by NewFormatter.  It
--// returns the resulting string.  See NewFormatter for formatting details.
--//
--// This function is shorthand for the following syntax:
--//
--//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
--func Sprintln(a ...interface{}) string {
--	return fmt.Sprintln(convertArgs(a)...)
--}
--
--// convertArgs accepts a slice of arguments and returns a slice of the same
--// length with each argument converted to a default spew Formatter interface.
--func convertArgs(args []interface{}) (formatters []interface{}) {
--	formatters = make([]interface{}, len(args))
--	for index, arg := range args {
--		formatters[index] = NewFormatter(arg)
--	}
--	return formatters
--}
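Each wrapper above follows the same pattern: convertArgs wraps every argument in a spew Formatter and the call is delegated to the matching fmt function. A minimal usage sketch, assuming the upstream github.com/davecgh/go-spew/spew import path rather than the vendored copy (the endpoint type is purely illustrative):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// endpoint is an illustrative type, not part of spew or kubernetes.
type endpoint struct {
	Name string
	Port int
}

func main() {
	e := &endpoint{Name: "etcd", Port: 2379}

	// Sprintf wraps e in a spew Formatter before calling fmt.Sprintf, so the
	// pointer is dereferenced and the struct fields are printed.
	fmt.Println(spew.Sprintf("%+v", e))

	// Sdump produces the annotated multi-line form with types and lengths.
	fmt.Print(spew.Sdump(e))
}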
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew_test.go
-deleted file mode 100644
-index 3831ed2..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew_test.go
-+++ /dev/null
-@@ -1,308 +0,0 @@
--/*
-- * Copyright (c) 2013 Dave Collins <dave at davec.name>
-- *
-- * Permission to use, copy, modify, and distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--package spew_test
--
--import (
--	"bytes"
--	"fmt"
--	"github.com/davecgh/go-spew/spew"
--	"io/ioutil"
--	"os"
--	"testing"
--)
--
--// spewFunc is used to identify which public function of the spew package or
--// ConfigState a test applies to.
--type spewFunc int
--
--const (
--	fCSFdump spewFunc = iota
--	fCSFprint
--	fCSFprintf
--	fCSFprintln
--	fCSPrint
--	fCSPrintln
--	fCSSdump
--	fCSSprint
--	fCSSprintf
--	fCSSprintln
--	fCSErrorf
--	fCSNewFormatter
--	fErrorf
--	fFprint
--	fFprintln
--	fPrint
--	fPrintln
--	fSdump
--	fSprint
--	fSprintf
--	fSprintln
--)
--
--// Map of spewFunc values to names for pretty printing.
--var spewFuncStrings = map[spewFunc]string{
--	fCSFdump:        "ConfigState.Fdump",
--	fCSFprint:       "ConfigState.Fprint",
--	fCSFprintf:      "ConfigState.Fprintf",
--	fCSFprintln:     "ConfigState.Fprintln",
--	fCSSdump:        "ConfigState.Sdump",
--	fCSPrint:        "ConfigState.Print",
--	fCSPrintln:      "ConfigState.Println",
--	fCSSprint:       "ConfigState.Sprint",
--	fCSSprintf:      "ConfigState.Sprintf",
--	fCSSprintln:     "ConfigState.Sprintln",
--	fCSErrorf:       "ConfigState.Errorf",
--	fCSNewFormatter: "ConfigState.NewFormatter",
--	fErrorf:         "spew.Errorf",
--	fFprint:         "spew.Fprint",
--	fFprintln:       "spew.Fprintln",
--	fPrint:          "spew.Print",
--	fPrintln:        "spew.Println",
--	fSdump:          "spew.Sdump",
--	fSprint:         "spew.Sprint",
--	fSprintf:        "spew.Sprintf",
--	fSprintln:       "spew.Sprintln",
--}
--
--func (f spewFunc) String() string {
--	if s, ok := spewFuncStrings[f]; ok {
--		return s
--	}
--	return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
--}
--
--// spewTest is used to describe a test to be performed against the public
--// functions of the spew package or ConfigState.
--type spewTest struct {
--	cs     *spew.ConfigState
--	f      spewFunc
--	format string
--	in     interface{}
--	want   string
--}
--
--// spewTests houses the tests to be performed against the public functions of
--// the spew package and ConfigState.
--//
--// These tests are only intended to ensure the public functions are exercised
--// and are intentionally not exhaustive of types.  The exhaustive type
--// tests are handled in the dump and format tests.
--var spewTests []spewTest
--
--// redirStdout is a helper function to return the standard output from f as a
--// byte slice.
--func redirStdout(f func()) ([]byte, error) {
--	tempFile, err := ioutil.TempFile("", "ss-test")
--	if err != nil {
--		return nil, err
--	}
--	fileName := tempFile.Name()
--	defer os.Remove(fileName) // Ignore error
--
--	origStdout := os.Stdout
--	os.Stdout = tempFile
--	f()
--	os.Stdout = origStdout
--	tempFile.Close()
--
--	return ioutil.ReadFile(fileName)
--}
--
--func initSpewTests() {
--	// Config states with various settings.
--	scsDefault := spew.NewDefaultConfig()
--	scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
--	scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
--	scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
--	scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
--
--	// Variables for tests on types which implement Stringer interface with and
--	// without a pointer receiver.
--	ts := stringer("test")
--	tps := pstringer("test")
--
--	// depthTester is used to test max depth handling for structs, array, slices
--	// and maps.
--	type depthTester struct {
--		ic    indirCir1
--		arr   [1]string
--		slice []string
--		m     map[string]int
--	}
--	dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
--		map[string]int{"one": 1}}
--
--	// Variable for tests on types which implement error interface.
--	te := customError(10)
--
--	spewTests = []spewTest{
--		{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
--		{scsDefault, fCSFprint, "", int16(32767), "32767"},
--		{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
--		{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
--		{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
--		{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
--		{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
--		{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
--		{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
--		{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
--		{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
--		{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
--		{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
--		{scsDefault, fFprint, "", float32(3.14), "3.14"},
--		{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
--		{scsDefault, fPrint, "", true, "true"},
--		{scsDefault, fPrintln, "", false, "false\n"},
--		{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
--		{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
--		{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
--		{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
--		{scsNoMethods, fCSFprint, "", ts, "test"},
--		{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
--		{scsNoMethods, fCSFprint, "", tps, "test"},
--		{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
--		{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
--		{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
--		{scsNoPmethods, fCSFprint, "", tps, "test"},
--		{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
--		{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
--		{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
--			" ic: (spew_test.indirCir1) {\n  <max depth reached>\n },\n" +
--			" arr: ([1]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
--			" slice: ([]string) (len=1 cap=1) {\n  <max depth reached>\n },\n" +
--			" m: (map[string]int) (len=1) {\n  <max depth reached>\n }\n}\n"},
--		{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
--		{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
--			"(len=4) (stringer test) \"test\"\n"},
--		{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
--		{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
--			"(error: 10) 10\n"},
--	}
--}
--
--// TestSpew executes all of the tests described by spewTests.
--func TestSpew(t *testing.T) {
--	initSpewTests()
--
--	t.Logf("Running %d tests", len(spewTests))
--	for i, test := range spewTests {
--		buf := new(bytes.Buffer)
--		switch test.f {
--		case fCSFdump:
--			test.cs.Fdump(buf, test.in)
--
--		case fCSFprint:
--			test.cs.Fprint(buf, test.in)
--
--		case fCSFprintf:
--			test.cs.Fprintf(buf, test.format, test.in)
--
--		case fCSFprintln:
--			test.cs.Fprintln(buf, test.in)
--
--		case fCSPrint:
--			b, err := redirStdout(func() { test.cs.Print(test.in) })
--			if err != nil {
--				t.Errorf("%v #%d %v", test.f, i, err)
--				continue
--			}
--			buf.Write(b)
--
--		case fCSPrintln:
--			b, err := redirStdout(func() { test.cs.Println(test.in) })
--			if err != nil {
--				t.Errorf("%v #%d %v", test.f, i, err)
--				continue
--			}
--			buf.Write(b)
--
--		case fCSSdump:
--			str := test.cs.Sdump(test.in)
--			buf.WriteString(str)
--
--		case fCSSprint:
--			str := test.cs.Sprint(test.in)
--			buf.WriteString(str)
--
--		case fCSSprintf:
--			str := test.cs.Sprintf(test.format, test.in)
--			buf.WriteString(str)
--
--		case fCSSprintln:
--			str := test.cs.Sprintln(test.in)
--			buf.WriteString(str)
--
--		case fCSErrorf:
--			err := test.cs.Errorf(test.format, test.in)
--			buf.WriteString(err.Error())
--
--		case fCSNewFormatter:
--			fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
--
--		case fErrorf:
--			err := spew.Errorf(test.format, test.in)
--			buf.WriteString(err.Error())
--
--		case fFprint:
--			spew.Fprint(buf, test.in)
--
--		case fFprintln:
--			spew.Fprintln(buf, test.in)
--
--		case fPrint:
--			b, err := redirStdout(func() { spew.Print(test.in) })
--			if err != nil {
--				t.Errorf("%v #%d %v", test.f, i, err)
--				continue
--			}
--			buf.Write(b)
--
--		case fPrintln:
--			b, err := redirStdout(func() { spew.Println(test.in) })
--			if err != nil {
--				t.Errorf("%v #%d %v", test.f, i, err)
--				continue
--			}
--			buf.Write(b)
--
--		case fSdump:
--			str := spew.Sdump(test.in)
--			buf.WriteString(str)
--
--		case fSprint:
--			str := spew.Sprint(test.in)
--			buf.WriteString(str)
--
--		case fSprintf:
--			str := spew.Sprintf(test.format, test.in)
--			buf.WriteString(str)
--
--		case fSprintln:
--			str := spew.Sprintln(test.in)
--			buf.WriteString(str)
--
--		default:
--			t.Errorf("%v #%d unrecognized function", test.f, i)
--			continue
--		}
--		s := buf.String()
--		if test.want != s {
--			t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
--			continue
--		}
--	}
--}
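The table above drives the same calls through ConfigState values with different settings (Indent, MaxDepth, SortKeys, DisableMethods and so on). A minimal sketch of that configuration style, again assuming the upstream import path; the nested map is only an illustration:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// ConfigState carries the options the tests above toggle per case.
	cs := spew.ConfigState{Indent: " ", MaxDepth: 1, SortKeys: true}

	// With MaxDepth: 1 the inner map is elided, matching the
	// "<max depth reached>" expectations in the scsMaxDepth cases.
	nested := map[string]map[string]int{"outer": {"inner": 1}}

	cs.Dump(nested)
}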
-diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
-deleted file mode 100644
-index 5c87dd4..0000000
---- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
-+++ /dev/null
-@@ -1,82 +0,0 @@
--// Copyright (c) 2013 Dave Collins <dave at davec.name>
--//
--// Permission to use, copy, modify, and distribute this software for any
--// purpose with or without fee is hereby granted, provided that the above
--// copyright notice and this permission notice appear in all copies.
--//
--// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
--// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
--// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
--// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
--// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
--// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
--// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--
--// NOTE: Due to the following build constraints, this file will only be compiled
--// when both cgo is supported and "-tags testcgo" is added to the go test
--// command line.  This code should really only be in the dumpcgo_test.go file,
--// but unfortunately Go will not allow cgo in test files, so this is a
--// workaround to allow cgo types to be tested.  This configuration is used
--// because spew itself does not require cgo to run even though it does handle
--// certain cgo types specially.  Rather than forcing all clients to require cgo
--// and an external C compiler just to run the tests, this scheme makes them
--// optional.
--// +build cgo,testcgo
--
--package testdata
--
--/*
--#include <stdint.h>
--typedef unsigned char custom_uchar_t;
--
--char            *ncp = 0;
--char            *cp = "test";
--char             ca[6] = {'t', 'e', 's', 't', '2', '\0'};
--unsigned char    uca[6] = {'t', 'e', 's', 't', '3', '\0'};
--signed char      sca[6] = {'t', 'e', 's', 't', '4', '\0'};
--uint8_t          ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
--custom_uchar_t   tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
--*/
--import "C"
--
--// GetCgoNullCharPointer returns a null char pointer via cgo.  This is only
--// used for tests.
--func GetCgoNullCharPointer() interface{} {
--	return C.ncp
--}
--
--// GetCgoCharPointer returns a char pointer via cgo.  This is only used for
--// tests.
--func GetCgoCharPointer() interface{} {
--	return C.cp
--}
--
--// GetCgoCharArray returns a char array via cgo and the array's len and cap.
--// This is only used for tests.
--func GetCgoCharArray() (interface{}, int, int) {
--	return C.ca, len(C.ca), cap(C.ca)
--}
--
--// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
--// array's len and cap.  This is only used for tests.
--func GetCgoUnsignedCharArray() (interface{}, int, int) {
--	return C.uca, len(C.uca), cap(C.uca)
--}
--
--// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
--// and cap.  This is only used for tests.
--func GetCgoSignedCharArray() (interface{}, int, int) {
--	return C.sca, len(C.sca), cap(C.sca)
--}
--
--// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
--// cap.  This is only used for tests.
--func GetCgoUint8tArray() (interface{}, int, int) {
--	return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
--}
--
--// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
--// cgo and the array's len and cap.  This is only used for tests.
--func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
--	return C.tuca, len(C.tuca), cap(C.tuca)
--}
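As the build-constraint comment explains, these cgo fixtures are only compiled when cgo is available and the testcgo tag is passed explicitly, e.g. by running go test -tags testcgo against the spew package; without the tag the helpers above simply do not exist.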
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS
-deleted file mode 100644
-index 2aac726..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/MAINTAINERS
-+++ /dev/null
-@@ -1,2 +0,0 @@
--Cristian Staretu <cristian.staretu at gmail.com> (@unclejack)
--Tibor Vass <teabee89 at gmail.com> (@tiborvass)
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
-deleted file mode 100644
-index 7307d96..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/README.md
-+++ /dev/null
-@@ -1 +0,0 @@
--This code provides helper functions for dealing with archive files.
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
-deleted file mode 100644
-index ec45d85..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
-+++ /dev/null
-@@ -1,802 +0,0 @@
--package archive
--
--import (
--	"bufio"
--	"bytes"
--	"compress/bzip2"
--	"compress/gzip"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"os/exec"
--	"path"
--	"path/filepath"
--	"strings"
--	"syscall"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--
--	log "github.com/Sirupsen/logrus"
--	"github.com/docker/docker/pkg/fileutils"
--	"github.com/docker/docker/pkg/pools"
--	"github.com/docker/docker/pkg/promise"
--	"github.com/docker/docker/pkg/system"
--)
--
--type (
--	Archive       io.ReadCloser
--	ArchiveReader io.Reader
--	Compression   int
--	TarOptions    struct {
--		Includes    []string
--		Excludes    []string
--		Compression Compression
--		NoLchown    bool
--		Name        string
--	}
--
--	// Archiver allows the reuse of most utility functions of this package
--	// with a pluggable Untar function.
--	Archiver struct {
--		Untar func(io.Reader, string, *TarOptions) error
--	}
--
--	// breakoutError is used to differentiate errors related to breaking out
--	// breakoutError is used to differentiate errors related to breaking out.
--	// in order for the test to pass.
--	breakoutError error
--)
--
--var (
--	ErrNotImplemented = errors.New("Function not implemented")
--	defaultArchiver   = &Archiver{Untar}
--)
--
--const (
--	Uncompressed Compression = iota
--	Bzip2
--	Gzip
--	Xz
--)
--
--func IsArchive(header []byte) bool {
--	compression := DetectCompression(header)
--	if compression != Uncompressed {
--		return true
--	}
--	r := tar.NewReader(bytes.NewBuffer(header))
--	_, err := r.Next()
--	return err == nil
--}
--
--func DetectCompression(source []byte) Compression {
--	for compression, m := range map[Compression][]byte{
--		Bzip2: {0x42, 0x5A, 0x68},
--		Gzip:  {0x1F, 0x8B, 0x08},
--		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
--	} {
--		if len(source) < len(m) {
--			log.Debugf("Len too short")
--			continue
--		}
--		if bytes.Compare(m, source[:len(m)]) == 0 {
--			return compression
--		}
--	}
--	return Uncompressed
--}
--
--func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
--	args := []string{"xz", "-d", "-c", "-q"}
--
--	return CmdStream(exec.Command(args[0], args[1:]...), archive)
--}
--
--func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
--	p := pools.BufioReader32KPool
--	buf := p.Get(archive)
--	bs, err := buf.Peek(10)
--	if err != nil {
--		return nil, err
--	}
--	log.Debugf("[tar autodetect] n: %v", bs)
--
--	compression := DetectCompression(bs)
--	switch compression {
--	case Uncompressed:
--		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
--		return readBufWrapper, nil
--	case Gzip:
--		gzReader, err := gzip.NewReader(buf)
--		if err != nil {
--			return nil, err
--		}
--		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
--		return readBufWrapper, nil
--	case Bzip2:
--		bz2Reader := bzip2.NewReader(buf)
--		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
--		return readBufWrapper, nil
--	case Xz:
--		xzReader, err := xzDecompress(buf)
--		if err != nil {
--			return nil, err
--		}
--		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
--		return readBufWrapper, nil
--	default:
--		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
--	}
--}
--
--func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
--	p := pools.BufioWriter32KPool
--	buf := p.Get(dest)
--	switch compression {
--	case Uncompressed:
--		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
--		return writeBufWrapper, nil
--	case Gzip:
--		gzWriter := gzip.NewWriter(dest)
--		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
--		return writeBufWrapper, nil
--	case Bzip2, Xz:
--		// compress/bzip2 does not support writing, and there is no xz support at all
--		// However, this is not a problem as docker only currently generates gzipped tars
--		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
--	default:
--		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
--	}
--}
--
--func (compression *Compression) Extension() string {
--	switch *compression {
--	case Uncompressed:
--		return "tar"
--	case Bzip2:
--		return "tar.bz2"
--	case Gzip:
--		return "tar.gz"
--	case Xz:
--		return "tar.xz"
--	}
--	return ""
--}
--
--type tarAppender struct {
--	TarWriter *tar.Writer
--	Buffer    *bufio.Writer
--
--	// for hardlink mapping
--	SeenFiles map[uint64]string
--}
--
--func (ta *tarAppender) addTarFile(path, name string) error {
--	fi, err := os.Lstat(path)
--	if err != nil {
--		return err
--	}
--
--	link := ""
--	if fi.Mode()&os.ModeSymlink != 0 {
--		if link, err = os.Readlink(path); err != nil {
--			return err
--		}
--	}
--
--	hdr, err := tar.FileInfoHeader(fi, link)
--	if err != nil {
--		return err
--	}
--
--	if fi.IsDir() && !strings.HasSuffix(name, "/") {
--		name = name + "/"
--	}
--
--	hdr.Name = name
--
--	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
--	if err != nil {
--		return err
--	}
--
--	// if it's a regular file and has more than 1 link,
--	// it's hardlinked, so set the type flag accordingly
--	if fi.Mode().IsRegular() && nlink > 1 {
--		// a link should have a name that it links to
--		// and that linked name should be first in the tar archive
--		if oldpath, ok := ta.SeenFiles[inode]; ok {
--			hdr.Typeflag = tar.TypeLink
--			hdr.Linkname = oldpath
--			hdr.Size = 0 // This Must be here for the writer math to add up!
--		} else {
--			ta.SeenFiles[inode] = name
--		}
--	}
--
--	capability, _ := system.Lgetxattr(path, "security.capability")
--	if capability != nil {
--		hdr.Xattrs = make(map[string]string)
--		hdr.Xattrs["security.capability"] = string(capability)
--	}
--
--	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
--		return err
--	}
--
--	if hdr.Typeflag == tar.TypeReg {
--		file, err := os.Open(path)
--		if err != nil {
--			return err
--		}
--
--		ta.Buffer.Reset(ta.TarWriter)
--		defer ta.Buffer.Reset(nil)
--		_, err = io.Copy(ta.Buffer, file)
--		file.Close()
--		if err != nil {
--			return err
--		}
--		err = ta.Buffer.Flush()
--		if err != nil {
--			return err
--		}
--	}
--
--	return nil
--}
--
--func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
--	// hdr.Mode is in linux format, which we can use for syscalls,
--	// but for os.Foo() calls we need the mode converted to os.FileMode,
--	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
--	hdrInfo := hdr.FileInfo()
--
--	switch hdr.Typeflag {
--	case tar.TypeDir:
--		// Create directory unless it exists as a directory already.
--		// In that case we just want to merge the two
--		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
--			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
--				return err
--			}
--		}
--
--	case tar.TypeReg, tar.TypeRegA:
--		// Source is regular file
--		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
--		if err != nil {
--			return err
--		}
--		if _, err := io.Copy(file, reader); err != nil {
--			file.Close()
--			return err
--		}
--		file.Close()
--
--	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
--		mode := uint32(hdr.Mode & 07777)
--		switch hdr.Typeflag {
--		case tar.TypeBlock:
--			mode |= syscall.S_IFBLK
--		case tar.TypeChar:
--			mode |= syscall.S_IFCHR
--		case tar.TypeFifo:
--			mode |= syscall.S_IFIFO
--		}
--
--		if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
--			return err
--		}
--
--	case tar.TypeLink:
--		targetPath := filepath.Join(extractDir, hdr.Linkname)
--		// check for hardlink breakout
--		if !strings.HasPrefix(targetPath, extractDir) {
--			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
--		}
--		if err := os.Link(targetPath, path); err != nil {
--			return err
--		}
--
--	case tar.TypeSymlink:
--		// 	path 				-> hdr.Linkname = targetPath
--		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
--		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
--
--		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
--		// that symlink would first have to be created, which would be caught earlier, at this very check:
--		if !strings.HasPrefix(targetPath, extractDir) {
--			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
--		}
--		if err := os.Symlink(hdr.Linkname, path); err != nil {
--			return err
--		}
--
--	case tar.TypeXGlobalHeader:
--		log.Debugf("PAX Global Extended Headers found and ignored")
--		return nil
--
--	default:
--		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
--	}
--
--	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
--		return err
--	}
--
--	for key, value := range hdr.Xattrs {
--		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
--			return err
--		}
--	}
--
--	// There is no LChmod, so ignore mode for symlink. Also, this
--	// must happen after chown, as that can modify the file mode
--	if hdr.Typeflag != tar.TypeSymlink {
--		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
--			return err
--		}
--	}
--
--	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
--	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so symlinks use LUtimesNano below
--	if hdr.Typeflag != tar.TypeSymlink {
--		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
--			return err
--		}
--	} else {
--		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
--			return err
--		}
--	}
--	return nil
--}
--
--// Tar creates an archive from the directory at `path`, and returns it as a
--// stream of bytes.
--func Tar(path string, compression Compression) (io.ReadCloser, error) {
--	return TarWithOptions(path, &TarOptions{Compression: compression})
--}
--
--func escapeName(name string) string {
--	escaped := make([]byte, 0)
--	for i, c := range []byte(name) {
--		if i == 0 && c == '/' {
--			continue
--		}
--		// all printable chars except "-" which is 0x2d
--		if (0x20 <= c && c <= 0x7E) && c != 0x2d {
--			escaped = append(escaped, c)
--		} else {
--			escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
--		}
--	}
--	return string(escaped)
--}
--
--// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
--// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
--func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
--	pipeReader, pipeWriter := io.Pipe()
--
--	compressWriter, err := CompressStream(pipeWriter, options.Compression)
--	if err != nil {
--		return nil, err
--	}
--
--	go func() {
--		ta := &tarAppender{
--			TarWriter: tar.NewWriter(compressWriter),
--			Buffer:    pools.BufioWriter32KPool.Get(nil),
--			SeenFiles: make(map[uint64]string),
--		}
--		// this buffer is needed for the duration of this piped stream
--		defer pools.BufioWriter32KPool.Put(ta.Buffer)
--
--		// In general we log errors here but ignore them because
--		// during e.g. a diff operation the container can continue
--		// mutating the filesystem and we can see transient errors
--		// from this
--
--		if options.Includes == nil {
--			options.Includes = []string{"."}
--		}
--
--		var renamedRelFilePath string // For when tar.Options.Name is set
--		for _, include := range options.Includes {
--			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
--				if err != nil {
--					log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
--					return nil
--				}
--
--				relFilePath, err := filepath.Rel(srcPath, filePath)
--				if err != nil || (relFilePath == "." && f.IsDir()) {
--					// Error getting relative path OR we are looking
--					// at the root path. Skip in both situations.
--					return nil
--				}
--
--				skip, err := fileutils.Matches(relFilePath, options.Excludes)
--				if err != nil {
--					log.Debugf("Error matching %s: %s", relFilePath, err)
--					return err
--				}
--
--				if skip {
--					if f.IsDir() {
--						return filepath.SkipDir
--					}
--					return nil
--				}
--
--				// Rename the base resource
--				if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
--					renamedRelFilePath = relFilePath
--				}
--				// Set this to make sure the items underneath also get renamed
--				if options.Name != "" {
--					relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
--				}
--
--				if err := ta.addTarFile(filePath, relFilePath); err != nil {
--					log.Debugf("Can't add file %s to tar: %s", srcPath, err)
--				}
--				return nil
--			})
--		}
--
--		// Make sure to check the error on Close.
--		if err := ta.TarWriter.Close(); err != nil {
--			log.Debugf("Can't close tar writer: %s", err)
--		}
--		if err := compressWriter.Close(); err != nil {
--			log.Debugf("Can't close compress writer: %s", err)
--		}
--		if err := pipeWriter.Close(); err != nil {
--			log.Debugf("Can't close pipe writer: %s", err)
--		}
--	}()
--
--	return pipeReader, nil
--}
--
--func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
--	tr := tar.NewReader(decompressedArchive)
--	trBuf := pools.BufioReader32KPool.Get(nil)
--	defer pools.BufioReader32KPool.Put(trBuf)
--
--	var dirs []*tar.Header
--
--	// Iterate through the files in the archive.
--loop:
--	for {
--		hdr, err := tr.Next()
--		if err == io.EOF {
--			// end of tar archive
--			break
--		}
--		if err != nil {
--			return err
--		}
--
--		// Normalize name, for safety and for a simple is-root check
--		// This keeps "../" as-is, but normalizes "/../" to "/"
--		hdr.Name = filepath.Clean(hdr.Name)
--
--		for _, exclude := range options.Excludes {
--			if strings.HasPrefix(hdr.Name, exclude) {
--				continue loop
--			}
--		}
--
--		if !strings.HasSuffix(hdr.Name, "/") {
--			// Not the root directory, ensure that the parent directory exists
--			parent := filepath.Dir(hdr.Name)
--			parentPath := filepath.Join(dest, parent)
--			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
--				err = os.MkdirAll(parentPath, 0777)
--				if err != nil {
--					return err
--				}
--			}
--		}
--
--		path := filepath.Join(dest, hdr.Name)
--		rel, err := filepath.Rel(dest, path)
--		if err != nil {
--			return err
--		}
--		if strings.HasPrefix(rel, "..") {
--			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
--		}
--
--		// If path exists we almost always just want to remove and replace it.
--		// The only exception is when it is a directory *and* the file from
--		// the layer is also a directory. Then we want to merge them (i.e.
--		// just apply the metadata from the layer).
--		if fi, err := os.Lstat(path); err == nil {
--			if fi.IsDir() && hdr.Name == "." {
--				continue
--			}
--			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
--				if err := os.RemoveAll(path); err != nil {
--					return err
--				}
--			}
--		}
--		trBuf.Reset(tr)
--		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
--			return err
--		}
--
--		// Directory mtimes must be handled at the end to avoid further
--		// file creation in them to modify the directory mtime
--		if hdr.Typeflag == tar.TypeDir {
--			dirs = append(dirs, hdr)
--		}
--	}
--
--	for _, hdr := range dirs {
--		path := filepath.Join(dest, hdr.Name)
--		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
--		if err := syscall.UtimesNano(path, ts); err != nil {
--			return err
--		}
--	}
--	return nil
--}
--
--// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
--// and unpacks it into the directory at `dest`.
--// The archive may be compressed with one of the following algorithms:
--//  identity (uncompressed), gzip, bzip2, xz.
--// FIXME: specify behavior when target path exists vs. doesn't exist.
--func Untar(archive io.Reader, dest string, options *TarOptions) error {
--	if archive == nil {
--		return fmt.Errorf("Empty archive")
--	}
--	dest = filepath.Clean(dest)
--	if options == nil {
--		options = &TarOptions{}
--	}
--	if options.Excludes == nil {
--		options.Excludes = []string{}
--	}
--	decompressedArchive, err := DecompressStream(archive)
--	if err != nil {
--		return err
--	}
--	defer decompressedArchive.Close()
--	return Unpack(decompressedArchive, dest, options)
--}
--
--func (archiver *Archiver) TarUntar(src, dst string) error {
--	log.Debugf("TarUntar(%s %s)", src, dst)
--	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
--	if err != nil {
--		return err
--	}
--	defer archive.Close()
--	return archiver.Untar(archive, dst, nil)
--}
--
--// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
--// If either Tar or Untar fails, TarUntar aborts and returns the error.
--func TarUntar(src, dst string) error {
--	return defaultArchiver.TarUntar(src, dst)
--}
--
--func (archiver *Archiver) UntarPath(src, dst string) error {
--	archive, err := os.Open(src)
--	if err != nil {
--		return err
--	}
--	defer archive.Close()
--	if err := archiver.Untar(archive, dst, nil); err != nil {
--		return err
--	}
--	return nil
--}
--
--// UntarPath is a convenience function which looks for an archive
--// at filesystem path `src`, and unpacks it at `dst`.
--func UntarPath(src, dst string) error {
--	return defaultArchiver.UntarPath(src, dst)
--}
--
--func (archiver *Archiver) CopyWithTar(src, dst string) error {
--	srcSt, err := os.Stat(src)
--	if err != nil {
--		return err
--	}
--	if !srcSt.IsDir() {
--		return archiver.CopyFileWithTar(src, dst)
--	}
--	// Create dst, copy src's content into it
--	log.Debugf("Creating dest directory: %s", dst)
--	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
--		return err
--	}
--	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
--	return archiver.TarUntar(src, dst)
--}
--
--// CopyWithTar creates a tar archive of filesystem path `src`, and
--// unpacks it at filesystem path `dst`.
--// The archive is streamed directly with fixed buffering and no
--// intermediary disk IO.
--func CopyWithTar(src, dst string) error {
--	return defaultArchiver.CopyWithTar(src, dst)
--}
--
--func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
--	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
--	srcSt, err := os.Stat(src)
--	if err != nil {
--		return err
--	}
--	if srcSt.IsDir() {
--		return fmt.Errorf("Can't copy a directory")
--	}
--	// Clean up the trailing /
--	if dst[len(dst)-1] == '/' {
--		dst = path.Join(dst, filepath.Base(src))
--	}
--	// Create the holding directory if necessary
--	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
--		return err
--	}
--
--	r, w := io.Pipe()
--	errC := promise.Go(func() error {
--		defer w.Close()
--
--		srcF, err := os.Open(src)
--		if err != nil {
--			return err
--		}
--		defer srcF.Close()
--
--		hdr, err := tar.FileInfoHeader(srcSt, "")
--		if err != nil {
--			return err
--		}
--		hdr.Name = filepath.Base(dst)
--		tw := tar.NewWriter(w)
--		defer tw.Close()
--		if err := tw.WriteHeader(hdr); err != nil {
--			return err
--		}
--		if _, err := io.Copy(tw, srcF); err != nil {
--			return err
--		}
--		return nil
--	})
--	defer func() {
--		if er := <-errC; err != nil {
--			err = er
--		}
--	}()
--	return archiver.Untar(r, filepath.Dir(dst), nil)
--}
--
--// CopyFileWithTar emulates the behavior of the 'cp' command-line
--// for a single file. It copies a regular file from path `src` to
--// path `dst`, and preserves all its metadata.
--//
--// If `dst` ends with a trailing slash '/', the final destination path
--// will be `dst/base(src)`.
--func CopyFileWithTar(src, dst string) (err error) {
--	return defaultArchiver.CopyFileWithTar(src, dst)
--}
--
--// CmdStream executes a command, and returns its stdout as a stream.
--// If the command fails to run or doesn't complete successfully, an error
--// will be returned, including anything written on stderr.
--func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
--	if input != nil {
--		stdin, err := cmd.StdinPipe()
--		if err != nil {
--			return nil, err
--		}
--		// Write stdin if any
--		go func() {
--			io.Copy(stdin, input)
--			stdin.Close()
--		}()
--	}
--	stdout, err := cmd.StdoutPipe()
--	if err != nil {
--		return nil, err
--	}
--	stderr, err := cmd.StderrPipe()
--	if err != nil {
--		return nil, err
--	}
--	pipeR, pipeW := io.Pipe()
--	errChan := make(chan []byte)
--	// Collect stderr, we will use it in case of an error
--	go func() {
--		errText, e := ioutil.ReadAll(stderr)
--		if e != nil {
--			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
--		}
--		errChan <- errText
--	}()
--	// Copy stdout to the returned pipe
--	go func() {
--		_, err := io.Copy(pipeW, stdout)
--		if err != nil {
--			pipeW.CloseWithError(err)
--		}
--		errText := <-errChan
--		if err := cmd.Wait(); err != nil {
--			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
--		} else {
--			pipeW.Close()
--		}
--	}()
--	// Run the command and return the pipe
--	if err := cmd.Start(); err != nil {
--		return nil, err
--	}
--	return pipeR, nil
--}
--
--// NewTempArchive reads the content of src into a temporary file, and returns the contents
--// of that file as an archive. The archive can only be read once - as soon as reading completes,
--// the file will be deleted.
--func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
--	f, err := ioutil.TempFile(dir, "")
--	if err != nil {
--		return nil, err
--	}
--	if _, err := io.Copy(f, src); err != nil {
--		return nil, err
--	}
--	if err = f.Sync(); err != nil {
--		return nil, err
--	}
--	if _, err := f.Seek(0, 0); err != nil {
--		return nil, err
--	}
--	st, err := f.Stat()
--	if err != nil {
--		return nil, err
--	}
--	size := st.Size()
--	return &TempArchive{File: f, Size: size}, nil
--}
--
--type TempArchive struct {
--	*os.File
--	Size   int64 // Pre-computed from Stat().Size() as a convenience
--	read   int64
--	closed bool
--}
--
--// Close closes the underlying file if it's still open, or does a no-op
--// to allow callers to try to close the TempArchive multiple times safely.
--func (archive *TempArchive) Close() error {
--	if archive.closed {
--		return nil
--	}
--
--	archive.closed = true
--
--	return archive.File.Close()
--}
--
--func (archive *TempArchive) Read(data []byte) (int, error) {
--	n, err := archive.File.Read(data)
--	archive.read += int64(n)
--	if err != nil || archive.read == archive.Size {
--		archive.Close()
--		os.Remove(archive.File.Name())
--	}
--	return n, err
--}
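Unpack above rejects any entry whose cleaned path escapes the destination by checking whether filepath.Rel reports a ".." prefix. A standalone sketch of that check (insideDest is a hypothetical helper name; the logic mirrors the breakout test in the code above):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideDest is a hypothetical helper: it reports whether a tar entry name,
// once joined to dest and cleaned, still resolves inside dest. It mirrors the
// filepath.Rel / ".." breakout check used by Unpack.
func insideDest(dest, name string) bool {
	target := filepath.Join(dest, filepath.Clean(name))
	rel, err := filepath.Rel(dest, target)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, "..")
}

func main() {
	fmt.Println(insideDest("/tmp/extract", "etc/passwd"))    // true
	fmt.Println(insideDest("/tmp/extract", "../etc/passwd")) // false
}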
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
-deleted file mode 100644
-index fdba6fb..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
-+++ /dev/null
-@@ -1,625 +0,0 @@
--package archive
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"os/exec"
--	"path"
--	"path/filepath"
--	"strings"
--	"syscall"
--	"testing"
--	"time"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--)
--
--func TestCmdStreamLargeStderr(t *testing.T) {
--	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
--	out, err := CmdStream(cmd, nil)
--	if err != nil {
--		t.Fatalf("Failed to start command: %s", err)
--	}
--	errCh := make(chan error)
--	go func() {
--		_, err := io.Copy(ioutil.Discard, out)
--		errCh <- err
--	}()
--	select {
--	case err := <-errCh:
--		if err != nil {
--			t.Fatalf("Command should not have failed (err=%.100s...)", err)
--		}
--	case <-time.After(5 * time.Second):
--		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
--	}
--}
--
--func TestCmdStreamBad(t *testing.T) {
--	badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
--	out, err := CmdStream(badCmd, nil)
--	if err != nil {
--		t.Fatalf("Failed to start command: %s", err)
--	}
--	if output, err := ioutil.ReadAll(out); err == nil {
--		t.Fatalf("Command should have failed")
--	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
--		t.Fatalf("Wrong error value (%s)", err)
--	} else if s := string(output); s != "hello\n" {
--		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
--	}
--}
--
--func TestCmdStreamGood(t *testing.T) {
--	cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
--	out, err := CmdStream(cmd, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if output, err := ioutil.ReadAll(out); err != nil {
--		t.Fatalf("Command should not have failed (err=%s)", err)
--	} else if s := string(output); s != "hello\n" {
--		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
--	}
--}
--
--func TestTarFiles(t *testing.T) {
--	// try without hardlinks
--	if err := checkNoChanges(1000, false); err != nil {
--		t.Fatal(err)
--	}
--	// try with hardlinks
--	if err := checkNoChanges(1000, true); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func checkNoChanges(fileNum int, hardlinks bool) error {
--	srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
--	if err != nil {
--		return err
--	}
--	defer os.RemoveAll(srcDir)
--
--	destDir, err := ioutil.TempDir("", "docker-test-destDir")
--	if err != nil {
--		return err
--	}
--	defer os.RemoveAll(destDir)
--
--	_, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
--	if err != nil {
--		return err
--	}
--
--	err = TarUntar(srcDir, destDir)
--	if err != nil {
--		return err
--	}
--
--	changes, err := ChangesDirs(destDir, srcDir)
--	if err != nil {
--		return err
--	}
--	if len(changes) > 0 {
--		return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
--	}
--	return nil
--}
--
--func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
--	archive, err := TarWithOptions(origin, options)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer archive.Close()
--
--	buf := make([]byte, 10)
--	if _, err := archive.Read(buf); err != nil {
--		return nil, err
--	}
--	wrap := io.MultiReader(bytes.NewReader(buf), archive)
--
--	detectedCompression := DetectCompression(buf)
--	compression := options.Compression
--	if detectedCompression.Extension() != compression.Extension() {
--		return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
--	}
--
--	tmp, err := ioutil.TempDir("", "docker-test-untar")
--	if err != nil {
--		return nil, err
--	}
--	defer os.RemoveAll(tmp)
--	if err := Untar(wrap, tmp, nil); err != nil {
--		return nil, err
--	}
--	if _, err := os.Stat(tmp); err != nil {
--		return nil, err
--	}
--
--	return ChangesDirs(origin, tmp)
--}
--
--func TestTarUntar(t *testing.T) {
--	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(origin)
--	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
--		t.Fatal(err)
--	}
--	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
--		t.Fatal(err)
--	}
--	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
--		t.Fatal(err)
--	}
--
--	for _, c := range []Compression{
--		Uncompressed,
--		Gzip,
--	} {
--		changes, err := tarUntar(t, origin, &TarOptions{
--			Compression: c,
--			Excludes:    []string{"3"},
--		})
--
--		if err != nil {
--			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
--		}
--
--		if len(changes) != 1 || changes[0].Path != "/3" {
--			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
--		}
--	}
--}
--
--func TestTarWithOptions(t *testing.T) {
--	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(origin)
--	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
--		t.Fatal(err)
--	}
--	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
--		t.Fatal(err)
--	}
--
--	cases := []struct {
--		opts       *TarOptions
--		numChanges int
--	}{
--		{&TarOptions{Includes: []string{"1"}}, 1},
--		{&TarOptions{Excludes: []string{"2"}}, 1},
--	}
--	for _, testCase := range cases {
--		changes, err := tarUntar(t, origin, testCase.opts)
--		if err != nil {
--			t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
--		}
--		if len(changes) != testCase.numChanges {
--			t.Errorf("Expected %d changes, got %d for %+v:",
--				testCase.numChanges, len(changes), testCase.opts)
--		}
--	}
--}
--
--// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
--// use PAX Global Extended Headers.
--// Failing on these headers would prevent such archives from being unpacked during ADD.
--func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
--	hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
--	tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(tmpDir)
--	err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--// Some tar archives have both GNU-specific (huge uid) and Ustar-specific (long name) entries.
--// That is not supposed to happen (PAX should be used instead of Ustar for long names), but it does and such archives should still work.
--func TestUntarUstarGnuConflict(t *testing.T) {
--	f, err := os.Open("testdata/broken.tar")
--	if err != nil {
--		t.Fatal(err)
--	}
--	found := false
--	tr := tar.NewReader(f)
--	// Iterate through the files in the archive.
--	for {
--		hdr, err := tr.Next()
--		if err == io.EOF {
--			// end of tar archive
--			break
--		}
--		if err != nil {
--			t.Fatal(err)
--		}
--		if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
--			found = true
--			break
--		}
--	}
--	if !found {
--		t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
--	}
--}
--
--func TestTarWithHardLink(t *testing.T) {
--	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(origin)
--	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
--		t.Fatal(err)
--	}
--	if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil {
--		t.Fatal(err)
--	}
--
--	var i1, i2 uint64
--	if i1, err = getNlink(path.Join(origin, "1")); err != nil {
--		t.Fatal(err)
--	}
--	// sanity check that we can hardlink
--	if i1 != 2 {
--		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
--	}
--
--	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(dest)
--
--	// we'll do this in two steps to separate the failure cases
--	fh, err := Tar(origin, Uncompressed)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	// ensure we can read the whole thing with no error, before writing back out
--	buf, err := ioutil.ReadAll(fh)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	bRdr := bytes.NewReader(buf)
--	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if i1, err = getInode(path.Join(dest, "1")); err != nil {
--		t.Fatal(err)
--	}
--	if i2, err = getInode(path.Join(dest, "2")); err != nil {
--		t.Fatal(err)
--	}
--
--	if i1 != i2 {
--		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
--	}
--}
--
--func getNlink(path string) (uint64, error) {
--	stat, err := os.Stat(path)
--	if err != nil {
--		return 0, err
--	}
--	statT, ok := stat.Sys().(*syscall.Stat_t)
--	if !ok {
--		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
--	}
--	return statT.Nlink, nil
--}
--
--func getInode(path string) (uint64, error) {
--	stat, err := os.Stat(path)
--	if err != nil {
--		return 0, err
--	}
--	statT, ok := stat.Sys().(*syscall.Stat_t)
--	if !ok {
--		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
--	}
--	return statT.Ino, nil
--}
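Both helpers above depend on os.FileInfo.Sys() exposing a *syscall.Stat_t, which only holds on Unix-like platforms. A rough, standalone illustration of the same pattern (Unix-only; the file paths and the statT helper name are made up for the example, not part of the vendored package):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// statT returns the raw Unix stat data for path, or an error if the
// platform does not expose *syscall.Stat_t through FileInfo.Sys().
func statT(path string) (*syscall.Stat_t, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return nil, fmt.Errorf("expected type *syscall.Stat_t, got %T", fi.Sys())
	}
	return st, nil
}

func main() {
	// "a" and "b" are hypothetical paths; b is assumed to be a hardlink to a,
	// so both should report the same inode and a link count of 2.
	for _, p := range []string{"a", "b"} {
		st, err := statT(p)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Printf("%s: inode=%d nlink=%d\n", p, st.Ino, st.Nlink)
	}
}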
--
--func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
--	fileData := []byte("fooo")
--	for n := 0; n < numberOfFiles; n++ {
--		fileName := fmt.Sprintf("file-%d", n)
--		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
--			return 0, err
--		}
--		if makeLinks {
--			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
--				return 0, err
--			}
--		}
--	}
--	totalSize := numberOfFiles * len(fileData)
--	return totalSize, nil
--}
--
--func BenchmarkTarUntar(b *testing.B) {
--	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
--	if err != nil {
--		b.Fatal(err)
--	}
--	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
--	if err != nil {
--		b.Fatal(err)
--	}
--	target := path.Join(tempDir, "dest")
--	n, err := prepareUntarSourceDirectory(100, origin, false)
--	if err != nil {
--		b.Fatal(err)
--	}
--	defer os.RemoveAll(origin)
--	defer os.RemoveAll(tempDir)
--
--	b.ResetTimer()
--	b.SetBytes(int64(n))
--	for n := 0; n < b.N; n++ {
--		err := TarUntar(origin, target)
--		if err != nil {
--			b.Fatal(err)
--		}
--		os.RemoveAll(target)
--	}
--}
--
--func BenchmarkTarUntarWithLinks(b *testing.B) {
--	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
--	if err != nil {
--		b.Fatal(err)
--	}
--	tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
--	if err != nil {
--		b.Fatal(err)
--	}
--	target := path.Join(tempDir, "dest")
--	n, err := prepareUntarSourceDirectory(100, origin, true)
--	if err != nil {
--		b.Fatal(err)
--	}
--	defer os.RemoveAll(origin)
--	defer os.RemoveAll(tempDir)
--
--	b.ResetTimer()
--	b.SetBytes(int64(n))
--	for n := 0; n < b.N; n++ {
--		err := TarUntar(origin, target)
--		if err != nil {
--			b.Fatal(err)
--		}
--		os.RemoveAll(target)
--	}
--}
--
--func TestUntarInvalidFilenames(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{
--			{
--				Name:     "../victim/dotdot",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{
--			{
--				// Note the leading slash
--				Name:     "/../victim/slash-dotdot",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
--
--func TestUntarInvalidHardlink(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{ // try reading victim/hello (../)
--			{
--				Name:     "dotdot",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (/../)
--			{
--				Name:     "slash-dotdot",
--				Typeflag: tar.TypeLink,
--				// Note the leading slash
--				Linkname: "/../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try writing victim/file
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim/file",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (hardlink, symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "symlink",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // Try reading victim/hello (hardlink, hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "hardlink",
--				Typeflag: tar.TypeLink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // Try removing victim directory (hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
--
--func TestUntarInvalidSymlink(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{ // try reading victim/hello (../)
--			{
--				Name:     "dotdot",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (/../)
--			{
--				Name:     "slash-dotdot",
--				Typeflag: tar.TypeSymlink,
--				// Note the leading slash
--				Linkname: "/../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try writing victim/file
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim/file",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (symlink, symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "symlink",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (symlink, hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "hardlink",
--				Typeflag: tar.TypeLink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try removing victim directory (symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{ // try writing to victim/newdir/newfile with a symlink in the path
--			{
--				// this header needs to be before the next one, or else there is an error
--				Name:     "dir/loophole",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "dir/loophole/newdir/newfile",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
--
--func TestTempArchiveCloseMultipleTimes(t *testing.T) {
--	reader := ioutil.NopCloser(strings.NewReader("hello"))
--	tempArchive, err := NewTempArchive(reader, "")
--	if err != nil {
--		t.Fatal(err)
--	}
--	buf := make([]byte, 10)
--	n, err := tempArchive.Read(buf)
--	if n != 5 {
--		t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
--	}
--	for i := 0; i < 3; i++ {
--		if err = tempArchive.Close(); err != nil {
--			t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
-deleted file mode 100644
-index c0e8aee..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--// +build !windows
--
--package archive
--
--import (
--	"errors"
--	"syscall"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--)
--
--func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
--	s, ok := stat.(*syscall.Stat_t)
--
--	if !ok {
--		err = errors.New("cannot convert stat value to syscall.Stat_t")
--		return
--	}
--
--	nlink = uint32(s.Nlink)
--	inode = uint64(s.Ino)
--
--	// Currently Go does not fill in the major/minor device numbers
--	if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
--		s.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
--		hdr.Devmajor = int64(major(uint64(s.Rdev)))
--		hdr.Devminor = int64(minor(uint64(s.Rdev)))
--	}
--
--	return
--}
--
--func major(device uint64) uint64 {
--	return (device >> 8) & 0xfff
--}
--
--func minor(device uint64) uint64 {
--	return (device & 0xff) | ((device >> 12) & 0xfff00)
--}
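major and minor above decode the conventional Linux dev_t layout: the major number occupies bits 8-19, and the minor number is split between the low 8 bits and bits 20 and up. A quick sanity check of that decoding with a made-up device number:

package main

import "fmt"

// major and minor mirror the Linux dev_t decoding shown above.
func major(device uint64) uint64 { return (device >> 8) & 0xfff }

func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	// 0x0801 is the classic encoding of block device 8:1 (e.g. /dev/sda1 on many systems).
	dev := uint64(0x0801)
	fmt.Printf("major=%d minor=%d\n", major(dev), minor(dev)) // major=8 minor=1
}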
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
-deleted file mode 100644
-index 3cc2493..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--// +build windows
--
--package archive
--
--import (
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--)
--
--func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
--	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
--	return
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
-deleted file mode 100644
-index 85217f6..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go
-+++ /dev/null
-@@ -1,413 +0,0 @@
--package archive
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"os"
--	"path/filepath"
--	"strings"
--	"syscall"
--	"time"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--
--	log "github.com/Sirupsen/logrus"
--	"github.com/docker/docker/pkg/pools"
--	"github.com/docker/docker/pkg/system"
--)
--
--type ChangeType int
--
--const (
--	ChangeModify = iota
--	ChangeAdd
--	ChangeDelete
--)
--
--type Change struct {
--	Path string
--	Kind ChangeType
--}
--
--func (change *Change) String() string {
--	var kind string
--	switch change.Kind {
--	case ChangeModify:
--		kind = "C"
--	case ChangeAdd:
--		kind = "A"
--	case ChangeDelete:
--		kind = "D"
--	}
--	return fmt.Sprintf("%s %s", kind, change.Path)
--}
--
--// GNU tar and the Go tar writer don't have sub-second mtime
--// precision, which is problematic when we apply changes via tar
--// files. We handle this by comparing for exact times, *or* the same
--// second count with either a or b having exactly 0 nanoseconds.
--func sameFsTime(a, b time.Time) bool {
--	return a == b ||
--		(a.Unix() == b.Unix() &&
--			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
--}
--
--func sameFsTimeSpec(a, b syscall.Timespec) bool {
--	return a.Sec == b.Sec &&
--		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
--}
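To see why the relaxed comparison matters: a round trip through a tar writer without sub-second precision truncates nanoseconds, so an exact time.Time comparison would flag every file as changed. A small self-contained sketch (the timestamps are arbitrary):

package main

import (
	"fmt"
	"time"
)

// sameFsTime mirrors the comparison above: the times are identical, or they
// share the same second and one of them has zero nanoseconds (i.e. was
// truncated by a tar round trip).
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	orig := time.Date(2015, 1, 28, 18, 43, 21, 123456789, time.UTC)
	truncated := orig.Truncate(time.Second) // what a writer without sub-second precision stores

	fmt.Println(orig.Equal(truncated))      // false: an exact comparison would report a change
	fmt.Println(sameFsTime(orig, truncated)) // true: treated as unchanged
}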
--
--// Changes walks the path rw and determines changes for the files in the path,
--// with respect to the parent layers
--func Changes(layers []string, rw string) ([]Change, error) {
--	var changes []Change
--	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
--		if err != nil {
--			return err
--		}
--
--		// Rebase path
--		path, err = filepath.Rel(rw, path)
--		if err != nil {
--			return err
--		}
--		path = filepath.Join("/", path)
--
--		// Skip root
--		if path == "/" {
--			return nil
--		}
--
--		// Skip AUFS metadata
--		if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched {
--			return err
--		}
--
--		change := Change{
--			Path: path,
--		}
--
--		// Find out what kind of modification happened
--		file := filepath.Base(path)
--		// If there is a whiteout, then the file was removed
--		if strings.HasPrefix(file, ".wh.") {
--			originalFile := file[len(".wh."):]
--			change.Path = filepath.Join(filepath.Dir(path), originalFile)
--			change.Kind = ChangeDelete
--		} else {
--			// Otherwise, the file was added
--			change.Kind = ChangeAdd
--
--			// ...Unless it already existed in a top layer, in which case, it's a modification
--			for _, layer := range layers {
--				stat, err := os.Stat(filepath.Join(layer, path))
--				if err != nil && !os.IsNotExist(err) {
--					return err
--				}
--				if err == nil {
--					// The file existed in the top layer, so that's a modification
--
--					// However, if it's a directory, maybe it wasn't actually modified.
--					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
--					if stat.IsDir() && f.IsDir() {
--						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
--							// Both directories are the same, don't record the change
--							return nil
--						}
--					}
--					change.Kind = ChangeModify
--					break
--				}
--			}
--		}
--
--		// Record change
--		changes = append(changes, change)
--		return nil
--	})
--	if err != nil && !os.IsNotExist(err) {
--		return nil, err
--	}
--	return changes, nil
--}
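The `.wh.` handling above follows the AUFS whiteout convention: an upper-layer entry named `.wh.foo` marks `foo` as deleted in the layers below. A minimal sketch of just that name mapping (whiteoutTarget is an illustrative helper, not part of the package):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// whiteoutTarget reports whether path names a whiteout entry and, if so,
// which sibling path it marks as deleted.
func whiteoutTarget(path string) (target string, isWhiteout bool) {
	base := filepath.Base(path)
	if !strings.HasPrefix(base, ".wh.") {
		return "", false
	}
	return filepath.Join(filepath.Dir(path), base[len(".wh."):]), true
}

func main() {
	for _, p := range []string{"/etc/.wh.passwd", "/etc/passwd"} {
		if target, ok := whiteoutTarget(p); ok {
			fmt.Printf("%s marks %s as deleted\n", p, target)
		} else {
			fmt.Printf("%s is a regular entry\n", p)
		}
	}
}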
--
--type FileInfo struct {
--	parent     *FileInfo
--	name       string
--	stat       *system.Stat
--	children   map[string]*FileInfo
--	capability []byte
--	added      bool
--}
--
--func (root *FileInfo) LookUp(path string) *FileInfo {
--	parent := root
--	if path == "/" {
--		return root
--	}
--
--	pathElements := strings.Split(path, "/")
--	for _, elem := range pathElements {
--		if elem != "" {
--			child := parent.children[elem]
--			if child == nil {
--				return nil
--			}
--			parent = child
--		}
--	}
--	return parent
--}
--
--func (info *FileInfo) path() string {
--	if info.parent == nil {
--		return "/"
--	}
--	return filepath.Join(info.parent.path(), info.name)
--}
--
--func (info *FileInfo) isDir() bool {
--	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR
--}
--
--func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
--
--	sizeAtEntry := len(*changes)
--
--	if oldInfo == nil {
--		// add
--		change := Change{
--			Path: info.path(),
--			Kind: ChangeAdd,
--		}
--		*changes = append(*changes, change)
--		info.added = true
--	}
--
--	// We make a copy so we can modify it to detect deletions.
--	// Also, we only recurse on the old dir if the new info is a directory;
--	// otherwise any previous delete/change is considered recursive.
--	oldChildren := make(map[string]*FileInfo)
--	if oldInfo != nil && info.isDir() {
--		for k, v := range oldInfo.children {
--			oldChildren[k] = v
--		}
--	}
--
--	for name, newChild := range info.children {
--		oldChild := oldChildren[name]
--		if oldChild != nil {
--			// change?
--			oldStat := oldChild.stat
--			newStat := newChild.stat
--			// Note: We can't compare inode or ctime or blocksize here, because these change
--			// when copying a file into a container. However, that is not generally a problem
--			// because any content change will change mtime, and any status change should
--			// be visible when actually comparing the stat fields. The only time this
--			// breaks down is if some code intentionally hides a change by setting
--			// the mtime back.
--			if oldStat.Mode() != newStat.Mode() ||
--				oldStat.Uid() != newStat.Uid() ||
--				oldStat.Gid() != newStat.Gid() ||
--				oldStat.Rdev() != newStat.Rdev() ||
--				// Don't look at size for dirs; it's not a good measure of change.
--				(oldStat.Size() != newStat.Size() && oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR) ||
--				!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
--				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
--				change := Change{
--					Path: newChild.path(),
--					Kind: ChangeModify,
--				}
--				*changes = append(*changes, change)
--				newChild.added = true
--			}
--
--			// Remove from copy so we can detect deletions
--			delete(oldChildren, name)
--		}
--
--		newChild.addChanges(oldChild, changes)
--	}
--	for _, oldChild := range oldChildren {
--		// delete
--		change := Change{
--			Path: oldChild.path(),
--			Kind: ChangeDelete,
--		}
--		*changes = append(*changes, change)
--	}
--
--	// If there were changes inside this directory, we need to add it, even if the directory
--	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
--	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" {
--		change := Change{
--			Path: info.path(),
--			Kind: ChangeModify,
--		}
--		// Let's insert the directory entry before the recently added entries located inside this dir
--		*changes = append(*changes, change) // just to resize the slice, will be overwritten
--		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
--		(*changes)[sizeAtEntry] = change
--	}
--
--}
--
--func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
--	var changes []Change
--
--	info.addChanges(oldInfo, &changes)
--
--	return changes
--}
--
--func newRootFileInfo() *FileInfo {
--	root := &FileInfo{
--		name:     "/",
--		children: make(map[string]*FileInfo),
--	}
--	return root
--}
--
--func collectFileInfo(sourceDir string) (*FileInfo, error) {
--	root := newRootFileInfo()
--
--	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
--		if err != nil {
--			return err
--		}
--
--		// Rebase path
--		relPath, err := filepath.Rel(sourceDir, path)
--		if err != nil {
--			return err
--		}
--		relPath = filepath.Join("/", relPath)
--
--		if relPath == "/" {
--			return nil
--		}
--
--		parent := root.LookUp(filepath.Dir(relPath))
--		if parent == nil {
--			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
--		}
--
--		info := &FileInfo{
--			name:     filepath.Base(relPath),
--			children: make(map[string]*FileInfo),
--			parent:   parent,
--		}
--
--		s, err := system.Lstat(path)
--		if err != nil {
--			return err
--		}
--		info.stat = s
--
--		info.capability, _ = system.Lgetxattr(path, "security.capability")
--
--		parent.children[info.name] = info
--
--		return nil
--	})
--	if err != nil {
--		return nil, err
--	}
--	return root, nil
--}
--
--// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
--// If oldDir is "", then all files in newDir will be Add-Changes.
--func ChangesDirs(newDir, oldDir string) ([]Change, error) {
--	var (
--		oldRoot, newRoot *FileInfo
--		err1, err2       error
--		errs             = make(chan error, 2)
--	)
--	go func() {
--		if oldDir != "" {
--			oldRoot, err1 = collectFileInfo(oldDir)
--		}
--		errs <- err1
--	}()
--	go func() {
--		newRoot, err2 = collectFileInfo(newDir)
--		errs <- err2
--	}()
--
--	// block until both routines have returned
--	for i := 0; i < 2; i++ {
--		if err := <-errs; err != nil {
--			return nil, err
--		}
--	}
--
--	return newRoot.Changes(oldRoot), nil
--}
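ChangesDirs collects the old and new trees concurrently and joins the two goroutines through a buffered error channel, so an early return cannot leave a sender blocked. A stripped-down sketch of that pattern with placeholder work functions:

package main

import (
	"errors"
	"fmt"
)

// runBoth runs two independent tasks concurrently, blocking until both have
// reported, and bails out on the first error it sees.
func runBoth(a, b func() error) error {
	errs := make(chan error, 2) // buffered: a late sender never blocks, even if we return early
	go func() { errs <- a() }()
	go func() { errs <- b() }()

	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("collect failed") },
	)
	fmt.Println(err)
}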
--
--// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
--func ChangesSize(newDir string, changes []Change) int64 {
--	var size int64
--	for _, change := range changes {
--		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
--			file := filepath.Join(newDir, change.Path)
--			fileInfo, _ := os.Lstat(file)
--			if fileInfo != nil && !fileInfo.IsDir() {
--				size += fileInfo.Size()
--			}
--		}
--	}
--	return size
--}
--
--// ExportChanges produces an Archive from the provided changes, relative to dir.
--func ExportChanges(dir string, changes []Change) (Archive, error) {
--	reader, writer := io.Pipe()
--	go func() {
--		ta := &tarAppender{
--			TarWriter: tar.NewWriter(writer),
--			Buffer:    pools.BufioWriter32KPool.Get(nil),
--			SeenFiles: make(map[uint64]string),
--		}
--		// this buffer is needed for the duration of this piped stream
--		defer pools.BufioWriter32KPool.Put(ta.Buffer)
--
--		// In general we log errors here but ignore them, because during e.g. a
--		// diff operation the container can continue mutating the filesystem and
--		// we can see transient errors from this.
--		for _, change := range changes {
--			if change.Kind == ChangeDelete {
--				whiteOutDir := filepath.Dir(change.Path)
--				whiteOutBase := filepath.Base(change.Path)
--				whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
--				timestamp := time.Now()
--				hdr := &tar.Header{
--					Name:       whiteOut[1:],
--					Size:       0,
--					ModTime:    timestamp,
--					AccessTime: timestamp,
--					ChangeTime: timestamp,
--				}
--				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
--					log.Debugf("Can't write whiteout header: %s", err)
--				}
--			} else {
--				path := filepath.Join(dir, change.Path)
--				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
--					log.Debugf("Can't add file %s to tar: %s", path, err)
--				}
--			}
--		}
--
--		// Make sure to check the error on Close.
--		if err := ta.TarWriter.Close(); err != nil {
--			log.Debugf("Can't close layer: %s", err)
--		}
--		if err := writer.Close(); err != nil {
--			log.Debugf("failed close Changes writer: %s", err)
--		}
--	}()
--	return reader, nil
--}
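For a ChangeDelete the exporter does not remove anything itself; it records the deletion as an empty tar entry whose base name carries the `.wh.` prefix. A rough sketch of constructing such an entry with the standard library's archive/tar (writeWhiteout is an illustrative helper, and the deleted path is made up):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path/filepath"
	"time"
)

// writeWhiteout writes an empty entry named ".wh.<base>" next to deletedPath,
// which is how a deletion is represented inside an exported layer.
func writeWhiteout(tw *tar.Writer, deletedPath string) error {
	now := time.Now()
	hdr := &tar.Header{
		Name:       filepath.Join(filepath.Dir(deletedPath), ".wh."+filepath.Base(deletedPath))[1:],
		Size:       0,
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	}
	return tw.WriteHeader(hdr)
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := writeWhiteout(tw, "/etc/passwd"); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("layer tar with one whiteout entry: %d bytes\n", buf.Len())
}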
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
-deleted file mode 100644
-index 34c0f0d..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
-+++ /dev/null
-@@ -1,301 +0,0 @@
--package archive
--
--import (
--	"io/ioutil"
--	"os"
--	"os/exec"
--	"path"
--	"sort"
--	"testing"
--	"time"
--)
--
--func max(x, y int) int {
--	if x >= y {
--		return x
--	}
--	return y
--}
--
--func copyDir(src, dst string) error {
--	cmd := exec.Command("cp", "-a", src, dst)
--	if err := cmd.Run(); err != nil {
--		return err
--	}
--	return nil
--}
--
--// Helper to sort []Change by path
--type byPath struct{ changes []Change }
--
--func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path }
--func (b byPath) Len() int           { return len(b.changes) }
--func (b byPath) Swap(i, j int)      { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] }
--
--type FileType uint32
--
--const (
--	Regular FileType = iota
--	Dir
--	Symlink
--)
--
--type FileData struct {
--	filetype    FileType
--	path        string
--	contents    string
--	permissions os.FileMode
--}
--
--func createSampleDir(t *testing.T, root string) {
--	files := []FileData{
--		{Regular, "file1", "file1\n", 0600},
--		{Regular, "file2", "file2\n", 0666},
--		{Regular, "file3", "file3\n", 0404},
--		{Regular, "file4", "file4\n", 0600},
--		{Regular, "file5", "file5\n", 0600},
--		{Regular, "file6", "file6\n", 0600},
--		{Regular, "file7", "file7\n", 0600},
--		{Dir, "dir1", "", 0740},
--		{Regular, "dir1/file1-1", "file1-1\n", 01444},
--		{Regular, "dir1/file1-2", "file1-2\n", 0666},
--		{Dir, "dir2", "", 0700},
--		{Regular, "dir2/file2-1", "file2-1\n", 0666},
--		{Regular, "dir2/file2-2", "file2-2\n", 0666},
--		{Dir, "dir3", "", 0700},
--		{Regular, "dir3/file3-1", "file3-1\n", 0666},
--		{Regular, "dir3/file3-2", "file3-2\n", 0666},
--		{Dir, "dir4", "", 0700},
--		{Regular, "dir4/file3-1", "file4-1\n", 0666},
--		{Regular, "dir4/file3-2", "file4-2\n", 0666},
--		{Symlink, "symlink1", "target1", 0666},
--		{Symlink, "symlink2", "target2", 0666},
--	}
--
--	now := time.Now()
--	for _, info := range files {
--		p := path.Join(root, info.path)
--		if info.filetype == Dir {
--			if err := os.MkdirAll(p, info.permissions); err != nil {
--				t.Fatal(err)
--			}
--		} else if info.filetype == Regular {
--			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
--				t.Fatal(err)
--			}
--		} else if info.filetype == Symlink {
--			if err := os.Symlink(info.contents, p); err != nil {
--				t.Fatal(err)
--			}
--		}
--
--		if info.filetype != Symlink {
--			// Set a consistent ctime, atime for all files and dirs
--			if err := os.Chtimes(p, now, now); err != nil {
--				t.Fatal(err)
--			}
--		}
--	}
--}
--
--// Create a directory, copy it, and make sure we report no changes between the two
--func TestChangesDirsEmpty(t *testing.T) {
--	src, err := ioutil.TempDir("", "docker-changes-test")
--	if err != nil {
--		t.Fatal(err)
--	}
--	createSampleDir(t, src)
--	dst := src + "-copy"
--	if err := copyDir(src, dst); err != nil {
--		t.Fatal(err)
--	}
--	changes, err := ChangesDirs(dst, src)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if len(changes) != 0 {
--		t.Fatalf("Reported changes for identical dirs: %v", changes)
--	}
--	os.RemoveAll(src)
--	os.RemoveAll(dst)
--}
--
--func mutateSampleDir(t *testing.T, root string) {
--	// Remove a regular file
--	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
--		t.Fatal(err)
--	}
--
--	// Remove a directory
--	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
--		t.Fatal(err)
--	}
--
--	// Remove a symlink
--	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
--		t.Fatal(err)
--	}
--
--	// Rewrite a file
--	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
--		t.Fatal(err)
--	}
--
--	// Replace a file
--	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
--		t.Fatal(err)
--	}
--	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
--		t.Fatal(err)
--	}
--
--	// Touch file
--	if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
--		t.Fatal(err)
--	}
--
--	// Replace file with dir
--	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
--		t.Fatal(err)
--	}
--	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
--		t.Fatal(err)
--	}
--
--	// Create new file
--	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
--		t.Fatal(err)
--	}
--
--	// Create new dir
--	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
--		t.Fatal(err)
--	}
--
--	// Create a new symlink
--	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
--		t.Fatal(err)
--	}
--
--	// Change a symlink
--	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
--		t.Fatal(err)
--	}
--	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
--		t.Fatal(err)
--	}
--
--	// Replace dir with file
--	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
--		t.Fatal(err)
--	}
--	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
--		t.Fatal(err)
--	}
--
--	// Touch dir
--	if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestChangesDirsMutated(t *testing.T) {
--	src, err := ioutil.TempDir("", "docker-changes-test")
--	if err != nil {
--		t.Fatal(err)
--	}
--	createSampleDir(t, src)
--	dst := src + "-copy"
--	if err := copyDir(src, dst); err != nil {
--		t.Fatal(err)
--	}
--	defer os.RemoveAll(src)
--	defer os.RemoveAll(dst)
--
--	mutateSampleDir(t, dst)
--
--	changes, err := ChangesDirs(dst, src)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	sort.Sort(byPath{changes})
--
--	expectedChanges := []Change{
--		{"/dir1", ChangeDelete},
--		{"/dir2", ChangeModify},
--		{"/dir3", ChangeModify},
--		{"/dirnew", ChangeAdd},
--		{"/file1", ChangeDelete},
--		{"/file2", ChangeModify},
--		{"/file3", ChangeModify},
--		{"/file4", ChangeModify},
--		{"/file5", ChangeModify},
--		{"/filenew", ChangeAdd},
--		{"/symlink1", ChangeDelete},
--		{"/symlink2", ChangeModify},
--		{"/symlinknew", ChangeAdd},
--	}
--
--	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
--		if i >= len(expectedChanges) {
--			t.Fatalf("unexpected change %s\n", changes[i].String())
--		}
--		if i >= len(changes) {
--			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
--		}
--		if changes[i].Path == expectedChanges[i].Path {
--			if changes[i] != expectedChanges[i] {
--				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
--			}
--		} else if changes[i].Path < expectedChanges[i].Path {
--			t.Fatalf("unexpected change %s\n", changes[i].String())
--		} else {
--			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
--		}
--	}
--}
--
--func TestApplyLayer(t *testing.T) {
--	src, err := ioutil.TempDir("", "docker-changes-test")
--	if err != nil {
--		t.Fatal(err)
--	}
--	createSampleDir(t, src)
--	defer os.RemoveAll(src)
--	dst := src + "-copy"
--	if err := copyDir(src, dst); err != nil {
--		t.Fatal(err)
--	}
--	mutateSampleDir(t, dst)
--	defer os.RemoveAll(dst)
--
--	changes, err := ChangesDirs(dst, src)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	layer, err := ExportChanges(dst, changes)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	layerCopy, err := NewTempArchive(layer, "")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if err := ApplyLayer(src, layerCopy); err != nil {
--		t.Fatal(err)
--	}
--
--	changes2, err := ChangesDirs(src, dst)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if len(changes2) != 0 {
--		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
-deleted file mode 100644
-index ba22c41..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
-+++ /dev/null
-@@ -1,165 +0,0 @@
--package archive
--
--import (
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"path/filepath"
--	"strings"
--	"syscall"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--
--	"github.com/docker/docker/pkg/pools"
--	"github.com/docker/docker/pkg/system"
--)
--
--func UnpackLayer(dest string, layer ArchiveReader) error {
--	tr := tar.NewReader(layer)
--	trBuf := pools.BufioReader32KPool.Get(tr)
--	defer pools.BufioReader32KPool.Put(trBuf)
--
--	var dirs []*tar.Header
--
--	aufsTempdir := ""
--	aufsHardlinks := make(map[string]*tar.Header)
--
--	// Iterate through the files in the archive.
--	for {
--		hdr, err := tr.Next()
--		if err == io.EOF {
--			// end of tar archive
--			break
--		}
--		if err != nil {
--			return err
--		}
--
--		// Normalize name, for safety and for a simple is-root check
--		hdr.Name = filepath.Clean(hdr.Name)
--
--		if !strings.HasSuffix(hdr.Name, "/") {
--			// Not the root directory, ensure that the parent directory exists.
--			// This happened in some tests where an image had a tarfile without any
--			// parent directories.
--			parent := filepath.Dir(hdr.Name)
--			parentPath := filepath.Join(dest, parent)
--			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
--				err = os.MkdirAll(parentPath, 0600)
--				if err != nil {
--					return err
--				}
--			}
--		}
--
--		// Skip AUFS metadata dirs
--		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
--		// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
--		// We don't want this directory itself, but we need the files in it so that
--		// such hardlinks can be resolved.
--			if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
--				basename := filepath.Base(hdr.Name)
--				aufsHardlinks[basename] = hdr
--				if aufsTempdir == "" {
--					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
--						return err
--					}
--					defer os.RemoveAll(aufsTempdir)
--				}
--				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
--					return err
--				}
--			}
--			continue
--		}
--
--		path := filepath.Join(dest, hdr.Name)
--		rel, err := filepath.Rel(dest, path)
--		if err != nil {
--			return err
--		}
--		if strings.HasPrefix(rel, "..") {
--			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
--		}
--		base := filepath.Base(path)
--
--		if strings.HasPrefix(base, ".wh.") {
--			originalBase := base[len(".wh."):]
--			originalPath := filepath.Join(filepath.Dir(path), originalBase)
--			if err := os.RemoveAll(originalPath); err != nil {
--				return err
--			}
--		} else {
--			// If the path exists, we almost always just want to remove and replace it.
--			// The only exception is when it is a directory *and* the file from
--			// the layer is also a directory. Then we want to merge them (i.e.
--			// just apply the metadata from the layer).
--			if fi, err := os.Lstat(path); err == nil {
--				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
--					if err := os.RemoveAll(path); err != nil {
--						return err
--					}
--				}
--			}
--
--			trBuf.Reset(tr)
--			srcData := io.Reader(trBuf)
--			srcHdr := hdr
--
--			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
--			// we manually retarget these links to the temporary files we extracted them into.
--			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
--				linkBasename := filepath.Base(hdr.Linkname)
--				srcHdr = aufsHardlinks[linkBasename]
--				if srcHdr == nil {
--					return fmt.Errorf("Invalid aufs hardlink")
--				}
--				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
--				if err != nil {
--					return err
--				}
--				defer tmpFile.Close()
--				srcData = tmpFile
--			}
--
--			if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
--				return err
--			}
--
--			// Directory mtimes must be handled at the end, so that subsequent
--			// file creation inside them does not modify the directory mtime.
--			if hdr.Typeflag == tar.TypeDir {
--				dirs = append(dirs, hdr)
--			}
--		}
--	}
--
--	for _, hdr := range dirs {
--		path := filepath.Join(dest, hdr.Name)
--		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
--		if err := syscall.UtimesNano(path, ts); err != nil {
--			return err
--		}
--	}
--	return nil
--}
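The filepath.Rel check above is what turns a crafted entry name like ../../etc/passwd into a breakoutError rather than a write outside dest. A compact sketch of the same containment test (insideDest is an illustrative name, not part of the package):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideDest reports whether joining name onto dest stays inside dest,
// mirroring the filepath.Rel check used when unpacking a layer.
func insideDest(dest, name string) bool {
	path := filepath.Join(dest, name)
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return false
	}
	return !strings.HasPrefix(rel, "..")
}

func main() {
	fmt.Println(insideDest("/unpack", "etc/passwd"))       // true
	fmt.Println(insideDest("/unpack", "../../etc/passwd")) // false
}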
--
--// ApplyLayer parses a diff in the standard layer format from `layer`, and
--// applies it to the directory `dest`.
--func ApplyLayer(dest string, layer ArchiveReader) error {
--	dest = filepath.Clean(dest)
--
--	// We need to be able to set any perms
--	oldmask, err := system.Umask(0)
--	if err != nil {
--		return err
--	}
--	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
--
--	layer, err = DecompressStream(layer)
--	if err != nil {
--		return err
--	}
--	return UnpackLayer(dest, layer)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
-deleted file mode 100644
-index 758c411..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go
-+++ /dev/null
-@@ -1,191 +0,0 @@
--package archive
--
--import (
--	"testing"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--)
--
--func TestApplyLayerInvalidFilenames(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{
--			{
--				Name:     "../victim/dotdot",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{
--			{
--				// Note the leading slash
--				Name:     "/../victim/slash-dotdot",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
--
--func TestApplyLayerInvalidHardlink(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{ // try reading victim/hello (../)
--			{
--				Name:     "dotdot",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (/../)
--			{
--				Name:     "slash-dotdot",
--				Typeflag: tar.TypeLink,
--				// Note the leading slash
--				Linkname: "/../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try writing victim/file
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim/file",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (hardlink, symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "symlink",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // Try reading victim/hello (hardlink, hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "hardlink",
--				Typeflag: tar.TypeLink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // Try removing victim directory (hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeLink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
--
--func TestApplyLayerInvalidSymlink(t *testing.T) {
--	for i, headers := range [][]*tar.Header{
--		{ // try reading victim/hello (../)
--			{
--				Name:     "dotdot",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (/../)
--			{
--				Name:     "slash-dotdot",
--				Typeflag: tar.TypeSymlink,
--				// Note the leading slash
--				Linkname: "/../victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try writing victim/file
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim/file",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (symlink, symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "symlink",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try reading victim/hello (symlink, hardlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "hardlink",
--				Typeflag: tar.TypeLink,
--				Linkname: "loophole-victim/hello",
--				Mode:     0644,
--			},
--		},
--		{ // try removing victim directory (symlink)
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeSymlink,
--				Linkname: "../victim",
--				Mode:     0755,
--			},
--			{
--				Name:     "loophole-victim",
--				Typeflag: tar.TypeReg,
--				Mode:     0644,
--			},
--		},
--	} {
--		if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
--			t.Fatalf("i=%d. %v", i, err)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
-deleted file mode 100644
-index cedd46a..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/example_changes.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--// +build ignore
--
--// Simple tool to create an archive stream from an old and new directory
--//
--// By default it will stream the comparison of two temporary directories with junk files
--package main
--
--import (
--	"flag"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"path"
--
--	"github.com/Sirupsen/logrus"
--	"github.com/docker/docker/pkg/archive"
--)
--
--var (
--	flDebug  = flag.Bool("D", false, "debugging output")
--	flNewDir = flag.String("newdir", "", "")
--	flOldDir = flag.String("olddir", "", "")
--	log      = logrus.New()
--)
--
--func main() {
--	flag.Usage = func() {
--		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
--		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
--		flag.PrintDefaults()
--	}
--	flag.Parse()
--	log.Out = os.Stderr
--	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
--		logrus.SetLevel(logrus.DebugLevel)
--	}
--	var newDir, oldDir string
--
--	if len(*flNewDir) == 0 {
--		var err error
--		newDir, err = ioutil.TempDir("", "docker-test-newDir")
--		if err != nil {
--			log.Fatal(err)
--		}
--		defer os.RemoveAll(newDir)
--		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
--			log.Fatal(err)
--		}
--	} else {
--		newDir = *flNewDir
--	}
--
--	if len(*flOldDir) == 0 {
--		var err error
--		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
--		if err != nil {
--			log.Fatal(err)
--		}
--		defer os.RemoveAll(oldDir)
--	} else {
--		oldDir = *flOldDir
--	}
--
--	changes, err := archive.ChangesDirs(newDir, oldDir)
--	if err != nil {
--		log.Fatal(err)
--	}
--
--	a, err := archive.ExportChanges(newDir, changes)
--	if err != nil {
--		log.Fatal(err)
--	}
--	defer a.Close()
--
--	i, err := io.Copy(os.Stdout, a)
--	if err != nil && err != io.EOF {
--		log.Fatal(err)
--	}
--	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
--}
--
--func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
--	fileData := []byte("fooo")
--	for n := 0; n < numberOfFiles; n++ {
--		fileName := fmt.Sprintf("file-%d", n)
--		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
--			return 0, err
--		}
--		if makeLinks {
--			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
--				return 0, err
--			}
--		}
--	}
--	totalSize := numberOfFiles * len(fileData)
--	return totalSize, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/testdata/broken.tar
-deleted file mode 100644
-index 8f10ea6b87d3eb4fed572349dfe87695603b10a5..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 13824
-zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q
-zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W
-zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6
-zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{<b
-z!#}_wZg2ml(Q4ku|6Mr1+wI?RTv6*d{;+nE;z`#Uja&M}?d`v1Y;E1!|I at Yq)BV#u
-zZRWY}#v$rCeY4qI`6s}QfBo-2{9XIYzyF)p1BPLmD~7SzXl<ICn<)Hr)7Wg8EA+uN
-z8R)$ABkMn+x5B**AQz at AZr%p}iLBRZHCp{)@9C2rL(>)C_QVb?f0pB4xfD_C1pX2f
-z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O at R9>nS~7H1w&*U
-zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}&
-zT~f;Cd!ZOC&mX2<A4GTBffw2lvz)=7iZ}sRft- at sEaqQf{#!Tbmv}UNOYws<kQA>n
-zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&<Ew#qD`%}b*8rzPb79NW<eyv
-zG;;IZlOzdu>h(<JHK{@^4$^2KMnN<dnlF+%N4?x!yEL?>Hc32JVwt-Hrj<{`vG3V<
-zCk?#){6BW>!9 at +(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=t<t)1ZZ_TOx6-P`}2
-z;`c*=-pBr)?ceGOaC`f2H5+$&|NryGzs&BaRh;3_6;m!^cAhw6t_Fs^LF9zVAxgrI
-z^Ga)k-uwDmQU6~pk+ZSU)Y0=r|JP`?471rlpV-_&ANVfo{}p;)*~)!ag?rz?DiPkz
-zgJxA|FHHi^PMm at 5x-%+EOX|<)I2dwy?USjQTU3?9p5ukCpoV5{uPNBif7DBCe`rTT
-z3L{~Aebmu%diM|dDB4g^FC<WUE+WY)#i2bARGm(QPg6ky+gB4xV?o;SF&J}3l#mvO
-zq_r;P$mfxwX%g4-KI8gEj2)kg<UYjrC=ss`MH?m5{!Y>S5&i^O)@Me!3BwBQ`@=VE
-zIl)Fp0<ty)I0-2ZJn%KK`C23*!u80HT~G at An4m7!)liHaUkr(FKmIt at h^+N0?qpNP
-zb)^Q!&ZPh_rG6ipy|AHL8rt#X0RtX)B_K(l(2;XbDhqQqxncyFz|$~DdGE_KNW=q4
-z9tcjBfg=w6CCH4S_Qq9)$j4<aqV~oHAli25B(_TscWjdrvlWJv1i*BSZy`mO93>MG
-z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk at mb9|fb)1BuBGk_ptuvx%G~pq0Kb
-zb&?6Szj_3#ClOiI_3vu1e+<T{ZC$E0&A3#piCQy4)rxE8M*h5E#X6Q3R^dW|e6oJA
-z6!1m_957RkFrR=qLihlW;C)rguWARo&2;UHs^RULcpxU7Z<CbFRAm&!bB*ofK>mOX
-z9k`Og<ag2OH>2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds<t*Y>>
-zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT at 5hDK4~
-z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr at 4W<f_DdFo`NM}zf>|(?6Ye5$Oayf(LUxEb
-zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@
-zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p}
-zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg at lop12w4V<L4&zs_AUbl
-zRPBf%7W7VbALtP2MKUWDr*_mV-FCm1snL%p>Yz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ
-zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs
-zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$
-z?d`g5*7a at bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8(
-zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+ at sRbyfJf~*mY
-z#+u;OA2B at 66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldT<GMfrdYcObq_-4np!UTa
-z62ven4N*a=kcA193+2D7wCW<x^TokWivt|3bJDNrb;(Bkcf<u=>tcr!I|PQf({z2i
-zZs;`}x~m6ks)bXh at +($$(s>pJ`5X6~16{Ufo<hOp)jft at H;jVNPjA=*VXp3B-qzFy
-zZvcLM4LC!MqtjbcU%z2T{o0OY(SNq8vAei$!G_W)rF&L~wiC~4DDQ;3mL2w#n^vCS
-zJ2z3;0CO9x>JC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm
-zVNC%z6l$8Qz0L<amlq9R;(1{~=%L_}f at kJr!Pk!x=@7-loMt$bG^groHAiH&Jk=VR
-zR&_E%j4>iPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb=
-z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q
-zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI=<VrwhwM%)lUw7a2hl_E at e5S9hA!K2O
-z%kV<}$`HIQz@&<rp86Auh3`cqoVYj(gD*FjqM(QaSH9xFslX{4=lmc_q`SLelwDdl
-zSsXkv8&n*U&mS2N$x;^Ut{sAM@~pT`aa;}pvdd?9l=LD4t~s7hOCN!<d3rJ{q1gih
-zwI9wi3)?eLpB&F;*iLi8Y#G==o_-#!zTotaB##pVr$ieI75am2f4huK(YoK$DHl^z
-zdDpH5p=*JSrw4SK9aJ%OqftW+H5;glLlL=j+0u+{&BX8-j$3L6T<_s^70R$<UVj`6
-zQV++DyxoB$gzv!W*&Rhzj|9J at f8ceaGr~=q9TpbcbEoz!t1jjGWESE&D_6fnC5Q^*
-zLcqGf1~}Al0cQLed8kWyh0C@?3?`_0Q4tSw9{Bu+`{KT94+*zOAD4*U0aXt7p6*wa
-zch-beqmlCpM5a-Idau_$=^me+F%s#p$9ty-XWhfj5o$_1L0O)4jVKlgIA(gi9}bU>
-zd);1Ux&vAHF3sW+ZYtInM5`<aS2kpap%%ttM^5u)K4<qEeyJy-FEKBd<Qpm&;ukq0
-z)ZeoFsl2gyRpD(ME}sE?_WWCkX_byptn(#;tNuaYLBmiRZsl at Q$ge+em$zy$b=u$J
-za#O>7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv
-z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5
-z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc
-zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH
-zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr<A%tOZ-b&(!im(9mvgsY|#
-z4dI4!RPSSH<bvyV#mjh^W(ct>(N`)UtH54-56s#rGO&e at Q}~KNY<cNr8n?O<R`Hz0
-z1w9pD6oj1ckgHrbR at j#oTzZ0&5jRwv;^>PdQ94MZxA|gP9PSIqe at Ff$9bNNvws)xH
-zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du
-z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1 at T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL
-zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#!
-zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E><Z1 at 4Ne^joS+N^P7<8_aH#Xax|s6v
-zl(F~h^x){^%86D^{*Xp9`a61<=;bQtLMx|s5~maABA}<v%pTiQlwtB at +~23YC%vQP
-z#NR7t^F!|d+`Ie(6Fzv9ukzn$jFDW at z6LAQ<>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL
-oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD at m3s}`Yv5i3pOOat4?XSI`2YX_
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
-deleted file mode 100644
-index 3448569..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_linux.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package archive
--
--import (
--	"syscall"
--	"time"
--)
--
--func timeToTimespec(time time.Time) (ts syscall.Timespec) {
--	if time.IsZero() {
--		// Return UTIME_OMIT special value
--		ts.Sec = 0
--		ts.Nsec = ((1 << 30) - 2)
--		return
--	}
--	return syscall.NsecToTimespec(time.UnixNano())
--}
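The magic (1 << 30) - 2 nanosecond value is Linux's UTIME_OMIT sentinel: passing it to utimensat(2) leaves that particular timestamp untouched, which is how a zero time.Time is represented here. A small sketch of the same mapping (Unix-only, since it uses syscall.Timespec; the timestamps are arbitrary):

package main

import (
	"fmt"
	"syscall"
	"time"
)

// timeToTimespec mirrors the Linux variant above: a zero time maps to the
// UTIME_OMIT sentinel ((1 << 30) - 2 nanoseconds), which tells utimensat(2)
// to leave that timestamp unchanged.
func timeToTimespec(t time.Time) (ts syscall.Timespec) {
	if t.IsZero() {
		ts.Sec = 0
		ts.Nsec = (1 << 30) - 2
		return
	}
	return syscall.NsecToTimespec(t.UnixNano())
}

func main() {
	fmt.Println(timeToTimespec(time.Time{}))                // {0 1073741822}: omit this timestamp
	fmt.Println(timeToTimespec(time.Unix(1422466000, 500))) // a real timestamp
}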
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
-deleted file mode 100644
-index e85aac0..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/time_unsupported.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--// +build !linux
--
--package archive
--
--import (
--	"syscall"
--	"time"
--)
--
--func timeToTimespec(time time.Time) (ts syscall.Timespec) {
--	nsec := int64(0)
--	if !time.IsZero() {
--		nsec = time.UnixNano()
--	}
--	return syscall.NsecToTimespec(nsec)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
-deleted file mode 100644
-index 3624fe5..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
-+++ /dev/null
-@@ -1,166 +0,0 @@
--package archive
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"path/filepath"
--	"time"
--
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--)
--
--var testUntarFns = map[string]func(string, io.Reader) error{
--	"untar": func(dest string, r io.Reader) error {
--		return Untar(r, dest, nil)
--	},
--	"applylayer": func(dest string, r io.Reader) error {
--		return ApplyLayer(dest, ArchiveReader(r))
--	},
--}
--
--// testBreakout is a helper function that, within the provided `tmpdir` directory,
--// creates a `victim` folder with a generated `hello` file in it.
--// `untar` then extracts the tar file created from `headers` into a directory named `dest`.
--//
--// Here are the tested scenarios:
--// - removed `victim` folder				(write)
--// - removed files from `victim` folder			(write)
--// - new files in `victim` folder			(write)
--// - modified files in `victim` folder			(write)
--// - file in `dest` with same content as `victim/hello` (read)
--//
--// When using testBreakout make sure you cover one of the scenarios listed above.
--func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
--	tmpdir, err := ioutil.TempDir("", tmpdir)
--	if err != nil {
--		return err
--	}
--	defer os.RemoveAll(tmpdir)
--
--	dest := filepath.Join(tmpdir, "dest")
--	if err := os.Mkdir(dest, 0755); err != nil {
--		return err
--	}
--
--	victim := filepath.Join(tmpdir, "victim")
--	if err := os.Mkdir(victim, 0755); err != nil {
--		return err
--	}
--	hello := filepath.Join(victim, "hello")
--	helloData, err := time.Now().MarshalText()
--	if err != nil {
--		return err
--	}
--	if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
--		return err
--	}
--	helloStat, err := os.Stat(hello)
--	if err != nil {
--		return err
--	}
--
--	reader, writer := io.Pipe()
--	go func() {
--		t := tar.NewWriter(writer)
--		for _, hdr := range headers {
--			t.WriteHeader(hdr)
--		}
--		t.Close()
--	}()
--
--	untar := testUntarFns[untarFn]
--	if untar == nil {
--		return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
--	}
--	if err := untar(dest, reader); err != nil {
--		if _, ok := err.(breakoutError); !ok {
--			// If untar returns an error unrelated to an archive breakout,
--			// then consider this an unexpected error and abort.
--			return err
--		}
--		// Here, untar detected the breakout.
--		// Let's move on verifying that indeed there was no breakout.
--		fmt.Printf("breakoutError: %v\n", err)
--	}
--
--	// Check victim folder
--	f, err := os.Open(victim)
--	if err != nil {
--		// codepath taken if victim folder was removed
--		return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
--	}
--	defer f.Close()
--
--	// Check contents of victim folder
--	//
--	// We only ask for 2 entries from the victim folder: if all is well,
--	// we expect a single result, the `hello` file. Any second result cannot
--	// also be named `hello`, so we assume a new file was created in the victim folder.
--	// That is enough to detect an archive breakout.
--	names, err := f.Readdirnames(2)
--	if err != nil {
--		// codepath taken if victim is not a folder
--		return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
--	}
--	for _, name := range names {
--		if name != "hello" {
--			// codepath taken if new file was created in victim folder
--			return fmt.Errorf("archive breakout: new file %q", name)
--		}
--	}
--
--	// Check victim/hello
--	f, err = os.Open(hello)
--	if err != nil {
--		// codepath taken if read permissions were removed
--		return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
--	}
--	defer f.Close()
--	b, err := ioutil.ReadAll(f)
--	if err != nil {
--		return err
--	}
--	fi, err := f.Stat()
--	if err != nil {
--		return err
--	}
--	if helloStat.IsDir() != fi.IsDir() ||
--		// TODO: cannot check for fi.ModTime() change
--		helloStat.Mode() != fi.Mode() ||
--		helloStat.Size() != fi.Size() ||
--		!bytes.Equal(helloData, b) {
--		// codepath taken if hello has been modified
--		return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi)
--	}
--
--	// Check that nothing in dest/ has the same content as victim/hello.
--	// Since victim/hello was generated with time.Now(), it is safe to assume
--	// that any file whose content exactly matches victim/hello somehow managed
--	// to access victim/hello.
--	return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
--		if info.IsDir() {
--			if err != nil {
--				// skip directory if error
--				return filepath.SkipDir
--			}
--			// enter directory
--			return nil
--		}
--		if err != nil {
--			// skip file if error
--			return nil
--		}
--		b, err := ioutil.ReadFile(path)
--		if err != nil {
--			// Houston, we have a problem. Aborting (space)walk.
--			return err
--		}
--		if bytes.Equal(helloData, b) {
--			return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
--		}
--		return nil
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
-deleted file mode 100644
-index b8b6019..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go
-+++ /dev/null
-@@ -1,59 +0,0 @@
--package archive
--
--import (
--	"bytes"
--	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
--	"io/ioutil"
--)
--
--// Generate generates a new archive from the content provided
--// as input.
--//
--// `files` is a sequence of path/content pairs. A new file is
--// added to the archive for each pair.
--// If the last pair is incomplete, the file is created with an
--// empty content. For example:
--//
--// Generate("foo.txt", "hello world", "emptyfile")
--//
--// The above call will return an archive with 2 files:
--//  * ./foo.txt with content "hello world"
--//  * ./emptyfile with empty content
--//
--// FIXME: stream content instead of buffering
--// FIXME: specify permissions and other archive metadata
--func Generate(input ...string) (Archive, error) {
--	files := parseStringPairs(input...)
--	buf := new(bytes.Buffer)
--	tw := tar.NewWriter(buf)
--	for _, file := range files {
--		name, content := file[0], file[1]
--		hdr := &tar.Header{
--			Name: name,
--			Size: int64(len(content)),
--		}
--		if err := tw.WriteHeader(hdr); err != nil {
--			return nil, err
--		}
--		if _, err := tw.Write([]byte(content)); err != nil {
--			return nil, err
--		}
--	}
--	if err := tw.Close(); err != nil {
--		return nil, err
--	}
--	return ioutil.NopCloser(buf), nil
--}
--
--func parseStringPairs(input ...string) (output [][2]string) {
--	output = make([][2]string, 0, len(input)/2+1)
--	for i := 0; i < len(input); i += 2 {
--		var pair [2]string
--		pair[0] = input[i]
--		if i+1 < len(input) {
--			pair[1] = input[i+1]
--		}
--		output = append(output, pair)
--	}
--	return
--}
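
For reference, a minimal sketch of how the Generate helper above is typically consumed, assuming the vendored import path is still resolvable and that Archive behaves as an io.ReadCloser (as the ioutil.NopCloser return value suggests); the file names are illustrative:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Odd number of arguments: the trailing "notes.txt" gets empty content.
	a, err := archive.Generate("foo.txt", "hello world", "notes.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	tr := tar.NewReader(a)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		body, _ := ioutil.ReadAll(tr)
		fmt.Printf("%s: %q\n", hdr.Name, body)
	}
}
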
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
-deleted file mode 100644
-index 4e4a91b..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package fileutils
--
--import (
--	log "github.com/Sirupsen/logrus"
--	"path/filepath"
--)
--
--// Matches returns true if relFilePath matches any of the patterns
--func Matches(relFilePath string, patterns []string) (bool, error) {
--	for _, exclude := range patterns {
--		matched, err := filepath.Match(exclude, relFilePath)
--		if err != nil {
--			log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
--			return false, err
--		}
--		if matched {
--			if filepath.Clean(relFilePath) == "." {
--				log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
--				continue
--			}
--			log.Debugf("Skipping excluded path: %s", relFilePath)
--			return true, nil
--		}
--	}
--	return false, nil
--}
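
A hedged sketch of how Matches above is typically consulted when filtering paths against .dockerignore-style patterns; the pattern and path lists are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"*.tmp", "build/*"}
	for _, p := range []string{"main.go", "cache.tmp", "build/out.bin"} {
		excluded, err := fileutils.Matches(p, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-13s excluded=%v\n", p, excluded)
	}
}
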
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
-deleted file mode 100644
-index 22f46fb..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--package ioutils
--
--import (
--	"bytes"
--	"io"
--	"sync"
--)
--
--type readCloserWrapper struct {
--	io.Reader
--	closer func() error
--}
--
--func (r *readCloserWrapper) Close() error {
--	return r.closer()
--}
--
--func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
--	return &readCloserWrapper{
--		Reader: r,
--		closer: closer,
--	}
--}
--
--type readerErrWrapper struct {
--	reader io.Reader
--	closer func()
--}
--
--func (r *readerErrWrapper) Read(p []byte) (int, error) {
--	n, err := r.reader.Read(p)
--	if err != nil {
--		r.closer()
--	}
--	return n, err
--}
--
--func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
--	return &readerErrWrapper{
--		reader: r,
--		closer: closer,
--	}
--}
--
--type bufReader struct {
--	sync.Mutex
--	buf      *bytes.Buffer
--	reader   io.Reader
--	err      error
--	wait     sync.Cond
--	drainBuf []byte
--}
--
--func NewBufReader(r io.Reader) *bufReader {
--	reader := &bufReader{
--		buf:      &bytes.Buffer{},
--		drainBuf: make([]byte, 1024),
--		reader:   r,
--	}
--	reader.wait.L = &reader.Mutex
--	go reader.drain()
--	return reader
--}
--
--func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader {
--	reader := &bufReader{
--		buf:      buffer,
--		drainBuf: drainBuffer,
--		reader:   r,
--	}
--	reader.wait.L = &reader.Mutex
--	go reader.drain()
--	return reader
--}
--
--func (r *bufReader) drain() {
--	for {
--		n, err := r.reader.Read(r.drainBuf)
--		r.Lock()
--		if err != nil {
--			r.err = err
--		} else {
--			r.buf.Write(r.drainBuf[0:n])
--		}
--		r.wait.Signal()
--		r.Unlock()
--		if err != nil {
--			break
--		}
--	}
--}
--
--func (r *bufReader) Read(p []byte) (n int, err error) {
--	r.Lock()
--	defer r.Unlock()
--	for {
--		n, err = r.buf.Read(p)
--		if n > 0 {
--			return n, err
--		}
--		if r.err != nil {
--			return 0, r.err
--		}
--		r.wait.Wait()
--	}
--}
--
--func (r *bufReader) Close() error {
--	closer, ok := r.reader.(io.ReadCloser)
--	if !ok {
--		return nil
--	}
--	return closer.Close()
--}
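
As an illustration of NewReadCloserWrapper above, cleanup can be tied to Close; here a temporary file (purely illustrative) is deleted when the wrapped reader is closed:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	f, err := ioutil.TempFile("", "ioutils-demo")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.WriteString("hello"); err != nil {
		log.Fatal(err)
	}
	if _, err := f.Seek(0, 0); err != nil {
		log.Fatal(err)
	}

	// Closing the wrapper closes the file and removes it from disk.
	rc := ioutils.NewReadCloserWrapper(f, func() error {
		name := f.Name()
		f.Close()
		return os.Remove(name)
	})
	defer rc.Close()

	b, err := ioutil.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
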
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
-deleted file mode 100644
-index a7a2dad..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package ioutils
--
--import (
--	"bytes"
--	"io"
--	"io/ioutil"
--	"testing"
--)
--
--func TestBufReader(t *testing.T) {
--	reader, writer := io.Pipe()
--	bufreader := NewBufReader(reader)
--
--	// Write everything down to a Pipe
--	// Usually, a pipe should block but because of the buffered reader,
--	// the writes will go through
--	done := make(chan bool)
--	go func() {
--		writer.Write([]byte("hello world"))
--		writer.Close()
--		done <- true
--	}()
--
--	// Drain the reader *after* everything has been written, just to verify
--	// it is indeed buffering
--	<-done
--	output, err := ioutil.ReadAll(bufreader)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !bytes.Equal(output, []byte("hello world")) {
--		t.Error(string(output))
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
-deleted file mode 100644
-index c0b3608..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package ioutils
--
--import "io"
--
--type NopWriter struct{}
--
--func (*NopWriter) Write(buf []byte) (int, error) {
--	return len(buf), nil
--}
--
--type nopWriteCloser struct {
--	io.Writer
--}
--
--func (w *nopWriteCloser) Close() error { return nil }
--
--func NopWriteCloser(w io.Writer) io.WriteCloser {
--	return &nopWriteCloser{w}
--}
--
--type NopFlusher struct{}
--
--func (f *NopFlusher) Flush() {}
--
--type writeCloserWrapper struct {
--	io.Writer
--	closer func() error
--}
--
--func (r *writeCloserWrapper) Close() error {
--	return r.closer()
--}
--
--func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
--	return &writeCloserWrapper{
--		Writer: r,
--		closer: closer,
--	}
--}
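
A brief illustrative use of NopWriteCloser above, handing a plain bytes.Buffer to a function (hypothetical here) that insists on owning an io.WriteCloser:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

// emit is a hypothetical consumer that closes whatever it writes to.
func emit(wc io.WriteCloser, msg string) error {
	defer wc.Close()
	_, err := io.WriteString(wc, msg)
	return err
}

func main() {
	var buf bytes.Buffer
	if err := emit(ioutils.NopWriteCloser(&buf), "hello"); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println(buf.String()) // Close was a no-op, the buffer is intact
}
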
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
-deleted file mode 100644
-index 5338a0c..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go
-+++ /dev/null
-@@ -1,111 +0,0 @@
--// +build go1.3
--
--// Package pools provides a collection of pools which provide various
--// data types with buffers. These can be used to lower the number of
--// memory allocations and reuse buffers.
--//
--// New pools should be added to this package to allow them to be
--// shared across packages.
--//
--// Utility functions which operate on pools should be added to this
--// package to allow them to be reused.
--package pools
--
--import (
--	"bufio"
--	"io"
--	"sync"
--
--	"github.com/docker/docker/pkg/ioutils"
--)
--
--var (
--	// Pool which returns bufio.Reader with a 32K buffer
--	BufioReader32KPool *BufioReaderPool
--	// Pool which returns bufio.Writer with a 32K buffer
--	BufioWriter32KPool *BufioWriterPool
--)
--
--const buffer32K = 32 * 1024
--
--type BufioReaderPool struct {
--	pool sync.Pool
--}
--
--func init() {
--	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
--	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
--}
--
--// newBufioReaderPoolWithSize is unexported because new pools should be
--// added here to be shared where required.
--func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
--	pool := sync.Pool{
--		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
--	}
--	return &BufioReaderPool{pool: pool}
--}
--
--// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
--func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
--	buf := bufPool.pool.Get().(*bufio.Reader)
--	buf.Reset(r)
--	return buf
--}
--
--// Put puts the bufio.Reader back into the pool.
--func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
--	b.Reset(nil)
--	bufPool.pool.Put(b)
--}
--
--// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
--// into the pool and closes the reader if it's an io.ReadCloser.
--func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
--	return ioutils.NewReadCloserWrapper(r, func() error {
--		if readCloser, ok := r.(io.ReadCloser); ok {
--			readCloser.Close()
--		}
--		bufPool.Put(buf)
--		return nil
--	})
--}
--
--type BufioWriterPool struct {
--	pool sync.Pool
--}
--
--// newBufioWriterPoolWithSize is unexported because new pools should be
--// added here to be shared where required.
--func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
--	pool := sync.Pool{
--		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
--	}
--	return &BufioWriterPool{pool: pool}
--}
--
--// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
--func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
--	buf := bufPool.pool.Get().(*bufio.Writer)
--	buf.Reset(w)
--	return buf
--}
--
--// Put puts the bufio.Writer back into the pool.
--func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
--	b.Reset(nil)
--	bufPool.pool.Put(b)
--}
--
--// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
--// into the pool and closes the writer if it's an io.Writecloser.
--func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
--	return ioutils.NewWriteCloserWrapper(w, func() error {
--		buf.Flush()
--		if writeCloser, ok := w.(io.WriteCloser); ok {
--			writeCloser.Close()
--		}
--		bufPool.Put(buf)
--		return nil
--	})
--}
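
A minimal sketch of the Get/Put cycle for the shared 32K reader pool above (go1.3 build shown; the strings.Reader input is illustrative):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader("pooled read\n")

	// Borrow a 32K bufio.Reader from the shared pool...
	br := pools.BufioReader32KPool.Get(src)
	line, err := br.ReadString('\n')
	if err != nil && err != io.EOF {
		fmt.Println("read failed:", err)
	}
	fmt.Print(line)

	// ...and hand it back so the buffer can be reused.
	pools.BufioReader32KPool.Put(br)
}
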
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go
-deleted file mode 100644
-index 48903c2..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go
-+++ /dev/null
-@@ -1,73 +0,0 @@
--// +build !go1.3
--
--package pools
--
--import (
--	"bufio"
--	"io"
--
--	"github.com/docker/docker/pkg/ioutils"
--)
--
--var (
--	BufioReader32KPool *BufioReaderPool
--	BufioWriter32KPool *BufioWriterPool
--)
--
--const buffer32K = 32 * 1024
--
--type BufioReaderPool struct {
--	size int
--}
--
--func init() {
--	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
--	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
--}
--
--func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
--	return &BufioReaderPool{size: size}
--}
--
--func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
--	return bufio.NewReaderSize(r, bufPool.size)
--}
--
--func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
--	b.Reset(nil)
--}
--
--func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
--	return ioutils.NewReadCloserWrapper(r, func() error {
--		if readCloser, ok := r.(io.ReadCloser); ok {
--			return readCloser.Close()
--		}
--		return nil
--	})
--}
--
--type BufioWriterPool struct {
--	size int
--}
--
--func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
--	return &BufioWriterPool{size: size}
--}
--
--func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
--	return bufio.NewWriterSize(w, bufPool.size)
--}
--
--func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
--	b.Reset(nil)
--}
--
--func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
--	return ioutils.NewWriteCloserWrapper(w, func() error {
--		buf.Flush()
--		if writeCloser, ok := w.(io.WriteCloser); ok {
--			return writeCloser.Close()
--		}
--		return nil
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
-deleted file mode 100644
-index dd52b90..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/promise/promise.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package promise
--
--// Go is a basic promise implementation: it wraps a function call in a goroutine,
--// and returns a channel which will later return the function's return value.
--func Go(f func() error) chan error {
--	ch := make(chan error, 1)
--	go func() {
--		ch <- f()
--	}()
--	return ch
--}
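
An illustrative caller for Go above: kick work off in the background and collect its error later from the returned channel (the sleep merely stands in for real work):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/docker/docker/pkg/promise"
)

func main() {
	errCh := promise.Go(func() error {
		time.Sleep(10 * time.Millisecond) // stand-in for real work
		return errors.New("work failed")
	})

	// Do other things here, then block for the result.
	if err := <-errCh; err != nil {
		fmt.Println("background task:", err)
	}
}
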
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS
-deleted file mode 100644
-index 68a97d2..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/MAINTAINERS
-+++ /dev/null
-@@ -1,2 +0,0 @@
--Michael Crosby <michael at crosbymichael.com> (@crosbymichael)
--Victor Vieux <vieux at docker.com> (@vieux)
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
-deleted file mode 100644
-index 6304518..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/errors.go
-+++ /dev/null
-@@ -1,9 +0,0 @@
--package system
--
--import (
--	"errors"
--)
--
--var (
--	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
--)
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
-deleted file mode 100644
-index 9ef82d5..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--// +build !windows
--
--package system
--
--import (
--	"syscall"
--)
--
--func Lstat(path string) (*Stat, error) {
--	s := &syscall.Stat_t{}
--	err := syscall.Lstat(path, s)
--	if err != nil {
--		return nil, err
--	}
--	return fromStatT(s)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
-deleted file mode 100644
-index 9bab4d7..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--package system
--
--import (
--	"os"
--	"testing"
--)
--
--func TestLstat(t *testing.T) {
--	file, invalid, _, dir := prepareFiles(t)
--	defer os.RemoveAll(dir)
--
--	statFile, err := Lstat(file)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if statFile == nil {
--		t.Fatal("returned empty stat for existing file")
--	}
--
--	statInvalid, err := Lstat(invalid)
--	if err == nil {
--		t.Fatal("did not return error for non-existing file")
--	}
--	if statInvalid != nil {
--		t.Fatal("returned non-nil stat for non-existing file")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
-deleted file mode 100644
-index 213a7c7..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_windows.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// +build windows
--
--package system
--
--func Lstat(path string) (*Stat, error) {
--	// should not be called on cli code path
--	return nil, ErrNotSupportedPlatform
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
-deleted file mode 100644
-index 3b6e947..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--package system
--
--// MemInfo contains memory statistics of the host system.
--type MemInfo struct {
--	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
--	// kernel binary code).
--	MemTotal int64
--
--	// Amount of free memory.
--	MemFree int64
--
--	// Total amount of swap space available.
--	SwapTotal int64
--
--	// Amount of swap space that is currently unused.
--	SwapFree int64
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
-deleted file mode 100644
-index b7de3ff..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go
-+++ /dev/null
-@@ -1,67 +0,0 @@
--package system
--
--import (
--	"bufio"
--	"errors"
--	"io"
--	"os"
--	"strconv"
--	"strings"
--
--	"github.com/docker/docker/pkg/units"
--)
--
--var (
--	ErrMalformed = errors.New("malformed file")
--)
--
--// Retrieve memory statistics of the host system and parse them into a MemInfo
--// type.
--func ReadMemInfo() (*MemInfo, error) {
--	file, err := os.Open("/proc/meminfo")
--	if err != nil {
--		return nil, err
--	}
--	defer file.Close()
--	return parseMemInfo(file)
--}
--
--func parseMemInfo(reader io.Reader) (*MemInfo, error) {
--	meminfo := &MemInfo{}
--	scanner := bufio.NewScanner(reader)
--	for scanner.Scan() {
--		// Expected format: ["MemTotal:", "1234", "kB"]
--		parts := strings.Fields(scanner.Text())
--
--		// Sanity checks: Skip malformed entries.
--		if len(parts) < 3 || parts[2] != "kB" {
--			continue
--		}
--
--		// Convert to bytes.
--		size, err := strconv.Atoi(parts[1])
--		if err != nil {
--			continue
--		}
--		bytes := int64(size) * units.KiB
--
--		switch parts[0] {
--		case "MemTotal:":
--			meminfo.MemTotal = bytes
--		case "MemFree:":
--			meminfo.MemFree = bytes
--		case "SwapTotal:":
--			meminfo.SwapTotal = bytes
--		case "SwapFree:":
--			meminfo.SwapFree = bytes
--		}
--
--	}
--
--	// Handle errors that may have occurred during the reading of the file.
--	if err := scanner.Err(); err != nil {
--		return nil, err
--	}
--
--	return meminfo, nil
--}
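
A trivial caller sketch for ReadMemInfo above (Linux only); BytesSize from the units package, which appears later in this patch, formats the raw byte counts:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/units"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MemTotal:", units.BytesSize(float64(mi.MemTotal)))
	fmt.Println("SwapFree:", units.BytesSize(float64(mi.SwapFree)))
}
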
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
-deleted file mode 100644
-index 377405e..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go
-+++ /dev/null
-@@ -1,37 +0,0 @@
--package system
--
--import (
--	"strings"
--	"testing"
--
--	"github.com/docker/docker/pkg/units"
--)
--
--func TestMemInfo(t *testing.T) {
--	const input = `
--	MemTotal:      1 kB
--	MemFree:       2 kB
--	SwapTotal:     3 kB
--	SwapFree:      4 kB
--	Malformed1:
--	Malformed2:    1
--	Malformed3:    2 MB
--	Malformed4:    X kB
--	`
--	meminfo, err := parseMemInfo(strings.NewReader(input))
--	if err != nil {
--		t.Fatal(err)
--	}
--	if meminfo.MemTotal != 1*units.KiB {
--		t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
--	}
--	if meminfo.MemFree != 2*units.KiB {
--		t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
--	}
--	if meminfo.SwapTotal != 3*units.KiB {
--		t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
--	}
--	if meminfo.SwapFree != 4*units.KiB {
--		t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
-deleted file mode 100644
-index 63b8b16..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_unsupported.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--// +build !linux
--
--package system
--
--func ReadMemInfo() (*MemInfo, error) {
--	return nil, ErrNotSupportedPlatform
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
-deleted file mode 100644
-index 06f9c6a..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--// +build !windows
--
--package system
--
--import (
--	"syscall"
--)
--
--func Mknod(path string, mode uint32, dev int) error {
--	return syscall.Mknod(path, mode, dev)
--}
--
--// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
--// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
--// then the top 12 bits of the minor
--func Mkdev(major int64, minor int64) uint32 {
--	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
--}
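
To make the bit layout described above concrete, a short worked example (major 8, minor 1 is the conventional pairing for /dev/sda1):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// minor&0xff | (major&0xfff)<<8 | (minor>>8)<<20, per the comment above.
	dev := system.Mkdev(8, 1)
	fmt.Printf("Mkdev(8, 1) = %#x\n", dev) // 0x801, the classic encoding for /dev/sda1
}
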
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
-deleted file mode 100644
-index b4020c1..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod_windows.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--// +build windows
--
--package system
--
--func Mknod(path string, mode uint32, dev int) error {
--	// should not be called on cli code path
--	return ErrNotSupportedPlatform
--}
--
--func Mkdev(major int64, minor int64) uint32 {
--	panic("Mkdev not implemented on windows, should not be called on cli code")
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
-deleted file mode 100644
-index 5d47494..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go
-+++ /dev/null
-@@ -1,42 +0,0 @@
--package system
--
--import (
--	"syscall"
--)
--
--type Stat struct {
--	mode uint32
--	uid  uint32
--	gid  uint32
--	rdev uint64
--	size int64
--	mtim syscall.Timespec
--}
--
--func (s Stat) Mode() uint32 {
--	return s.mode
--}
--
--func (s Stat) Uid() uint32 {
--	return s.uid
--}
--
--func (s Stat) Gid() uint32 {
--	return s.gid
--}
--
--func (s Stat) Rdev() uint64 {
--	return s.rdev
--}
--
--func (s Stat) Size() int64 {
--	return s.size
--}
--
--func (s Stat) Mtim() syscall.Timespec {
--	return s.mtim
--}
--
--func (s Stat) GetLastModification() syscall.Timespec {
--	return s.Mtim()
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
-deleted file mode 100644
-index 47cebef..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go
-+++ /dev/null
-@@ -1,14 +0,0 @@
--package system
--
--import (
--	"syscall"
--)
--
--func fromStatT(s *syscall.Stat_t) (*Stat, error) {
--	return &Stat{size: s.Size,
--		mode: s.Mode,
--		uid:  s.Uid,
--		gid:  s.Gid,
--		rdev: s.Rdev,
--		mtim: s.Mtim}, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
-deleted file mode 100644
-index abcc8ea..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--package system
--
--import (
--	"os"
--	"syscall"
--	"testing"
--)
--
--func TestFromStatT(t *testing.T) {
--	file, _, _, dir := prepareFiles(t)
--	defer os.RemoveAll(dir)
--
--	stat := &syscall.Stat_t{}
--	err := syscall.Lstat(file, stat)
--
--	s, err := fromStatT(stat)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if stat.Mode != s.Mode() {
--		t.Fatal("got invalid mode")
--	}
--	if stat.Uid != s.Uid() {
--		t.Fatal("got invalid uid")
--	}
--	if stat.Gid != s.Gid() {
--		t.Fatal("got invalid gid")
--	}
--	if stat.Rdev != s.Rdev() {
--		t.Fatal("got invalid rdev")
--	}
--	if stat.Mtim != s.Mtim() {
--		t.Fatal("got invalid mtim")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
-deleted file mode 100644
-index c4d53e6..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--// +build !linux,!windows
--
--package system
--
--import (
--	"syscall"
--)
--
--func fromStatT(s *syscall.Stat_t) (*Stat, error) {
--	return &Stat{size: s.Size,
--		mode: uint32(s.Mode),
--		uid:  s.Uid,
--		gid:  s.Gid,
--		rdev: uint64(s.Rdev),
--		mtim: s.Mtimespec}, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
-deleted file mode 100644
-index 584e894..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_windows.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--// +build windows
--
--package system
--
--import (
--	"errors"
--	"syscall"
--)
--
--func fromStatT(s *syscall.Win32FileAttributeData) (*Stat, error) {
--	return nil, errors.New("fromStatT should not be called on windows path")
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
-deleted file mode 100644
-index fddbecd..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--// +build !windows
--
--package system
--
--import (
--	"syscall"
--)
--
--func Umask(newmask int) (oldmask int, err error) {
--	return syscall.Umask(newmask), nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
-deleted file mode 100644
-index 3be563f..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/umask_windows.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// +build windows
--
--package system
--
--func Umask(newmask int) (oldmask int, err error) {
--	// should not be called on cli code path
--	return 0, ErrNotSupportedPlatform
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
-deleted file mode 100644
-index 4c6002f..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_darwin.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package system
--
--import "syscall"
--
--func LUtimesNano(path string, ts []syscall.Timespec) error {
--	return ErrNotSupportedPlatform
--}
--
--func UtimesNano(path string, ts []syscall.Timespec) error {
--	return syscall.UtimesNano(path, ts)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
-deleted file mode 100644
-index ceaa044..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_freebsd.go
-+++ /dev/null
-@@ -1,24 +0,0 @@
--package system
--
--import (
--	"syscall"
--	"unsafe"
--)
--
--func LUtimesNano(path string, ts []syscall.Timespec) error {
--	var _path *byte
--	_path, err := syscall.BytePtrFromString(path)
--	if err != nil {
--		return err
--	}
--
--	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
--		return err
--	}
--
--	return nil
--}
--
--func UtimesNano(path string, ts []syscall.Timespec) error {
--	return syscall.UtimesNano(path, ts)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
-deleted file mode 100644
-index 8f90298..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_linux.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--package system
--
--import (
--	"syscall"
--	"unsafe"
--)
--
--func LUtimesNano(path string, ts []syscall.Timespec) error {
--	// These are not currently available in syscall
--	AT_FDCWD := -100
--	AT_SYMLINK_NOFOLLOW := 0x100
--
--	var _path *byte
--	_path, err := syscall.BytePtrFromString(path)
--	if err != nil {
--		return err
--	}
--
--	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
--		return err
--	}
--
--	return nil
--}
--
--func UtimesNano(path string, ts []syscall.Timespec) error {
--	return syscall.UtimesNano(path, ts)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
-deleted file mode 100644
-index 1dea47c..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go
-+++ /dev/null
-@@ -1,65 +0,0 @@
--package system
--
--import (
--	"io/ioutil"
--	"os"
--	"path/filepath"
--	"syscall"
--	"testing"
--)
--
--func prepareFiles(t *testing.T) (string, string, string, string) {
--	dir, err := ioutil.TempDir("", "docker-system-test")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	file := filepath.Join(dir, "exist")
--	if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
--		t.Fatal(err)
--	}
--
--	invalid := filepath.Join(dir, "doesnt-exist")
--
--	symlink := filepath.Join(dir, "symlink")
--	if err := os.Symlink(file, symlink); err != nil {
--		t.Fatal(err)
--	}
--
--	return file, invalid, symlink, dir
--}
--
--func TestLUtimesNano(t *testing.T) {
--	file, invalid, symlink, dir := prepareFiles(t)
--	defer os.RemoveAll(dir)
--
--	before, err := os.Stat(file)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	ts := []syscall.Timespec{{0, 0}, {0, 0}}
--	if err := LUtimesNano(symlink, ts); err != nil {
--		t.Fatal(err)
--	}
--
--	symlinkInfo, err := os.Lstat(symlink)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
--		t.Fatal("The modification time of the symlink should be different")
--	}
--
--	fileInfo, err := os.Stat(file)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
--		t.Fatal("The modification time of the file should be same")
--	}
--
--	if err := LUtimesNano(invalid, ts); err == nil {
--		t.Fatal("Doesn't return an error on a non-existing file")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
-deleted file mode 100644
-index adf2734..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_unsupported.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--// +build !linux,!freebsd,!darwin
--
--package system
--
--import "syscall"
--
--func LUtimesNano(path string, ts []syscall.Timespec) error {
--	return ErrNotSupportedPlatform
--}
--
--func UtimesNano(path string, ts []syscall.Timespec) error {
--	return ErrNotSupportedPlatform
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
-deleted file mode 100644
-index 00edb20..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_linux.go
-+++ /dev/null
-@@ -1,59 +0,0 @@
--package system
--
--import (
--	"syscall"
--	"unsafe"
--)
--
--// Returns a nil slice and nil error if the xattr is not set
--func Lgetxattr(path string, attr string) ([]byte, error) {
--	pathBytes, err := syscall.BytePtrFromString(path)
--	if err != nil {
--		return nil, err
--	}
--	attrBytes, err := syscall.BytePtrFromString(attr)
--	if err != nil {
--		return nil, err
--	}
--
--	dest := make([]byte, 128)
--	destBytes := unsafe.Pointer(&dest[0])
--	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
--	if errno == syscall.ENODATA {
--		return nil, nil
--	}
--	if errno == syscall.ERANGE {
--		dest = make([]byte, sz)
--		destBytes := unsafe.Pointer(&dest[0])
--		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
--	}
--	if errno != 0 {
--		return nil, errno
--	}
--
--	return dest[:sz], nil
--}
--
--var _zero uintptr
--
--func Lsetxattr(path string, attr string, data []byte, flags int) error {
--	pathBytes, err := syscall.BytePtrFromString(path)
--	if err != nil {
--		return err
--	}
--	attrBytes, err := syscall.BytePtrFromString(attr)
--	if err != nil {
--		return err
--	}
--	var dataBytes unsafe.Pointer
--	if len(data) > 0 {
--		dataBytes = unsafe.Pointer(&data[0])
--	} else {
--		dataBytes = unsafe.Pointer(&_zero)
--	}
--	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
--	if errno != 0 {
--		return errno
--	}
--	return nil
--}
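
A hedged round-trip sketch for the xattr helpers above (Linux only; the attribute name is illustrative and the filesystem must allow user.* attributes):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "xattr-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	if err := system.Lsetxattr(f.Name(), "user.demo", []byte("value"), 0); err != nil {
		log.Fatal(err) // e.g. a tmpfs mounted without user xattr support
	}

	val, err := system.Lgetxattr(f.Name(), "user.demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user.demo = %q\n", val)

	// A missing attribute reports (nil, nil) rather than an error.
	missing, err := system.Lgetxattr(f.Name(), "user.missing")
	fmt.Println(missing == nil, err == nil) // true true
}
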
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
-deleted file mode 100644
-index 0060c16..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/xattrs_unsupported.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--// +build !linux
--
--package system
--
--func Lgetxattr(path string, attr string) ([]byte, error) {
--	return nil, ErrNotSupportedPlatform
--}
--
--func Lsetxattr(path string, attr string, data []byte, flags int) error {
--	return ErrNotSupportedPlatform
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS
-deleted file mode 100644
-index 96abeae..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/MAINTAINERS
-+++ /dev/null
-@@ -1,2 +0,0 @@
--Victor Vieux <vieux at docker.com> (@vieux)
--Jessie Frazelle <jess at docker.com> (@jfrazelle)
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
-deleted file mode 100644
-index cd33121..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration.go
-+++ /dev/null
-@@ -1,31 +0,0 @@
--package units
--
--import (
--	"fmt"
--	"time"
--)
--
--// HumanDuration returns a human-readable approximation of a duration
--// (e.g. "About a minute", "4 hours", etc.)
--func HumanDuration(d time.Duration) string {
--	if seconds := int(d.Seconds()); seconds < 1 {
--		return "Less than a second"
--	} else if seconds < 60 {
--		return fmt.Sprintf("%d seconds", seconds)
--	} else if minutes := int(d.Minutes()); minutes == 1 {
--		return "About a minute"
--	} else if minutes < 60 {
--		return fmt.Sprintf("%d minutes", minutes)
--	} else if hours := int(d.Hours()); hours == 1 {
--		return "About an hour"
--	} else if hours < 48 {
--		return fmt.Sprintf("%d hours", hours)
--	} else if hours < 24*7*2 {
--		return fmt.Sprintf("%d days", hours/24)
--	} else if hours < 24*30*3 {
--		return fmt.Sprintf("%d weeks", hours/24/7)
--	} else if hours < 24*365*2 {
--		return fmt.Sprintf("%d months", hours/24/30)
--	}
--	return fmt.Sprintf("%f years", d.Hours()/24/365)
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
-deleted file mode 100644
-index a229474..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/duration_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--package units
--
--import (
--	"testing"
--	"time"
--)
--
--func TestHumanDuration(t *testing.T) {
--	// Useful duration abstractions
--	day := 24 * time.Hour
--	week := 7 * day
--	month := 30 * day
--	year := 365 * day
--
--	assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond))
--	assertEquals(t, "47 seconds", HumanDuration(47*time.Second))
--	assertEquals(t, "About a minute", HumanDuration(1*time.Minute))
--	assertEquals(t, "3 minutes", HumanDuration(3*time.Minute))
--	assertEquals(t, "35 minutes", HumanDuration(35*time.Minute))
--	assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
--	assertEquals(t, "About an hour", HumanDuration(1*time.Hour))
--	assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute))
--	assertEquals(t, "3 hours", HumanDuration(3*time.Hour))
--	assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute))
--	assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
--	assertEquals(t, "24 hours", HumanDuration(24*time.Hour))
--	assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour))
--	assertEquals(t, "2 days", HumanDuration(2*day))
--	assertEquals(t, "7 days", HumanDuration(7*day))
--	assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour))
--	assertEquals(t, "2 weeks", HumanDuration(2*week))
--	assertEquals(t, "2 weeks", HumanDuration(2*week+4*day))
--	assertEquals(t, "3 weeks", HumanDuration(3*week))
--	assertEquals(t, "4 weeks", HumanDuration(4*week))
--	assertEquals(t, "4 weeks", HumanDuration(4*week+3*day))
--	assertEquals(t, "4 weeks", HumanDuration(1*month))
--	assertEquals(t, "6 weeks", HumanDuration(1*month+2*week))
--	assertEquals(t, "8 weeks", HumanDuration(2*month))
--	assertEquals(t, "3 months", HumanDuration(3*month+1*week))
--	assertEquals(t, "5 months", HumanDuration(5*month+2*week))
--	assertEquals(t, "13 months", HumanDuration(13*month))
--	assertEquals(t, "23 months", HumanDuration(23*month))
--	assertEquals(t, "24 months", HumanDuration(24*month))
--	assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
--	assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
-deleted file mode 100644
-index 264f388..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--package units
--
--import (
--	"fmt"
--	"regexp"
--	"strconv"
--	"strings"
--)
--
--// See: http://en.wikipedia.org/wiki/Binary_prefix
--const (
--	// Decimal
--
--	KB = 1000
--	MB = 1000 * KB
--	GB = 1000 * MB
--	TB = 1000 * GB
--	PB = 1000 * TB
--
--	// Binary
--
--	KiB = 1024
--	MiB = 1024 * KiB
--	GiB = 1024 * MiB
--	TiB = 1024 * GiB
--	PiB = 1024 * TiB
--)
--
--type unitMap map[string]int64
--
--var (
--	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
--	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
--	sizeRegex  = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
--)
--
--var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
--var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
--
--// HumanSize returns a human-readable approximation of a size
--// using SI standard (eg. "44kB", "17MB")
--func HumanSize(size int64) string {
--	return intToString(float64(size), 1000.0, decimapAbbrs)
--}
--
--func BytesSize(size float64) string {
--	return intToString(size, 1024.0, binaryAbbrs)
--}
--
--func intToString(size, unit float64, _map []string) string {
--	i := 0
--	for size >= unit {
--		size = size / unit
--		i++
--	}
--	return fmt.Sprintf("%.4g %s", size, _map[i])
--}
--
--// FromHumanSize returns an integer from a human-readable specification of a
--// size using SI standard (eg. "44kB", "17MB")
--func FromHumanSize(size string) (int64, error) {
--	return parseSize(size, decimalMap)
--}
--
--// RAMInBytes parses a human-readable string representing an amount of RAM
--// in bytes, kibibytes, mebibytes, gibibytes, tebibytes, or pebibytes, and
--// returns the number of bytes, or -1 if the string is unparseable.
--// Units are case-insensitive, and the 'b' suffix is optional.
--func RAMInBytes(size string) (int64, error) {
--	return parseSize(size, binaryMap)
--}
--
--// Parses the human-readable size string into the amount it represents
--func parseSize(sizeStr string, uMap unitMap) (int64, error) {
--	matches := sizeRegex.FindStringSubmatch(sizeStr)
--	if len(matches) != 3 {
--		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
--	}
--
--	size, err := strconv.ParseInt(matches[1], 10, 0)
--	if err != nil {
--		return -1, err
--	}
--
--	unitPrefix := strings.ToLower(matches[2])
--	if mul, ok := uMap[unitPrefix]; ok {
--		size *= mul
--	}
--
--	return size, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
-deleted file mode 100644
-index 3e410b0..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
-+++ /dev/null
-@@ -1,108 +0,0 @@
--package units
--
--import (
--	"reflect"
--	"runtime"
--	"strings"
--	"testing"
--)
--
--func TestBytesSize(t *testing.T) {
--	assertEquals(t, "1 KiB", BytesSize(1024))
--	assertEquals(t, "1 MiB", BytesSize(1024*1024))
--	assertEquals(t, "1 MiB", BytesSize(1048576))
--	assertEquals(t, "2 MiB", BytesSize(2*MiB))
--	assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB))
--	assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB))
--	assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB))
--}
--
--func TestHumanSize(t *testing.T) {
--	assertEquals(t, "1 kB", HumanSize(1000))
--	assertEquals(t, "1.024 kB", HumanSize(1024))
--	assertEquals(t, "1 MB", HumanSize(1000000))
--	assertEquals(t, "1.049 MB", HumanSize(1048576))
--	assertEquals(t, "2 MB", HumanSize(2*MB))
--	assertEquals(t, "3.42 GB", HumanSize(int64(float64(3.42*GB))))
--	assertEquals(t, "5.372 TB", HumanSize(int64(float64(5.372*TB))))
--	assertEquals(t, "2.22 PB", HumanSize(int64(float64(2.22*PB))))
--}
--
--func TestFromHumanSize(t *testing.T) {
--	assertSuccessEquals(t, 32, FromHumanSize, "32")
--	assertSuccessEquals(t, 32, FromHumanSize, "32b")
--	assertSuccessEquals(t, 32, FromHumanSize, "32B")
--	assertSuccessEquals(t, 32*KB, FromHumanSize, "32k")
--	assertSuccessEquals(t, 32*KB, FromHumanSize, "32K")
--	assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb")
--	assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb")
--	assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb")
--	assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb")
--	assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb")
--	assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb")
--
--	assertError(t, FromHumanSize, "")
--	assertError(t, FromHumanSize, "hello")
--	assertError(t, FromHumanSize, "-32")
--	assertError(t, FromHumanSize, "32.3")
--	assertError(t, FromHumanSize, " 32 ")
--	assertError(t, FromHumanSize, "32.3Kb")
--	assertError(t, FromHumanSize, "32 mb")
--	assertError(t, FromHumanSize, "32m b")
--	assertError(t, FromHumanSize, "32bm")
--}
--
--func TestRAMInBytes(t *testing.T) {
--	assertSuccessEquals(t, 32, RAMInBytes, "32")
--	assertSuccessEquals(t, 32, RAMInBytes, "32b")
--	assertSuccessEquals(t, 32, RAMInBytes, "32B")
--	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k")
--	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K")
--	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb")
--	assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb")
--	assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb")
--	assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb")
--	assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb")
--	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb")
--	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB")
--	assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P")
--
--	assertError(t, RAMInBytes, "")
--	assertError(t, RAMInBytes, "hello")
--	assertError(t, RAMInBytes, "-32")
--	assertError(t, RAMInBytes, "32.3")
--	assertError(t, RAMInBytes, " 32 ")
--	assertError(t, RAMInBytes, "32.3Kb")
--	assertError(t, RAMInBytes, "32 mb")
--	assertError(t, RAMInBytes, "32m b")
--	assertError(t, RAMInBytes, "32bm")
--}
--
--func assertEquals(t *testing.T, expected, actual interface{}) {
--	if expected != actual {
--		t.Errorf("Expected '%v' but got '%v'", expected, actual)
--	}
--}
--
--// parseFn matches the signature of the parse functions above, serving as a testing abstraction
--type parseFn func(string) (int64, error)
--
--// Define 'String()' for pretty-print
--func (fn parseFn) String() string {
--	fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
--	return fnName[strings.LastIndex(fnName, ".")+1:]
--}
--
--func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
--	res, err := fn(arg)
--	if err != nil || res != expected {
--		t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
--	}
--}
--
--func assertError(t *testing.T, fn parseFn, arg string) {
--	res, err := fn(arg)
--	if err == nil && res != -1 {
--		t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
-deleted file mode 100644
-index e363aa7..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
-+++ /dev/null
-@@ -1,305 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package tar implements access to tar archives.
--// It aims to cover most of the variations, including those produced
--// by GNU and BSD tars.
--//
--// References:
--//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
--//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
--//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
--package tar
--
--import (
--	"bytes"
--	"errors"
--	"fmt"
--	"os"
--	"path"
--	"time"
--)
--
--const (
--	blockSize = 512
--
--	// Types
--	TypeReg           = '0'    // regular file
--	TypeRegA          = '\x00' // regular file
--	TypeLink          = '1'    // hard link
--	TypeSymlink       = '2'    // symbolic link
--	TypeChar          = '3'    // character device node
--	TypeBlock         = '4'    // block device node
--	TypeDir           = '5'    // directory
--	TypeFifo          = '6'    // fifo node
--	TypeCont          = '7'    // reserved
--	TypeXHeader       = 'x'    // extended header
--	TypeXGlobalHeader = 'g'    // global extended header
--	TypeGNULongName   = 'L'    // Next file has a long name
--	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
--	TypeGNUSparse     = 'S'    // sparse file
--)
--
--// A Header represents a single header in a tar archive.
--// Some fields may not be populated.
--type Header struct {
--	Name       string    // name of header file entry
--	Mode       int64     // permission and mode bits
--	Uid        int       // user id of owner
--	Gid        int       // group id of owner
--	Size       int64     // length in bytes
--	ModTime    time.Time // modified time
--	Typeflag   byte      // type of header entry
--	Linkname   string    // target name of link
--	Uname      string    // user name of owner
--	Gname      string    // group name of owner
--	Devmajor   int64     // major number of character or block device
--	Devminor   int64     // minor number of character or block device
--	AccessTime time.Time // access time
--	ChangeTime time.Time // status change time
--	Xattrs     map[string]string
--}
--
--// File name constants from the tar spec.
--const (
--	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
--	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
--)
--
--// FileInfo returns an os.FileInfo for the Header.
--func (h *Header) FileInfo() os.FileInfo {
--	return headerFileInfo{h}
--}
--
--// headerFileInfo implements os.FileInfo.
--type headerFileInfo struct {
--	h *Header
--}
--
--func (fi headerFileInfo) Size() int64        { return fi.h.Size }
--func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
--func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
--func (fi headerFileInfo) Sys() interface{}   { return fi.h }
--
--// Name returns the base name of the file.
--func (fi headerFileInfo) Name() string {
--	if fi.IsDir() {
--		return path.Base(path.Clean(fi.h.Name))
--	}
--	return path.Base(fi.h.Name)
--}
--
--// Mode returns the permission and mode bits for the headerFileInfo.
--func (fi headerFileInfo) Mode() (mode os.FileMode) {
--	// Set file permission bits.
--	mode = os.FileMode(fi.h.Mode).Perm()
--
--	// Set setuid, setgid and sticky bits.
--	if fi.h.Mode&c_ISUID != 0 {
--		// setuid
--		mode |= os.ModeSetuid
--	}
--	if fi.h.Mode&c_ISGID != 0 {
--		// setgid
--		mode |= os.ModeSetgid
--	}
--	if fi.h.Mode&c_ISVTX != 0 {
--		// sticky
--		mode |= os.ModeSticky
--	}
--
--	// Set file mode bits.
--	// clear perm, setuid, setgid and sticky bits.
--	m := os.FileMode(fi.h.Mode) &^ 07777
--	if m == c_ISDIR {
--		// directory
--		mode |= os.ModeDir
--	}
--	if m == c_ISFIFO {
--		// named pipe (FIFO)
--		mode |= os.ModeNamedPipe
--	}
--	if m == c_ISLNK {
--		// symbolic link
--		mode |= os.ModeSymlink
--	}
--	if m == c_ISBLK {
--		// device file
--		mode |= os.ModeDevice
--	}
--	if m == c_ISCHR {
--		// Unix character device
--		mode |= os.ModeDevice
--		mode |= os.ModeCharDevice
--	}
--	if m == c_ISSOCK {
--		// Unix domain socket
--		mode |= os.ModeSocket
--	}
--
--	switch fi.h.Typeflag {
--	case TypeLink, TypeSymlink:
--		// hard link, symbolic link
--		mode |= os.ModeSymlink
--	case TypeChar:
--		// character device node
--		mode |= os.ModeDevice
--		mode |= os.ModeCharDevice
--	case TypeBlock:
--		// block device node
--		mode |= os.ModeDevice
--	case TypeDir:
--		// directory
--		mode |= os.ModeDir
--	case TypeFifo:
--		// fifo node
--		mode |= os.ModeNamedPipe
--	}
--
--	return mode
--}
--
--// sysStat, if non-nil, populates h from system-dependent fields of fi.
--var sysStat func(fi os.FileInfo, h *Header) error
--
--// Mode constants from the tar spec.
--const (
--	c_ISUID  = 04000   // Set uid
--	c_ISGID  = 02000   // Set gid
--	c_ISVTX  = 01000   // Save text (sticky bit)
--	c_ISDIR  = 040000  // Directory
--	c_ISFIFO = 010000  // FIFO
--	c_ISREG  = 0100000 // Regular file
--	c_ISLNK  = 0120000 // Symbolic link
--	c_ISBLK  = 060000  // Block special file
--	c_ISCHR  = 020000  // Character special file
--	c_ISSOCK = 0140000 // Socket
--)
--
--// Keywords for the PAX Extended Header
--const (
--	paxAtime    = "atime"
--	paxCharset  = "charset"
--	paxComment  = "comment"
--	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
--	paxGid      = "gid"
--	paxGname    = "gname"
--	paxLinkpath = "linkpath"
--	paxMtime    = "mtime"
--	paxPath     = "path"
--	paxSize     = "size"
--	paxUid      = "uid"
--	paxUname    = "uname"
--	paxXattr    = "SCHILY.xattr."
--	paxNone     = ""
--)
--
--// FileInfoHeader creates a partially-populated Header from fi.
--// If fi describes a symlink, FileInfoHeader records link as the link target.
--// If fi describes a directory, a slash is appended to the name.
--// Because os.FileInfo's Name method returns only the base name of
--// the file it describes, it may be necessary to modify the Name field
--// of the returned header to provide the full path name of the file.
--func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
--	if fi == nil {
--		return nil, errors.New("tar: FileInfo is nil")
--	}
--	fm := fi.Mode()
--	h := &Header{
--		Name:    fi.Name(),
--		ModTime: fi.ModTime(),
--		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
--	}
--	switch {
--	case fm.IsRegular():
--		h.Mode |= c_ISREG
--		h.Typeflag = TypeReg
--		h.Size = fi.Size()
--	case fi.IsDir():
--		h.Typeflag = TypeDir
--		h.Mode |= c_ISDIR
--		h.Name += "/"
--	case fm&os.ModeSymlink != 0:
--		h.Typeflag = TypeSymlink
--		h.Mode |= c_ISLNK
--		h.Linkname = link
--	case fm&os.ModeDevice != 0:
--		if fm&os.ModeCharDevice != 0 {
--			h.Mode |= c_ISCHR
--			h.Typeflag = TypeChar
--		} else {
--			h.Mode |= c_ISBLK
--			h.Typeflag = TypeBlock
--		}
--	case fm&os.ModeNamedPipe != 0:
--		h.Typeflag = TypeFifo
--		h.Mode |= c_ISFIFO
--	case fm&os.ModeSocket != 0:
--		h.Mode |= c_ISSOCK
--	default:
--		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
--	}
--	if fm&os.ModeSetuid != 0 {
--		h.Mode |= c_ISUID
--	}
--	if fm&os.ModeSetgid != 0 {
--		h.Mode |= c_ISGID
--	}
--	if fm&os.ModeSticky != 0 {
--		h.Mode |= c_ISVTX
--	}
--	if sysStat != nil {
--		return h, sysStat(fi, h)
--	}
--	return h, nil
--}
--
--var zeroBlock = make([]byte, blockSize)
--
--// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
--// We compute and return both.
--func checksum(header []byte) (unsigned int64, signed int64) {
--	for i := 0; i < len(header); i++ {
--		if i == 148 {
--			// The chksum field (header[148:156]) is special: it should be treated as space bytes.
--			unsigned += ' ' * 8
--			signed += ' ' * 8
--			i += 7
--			continue
--		}
--		unsigned += int64(header[i])
--		signed += int64(int8(header[i]))
--	}
--	return
--}
--
--type slicer []byte
--
--func (sp *slicer) next(n int) (b []byte) {
--	s := *sp
--	b, *sp = s[0:n], s[n:]
--	return
--}
--
--func isASCII(s string) bool {
--	for _, c := range s {
--		if c >= 0x80 {
--			return false
--		}
--	}
--	return true
--}
--
--func toASCII(s string) string {
--	if isASCII(s) {
--		return s
--	}
--	var buf bytes.Buffer
--	for _, c := range s {
--		if c < 0x80 {
--			buf.WriteByte(byte(c))
--		}
--	}
--	return buf.String()
--}
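
The FileInfoHeader documentation above points out that only the base name of the file is recorded; a short sketch of the usual fixup, written against the identical API in the standard library archive/tar (which this vendored copy mirrors):

package main

import (
	"archive/tar"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	const path = "/etc/hostname" // illustrative; any readable file will do
	fi, err := os.Lstat(path)
	if err != nil {
		log.Fatal(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Fatal(err)
	}
	// Only the base name is populated; restore the full path before writing.
	hdr.Name = filepath.ToSlash(path)
	fmt.Printf("name=%s mode=%o size=%d\n", hdr.Name, hdr.Mode, hdr.Size)
}
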
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
-deleted file mode 100644
-index 351eaa0..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
-+++ /dev/null
-@@ -1,79 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar_test
--
--import (
--	"archive/tar"
--	"bytes"
--	"fmt"
--	"io"
--	"log"
--	"os"
--)
--
--func Example() {
--	// Create a buffer to write our archive to.
--	buf := new(bytes.Buffer)
--
--	// Create a new tar archive.
--	tw := tar.NewWriter(buf)
--
--	// Add some files to the archive.
--	var files = []struct {
--		Name, Body string
--	}{
--		{"readme.txt", "This archive contains some text files."},
--		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
--		{"todo.txt", "Get animal handling licence."},
--	}
--	for _, file := range files {
--		hdr := &tar.Header{
--			Name: file.Name,
--			Size: int64(len(file.Body)),
--		}
--		if err := tw.WriteHeader(hdr); err != nil {
--			log.Fatalln(err)
--		}
--		if _, err := tw.Write([]byte(file.Body)); err != nil {
--			log.Fatalln(err)
--		}
--	}
--	// Make sure to check the error on Close.
--	if err := tw.Close(); err != nil {
--		log.Fatalln(err)
--	}
--
--	// Open the tar archive for reading.
--	r := bytes.NewReader(buf.Bytes())
--	tr := tar.NewReader(r)
--
--	// Iterate through the files in the archive.
--	for {
--		hdr, err := tr.Next()
--		if err == io.EOF {
--			// end of tar archive
--			break
--		}
--		if err != nil {
--			log.Fatalln(err)
--		}
--		fmt.Printf("Contents of %s:\n", hdr.Name)
--		if _, err := io.Copy(os.Stdout, tr); err != nil {
--			log.Fatalln(err)
--		}
--		fmt.Println()
--	}
--
--	// Output:
--	// Contents of readme.txt:
--	// This archive contains some text files.
--	// Contents of gopher.txt:
--	// Gopher names:
--	// George
--	// Geoffrey
--	// Gonzo
--	// Contents of todo.txt:
--	// Get animal handling licence.
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
-deleted file mode 100644
-index a27559d..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
-+++ /dev/null
-@@ -1,820 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar
--
--// TODO(dsymonds):
--//   - pax extensions
--
--import (
--	"bytes"
--	"errors"
--	"io"
--	"io/ioutil"
--	"os"
--	"strconv"
--	"strings"
--	"time"
--)
--
--var (
--	ErrHeader = errors.New("archive/tar: invalid tar header")
--)
--
--const maxNanoSecondIntSize = 9
--
--// A Reader provides sequential access to the contents of a tar archive.
--// A tar archive consists of a sequence of files.
--// The Next method advances to the next file in the archive (including the first),
--// and then it can be treated as an io.Reader to access the file's data.
--type Reader struct {
--	r       io.Reader
--	err     error
--	pad     int64           // amount of padding (ignored) after current file entry
--	curr    numBytesReader  // reader for current file entry
--	hdrBuff [blockSize]byte // buffer to use in readHeader
--}
--
--// A numBytesReader is an io.Reader with a numBytes method, returning the number
--// of bytes remaining in the underlying encoded data.
--type numBytesReader interface {
--	io.Reader
--	numBytes() int64
--}
--
--// A regFileReader is a numBytesReader for reading file data from a tar archive.
--type regFileReader struct {
--	r  io.Reader // underlying reader
--	nb int64     // number of unread bytes for current file entry
--}
--
--// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
--type sparseFileReader struct {
--	rfr *regFileReader // reads the sparse-encoded file data
--	sp  []sparseEntry  // the sparse map for the file
--	pos int64          // keeps track of file position
--	tot int64          // total size of the file
--}
--
--// Keywords for GNU sparse files in a PAX extended header
--const (
--	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
--	paxGNUSparseOffset    = "GNU.sparse.offset"
--	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
--	paxGNUSparseMap       = "GNU.sparse.map"
--	paxGNUSparseName      = "GNU.sparse.name"
--	paxGNUSparseMajor     = "GNU.sparse.major"
--	paxGNUSparseMinor     = "GNU.sparse.minor"
--	paxGNUSparseSize      = "GNU.sparse.size"
--	paxGNUSparseRealSize  = "GNU.sparse.realsize"
--)
--
--// Keywords for old GNU sparse headers
--const (
--	oldGNUSparseMainHeaderOffset               = 386
--	oldGNUSparseMainHeaderIsExtendedOffset     = 482
--	oldGNUSparseMainHeaderNumEntries           = 4
--	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
--	oldGNUSparseExtendedHeaderNumEntries       = 21
--	oldGNUSparseOffsetSize                     = 12
--	oldGNUSparseNumBytesSize                   = 12
--)
--
--// NewReader creates a new Reader reading from r.
--func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
--
--// Next advances to the next entry in the tar archive.
--func (tr *Reader) Next() (*Header, error) {
--	var hdr *Header
--	if tr.err == nil {
--		tr.skipUnread()
--	}
--	if tr.err != nil {
--		return hdr, tr.err
--	}
--	hdr = tr.readHeader()
--	if hdr == nil {
--		return hdr, tr.err
--	}
--	// Check for PAX/GNU header.
--	switch hdr.Typeflag {
--	case TypeXHeader:
--		//  PAX extended header
--		headers, err := parsePAX(tr)
--		if err != nil {
--			return nil, err
--		}
--		// We actually read the whole file,
--		// but this skips alignment padding
--		tr.skipUnread()
--		hdr = tr.readHeader()
--		mergePAX(hdr, headers)
--
--		// Check for a PAX format sparse file
--		sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
--		if err != nil {
--			tr.err = err
--			return nil, err
--		}
--		if sp != nil {
--			// Current file is a PAX format GNU sparse file.
--			// Set the current file reader to a sparse file reader.
--			tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
--		}
--		return hdr, nil
--	case TypeGNULongName:
--		// We have a GNU long name header. Its contents are the real file name.
--		realname, err := ioutil.ReadAll(tr)
--		if err != nil {
--			return nil, err
--		}
--		hdr, err := tr.Next()
--		hdr.Name = cString(realname)
--		return hdr, err
--	case TypeGNULongLink:
--		// We have a GNU long link header.
--		realname, err := ioutil.ReadAll(tr)
--		if err != nil {
--			return nil, err
--		}
--		hdr, err := tr.Next()
--		hdr.Linkname = cString(realname)
--		return hdr, err
--	}
--	return hdr, tr.err
--}
--
--// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
--// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
--// be treated as a regular file.
--func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
--	var sparseFormat string
--
--	// Check for sparse format indicators
--	major, majorOk := headers[paxGNUSparseMajor]
--	minor, minorOk := headers[paxGNUSparseMinor]
--	sparseName, sparseNameOk := headers[paxGNUSparseName]
--	_, sparseMapOk := headers[paxGNUSparseMap]
--	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
--	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
--
--	// Identify which, if any, sparse format applies from which PAX headers are set
--	if majorOk && minorOk {
--		sparseFormat = major + "." + minor
--	} else if sparseNameOk && sparseMapOk {
--		sparseFormat = "0.1"
--	} else if sparseSizeOk {
--		sparseFormat = "0.0"
--	} else {
--		// Not a PAX format GNU sparse file.
--		return nil, nil
--	}
--
--	// Check for unknown sparse format
--	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
--		return nil, nil
--	}
--
--	// Update hdr from GNU sparse PAX headers
--	if sparseNameOk {
--		hdr.Name = sparseName
--	}
--	if sparseSizeOk {
--		realSize, err := strconv.ParseInt(sparseSize, 10, 0)
--		if err != nil {
--			return nil, ErrHeader
--		}
--		hdr.Size = realSize
--	} else if sparseRealSizeOk {
--		realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
--		if err != nil {
--			return nil, ErrHeader
--		}
--		hdr.Size = realSize
--	}
--
--	// Set up the sparse map, according to the particular sparse format in use
--	var sp []sparseEntry
--	var err error
--	switch sparseFormat {
--	case "0.0", "0.1":
--		sp, err = readGNUSparseMap0x1(headers)
--	case "1.0":
--		sp, err = readGNUSparseMap1x0(tr.curr)
--	}
--	return sp, err
--}
--
--// mergePAX merges well known headers according to PAX standard.
--// In general headers with the same name as those found
--// in the header struct overwrite those found in the header
--// struct with higher precision or longer values. Esp. useful
--// for name and linkname fields.
--func mergePAX(hdr *Header, headers map[string]string) error {
--	for k, v := range headers {
--		switch k {
--		case paxPath:
--			hdr.Name = v
--		case paxLinkpath:
--			hdr.Linkname = v
--		case paxGname:
--			hdr.Gname = v
--		case paxUname:
--			hdr.Uname = v
--		case paxUid:
--			uid, err := strconv.ParseInt(v, 10, 0)
--			if err != nil {
--				return err
--			}
--			hdr.Uid = int(uid)
--		case paxGid:
--			gid, err := strconv.ParseInt(v, 10, 0)
--			if err != nil {
--				return err
--			}
--			hdr.Gid = int(gid)
--		case paxAtime:
--			t, err := parsePAXTime(v)
--			if err != nil {
--				return err
--			}
--			hdr.AccessTime = t
--		case paxMtime:
--			t, err := parsePAXTime(v)
--			if err != nil {
--				return err
--			}
--			hdr.ModTime = t
--		case paxCtime:
--			t, err := parsePAXTime(v)
--			if err != nil {
--				return err
--			}
--			hdr.ChangeTime = t
--		case paxSize:
--			size, err := strconv.ParseInt(v, 10, 0)
--			if err != nil {
--				return err
--			}
--			hdr.Size = int64(size)
--		default:
--			if strings.HasPrefix(k, paxXattr) {
--				if hdr.Xattrs == nil {
--					hdr.Xattrs = make(map[string]string)
--				}
--				hdr.Xattrs[k[len(paxXattr):]] = v
--			}
--		}
--	}
--	return nil
--}
--
--// parsePAXTime takes a string of the form %d.%d as described in
--// the PAX specification.
--func parsePAXTime(t string) (time.Time, error) {
--	buf := []byte(t)
--	pos := bytes.IndexByte(buf, '.')
--	var seconds, nanoseconds int64
--	var err error
--	if pos == -1 {
--		seconds, err = strconv.ParseInt(t, 10, 0)
--		if err != nil {
--			return time.Time{}, err
--		}
--	} else {
--		seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
--		if err != nil {
--			return time.Time{}, err
--		}
--		nano_buf := string(buf[pos+1:])
--		// Pad as needed before converting to a decimal.
--		// For example .030 -> .030000000 -> 30000000 nanoseconds
--		if len(nano_buf) < maxNanoSecondIntSize {
--			// Right pad
--			nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
--		} else if len(nano_buf) > maxNanoSecondIntSize {
--			// Right truncate
--			nano_buf = nano_buf[:maxNanoSecondIntSize]
--		}
--		nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
--		if err != nil {
--			return time.Time{}, err
--		}
--	}
--	ts := time.Unix(seconds, nanoseconds)
--	return ts, nil
--}
--
--// parsePAX parses PAX headers.
--// If an extended header (type 'x') is invalid, ErrHeader is returned
--func parsePAX(r io.Reader) (map[string]string, error) {
--	buf, err := ioutil.ReadAll(r)
--	if err != nil {
--		return nil, err
--	}
--
--	// For GNU PAX sparse format 0.0 support.
--	// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
--	var sparseMap bytes.Buffer
--
--	headers := make(map[string]string)
--	// Each record is constructed as
--	//     "%d %s=%s\n", length, keyword, value
--	for len(buf) > 0 {
--		// or the header was empty to start with.
--		var sp int
--		// The size field ends at the first space.
--		sp = bytes.IndexByte(buf, ' ')
--		if sp == -1 {
--			return nil, ErrHeader
--		}
--		// Parse the first token as a decimal integer.
--		n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
--		if err != nil {
--			return nil, ErrHeader
--		}
--		// Extract everything between the decimal and the n -1 on the
--		// beginning to eat the ' ', -1 on the end to skip the newline.
--		var record []byte
--		record, buf = buf[sp+1:n-1], buf[n:]
--		// The first equals is guaranteed to mark the end of the key.
--		// Everything else is value.
--		eq := bytes.IndexByte(record, '=')
--		if eq == -1 {
--			return nil, ErrHeader
--		}
--		key, value := record[:eq], record[eq+1:]
--
--		keyStr := string(key)
--		if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
--			// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
--			sparseMap.Write(value)
--			sparseMap.Write([]byte{','})
--		} else {
--			// Normal key. Set the value in the headers map.
--			headers[keyStr] = string(value)
--		}
--	}
--	if sparseMap.Len() != 0 {
--		// Add sparse info to headers, chopping off the extra comma
--		sparseMap.Truncate(sparseMap.Len() - 1)
--		headers[paxGNUSparseMap] = sparseMap.String()
--	}
--	return headers, nil
--}
--
--// cString parses bytes as a NUL-terminated C-style string.
--// If a NUL byte is not found then the whole slice is returned as a string.
--func cString(b []byte) string {
--	n := 0
--	for n < len(b) && b[n] != 0 {
--		n++
--	}
--	return string(b[0:n])
--}
--
--func (tr *Reader) octal(b []byte) int64 {
--	// Check for binary format first.
--	if len(b) > 0 && b[0]&0x80 != 0 {
--		var x int64
--		for i, c := range b {
--			if i == 0 {
--				c &= 0x7f // ignore sign bit in first byte
--			}
--			x = x<<8 | int64(c)
--		}
--		return x
--	}
--
--	// Because unused fields are filled with NULs, we need
--	// to skip leading NULs. Fields may also be padded with
--	// spaces or NULs.
--	// So we remove leading and trailing NULs and spaces to
--	// be sure.
--	b = bytes.Trim(b, " \x00")
--
--	if len(b) == 0 {
--		return 0
--	}
--	x, err := strconv.ParseUint(cString(b), 8, 64)
--	if err != nil {
--		tr.err = err
--	}
--	return int64(x)
--}
--
--// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
--func (tr *Reader) skipUnread() {
--	nr := tr.numBytes() + tr.pad // number of bytes to skip
--	tr.curr, tr.pad = nil, 0
--	if sr, ok := tr.r.(io.Seeker); ok {
--		if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
--			return
--		}
--	}
--	_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
--}
--
--func (tr *Reader) verifyChecksum(header []byte) bool {
--	if tr.err != nil {
--		return false
--	}
--
--	given := tr.octal(header[148:156])
--	unsigned, signed := checksum(header)
--	return given == unsigned || given == signed
--}
--
--func (tr *Reader) readHeader() *Header {
--	header := tr.hdrBuff[:]
--	copy(header, zeroBlock)
--
--	if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
--		return nil
--	}
--
--	// Two blocks of zero bytes marks the end of the archive.
--	if bytes.Equal(header, zeroBlock[0:blockSize]) {
--		if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
--			return nil
--		}
--		if bytes.Equal(header, zeroBlock[0:blockSize]) {
--			tr.err = io.EOF
--		} else {
--			tr.err = ErrHeader // zero block and then non-zero block
--		}
--		return nil
--	}
--
--	if !tr.verifyChecksum(header) {
--		tr.err = ErrHeader
--		return nil
--	}
--
--	// Unpack
--	hdr := new(Header)
--	s := slicer(header)
--
--	hdr.Name = cString(s.next(100))
--	hdr.Mode = tr.octal(s.next(8))
--	hdr.Uid = int(tr.octal(s.next(8)))
--	hdr.Gid = int(tr.octal(s.next(8)))
--	hdr.Size = tr.octal(s.next(12))
--	hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
--	s.next(8) // chksum
--	hdr.Typeflag = s.next(1)[0]
--	hdr.Linkname = cString(s.next(100))
--
--	// The remainder of the header depends on the value of magic.
--	// The original (v7) version of tar had no explicit magic field,
--	// so its magic bytes, like the rest of the block, are NULs.
--	magic := string(s.next(8)) // contains version field as well.
--	var format string
--	switch {
--	case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
--		if string(header[508:512]) == "tar\x00" {
--			format = "star"
--		} else {
--			format = "posix"
--		}
--	case magic == "ustar  \x00": // old GNU tar
--		format = "gnu"
--	}
--
--	switch format {
--	case "posix", "gnu", "star":
--		hdr.Uname = cString(s.next(32))
--		hdr.Gname = cString(s.next(32))
--		devmajor := s.next(8)
--		devminor := s.next(8)
--		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
--			hdr.Devmajor = tr.octal(devmajor)
--			hdr.Devminor = tr.octal(devminor)
--		}
--		var prefix string
--		switch format {
--		case "posix", "gnu":
--			prefix = cString(s.next(155))
--		case "star":
--			prefix = cString(s.next(131))
--			hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
--			hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
--		}
--		if len(prefix) > 0 {
--			hdr.Name = prefix + "/" + hdr.Name
--		}
--	}
--
--	if tr.err != nil {
--		tr.err = ErrHeader
--		return nil
--	}
--
--	// Maximum value of hdr.Size is 64 GB (12 octal digits),
--	// so there's no risk of int64 overflowing.
--	nb := int64(hdr.Size)
--	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
--
--	// Set the current file reader.
--	tr.curr = &regFileReader{r: tr.r, nb: nb}
--
--	// Check for old GNU sparse format entry.
--	if hdr.Typeflag == TypeGNUSparse {
--		// Get the real size of the file.
--		hdr.Size = tr.octal(header[483:495])
--
--		// Read the sparse map.
--		sp := tr.readOldGNUSparseMap(header)
--		if tr.err != nil {
--			return nil
--		}
--		// Current file is a GNU sparse file. Update the current file reader.
--		tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
--	}
--
--	return hdr
--}
--
--// A sparseEntry holds a single entry in a sparse file's sparse map.
--// A sparse entry indicates the offset and size in a sparse file of a
--// block of data.
--type sparseEntry struct {
--	offset   int64
--	numBytes int64
--}
--
--// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
--// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
--// then one or more extension headers are used to store the rest of the sparse map.
--func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
--	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
--	spCap := oldGNUSparseMainHeaderNumEntries
--	if isExtended {
--		spCap += oldGNUSparseExtendedHeaderNumEntries
--	}
--	sp := make([]sparseEntry, 0, spCap)
--	s := slicer(header[oldGNUSparseMainHeaderOffset:])
--
--	// Read the four entries from the main tar header
--	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
--		offset := tr.octal(s.next(oldGNUSparseOffsetSize))
--		numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
--		if tr.err != nil {
--			tr.err = ErrHeader
--			return nil
--		}
--		if offset == 0 && numBytes == 0 {
--			break
--		}
--		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
--	}
--
--	for isExtended {
--		// There are more entries. Read an extension header and parse its entries.
--		sparseHeader := make([]byte, blockSize)
--		if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
--			return nil
--		}
--		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
--		s = slicer(sparseHeader)
--		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
--			offset := tr.octal(s.next(oldGNUSparseOffsetSize))
--			numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
--			if tr.err != nil {
--				tr.err = ErrHeader
--				return nil
--			}
--			if offset == 0 && numBytes == 0 {
--				break
--			}
--			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
--		}
--	}
--	return sp
--}
--
--// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
--// The sparse map is stored just before the file data and padded out to the nearest block boundary.
--func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
--	buf := make([]byte, 2*blockSize)
--	sparseHeader := buf[:blockSize]
--
--	// readDecimal is a helper function to read a decimal integer from the sparse map
--	// while making sure to read from the file in blocks of size blockSize
--	readDecimal := func() (int64, error) {
--		// Look for newline
--		nl := bytes.IndexByte(sparseHeader, '\n')
--		if nl == -1 {
--			if len(sparseHeader) >= blockSize {
--				// This is an error
--				return 0, ErrHeader
--			}
--			oldLen := len(sparseHeader)
--			newLen := oldLen + blockSize
--			if cap(sparseHeader) < newLen {
--				// There's more header, but we need to make room for the next block
--				copy(buf, sparseHeader)
--				sparseHeader = buf[:newLen]
--			} else {
--				// There's more header, and we can just reslice
--				sparseHeader = sparseHeader[:newLen]
--			}
--
--			// Now that sparseHeader is large enough, read next block
--			if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
--				return 0, err
--			}
--
--			// Look for a newline in the new data
--			nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
--			if nl == -1 {
--				// This is an error
--				return 0, ErrHeader
--			}
--			nl += oldLen // We want the position from the beginning
--		}
--		// Now that we've found a newline, read a number
--		n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
--		if err != nil {
--			return 0, ErrHeader
--		}
--
--		// Update sparseHeader to consume this number
--		sparseHeader = sparseHeader[nl+1:]
--		return n, nil
--	}
--
--	// Read the first block
--	if _, err := io.ReadFull(r, sparseHeader); err != nil {
--		return nil, err
--	}
--
--	// The first line contains the number of entries
--	numEntries, err := readDecimal()
--	if err != nil {
--		return nil, err
--	}
--
--	// Read all the entries
--	sp := make([]sparseEntry, 0, numEntries)
--	for i := int64(0); i < numEntries; i++ {
--		// Read the offset
--		offset, err := readDecimal()
--		if err != nil {
--			return nil, err
--		}
--		// Read numBytes
--		numBytes, err := readDecimal()
--		if err != nil {
--			return nil, err
--		}
--
--		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
--	}
--
--	return sp, nil
--}
--
--// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
--// The sparse map is stored in the PAX headers.
--func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
--	// Get number of entries
--	numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
--	if !ok {
--		return nil, ErrHeader
--	}
--	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
--	if err != nil {
--		return nil, ErrHeader
--	}
--
--	sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
--
--	// There should be two numbers in sparseMap for each entry
--	if int64(len(sparseMap)) != 2*numEntries {
--		return nil, ErrHeader
--	}
--
--	// Loop through the entries in the sparse map
--	sp := make([]sparseEntry, 0, numEntries)
--	for i := int64(0); i < numEntries; i++ {
--		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
--		if err != nil {
--			return nil, ErrHeader
--		}
--		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
--		if err != nil {
--			return nil, ErrHeader
--		}
--		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
--	}
--
--	return sp, nil
--}
--
--// numBytes returns the number of bytes left to read in the current file's entry
--// in the tar archive, or 0 if there is no current file.
--func (tr *Reader) numBytes() int64 {
--	if tr.curr == nil {
--		// No current file, so no bytes
--		return 0
--	}
--	return tr.curr.numBytes()
--}
--
--// Read reads from the current entry in the tar archive.
--// It returns 0, io.EOF when it reaches the end of that entry,
--// until Next is called to advance to the next entry.
--func (tr *Reader) Read(b []byte) (n int, err error) {
--	if tr.curr == nil {
--		return 0, io.EOF
--	}
--	n, err = tr.curr.Read(b)
--	if err != nil && err != io.EOF {
--		tr.err = err
--	}
--	return
--}
--
--func (rfr *regFileReader) Read(b []byte) (n int, err error) {
--	if rfr.nb == 0 {
--		// file consumed
--		return 0, io.EOF
--	}
--	if int64(len(b)) > rfr.nb {
--		b = b[0:rfr.nb]
--	}
--	n, err = rfr.r.Read(b)
--	rfr.nb -= int64(n)
--
--	if err == io.EOF && rfr.nb > 0 {
--		err = io.ErrUnexpectedEOF
--	}
--	return
--}
--
--// numBytes returns the number of bytes left to read in the file's data in the tar archive.
--func (rfr *regFileReader) numBytes() int64 {
--	return rfr.nb
--}
--
--// readHole reads a sparse file hole ending at offset toOffset
--func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
--	n64 := toOffset - sfr.pos
--	if n64 > int64(len(b)) {
--		n64 = int64(len(b))
--	}
--	n := int(n64)
--	for i := 0; i < n; i++ {
--		b[i] = 0
--	}
--	sfr.pos += n64
--	return n
--}
--
--// Read reads the sparse file data in expanded form.
--func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
--	if len(sfr.sp) == 0 {
--		// No more data fragments to read from.
--		if sfr.pos < sfr.tot {
--			// We're in the last hole
--			n = sfr.readHole(b, sfr.tot)
--			return
--		}
--		// Otherwise, we're at the end of the file
--		return 0, io.EOF
--	}
--	if sfr.pos < sfr.sp[0].offset {
--		// We're in a hole
--		n = sfr.readHole(b, sfr.sp[0].offset)
--		return
--	}
--
--	// We're not in a hole, so we'll read from the next data fragment
--	posInFragment := sfr.pos - sfr.sp[0].offset
--	bytesLeft := sfr.sp[0].numBytes - posInFragment
--	if int64(len(b)) > bytesLeft {
--		b = b[0:bytesLeft]
--	}
--
--	n, err = sfr.rfr.Read(b)
--	sfr.pos += int64(n)
--
--	if int64(n) == bytesLeft {
--		// We're done with this fragment
--		sfr.sp = sfr.sp[1:]
--	}
--
--	if err == io.EOF && sfr.pos < sfr.tot {
--		// We reached the end of the last fragment's data, but there's a final hole
--		err = nil
--	}
--	return
--}
--
--// numBytes returns the number of bytes left to read in the sparse file's
--// sparse-encoded data in the tar archive.
--func (sfr *sparseFileReader) numBytes() int64 {
--	return sfr.rfr.nb
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
-deleted file mode 100644
-index 9601ffe..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
-+++ /dev/null
-@@ -1,743 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar
--
--import (
--	"bytes"
--	"crypto/md5"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"reflect"
--	"strings"
--	"testing"
--	"time"
--)
--
--type untarTest struct {
--	file    string
--	headers []*Header
--	cksums  []string
--}
--
--var gnuTarTest = &untarTest{
--	file: "testdata/gnu.tar",
--	headers: []*Header{
--		{
--			Name:     "small.txt",
--			Mode:     0640,
--			Uid:      73025,
--			Gid:      5000,
--			Size:     5,
--			ModTime:  time.Unix(1244428340, 0),
--			Typeflag: '0',
--			Uname:    "dsymonds",
--			Gname:    "eng",
--		},
--		{
--			Name:     "small2.txt",
--			Mode:     0640,
--			Uid:      73025,
--			Gid:      5000,
--			Size:     11,
--			ModTime:  time.Unix(1244436044, 0),
--			Typeflag: '0',
--			Uname:    "dsymonds",
--			Gname:    "eng",
--		},
--	},
--	cksums: []string{
--		"e38b27eaccb4391bdec553a7f3ae6b2f",
--		"c65bd2e50a56a2138bf1716f2fd56fe9",
--	},
--}
--
--var sparseTarTest = &untarTest{
--	file: "testdata/sparse-formats.tar",
--	headers: []*Header{
--		{
--			Name:     "sparse-gnu",
--			Mode:     420,
--			Uid:      1000,
--			Gid:      1000,
--			Size:     200,
--			ModTime:  time.Unix(1392395740, 0),
--			Typeflag: 0x53,
--			Linkname: "",
--			Uname:    "david",
--			Gname:    "david",
--			Devmajor: 0,
--			Devminor: 0,
--		},
--		{
--			Name:     "sparse-posix-0.0",
--			Mode:     420,
--			Uid:      1000,
--			Gid:      1000,
--			Size:     200,
--			ModTime:  time.Unix(1392342187, 0),
--			Typeflag: 0x30,
--			Linkname: "",
--			Uname:    "david",
--			Gname:    "david",
--			Devmajor: 0,
--			Devminor: 0,
--		},
--		{
--			Name:     "sparse-posix-0.1",
--			Mode:     420,
--			Uid:      1000,
--			Gid:      1000,
--			Size:     200,
--			ModTime:  time.Unix(1392340456, 0),
--			Typeflag: 0x30,
--			Linkname: "",
--			Uname:    "david",
--			Gname:    "david",
--			Devmajor: 0,
--			Devminor: 0,
--		},
--		{
--			Name:     "sparse-posix-1.0",
--			Mode:     420,
--			Uid:      1000,
--			Gid:      1000,
--			Size:     200,
--			ModTime:  time.Unix(1392337404, 0),
--			Typeflag: 0x30,
--			Linkname: "",
--			Uname:    "david",
--			Gname:    "david",
--			Devmajor: 0,
--			Devminor: 0,
--		},
--		{
--			Name:     "end",
--			Mode:     420,
--			Uid:      1000,
--			Gid:      1000,
--			Size:     4,
--			ModTime:  time.Unix(1392398319, 0),
--			Typeflag: 0x30,
--			Linkname: "",
--			Uname:    "david",
--			Gname:    "david",
--			Devmajor: 0,
--			Devminor: 0,
--		},
--	},
--	cksums: []string{
--		"6f53234398c2449fe67c1812d993012f",
--		"6f53234398c2449fe67c1812d993012f",
--		"6f53234398c2449fe67c1812d993012f",
--		"6f53234398c2449fe67c1812d993012f",
--		"b0061974914468de549a2af8ced10316",
--	},
--}
--
--var untarTests = []*untarTest{
--	gnuTarTest,
--	sparseTarTest,
--	{
--		file: "testdata/star.tar",
--		headers: []*Header{
--			{
--				Name:       "small.txt",
--				Mode:       0640,
--				Uid:        73025,
--				Gid:        5000,
--				Size:       5,
--				ModTime:    time.Unix(1244592783, 0),
--				Typeflag:   '0',
--				Uname:      "dsymonds",
--				Gname:      "eng",
--				AccessTime: time.Unix(1244592783, 0),
--				ChangeTime: time.Unix(1244592783, 0),
--			},
--			{
--				Name:       "small2.txt",
--				Mode:       0640,
--				Uid:        73025,
--				Gid:        5000,
--				Size:       11,
--				ModTime:    time.Unix(1244592783, 0),
--				Typeflag:   '0',
--				Uname:      "dsymonds",
--				Gname:      "eng",
--				AccessTime: time.Unix(1244592783, 0),
--				ChangeTime: time.Unix(1244592783, 0),
--			},
--		},
--	},
--	{
--		file: "testdata/v7.tar",
--		headers: []*Header{
--			{
--				Name:     "small.txt",
--				Mode:     0444,
--				Uid:      73025,
--				Gid:      5000,
--				Size:     5,
--				ModTime:  time.Unix(1244593104, 0),
--				Typeflag: '\x00',
--			},
--			{
--				Name:     "small2.txt",
--				Mode:     0444,
--				Uid:      73025,
--				Gid:      5000,
--				Size:     11,
--				ModTime:  time.Unix(1244593104, 0),
--				Typeflag: '\x00',
--			},
--		},
--	},
--	{
--		file: "testdata/pax.tar",
--		headers: []*Header{
--			{
--				Name:       "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
--				Mode:       0664,
--				Uid:        1000,
--				Gid:        1000,
--				Uname:      "shane",
--				Gname:      "shane",
--				Size:       7,
--				ModTime:    time.Unix(1350244992, 23960108),
--				ChangeTime: time.Unix(1350244992, 23960108),
--				AccessTime: time.Unix(1350244992, 23960108),
--				Typeflag:   TypeReg,
--			},
--			{
--				Name:       "a/b",
--				Mode:       0777,
--				Uid:        1000,
--				Gid:        1000,
--				Uname:      "shane",
--				Gname:      "shane",
--				Size:       0,
--				ModTime:    time.Unix(1350266320, 910238425),
--				ChangeTime: time.Unix(1350266320, 910238425),
--				AccessTime: time.Unix(1350266320, 910238425),
--				Typeflag:   TypeSymlink,
--				Linkname:   "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
--			},
--		},
--	},
--	{
--		file: "testdata/nil-uid.tar", // golang.org/issue/5290
--		headers: []*Header{
--			{
--				Name:     "P1050238.JPG.log",
--				Mode:     0664,
--				Uid:      0,
--				Gid:      0,
--				Size:     14,
--				ModTime:  time.Unix(1365454838, 0),
--				Typeflag: TypeReg,
--				Linkname: "",
--				Uname:    "eyefi",
--				Gname:    "eyefi",
--				Devmajor: 0,
--				Devminor: 0,
--			},
--		},
--	},
--	{
--		file: "testdata/xattrs.tar",
--		headers: []*Header{
--			{
--				Name:       "small.txt",
--				Mode:       0644,
--				Uid:        1000,
--				Gid:        10,
--				Size:       5,
--				ModTime:    time.Unix(1386065770, 448252320),
--				Typeflag:   '0',
--				Uname:      "alex",
--				Gname:      "wheel",
--				AccessTime: time.Unix(1389782991, 419875220),
--				ChangeTime: time.Unix(1389782956, 794414986),
--				Xattrs: map[string]string{
--					"user.key":  "value",
--					"user.key2": "value2",
--					// Interestingly, selinux encodes the terminating null inside the xattr
--					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
--				},
--			},
--			{
--				Name:       "small2.txt",
--				Mode:       0644,
--				Uid:        1000,
--				Gid:        10,
--				Size:       11,
--				ModTime:    time.Unix(1386065770, 449252304),
--				Typeflag:   '0',
--				Uname:      "alex",
--				Gname:      "wheel",
--				AccessTime: time.Unix(1389782991, 419875220),
--				ChangeTime: time.Unix(1386065770, 449252304),
--				Xattrs: map[string]string{
--					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
--				},
--			},
--		},
--	},
--}
--
--func TestReader(t *testing.T) {
--testLoop:
--	for i, test := range untarTests {
--		f, err := os.Open(test.file)
--		if err != nil {
--			t.Errorf("test %d: Unexpected error: %v", i, err)
--			continue
--		}
--		defer f.Close()
--		tr := NewReader(f)
--		for j, header := range test.headers {
--			hdr, err := tr.Next()
--			if err != nil || hdr == nil {
--				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
--				f.Close()
--				continue testLoop
--			}
--			if !reflect.DeepEqual(*hdr, *header) {
--				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
--					i, j, *hdr, *header)
--			}
--		}
--		hdr, err := tr.Next()
--		if err == io.EOF {
--			continue testLoop
--		}
--		if hdr != nil || err != nil {
--			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
--		}
--	}
--}
--
--func TestPartialRead(t *testing.T) {
--	f, err := os.Open("testdata/gnu.tar")
--	if err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	defer f.Close()
--
--	tr := NewReader(f)
--
--	// Read the first four bytes; Next() should skip the last byte.
--	hdr, err := tr.Next()
--	if err != nil || hdr == nil {
--		t.Fatalf("Didn't get first file: %v", err)
--	}
--	buf := make([]byte, 4)
--	if _, err := io.ReadFull(tr, buf); err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
--		t.Errorf("Contents = %v, want %v", buf, expected)
--	}
--
--	// Second file
--	hdr, err = tr.Next()
--	if err != nil || hdr == nil {
--		t.Fatalf("Didn't get second file: %v", err)
--	}
--	buf = make([]byte, 6)
--	if _, err := io.ReadFull(tr, buf); err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	if expected := []byte("Google"); !bytes.Equal(buf, expected) {
--		t.Errorf("Contents = %v, want %v", buf, expected)
--	}
--}
--
--func TestIncrementalRead(t *testing.T) {
--	test := gnuTarTest
--	f, err := os.Open(test.file)
--	if err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	defer f.Close()
--
--	tr := NewReader(f)
--
--	headers := test.headers
--	cksums := test.cksums
--	nread := 0
--
--	// loop over all files
--	for ; ; nread++ {
--		hdr, err := tr.Next()
--		if hdr == nil || err == io.EOF {
--			break
--		}
--
--		// check the header
--		if !reflect.DeepEqual(*hdr, *headers[nread]) {
--			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
--				*hdr, headers[nread])
--		}
--
--		// read file contents in little chunks until EOF,
--		// checksumming all the way
--		h := md5.New()
--		rdbuf := make([]uint8, 8)
--		for {
--			nr, err := tr.Read(rdbuf)
--			if err == io.EOF {
--				break
--			}
--			if err != nil {
--				t.Errorf("Read: unexpected error %v\n", err)
--				break
--			}
--			h.Write(rdbuf[0:nr])
--		}
--		// verify checksum
--		have := fmt.Sprintf("%x", h.Sum(nil))
--		want := cksums[nread]
--		if want != have {
--			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
--		}
--	}
--	if nread != len(headers) {
--		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
--	}
--}
--
--func TestNonSeekable(t *testing.T) {
--	test := gnuTarTest
--	f, err := os.Open(test.file)
--	if err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	defer f.Close()
--
--	type readerOnly struct {
--		io.Reader
--	}
--	tr := NewReader(readerOnly{f})
--	nread := 0
--
--	for ; ; nread++ {
--		_, err := tr.Next()
--		if err == io.EOF {
--			break
--		}
--		if err != nil {
--			t.Fatalf("Unexpected error: %v", err)
--		}
--	}
--
--	if nread != len(test.headers) {
--		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
--	}
--}
--
--func TestParsePAXHeader(t *testing.T) {
--	paxTests := [][3]string{
--		{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
--		{"a", "a=name", "9 a=name\n"},  // Test case involving multiple acceptable lengths
--		{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
--	for _, test := range paxTests {
--		key, expected, raw := test[0], test[1], test[2]
--		reader := bytes.NewReader([]byte(raw))
--		headers, err := parsePAX(reader)
--		if err != nil {
--			t.Errorf("Couldn't parse correctly formatted headers: %v", err)
--			continue
--		}
--		if strings.EqualFold(headers[key], expected) {
--			t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
--			continue
--		}
--		trailer := make([]byte, 100)
--		n, err := reader.Read(trailer)
--		if err != io.EOF || n != 0 {
--			t.Error("Buffer wasn't consumed")
--		}
--	}
--	badHeader := bytes.NewReader([]byte("3 somelongkey="))
--	if _, err := parsePAX(badHeader); err != ErrHeader {
--		t.Fatal("Unexpected success when parsing bad header")
--	}
--}
--
--func TestParsePAXTime(t *testing.T) {
--	// Some valid PAX time values
--	timestamps := map[string]time.Time{
--		"1350244992.023960108":  time.Unix(1350244992, 23960108), // The common case
--		"1350244992.02396010":   time.Unix(1350244992, 23960100), // Lower precision value
--		"1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
--		"1350244992":            time.Unix(1350244992, 0),        // Low precision value
--	}
--	for input, expected := range timestamps {
--		ts, err := parsePAXTime(input)
--		if err != nil {
--			t.Fatal(err)
--		}
--		if !ts.Equal(expected) {
--			t.Fatalf("Time parsing failure %s %s", ts, expected)
--		}
--	}
--}
--
--func TestMergePAX(t *testing.T) {
--	hdr := new(Header)
--	// Test a string, integer, and time based value.
--	headers := map[string]string{
--		"path":  "a/b/c",
--		"uid":   "1000",
--		"mtime": "1350244992.023960108",
--	}
--	err := mergePAX(hdr, headers)
--	if err != nil {
--		t.Fatal(err)
--	}
--	want := &Header{
--		Name:    "a/b/c",
--		Uid:     1000,
--		ModTime: time.Unix(1350244992, 23960108),
--	}
--	if !reflect.DeepEqual(hdr, want) {
--		t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
--	}
--}
--
--func TestSparseEndToEnd(t *testing.T) {
--	test := sparseTarTest
--	f, err := os.Open(test.file)
--	if err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	defer f.Close()
--
--	tr := NewReader(f)
--
--	headers := test.headers
--	cksums := test.cksums
--	nread := 0
--
--	// loop over all files
--	for ; ; nread++ {
--		hdr, err := tr.Next()
--		if hdr == nil || err == io.EOF {
--			break
--		}
--
--		// check the header
--		if !reflect.DeepEqual(*hdr, *headers[nread]) {
--			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
--				*hdr, headers[nread])
--		}
--
--		// read and checksum the file data
--		h := md5.New()
--		_, err = io.Copy(h, tr)
--		if err != nil {
--			t.Fatalf("Unexpected error: %v", err)
--		}
--
--		// verify checksum
--		have := fmt.Sprintf("%x", h.Sum(nil))
--		want := cksums[nread]
--		if want != have {
--			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
--		}
--	}
--	if nread != len(headers) {
--		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
--	}
--}
--
--type sparseFileReadTest struct {
--	sparseData []byte
--	sparseMap  []sparseEntry
--	realSize   int64
--	expected   []byte
--}
--
--var sparseFileReadTests = []sparseFileReadTest{
--	{
--		sparseData: []byte("abcde"),
--		sparseMap: []sparseEntry{
--			{offset: 0, numBytes: 2},
--			{offset: 5, numBytes: 3},
--		},
--		realSize: 8,
--		expected: []byte("ab\x00\x00\x00cde"),
--	},
--	{
--		sparseData: []byte("abcde"),
--		sparseMap: []sparseEntry{
--			{offset: 0, numBytes: 2},
--			{offset: 5, numBytes: 3},
--		},
--		realSize: 10,
--		expected: []byte("ab\x00\x00\x00cde\x00\x00"),
--	},
--	{
--		sparseData: []byte("abcde"),
--		sparseMap: []sparseEntry{
--			{offset: 1, numBytes: 3},
--			{offset: 6, numBytes: 2},
--		},
--		realSize: 8,
--		expected: []byte("\x00abc\x00\x00de"),
--	},
--	{
--		sparseData: []byte("abcde"),
--		sparseMap: []sparseEntry{
--			{offset: 1, numBytes: 3},
--			{offset: 6, numBytes: 2},
--		},
--		realSize: 10,
--		expected: []byte("\x00abc\x00\x00de\x00\x00"),
--	},
--	{
--		sparseData: []byte(""),
--		sparseMap:  nil,
--		realSize:   2,
--		expected:   []byte("\x00\x00"),
--	},
--}
--
--func TestSparseFileReader(t *testing.T) {
--	for i, test := range sparseFileReadTests {
--		r := bytes.NewReader(test.sparseData)
--		nb := int64(r.Len())
--		sfr := &sparseFileReader{
--			rfr: &regFileReader{r: r, nb: nb},
--			sp:  test.sparseMap,
--			pos: 0,
--			tot: test.realSize,
--		}
--		if sfr.numBytes() != nb {
--			t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
--		}
--		buf, err := ioutil.ReadAll(sfr)
--		if err != nil {
--			t.Errorf("test %d: Unexpected error: %v", i, err)
--		}
--		if e := test.expected; !bytes.Equal(buf, e) {
--			t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
--		}
--		if sfr.numBytes() != 0 {
--			t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
--		}
--	}
--}
--
--func TestSparseIncrementalRead(t *testing.T) {
--	sparseMap := []sparseEntry{{10, 2}}
--	sparseData := []byte("Go")
--	expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
--
--	r := bytes.NewReader(sparseData)
--	nb := int64(r.Len())
--	sfr := &sparseFileReader{
--		rfr: &regFileReader{r: r, nb: nb},
--		sp:  sparseMap,
--		pos: 0,
--		tot: int64(len(expected)),
--	}
--
--	// We'll read the data 6 bytes at a time, with a hole of size 10 at
--	// the beginning and one of size 8 at the end.
--	var outputBuf bytes.Buffer
--	buf := make([]byte, 6)
--	for {
--		n, err := sfr.Read(buf)
--		if err == io.EOF {
--			break
--		}
--		if err != nil {
--			t.Errorf("Read: unexpected error %v\n", err)
--		}
--		if n > 0 {
--			_, err := outputBuf.Write(buf[:n])
--			if err != nil {
--				t.Errorf("Write: unexpected error %v\n", err)
--			}
--		}
--	}
--	got := outputBuf.String()
--	if got != expected {
--		t.Errorf("Contents = %v, want %v", got, expected)
--	}
--}
--
--func TestReadGNUSparseMap0x1(t *testing.T) {
--	headers := map[string]string{
--		paxGNUSparseNumBlocks: "4",
--		paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
--	}
--	expected := []sparseEntry{
--		{offset: 0, numBytes: 5},
--		{offset: 10, numBytes: 5},
--		{offset: 20, numBytes: 5},
--		{offset: 30, numBytes: 5},
--	}
--
--	sp, err := readGNUSparseMap0x1(headers)
--	if err != nil {
--		t.Errorf("Unexpected error: %v", err)
--	}
--	if !reflect.DeepEqual(sp, expected) {
--		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
--	}
--}
--
--func TestReadGNUSparseMap1x0(t *testing.T) {
--	// This test uses lots of holes so the sparse header takes up more than two blocks
--	numEntries := 100
--	expected := make([]sparseEntry, 0, numEntries)
--	sparseMap := new(bytes.Buffer)
--
--	fmt.Fprintf(sparseMap, "%d\n", numEntries)
--	for i := 0; i < numEntries; i++ {
--		offset := int64(2048 * i)
--		numBytes := int64(1024)
--		expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
--		fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
--	}
--
--	// Make the header the smallest multiple of blockSize that fits the sparseMap
--	headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
--	bufLen := blockSize * headerBlocks
--	buf := make([]byte, bufLen)
--	copy(buf, sparseMap.Bytes())
--
--	// Get a reader to read the sparse map
--	r := bytes.NewReader(buf)
--
--	// Read the sparse map
--	sp, err := readGNUSparseMap1x0(r)
--	if err != nil {
--		t.Errorf("Unexpected error: %v", err)
--	}
--	if !reflect.DeepEqual(sp, expected) {
--		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
--	}
--}
--
--func TestUninitializedRead(t *testing.T) {
--	test := gnuTarTest
--	f, err := os.Open(test.file)
--	if err != nil {
--		t.Fatalf("Unexpected error: %v", err)
--	}
--	defer f.Close()
--
--	tr := NewReader(f)
--	_, err = tr.Read([]byte{})
--	if err == nil || err != io.EOF {
--		t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
-deleted file mode 100644
-index cf9cc79..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build linux dragonfly openbsd solaris
--
--package tar
--
--import (
--	"syscall"
--	"time"
--)
--
--func statAtime(st *syscall.Stat_t) time.Time {
--	return time.Unix(st.Atim.Unix())
--}
--
--func statCtime(st *syscall.Stat_t) time.Time {
--	return time.Unix(st.Ctim.Unix())
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
-deleted file mode 100644
-index 6f17dbe..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build darwin freebsd netbsd
--
--package tar
--
--import (
--	"syscall"
--	"time"
--)
--
--func statAtime(st *syscall.Stat_t) time.Time {
--	return time.Unix(st.Atimespec.Unix())
--}
--
--func statCtime(st *syscall.Stat_t) time.Time {
--	return time.Unix(st.Ctimespec.Unix())
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
-deleted file mode 100644
-index cb843db..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build linux darwin dragonfly freebsd openbsd netbsd solaris
--
--package tar
--
--import (
--	"os"
--	"syscall"
--)
--
--func init() {
--	sysStat = statUnix
--}
--
--func statUnix(fi os.FileInfo, h *Header) error {
--	sys, ok := fi.Sys().(*syscall.Stat_t)
--	if !ok {
--		return nil
--	}
--	h.Uid = int(sys.Uid)
--	h.Gid = int(sys.Gid)
--	// TODO(bradfitz): populate username & group.  os/user
--	// doesn't cache LookupId lookups, and lacks group
--	// lookup functions.
--	h.AccessTime = statAtime(sys)
--	h.ChangeTime = statCtime(sys)
--	// TODO(bradfitz): major/minor device numbers?
--	return nil
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
-deleted file mode 100644
-index ed333f3..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
-+++ /dev/null
-@@ -1,284 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar
--
--import (
--	"bytes"
--	"io/ioutil"
--	"os"
--	"path"
--	"reflect"
--	"strings"
--	"testing"
--	"time"
--)
--
--func TestFileInfoHeader(t *testing.T) {
--	fi, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--	h, err := FileInfoHeader(fi, "")
--	if err != nil {
--		t.Fatalf("FileInfoHeader: %v", err)
--	}
--	if g, e := h.Name, "small.txt"; g != e {
--		t.Errorf("Name = %q; want %q", g, e)
--	}
--	if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
--		t.Errorf("Mode = %#o; want %#o", g, e)
--	}
--	if g, e := h.Size, int64(5); g != e {
--		t.Errorf("Size = %v; want %v", g, e)
--	}
--	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
--		t.Errorf("ModTime = %v; want %v", g, e)
--	}
--	// FileInfoHeader should error when passing nil FileInfo
--	if _, err := FileInfoHeader(nil, ""); err == nil {
--		t.Fatalf("Expected error when passing nil to FileInfoHeader")
--	}
--}
--
--func TestFileInfoHeaderDir(t *testing.T) {
--	fi, err := os.Stat("testdata")
--	if err != nil {
--		t.Fatal(err)
--	}
--	h, err := FileInfoHeader(fi, "")
--	if err != nil {
--		t.Fatalf("FileInfoHeader: %v", err)
--	}
--	if g, e := h.Name, "testdata/"; g != e {
--		t.Errorf("Name = %q; want %q", g, e)
--	}
--	// Ignoring c_ISGID for golang.org/issue/4867
--	if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
--		t.Errorf("Mode = %#o; want %#o", g, e)
--	}
--	if g, e := h.Size, int64(0); g != e {
--		t.Errorf("Size = %v; want %v", g, e)
--	}
--	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
--		t.Errorf("ModTime = %v; want %v", g, e)
--	}
--}
--
--func TestFileInfoHeaderSymlink(t *testing.T) {
--	h, err := FileInfoHeader(symlink{}, "some-target")
--	if err != nil {
--		t.Fatal(err)
--	}
--	if g, e := h.Name, "some-symlink"; g != e {
--		t.Errorf("Name = %q; want %q", g, e)
--	}
--	if g, e := h.Linkname, "some-target"; g != e {
--		t.Errorf("Linkname = %q; want %q", g, e)
--	}
--}
--
--type symlink struct{}
--
--func (symlink) Name() string       { return "some-symlink" }
--func (symlink) Size() int64        { return 0 }
--func (symlink) Mode() os.FileMode  { return os.ModeSymlink }
--func (symlink) ModTime() time.Time { return time.Time{} }
--func (symlink) IsDir() bool        { return false }
--func (symlink) Sys() interface{}   { return nil }
--
--func TestRoundTrip(t *testing.T) {
--	data := []byte("some file contents")
--
--	var b bytes.Buffer
--	tw := NewWriter(&b)
--	hdr := &Header{
--		Name:    "file.txt",
--		Uid:     1 << 21, // too big for 8 octal digits
--		Size:    int64(len(data)),
--		ModTime: time.Now(),
--	}
--	// tar only supports second precision.
--	hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
--	if err := tw.WriteHeader(hdr); err != nil {
--		t.Fatalf("tw.WriteHeader: %v", err)
--	}
--	if _, err := tw.Write(data); err != nil {
--		t.Fatalf("tw.Write: %v", err)
--	}
--	if err := tw.Close(); err != nil {
--		t.Fatalf("tw.Close: %v", err)
--	}
--
--	// Read it back.
--	tr := NewReader(&b)
--	rHdr, err := tr.Next()
--	if err != nil {
--		t.Fatalf("tr.Next: %v", err)
--	}
--	if !reflect.DeepEqual(rHdr, hdr) {
--		t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
--	}
--	rData, err := ioutil.ReadAll(tr)
--	if err != nil {
--		t.Fatalf("Read: %v", err)
--	}
--	if !bytes.Equal(rData, data) {
--		t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
--	}
--}
--
--type headerRoundTripTest struct {
--	h  *Header
--	fm os.FileMode
--}
--
--func TestHeaderRoundTrip(t *testing.T) {
--	golden := []headerRoundTripTest{
--		// regular file.
--		{
--			h: &Header{
--				Name:     "test.txt",
--				Mode:     0644 | c_ISREG,
--				Size:     12,
--				ModTime:  time.Unix(1360600916, 0),
--				Typeflag: TypeReg,
--			},
--			fm: 0644,
--		},
--		// hard link.
--		{
--			h: &Header{
--				Name:     "hard.txt",
--				Mode:     0644 | c_ISLNK,
--				Size:     0,
--				ModTime:  time.Unix(1360600916, 0),
--				Typeflag: TypeLink,
--			},
--			fm: 0644 | os.ModeSymlink,
--		},
--		// symbolic link.
--		{
--			h: &Header{
--				Name:     "link.txt",
--				Mode:     0777 | c_ISLNK,
--				Size:     0,
--				ModTime:  time.Unix(1360600852, 0),
--				Typeflag: TypeSymlink,
--			},
--			fm: 0777 | os.ModeSymlink,
--		},
--		// character device node.
--		{
--			h: &Header{
--				Name:     "dev/null",
--				Mode:     0666 | c_ISCHR,
--				Size:     0,
--				ModTime:  time.Unix(1360578951, 0),
--				Typeflag: TypeChar,
--			},
--			fm: 0666 | os.ModeDevice | os.ModeCharDevice,
--		},
--		// block device node.
--		{
--			h: &Header{
--				Name:     "dev/sda",
--				Mode:     0660 | c_ISBLK,
--				Size:     0,
--				ModTime:  time.Unix(1360578954, 0),
--				Typeflag: TypeBlock,
--			},
--			fm: 0660 | os.ModeDevice,
--		},
--		// directory.
--		{
--			h: &Header{
--				Name:     "dir/",
--				Mode:     0755 | c_ISDIR,
--				Size:     0,
--				ModTime:  time.Unix(1360601116, 0),
--				Typeflag: TypeDir,
--			},
--			fm: 0755 | os.ModeDir,
--		},
--		// fifo node.
--		{
--			h: &Header{
--				Name:     "dev/initctl",
--				Mode:     0600 | c_ISFIFO,
--				Size:     0,
--				ModTime:  time.Unix(1360578949, 0),
--				Typeflag: TypeFifo,
--			},
--			fm: 0600 | os.ModeNamedPipe,
--		},
--		// setuid.
--		{
--			h: &Header{
--				Name:     "bin/su",
--				Mode:     0755 | c_ISREG | c_ISUID,
--				Size:     23232,
--				ModTime:  time.Unix(1355405093, 0),
--				Typeflag: TypeReg,
--			},
--			fm: 0755 | os.ModeSetuid,
--		},
--		// setgid.
--		{
--			h: &Header{
--				Name:     "group.txt",
--				Mode:     0750 | c_ISREG | c_ISGID,
--				Size:     0,
--				ModTime:  time.Unix(1360602346, 0),
--				Typeflag: TypeReg,
--			},
--			fm: 0750 | os.ModeSetgid,
--		},
--		// sticky.
--		{
--			h: &Header{
--				Name:     "sticky.txt",
--				Mode:     0600 | c_ISREG | c_ISVTX,
--				Size:     7,
--				ModTime:  time.Unix(1360602540, 0),
--				Typeflag: TypeReg,
--			},
--			fm: 0600 | os.ModeSticky,
--		},
--	}
--
--	for i, g := range golden {
--		fi := g.h.FileInfo()
--		h2, err := FileInfoHeader(fi, "")
--		if err != nil {
--			t.Error(err)
--			continue
--		}
--		if strings.Contains(fi.Name(), "/") {
--			t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
--		}
--		name := path.Base(g.h.Name)
--		if fi.IsDir() {
--			name += "/"
--		}
--		if got, want := h2.Name, name; got != want {
--			t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
--		}
--		if got, want := h2.Size, g.h.Size; got != want {
--			t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
--		}
--		if got, want := h2.Mode, g.h.Mode; got != want {
--			t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
--		}
--		if got, want := fi.Mode(), g.fm; got != want {
--			t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
--		}
--		if got, want := h2.ModTime, g.h.ModTime; got != want {
--			t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
--		}
--		if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
--			t.Errorf("i=%d: Sys didn't return original *Header", i)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
-deleted file mode 100644
-index fc899dc8dc2ad9952f5c5f67a0c76ca2d87249e9..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 3072
-zcmeHH%L>9U5Ztq0(Jv at FdDKtv;8zq|ijXv5BIw^6DOh^2UK)_HbK1>@VQ0c5`qsHR
-zJrb1zXEcV16&lMRW}rdtKd=NSXg->JkvP|Esp4`g&CK_h+FMmo7oR?iU7RP&svn2t
-z!9Ke4)upeR_aRYKtT+(g`B!B>fS>t?p7IZ9V9LKWlK+)w+iY|SVQ_tY3I4Ddrx1w)
-M;($0H4*b6ZFWOBnBLDyZ
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
-deleted file mode 100644
-index cc9cfaa33cc5de0a28b4183c1705d801f788c96a..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 1024
-zcmWGAG%z(VGPcn33UJrU$xmmX0WbgpGcywmlR at HOU}(l*Xk=(?U}j`)Zf3?{U<jnm
-z859gKbkIPcw74X(NI`)iwK6p=6OZ|X<nd at 7C@FB5ni!ZE85p31FS2_~Omu)Kz(3qI
-O$lniHVw621LI421e;KL(
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
-deleted file mode 100644
-index 9bc24b6587d726c7fca4e533d9c61a3801a34688..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 10240
-zcmeH~&u-H|5XOD(Q}_vz`9HIV-Z}CL1|qeBRwxNlAD?kbq{vMJQj94uds*3I at 2-Ed
-zpXb|Q{eF0Qw;4Wdw!4)@_!@~t&7&b8A|a!oqM>78BOoLqCLtvwr=Z5b$i&RT%Er#Y
-zO+ZjcSVUAHn~8MUp(~vBV+cg7LjpEKCCE6D7<T<rw?O;0+Yj4Z(zR0^zGPhdbGvr9
-zrQRoy{_C*6yB&T*;!Rvanu2Cxl~q*L)HSpj7#bOyn3|beSaWc6a&~cbbNBET5Ev93
-z5*ijB5v at VXo!D}hwH&DBLoGKe+%WVH`}>E at EwTcMv_>l+&bbg`j1Cv0A776ym5t at +
-zSt9MDBFtXbKY&m4pMN0f`l~hhD>#q(-`x$5n+q at eEPmAevA;0XTM8XMYkTvSmQ-t5
-zkihVw{(qQ#_JjT})&KMa&-FhG0c8or{CPvw|Jf69W<Ub#zuZ55K6rrnt=}JY0A at _E
-zN^3xAA4n$-;7s(y0ZgN3(ESs)hV}e<pMn|npKk8d_aDyzKdR+K|CC3Dtp9n`nENR5
-zazFm(^?#bHx-J4mpa>L!B2Wa1KoKYcMW6^2fg(@@ia-%40!5$*6oDd81d2cr_`3;w
-E2V3|JA^-pY
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
-deleted file mode 100644
-index b249bfc..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
-+++ /dev/null
-@@ -1 +0,0 @@
--Kilts
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
-deleted file mode 100644
-index 394ee3e..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
-+++ /dev/null
-@@ -1 +0,0 @@
--Google.com
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
-deleted file mode 100644
-index 8bd4e74d50f9c8961f80a887ab7d6449e032048b..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 17920
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
-deleted file mode 100644
-index 59e2d4e604611eeac3e2a0f3d6f71d2623c50449..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 3072
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
-deleted file mode 100644
-index 29679d9a305fc0293f31212541335af824ab32c7..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 2048
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
-deleted file mode 100644
-index eb65fc9410721efd98cb7c5e274f547ec530252d..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 3584
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
-deleted file mode 100644
-index 5960ee824784ffeacb976a9c648be41b0281508b..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 4096
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
-deleted file mode 100644
-index 753e883cebf52ac1291f1b7bf1b7a37ae517b2d9..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 4096
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
-deleted file mode 100644
-index e6d816ad0775d56d09242d6f5d1dbe56af310a32..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 3584
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
-deleted file mode 100644
-index 9701950edd1f0dc82858b7117136b37391be0b08..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 5120
-[base85 binary patch data omitted]
-
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
-deleted file mode 100644
-index dafb2ca..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
-+++ /dev/null
-@@ -1,396 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar
--
--// TODO(dsymonds):
--// - catch more errors (no first header, etc.)
--
--import (
--	"bytes"
--	"errors"
--	"fmt"
--	"io"
--	"os"
--	"path"
--	"strconv"
--	"strings"
--	"time"
--)
--
--var (
--	ErrWriteTooLong    = errors.New("archive/tar: write too long")
--	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
--	ErrWriteAfterClose = errors.New("archive/tar: write after close")
--	errNameTooLong     = errors.New("archive/tar: name too long")
--	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
--)
--
--// A Writer provides sequential writing of a tar archive in POSIX.1 format.
--// A tar archive consists of a sequence of files.
--// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
--// writing at most hdr.Size bytes in total.
--type Writer struct {
--	w          io.Writer
--	err        error
--	nb         int64 // number of unwritten bytes for current file entry
--	pad        int64 // amount of padding to write after current file entry
--	closed     bool
--	usedBinary bool            // whether the binary numeric field extension was used
--	preferPax  bool            // use pax header instead of binary numeric header
--	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
--	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
--}
--
--// NewWriter creates a new Writer writing to w.
--func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
--
--// Flush finishes writing the current file (optional).
--func (tw *Writer) Flush() error {
--	if tw.nb > 0 {
--		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
--		return tw.err
--	}
--
--	n := tw.nb + tw.pad
--	for n > 0 && tw.err == nil {
--		nr := n
--		if nr > blockSize {
--			nr = blockSize
--		}
--		var nw int
--		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
--		n -= int64(nw)
--	}
--	tw.nb = 0
--	tw.pad = 0
--	return tw.err
--}
--
--// Write s into b, terminating it with a NUL if there is room.
--// If the value is too long for the field and allowPax is true, add a pax header record instead.
--func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
--	needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
--	if needsPaxHeader {
--		paxHeaders[paxKeyword] = s
--		return
--	}
--	if len(s) > len(b) {
--		if tw.err == nil {
--			tw.err = ErrFieldTooLong
--		}
--		return
--	}
--	ascii := toASCII(s)
--	copy(b, ascii)
--	if len(ascii) < len(b) {
--		b[len(ascii)] = 0
--	}
--}
--
--// Encode x as an octal ASCII string and write it into b with leading zeros.
--func (tw *Writer) octal(b []byte, x int64) {
--	s := strconv.FormatInt(x, 8)
--	// leading zeros, but leave room for a NUL.
--	for len(s)+1 < len(b) {
--		s = "0" + s
--	}
--	tw.cString(b, s, false, paxNone, nil)
--}
--
--// Write x into b, either as octal or as binary (GNUtar/star extension).
--// If the value is too long for the field, and pax is both allowed for the field and preferred by the writer, add a pax header record instead.
--func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
--	// Try octal first.
--	s := strconv.FormatInt(x, 8)
--	if len(s) < len(b) {
--		tw.octal(b, x)
--		return
--	}
--
--	// If it is too long for octal, and pax is preferred, use a pax header
--	if allowPax && tw.preferPax {
--		tw.octal(b, 0)
--		s := strconv.FormatInt(x, 10)
--		paxHeaders[paxKeyword] = s
--		return
--	}
--
--	// Too big: use binary (big-endian).
--	tw.usedBinary = true
--	for i := len(b) - 1; x > 0 && i >= 0; i-- {
--		b[i] = byte(x)
--		x >>= 8
--	}
--	b[0] |= 0x80 // highest bit indicates binary format
--}
--
--var (
--	minTime = time.Unix(0, 0)
--	// There is room for 11 octal digits (33 bits) of mtime.
--	maxTime = minTime.Add((1<<33 - 1) * time.Second)
--)
--
--// WriteHeader writes hdr and prepares to accept the file's contents.
--// WriteHeader calls Flush if it is not the first header.
--// Calling after a Close will return ErrWriteAfterClose.
--func (tw *Writer) WriteHeader(hdr *Header) error {
--	return tw.writeHeader(hdr, true)
--}
--
--// WriteHeader writes hdr and prepares to accept the file's contents.
--// WriteHeader calls Flush if it is not the first header.
--// Calling after a Close will return ErrWriteAfterClose.
--// As this method is also called internally by writePAXHeader, the allowPax
--// flag lets that caller suppress writing another pax header.
--func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
--	if tw.closed {
--		return ErrWriteAfterClose
--	}
--	if tw.err == nil {
--		tw.Flush()
--	}
--	if tw.err != nil {
--		return tw.err
--	}
--
--	// a map to hold pax header records, if any are needed
--	paxHeaders := make(map[string]string)
--
--	// TODO(shanemhansen): we might want to use PAX headers for
--	// subsecond time resolution, but for now let's just capture
--	// too long fields or non ascii characters
--
--	var header []byte
--
--	// We need to select which scratch buffer to use carefully,
--	// since this method is called recursively to write PAX headers.
--	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
--	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
--	// already being used by the non-recursive call, so we must use paxHdrBuff.
--	header = tw.hdrBuff[:]
--	if !allowPax {
--		header = tw.paxHdrBuff[:]
--	}
--	copy(header, zeroBlock)
--	s := slicer(header)
--
--	// keep a reference to the filename field so it can be overwritten later if we detect that ustar longnames can be used instead of pax
--	pathHeaderBytes := s.next(fileNameSize)
--
--	tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
--
--	// Handle out of range ModTime carefully.
--	var modTime int64
--	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
--		modTime = hdr.ModTime.Unix()
--	}
--
--	tw.octal(s.next(8), hdr.Mode)                                   // 100:108
--	tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
--	tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
--	tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders)     // 124:136
--	tw.numeric(s.next(12), modTime, false, paxNone, nil)            // 136:148 --- consider using pax for finer granularity
--	s.next(8)                                                       // chksum (148:156)
--	s.next(1)[0] = hdr.Typeflag                                     // 156:157
--
--	tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
--
--	copy(s.next(8), []byte("ustar\x0000"))                        // 257:265
--	tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
--	tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
--	tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil)      // 329:337
--	tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil)      // 337:345
--
--	// keep a reference to the prefix field so it can be overwritten later if we detect that ustar longnames can be used instead of pax
--	prefixHeaderBytes := s.next(155)
--	tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500  prefix
--
--	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
--	if tw.usedBinary {
--		copy(header[257:265], []byte("ustar  \x00"))
--	}
--
--	_, paxPathUsed := paxHeaders[paxPath]
--	// try to use a ustar header when only the name is too long
--	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
--		suffix := hdr.Name
--		prefix := ""
--		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
--			var err error
--			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
--			if err == nil {
--				// ok we can use a ustar long name instead of pax, now correct the fields
--
--				// remove the path field from the pax header. this will suppress the pax header
--				delete(paxHeaders, paxPath)
--
--				// update the path fields
--				tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
--				tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
--
--				// Use the ustar magic if we used ustar long names.
--				if len(prefix) > 0 && !tw.usedBinary {
--					copy(header[257:265], []byte("ustar\x00"))
--				}
--			}
--		}
--	}
--
--	// The chksum field is terminated by a NUL and a space.
--	// This is different from the other octal fields.
--	chksum, _ := checksum(header)
--	tw.octal(header[148:155], chksum)
--	header[155] = ' '
--
--	if tw.err != nil {
--		// problem with header; probably integer too big for a field.
--		return tw.err
--	}
--
--	if allowPax {
--		for k, v := range hdr.Xattrs {
--			paxHeaders[paxXattr+k] = v
--		}
--	}
--
--	if len(paxHeaders) > 0 {
--		if !allowPax {
--			return errInvalidHeader
--		}
--		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
--			return err
--		}
--	}
--	tw.nb = int64(hdr.Size)
--	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
--
--	_, tw.err = tw.w.Write(header)
--	return tw.err
--}
--
--// splitUSTARLongName splits a USTAR long name hdr.Name.
--// name must be < 256 characters. errNameTooLong is returned
--// if hdr.Name can't be split. The splitting heuristic
--// is compatible with gnu tar.
--func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
--	length := len(name)
--	if length > fileNamePrefixSize+1 {
--		length = fileNamePrefixSize + 1
--	} else if name[length-1] == '/' {
--		length--
--	}
--	i := strings.LastIndex(name[:length], "/")
--	// nlen contains the resulting length in the name field.
--	// plen contains the resulting length in the prefix field.
--	nlen := len(name) - i - 1
--	plen := i
--	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
--		err = errNameTooLong
--		return
--	}
--	prefix, suffix = name[:i], name[i+1:]
--	return
--}
--
--// writePAXHeader writes an extended pax header to the
--// archive.
--func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
--	// Prepare extended header
--	ext := new(Header)
--	ext.Typeflag = TypeXHeader
--	// Setting ModTime is required for reader parsing to
--	// succeed, and seems harmless enough.
--	ext.ModTime = hdr.ModTime
--	// The spec asks that we namespace our pseudo files
--	// with the current pid.
--	pid := os.Getpid()
--	dir, file := path.Split(hdr.Name)
--	fullName := path.Join(dir,
--		fmt.Sprintf("PaxHeaders.%d", pid), file)
--
--	ascii := toASCII(fullName)
--	if len(ascii) > 100 {
--		ascii = ascii[:100]
--	}
--	ext.Name = ascii
--	// Construct the body
--	var buf bytes.Buffer
--
--	for k, v := range paxHeaders {
--		fmt.Fprint(&buf, paxHeader(k+"="+v))
--	}
--
--	ext.Size = int64(len(buf.Bytes()))
--	if err := tw.writeHeader(ext, false); err != nil {
--		return err
--	}
--	if _, err := tw.Write(buf.Bytes()); err != nil {
--		return err
--	}
--	if err := tw.Flush(); err != nil {
--		return err
--	}
--	return nil
--}
--
--// paxHeader formats a single pax record, prefixing it with the appropriate length
--func paxHeader(msg string) string {
--	const padding = 2 // Extra padding for space and newline
--	size := len(msg) + padding
--	size += len(strconv.Itoa(size))
--	record := fmt.Sprintf("%d %s\n", size, msg)
--	if len(record) != size {
--		// Final adjustment if adding size increased
--		// the number of digits in size
--		size = len(record)
--		record = fmt.Sprintf("%d %s\n", size, msg)
--	}
--	return record
--}
--
--// Write writes to the current entry in the tar archive.
--// Write returns the error ErrWriteTooLong if more than
--// hdr.Size bytes are written after WriteHeader.
--func (tw *Writer) Write(b []byte) (n int, err error) {
--	if tw.closed {
--		err = ErrWriteTooLong
--		return
--	}
--	overwrite := false
--	if int64(len(b)) > tw.nb {
--		b = b[0:tw.nb]
--		overwrite = true
--	}
--	n, err = tw.w.Write(b)
--	tw.nb -= int64(n)
--	if err == nil && overwrite {
--		err = ErrWriteTooLong
--		return
--	}
--	tw.err = err
--	return
--}
--
--// Close closes the tar archive, flushing any unwritten
--// data to the underlying writer.
--func (tw *Writer) Close() error {
--	if tw.err != nil || tw.closed {
--		return tw.err
--	}
--	tw.Flush()
--	tw.closed = true
--	if tw.err != nil {
--		return tw.err
--	}
--
--	// trailer: two zero blocks
--	for i := 0; i < 2; i++ {
--		_, tw.err = tw.w.Write(zeroBlock)
--		if tw.err != nil {
--			break
--		}
--	}
--	return tw.err
--}
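
The paxHeader helper deleted above builds the length-prefixed records that PAX
extended headers are made of: the leading decimal length counts itself, the
separating space, the "key=value" payload and the trailing newline. Below is a
minimal standalone sketch of that record format (the paxRecord name is
illustrative, not part of the deleted package); it reproduces the
"19 path=/etc/hosts\n" and "6 a=b\n" records that writer_test.go further down
checks for.

package main

import (
	"fmt"
	"strconv"
)

// paxRecord mirrors the length-prefix logic of the deleted paxHeader function:
// the decimal size counts its own digits, the space, the payload and the
// newline, so one adjustment pass is needed when adding the size grows the
// number of digits.
func paxRecord(msg string) string {
	const padding = 2 // space and newline
	size := len(msg) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s\n", size, msg)
	if len(record) != size {
		size = len(record)
		record = fmt.Sprintf("%d %s\n", size, msg)
	}
	return record
}

func main() {
	fmt.Printf("%q\n", paxRecord("path=/etc/hosts")) // "19 path=/etc/hosts\n"
	fmt.Printf("%q\n", paxRecord("a=b"))             // "6 a=b\n"
}
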
-diff --git a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
-deleted file mode 100644
-index 5e42e32..0000000
---- a/Godeps/_workspace/src/github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
-+++ /dev/null
-@@ -1,491 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package tar
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"reflect"
--	"strings"
--	"testing"
--	"testing/iotest"
--	"time"
--)
--
--type writerTestEntry struct {
--	header   *Header
--	contents string
--}
--
--type writerTest struct {
--	file    string // filename of expected output
--	entries []*writerTestEntry
--}
--
--var writerTests = []*writerTest{
--	// The writer test file was produced with this command:
--	// tar (GNU tar) 1.26
--	//   ln -s small.txt link.txt
--	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
--	{
--		file: "testdata/writer.tar",
--		entries: []*writerTestEntry{
--			{
--				header: &Header{
--					Name:     "small.txt",
--					Mode:     0640,
--					Uid:      73025,
--					Gid:      5000,
--					Size:     5,
--					ModTime:  time.Unix(1246508266, 0),
--					Typeflag: '0',
--					Uname:    "dsymonds",
--					Gname:    "eng",
--				},
--				contents: "Kilts",
--			},
--			{
--				header: &Header{
--					Name:     "small2.txt",
--					Mode:     0640,
--					Uid:      73025,
--					Gid:      5000,
--					Size:     11,
--					ModTime:  time.Unix(1245217492, 0),
--					Typeflag: '0',
--					Uname:    "dsymonds",
--					Gname:    "eng",
--				},
--				contents: "Google.com\n",
--			},
--			{
--				header: &Header{
--					Name:     "link.txt",
--					Mode:     0777,
--					Uid:      1000,
--					Gid:      1000,
--					Size:     0,
--					ModTime:  time.Unix(1314603082, 0),
--					Typeflag: '2',
--					Linkname: "small.txt",
--					Uname:    "strings",
--					Gname:    "strings",
--				},
--				// no contents
--			},
--		},
--	},
--	// The truncated test file was produced using these commands:
--	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
--	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
--	{
--		file: "testdata/writer-big.tar",
--		entries: []*writerTestEntry{
--			{
--				header: &Header{
--					Name:     "tmp/16gig.txt",
--					Mode:     0640,
--					Uid:      73025,
--					Gid:      5000,
--					Size:     16 << 30,
--					ModTime:  time.Unix(1254699560, 0),
--					Typeflag: '0',
--					Uname:    "dsymonds",
--					Gname:    "eng",
--				},
--				// fake contents
--				contents: strings.Repeat("\x00", 4<<10),
--			},
--		},
--	},
--	// The truncated test file was produced using these commands:
--	//   dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
--	//   tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
--	{
--		file: "testdata/writer-big-long.tar",
--		entries: []*writerTestEntry{
--			{
--				header: &Header{
--					Name:     strings.Repeat("longname/", 15) + "16gig.txt",
--					Mode:     0644,
--					Uid:      1000,
--					Gid:      1000,
--					Size:     16 << 30,
--					ModTime:  time.Unix(1399583047, 0),
--					Typeflag: '0',
--					Uname:    "guillaume",
--					Gname:    "guillaume",
--				},
--				// fake contents
--				contents: strings.Repeat("\x00", 4<<10),
--			},
--		},
--	},
--	// This file was produced using gnu tar 1.17
--	// gnutar  -b 4 --format=ustar (longname/)*15 + file.txt
--	{
--		file: "testdata/ustar.tar",
--		entries: []*writerTestEntry{
--			{
--				header: &Header{
--					Name:     strings.Repeat("longname/", 15) + "file.txt",
--					Mode:     0644,
--					Uid:      0765,
--					Gid:      024,
--					Size:     06,
--					ModTime:  time.Unix(1360135598, 0),
--					Typeflag: '0',
--					Uname:    "shane",
--					Gname:    "staff",
--				},
--				contents: "hello\n",
--			},
--		},
--	},
--}
--
--// Render a byte array as two-character hexadecimal values, spaced for easy visual inspection.
--func bytestr(offset int, b []byte) string {
--	const rowLen = 32
--	s := fmt.Sprintf("%04x ", offset)
--	for _, ch := range b {
--		switch {
--		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
--			s += fmt.Sprintf("  %c", ch)
--		default:
--			s += fmt.Sprintf(" %02x", ch)
--		}
--	}
--	return s
--}
--
--// Render a pseudo-diff between two blocks of bytes.
--func bytediff(a []byte, b []byte) string {
--	const rowLen = 32
--	s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
--	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
--		na, nb := rowLen, rowLen
--		if na > len(a) {
--			na = len(a)
--		}
--		if nb > len(b) {
--			nb = len(b)
--		}
--		sa := bytestr(offset, a[0:na])
--		sb := bytestr(offset, b[0:nb])
--		if sa != sb {
--			s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
--		}
--		a = a[na:]
--		b = b[nb:]
--	}
--	return s
--}
--
--func TestWriter(t *testing.T) {
--testLoop:
--	for i, test := range writerTests {
--		expected, err := ioutil.ReadFile(test.file)
--		if err != nil {
--			t.Errorf("test %d: Unexpected error: %v", i, err)
--			continue
--		}
--
--		buf := new(bytes.Buffer)
--		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
--		big := false
--		for j, entry := range test.entries {
--			big = big || entry.header.Size > 1<<10
--			if err := tw.WriteHeader(entry.header); err != nil {
--				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
--				continue testLoop
--			}
--			if _, err := io.WriteString(tw, entry.contents); err != nil {
--				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
--				continue testLoop
--			}
--		}
--		// Only interested in Close failures for the small tests.
--		if err := tw.Close(); err != nil && !big {
--			t.Errorf("test %d: Failed closing archive: %v", i, err)
--			continue testLoop
--		}
--
--		actual := buf.Bytes()
--		if !bytes.Equal(expected, actual) {
--			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
--				i, bytediff(expected, actual))
--		}
--		if testing.Short() { // The second test is expensive.
--			break
--		}
--	}
--}
--
--func TestPax(t *testing.T) {
--	// Create an archive with a large name
--	fileinfo, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--	hdr, err := FileInfoHeader(fileinfo, "")
--	if err != nil {
--		t.Fatalf("os.Stat: %v", err)
--	}
--	// Force a PAX long name to be written
--	longName := strings.Repeat("ab", 100)
--	contents := strings.Repeat(" ", int(hdr.Size))
--	hdr.Name = longName
--	var buf bytes.Buffer
--	writer := NewWriter(&buf)
--	if err := writer.WriteHeader(hdr); err != nil {
--		t.Fatal(err)
--	}
--	if _, err = writer.Write([]byte(contents)); err != nil {
--		t.Fatal(err)
--	}
--	if err := writer.Close(); err != nil {
--		t.Fatal(err)
--	}
--	// Simple test to make sure PAX extensions are in effect
--	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
--		t.Fatal("Expected at least one PAX header to be written.")
--	}
--	// Test that we can get a long name back out of the archive.
--	reader := NewReader(&buf)
--	hdr, err = reader.Next()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if hdr.Name != longName {
--		t.Fatal("Couldn't recover long file name")
--	}
--}
--
--func TestPaxSymlink(t *testing.T) {
--	// Create an archive with a large linkname
--	fileinfo, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--	hdr, err := FileInfoHeader(fileinfo, "")
--	hdr.Typeflag = TypeSymlink
--	if err != nil {
--		t.Fatalf("os.Stat:1 %v", err)
--	}
--	// Force a PAX long linkname to be written
--	longLinkname := strings.Repeat("1234567890/1234567890", 10)
--	hdr.Linkname = longLinkname
--
--	hdr.Size = 0
--	var buf bytes.Buffer
--	writer := NewWriter(&buf)
--	if err := writer.WriteHeader(hdr); err != nil {
--		t.Fatal(err)
--	}
--	if err := writer.Close(); err != nil {
--		t.Fatal(err)
--	}
--	// Simple test to make sure PAX extensions are in effect
--	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
--		t.Fatal("Expected at least one PAX header to be written.")
--	}
--	// Test that we can get a long name back out of the archive.
--	reader := NewReader(&buf)
--	hdr, err = reader.Next()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if hdr.Linkname != longLinkname {
--		t.Fatal("Couldn't recover long link name")
--	}
--}
--
--func TestPaxNonAscii(t *testing.T) {
--	// Create an archive with non-ASCII names. These should trigger a pax header
--	// because pax headers have a defined utf-8 encoding.
--	fileinfo, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	hdr, err := FileInfoHeader(fileinfo, "")
--	if err != nil {
--		t.Fatalf("os.Stat:1 %v", err)
--	}
--
--	// some sample data
--	chineseFilename := "文件名"
--	chineseGroupname := "組"
--	chineseUsername := "用戶名"
--
--	hdr.Name = chineseFilename
--	hdr.Gname = chineseGroupname
--	hdr.Uname = chineseUsername
--
--	contents := strings.Repeat(" ", int(hdr.Size))
--
--	var buf bytes.Buffer
--	writer := NewWriter(&buf)
--	if err := writer.WriteHeader(hdr); err != nil {
--		t.Fatal(err)
--	}
--	if _, err = writer.Write([]byte(contents)); err != nil {
--		t.Fatal(err)
--	}
--	if err := writer.Close(); err != nil {
--		t.Fatal(err)
--	}
--	// Simple test to make sure PAX extensions are in effect
--	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
--		t.Fatal("Expected at least one PAX header to be written.")
--	}
--	// Test that we can get a long name back out of the archive.
--	reader := NewReader(&buf)
--	hdr, err = reader.Next()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if hdr.Name != chineseFilename {
--		t.Fatal("Couldn't recover unicode name")
--	}
--	if hdr.Gname != chineseGroupname {
--		t.Fatal("Couldn't recover unicode group")
--	}
--	if hdr.Uname != chineseUsername {
--		t.Fatal("Couldn't recover unicode user")
--	}
--}
--
--func TestPaxXattrs(t *testing.T) {
--	xattrs := map[string]string{
--		"user.key": "value",
--	}
--
--	// Create an archive with an xattr
--	fileinfo, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--	hdr, err := FileInfoHeader(fileinfo, "")
--	if err != nil {
--		t.Fatalf("os.Stat: %v", err)
--	}
--	contents := "Kilts"
--	hdr.Xattrs = xattrs
--	var buf bytes.Buffer
--	writer := NewWriter(&buf)
--	if err := writer.WriteHeader(hdr); err != nil {
--		t.Fatal(err)
--	}
--	if _, err = writer.Write([]byte(contents)); err != nil {
--		t.Fatal(err)
--	}
--	if err := writer.Close(); err != nil {
--		t.Fatal(err)
--	}
--	// Test that we can get the xattrs back out of the archive.
--	reader := NewReader(&buf)
--	hdr, err = reader.Next()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
--		t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
--			hdr.Xattrs, xattrs)
--	}
--}
--
--func TestPAXHeader(t *testing.T) {
--	medName := strings.Repeat("CD", 50)
--	longName := strings.Repeat("AB", 100)
--	paxTests := [][2]string{
--		{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
--		{"a=b", "6 a=b\n"},          // Single digit length
--		{"a=names", "11 a=names\n"}, // Test case involving carries
--		{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
--		{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
--
--	for _, test := range paxTests {
--		key, expected := test[0], test[1]
--		if result := paxHeader(key); result != expected {
--			t.Fatalf("paxHeader: got %s, expected %s", result, expected)
--		}
--	}
--}
--
--func TestUSTARLongName(t *testing.T) {
--	// Create an archive with a path that failed to split with USTAR extension in previous versions.
--	fileinfo, err := os.Stat("testdata/small.txt")
--	if err != nil {
--		t.Fatal(err)
--	}
--	hdr, err := FileInfoHeader(fileinfo, "")
--	hdr.Typeflag = TypeDir
--	if err != nil {
--		t.Fatalf("os.Stat:1 %v", err)
--	}
--	// Force a PAX long name to be written. The name was taken from a practical example
--	// that fails, with every character replaced by digits to anonymize the sample.
--	longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
--	hdr.Name = longName
--
--	hdr.Size = 0
--	var buf bytes.Buffer
--	writer := NewWriter(&buf)
--	if err := writer.WriteHeader(hdr); err != nil {
--		t.Fatal(err)
--	}
--	if err := writer.Close(); err != nil {
--		t.Fatal(err)
--	}
--	// Test that we can get a long name back out of the archive.
--	reader := NewReader(&buf)
--	hdr, err = reader.Next()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if hdr.Name != longName {
--		t.Fatal("Couldn't recover long name")
--	}
--}
--
--func TestValidTypeflagWithPAXHeader(t *testing.T) {
--	var buffer bytes.Buffer
--	tw := NewWriter(&buffer)
--
--	fileName := strings.Repeat("ab", 100)
--
--	hdr := &Header{
--		Name:     fileName,
--		Size:     4,
--		Typeflag: 0,
--	}
--	if err := tw.WriteHeader(hdr); err != nil {
--		t.Fatalf("Failed to write header: %s", err)
--	}
--	if _, err := tw.Write([]byte("fooo")); err != nil {
--		t.Fatalf("Failed to write the file's data: %s", err)
--	}
--	tw.Close()
--
--	tr := NewReader(&buffer)
--
--	for {
--		header, err := tr.Next()
--		if err == io.EOF {
--			break
--		}
--		if err != nil {
--			t.Fatalf("Failed to read header: %s", err)
--		}
--		if header.Typeflag != 0 {
--			t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
--		}
--	}
--}
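
The Writer API exercised by these tests (NewWriter, WriteHeader, Write, Close)
is the same sequence any caller of archive/tar follows. A minimal sketch of
writing a single entry, reusing the "small.txt"/"Kilts" fixture from the tests
above (the output path example.tar is made up for illustration):

package main

import (
	"archive/tar"
	"log"
	"os"
	"time"
)

func main() {
	// Create the output archive (hypothetical path).
	f, err := os.Create("example.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tw := tar.NewWriter(f)
	contents := []byte("Kilts")

	// WriteHeader starts a new entry; Write may supply at most hdr.Size bytes.
	hdr := &tar.Header{
		Name:    "small.txt",
		Mode:    0640,
		Size:    int64(len(contents)),
		ModTime: time.Unix(1246508266, 0),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(contents); err != nil {
		log.Fatal(err)
	}

	// Close flushes padding and writes the two trailing zero blocks.
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}
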
-diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE
-deleted file mode 100644
-index 5782c72..0000000
---- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE
-+++ /dev/null
-@@ -1,23 +0,0 @@
--Copyright (c) 2014, Elazar Leibovich
--All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are met:
--
--* Redistributions of source code must retain the above copyright notice, this
--  list of conditions and the following disclaimer.
--
--* Redistributions in binary form must reproduce the above copyright notice,
--  this list of conditions and the following disclaimer in the documentation
--  and/or other materials provided with the distribution.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
--AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
--IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
--DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
--FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
--DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
--SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
--CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
--OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md
-deleted file mode 100644
-index a3929a8..0000000
---- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md
-+++ /dev/null
-@@ -1,18 +0,0 @@
--go-bindata-http
--===============
--
--Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.
--
--[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)
--
--After running
--
--    $ go-bindata data/...
--
--Use
--
--     http.Handle("/",
--        http.FileServer(
--        &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
--
--to serve files embedded from the `data` directory.
-diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go
-deleted file mode 100644
-index 83cd94d..0000000
---- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go
-+++ /dev/null
-@@ -1,141 +0,0 @@
--package assetfs
--
--import (
--	"bytes"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"net/http"
--	"os"
--	"path"
--	"path/filepath"
--	"time"
--)
--
--// FakeFile implements os.FileInfo interface for a given path and size
--type FakeFile struct {
--	// Path is the path of this file
--	Path string
--	// Dir marks whether the path is a directory
--	Dir bool
--	// Len is the length of the fake file, zero if it is a directory
--	Len int64
--}
--
--func (f *FakeFile) Name() string {
--	_, name := filepath.Split(f.Path)
--	return name
--}
--
--func (f *FakeFile) Mode() os.FileMode {
--	mode := os.FileMode(0644)
--	if f.Dir {
--		return mode | os.ModeDir
--	}
--	return mode
--}
--
--func (f *FakeFile) ModTime() time.Time {
--	return time.Unix(0, 0)
--}
--
--func (f *FakeFile) Size() int64 {
--	return f.Len
--}
--
--func (f *FakeFile) IsDir() bool {
--	return f.Mode().IsDir()
--}
--
--func (f *FakeFile) Sys() interface{} {
--	return nil
--}
--
--// AssetFile implements http.File interface for a no-directory file with content
--type AssetFile struct {
--	*bytes.Reader
--	io.Closer
--	FakeFile
--}
--
--func NewAssetFile(name string, content []byte) *AssetFile {
--	return &AssetFile{
--		bytes.NewReader(content),
--		ioutil.NopCloser(nil),
--		FakeFile{name, false, int64(len(content))}}
--}
--
--func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
--	return nil, errors.New("not a directory")
--}
--
--func (f *AssetFile) Stat() (os.FileInfo, error) {
--	return f, nil
--}
--
--// AssetDirectory implements http.File interface for a directory
--type AssetDirectory struct {
--	AssetFile
--	ChildrenRead int
--	Children     []os.FileInfo
--}
--
--func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory {
--	fileinfos := make([]os.FileInfo, 0, len(children))
--	for _, child := range children {
--		_, err := fs.AssetDir(filepath.Join(name, child))
--		fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0})
--	}
--	return &AssetDirectory{
--		AssetFile{
--			bytes.NewReader(nil),
--			ioutil.NopCloser(nil),
--			FakeFile{name, true, 0},
--		},
--		0,
--		fileinfos}
--}
--
--func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
--	fmt.Println(f, count)
--	if count <= 0 {
--		return f.Children, nil
--	}
--	if f.ChildrenRead+count > len(f.Children) {
--		count = len(f.Children) - f.ChildrenRead
--	}
--	rv := f.Children[f.ChildrenRead : f.ChildrenRead+count]
--	f.ChildrenRead += count
--	return rv, nil
--}
--
--func (f *AssetDirectory) Stat() (os.FileInfo, error) {
--	return f, nil
--}
--
--// AssetFS implements http.FileSystem, allowing
--// embedded files to be served from net/http package.
--type AssetFS struct {
--	// Asset should return the content of the file at path, if it exists
--	Asset func(path string) ([]byte, error)
--	// AssetDir should return list of files in the path
--	AssetDir func(path string) ([]string, error)
--	// Prefix would be prepended to http requests
--	Prefix string
--}
--
--func (fs *AssetFS) Open(name string) (http.File, error) {
--	name = path.Join(fs.Prefix, name)
--	if len(name) > 0 && name[0] == '/' {
--		name = name[1:]
--	}
--	if children, err := fs.AssetDir(name); err == nil {
--		return NewAssetDirectory(name, children, fs), nil
--	}
--	b, err := fs.Asset(name)
--	if err != nil {
--		return nil, err
--	}
--	return NewAssetFile(name, b), nil
--}
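
AssetFS.Open above resolves a name first as a directory (via AssetDir) and only
then as a file (via Asset). go-bindata normally generates those two callbacks;
the sketch below stubs them with a tiny invented in-memory tree purely to show
how the pieces fit together with net/http, matching the struct fields of the
deleted vendored version:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/elazarl/go-bindata-assetfs"
)

// Hand-written stand-ins for the functions go-bindata would generate.
var files = map[string][]byte{
	"data/index.html": []byte("hello, world\n"),
}

func Asset(path string) ([]byte, error) {
	if b, ok := files[path]; ok {
		return b, nil
	}
	return nil, fmt.Errorf("asset %s not found", path)
}

func AssetDir(path string) ([]string, error) {
	if path == "data" {
		return []string{"index.html"}, nil
	}
	return nil, fmt.Errorf("dir %s not found", path)
}

func main() {
	http.Handle("/", http.FileServer(
		&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
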
-diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go
-deleted file mode 100644
-index a664249..0000000
---- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--// assetfs allows packages to serve static content embedded
--// with the go-bindata tool with the standard net/http package.
--//
--// See https://github.com/jteeuwen/go-bindata for more information
--// about embedding binary data with go-bindata.
--//
--// Usage example, after running
--//    $ go-bindata data/...
--// use:
--//     http.Handle("/",
--//        http.FileServer(
--//        &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
--package assetfs
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/.gitignore b/Godeps/_workspace/src/github.com/emicklei/go-restful/.gitignore
-deleted file mode 100644
-index cece7be..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/.gitignore
-+++ /dev/null
-@@ -1,70 +0,0 @@
--# Compiled Object files, Static and Dynamic libs (Shared Objects)
--*.o
--*.a
--*.so
--
--# Folders
--_obj
--_test
--
--# Architecture specific extensions/prefixes
--*.[568vq]
--[568vq].out
--
--*.cgo1.go
--*.cgo2.c
--_cgo_defun.c
--_cgo_gotypes.go
--_cgo_export.*
--
--_testmain.go
--
--*.exe
--
--restful.html
--
--*.out
--
--tmp.prof
--
--go-restful.test
--
--examples/restful-basic-authentication
--
--examples/restful-encoding-filter
--
--examples/restful-filters
--
--examples/restful-hello-world
--
--examples/restful-resource-functions
--
--examples/restful-serve-static
--
--examples/restful-user-service
--
--*.DS_Store
--examples/restful-user-resource
--
--examples/restful-multi-containers
--
--examples/restful-form-handling
--
--examples/restful-CORS-filter
--
--examples/restful-options-filter
--
--examples/restful-curly-router
--
--examples/restful-cpuprofiler-service
--
--examples/restful-pre-post-filters
--
--curly.prof
--
--examples/restful-NCSA-logging
--
--examples/restful-html-template
--
--s.html
--restful-path-tail
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md
-deleted file mode 100644
-index 9c6de61..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md
-+++ /dev/null
-@@ -1,130 +0,0 @@
--Change history of go-restful
--=
--2014-11-27
--- (api add) PrettyPrint per response. (as proposed in #167)
--
--2014-11-12
--- (api add) ApiVersion(.) for documentation in Swagger UI
--
--2014-11-10
--- (api change) struct fields tagged with "description" show up in Swagger UI
--
--2014-10-31
--- (api change) ReturnsError -> Returns
--- (api add)    RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
--- fix swagger nested structs
--- sort Swagger response messages by code
--
--2014-10-23
--- (api add) ReturnsError allows you to document Http codes in swagger
--- fixed problem with greedy CurlyRouter
--- (api add) Access-Control-Max-Age in CORS
--- add tracing functionality (injectable) for debugging purposes
--- support JSON parse 64bit int 
--- fix empty parameters for swagger
--- WebServicesUrl is now optional for swagger
--- fixed duplicate AccessControlAllowOrigin in CORS
--- (api change) expose ServeMux in container
--- (api add) added AllowedDomains in CORS
--- (api add) ParameterNamed for detailed documentation
--
--2014-04-16
--- (api add) expose constructor of Request for testing.
--
--2014-06-27
--- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
--- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
--
--2014-07-03
--- (api add) CORS can be configured with a list of allowed domains
--
--2014-03-12
--- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
--
--2014-02-26
--- (api add) Request now provides information about the matched Route, see method SelectedRoutePath 
--
--2014-02-17
--- (api change) renamed parameter constants (go-lint checks)
--
--2014-01-10
-- - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
--
--2014-01-07
-- - (api change) Write* methods in Response now return the error or nil.
-- - added example of serving HTML from a Go template.
-- - fixed comparing Allowed headers in CORS (is now case-insensitive)
--
--2013-11-13
-- - (api add) Response knows how many bytes are written to the response body.
--
--2013-10-29
-- - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stack trace. This may be a security issue as it exposes source code information.
--
--2013-10-04
-- - (api add) Response knows what HTTP status has been written
-- - (api add) Request can have attributes (map of string->interface, also called request-scoped variables
--
--2013-09-12
-- - (api change) Router interface simplified
-- - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
--
--2013-08-05
-- - add OPTIONS support
-- - add CORS support
--
--2013-08-27
-- - fixed some reported issues (see github)
-- - (api change) deprecated use of WriteError; use WriteErrorString instead
--
--2014-04-15
-- - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
--
--2013-08-08
-- - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
-- - (api add) the swagger package has been extended to have a UI per container.
-- - if panic is detected then a small stack trace is printed (thanks to runner-mei)
-- - (api add) WriteErrorString to Response
--
--Important API changes:
--
-- - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
-- - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
-- 
-- 
--2013-07-06
--
-- - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
--
--2013-06-19
--
-- - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
--
--2013-06-03
--
-- - (api change) removed Dispatcher interface, hide PathExpression
-- - changed receiver names of type functions to be more idiomatic Go
--
--2013-06-02
--
-- - (optimize) Cache the RegExp compilation of Paths.
--
--2013-05-22
--	
-- - (api add) Added support for request/response filter functions
--
--2013-05-18
--
--
-- - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
-- - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
--
--[2012-11-14 .. 2013-05-18>
-- 
-- - See https://github.com/emicklei/go-restful/commits
--
--2012-11-14
--
-- - Initial commit
--
--
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/LICENSE b/Godeps/_workspace/src/github.com/emicklei/go-restful/LICENSE
-deleted file mode 100644
-index ece7ec6..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/LICENSE
-+++ /dev/null
-@@ -1,22 +0,0 @@
--Copyright (c) 2012,2013 Ernest Micklei
--
--MIT License
--
--Permission is hereby granted, free of charge, to any person obtaining
--a copy of this software and associated documentation files (the
--"Software"), to deal in the Software without restriction, including
--without limitation the rights to use, copy, modify, merge, publish,
--distribute, sublicense, and/or sell copies of the Software, and to
--permit persons to whom the Software is furnished to do so, subject to
--the following conditions:
--
--The above copyright notice and this permission notice shall be
--included in all copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
--EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
--MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
--NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
--LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
--OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
--WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md
-deleted file mode 100644
-index 8af137d..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md
-+++ /dev/null
-@@ -1,70 +0,0 @@
--go-restful
--==========
--
--package for building REST-style Web Services using Google Go
--
--REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
--
--- GET = Retrieve a representation of a resource
--- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
--- PUT = Create if you are sending the full content of the specified resource (URI).
--- PUT = Update if you are updating the full content of the specified resource.
--- DELETE = Delete if you are requesting the server to delete the resource
--- PATCH = Update partial content of a resource
--- OPTIONS = Get information about the communication options for the request URI
--    
--### Example
--
--```Go
--ws := new(restful.WebService)
--ws.
--	Path("/users").
--	Consumes(restful.MIME_XML, restful.MIME_JSON).
--	Produces(restful.MIME_JSON, restful.MIME_XML)
--
--ws.Route(ws.GET("/{user-id}").To(u.findUser).
--	Doc("get a user").
--	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--	Writes(User{}))		
--...
--	
--func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	...
--}
--```
--	
--[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) 
--		
--### Features
--
--- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
--- Configurable router:
---	- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
--	- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
--- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
--- Response API for writing structs to JSON/XML and setting headers
--- Filters for intercepting the request &#8594; response flow on Service or Route level
--- Request-scoped variables using attributes
--- Containers for WebServices on different HTTP endpoints
--- Content encoding (gzip,deflate) of responses
--- Automatic responses on OPTIONS (using a filter)
--- Automatic CORS request handling (using a filter)
--- API declaration for Swagger UI (see swagger package)
--- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
--	
--### Resources
--
--- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
--- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
--- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
--- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
--- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
--- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
--- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
--
--[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)[![library users](https://sourcegraph.com/api/repos/github.com/emicklei/go-restful/badges/library-users.png)](https://sourcegraph.com/github.com/emicklei/go-restful) [![authors](https://sourcegraph.com/api/repos/github.com/emicklei/go-restful/badges/authors.png)](https://sourcegraph.com/github.com/emicklei/go-restful) [![xrefs](https://sourcegraph.com/api/repos/github.com/emicklei/go-restful/badges/xrefs.png)](https://sourcegraph.com/github.com/emicklei/go-restful)
--
--(c) 2012 - 2014, http://ernestmicklei.com. MIT License
--
--Type ```git shortlog -s``` for a full list of contributors.
-\ No newline at end of file
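
The README snippet above stops short of registering the WebService and starting
a server. A minimal end-to-end sketch against the same API (the User type, the
handler body and the port are invented for illustration):

package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

type User struct {
	Id, Name string
}

func findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	response.WriteEntity(User{Id: id, Name: "unknown"})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)

	ws.Route(ws.GET("/{user-id}").To(findUser).
		Doc("get a user").
		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
		Writes(User{}))

	// Register with the default container, which serves on http.DefaultServeMux.
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
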
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/Srcfile b/Godeps/_workspace/src/github.com/emicklei/go-restful/Srcfile
-deleted file mode 100644
-index 16fd186..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/Srcfile
-+++ /dev/null
-@@ -1 +0,0 @@
--{"SkipDirs": ["examples"]}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_curly_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_curly_test.go
-deleted file mode 100644
-index db6a1a7..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_curly_test.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package restful
--
--import (
--	"fmt"
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--func setupCurly(container *Container) []string {
--	wsCount := 26
--	rtCount := 26
--	urisCurly := []string{}
--
--	container.Router(CurlyRouter{})
--	for i := 0; i < wsCount; i++ {
--		root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
--		ws := new(WebService).Path(root)
--		for j := 0; j < rtCount; j++ {
--			sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
--			ws.Route(ws.GET(sub).Consumes("application/xml").Produces("application/xml").To(echoCurly))
--		}
--		container.Add(ws)
--		for _, each := range ws.Routes() {
--			urisCurly = append(urisCurly, "http://bench.com"+each.Path)
--		}
--	}
--	return urisCurly
--}
--
--func echoCurly(req *Request, resp *Response) {}
--
--func BenchmarkManyCurly(b *testing.B) {
--	container := NewContainer()
--	urisCurly := setupCurly(container)
--	b.ResetTimer()
--	for t := 0; t < b.N; t++ {
--		for r := 0; r < 1000; r++ {
--			for _, each := range urisCurly {
--				sendNoReturnTo(each, container, t)
--			}
--		}
--	}
--}
--
--func sendNoReturnTo(address string, container *Container, t int) {
--	httpRequest, _ := http.NewRequest("GET", address, nil)
--	httpRequest.Header.Set("Accept", "application/xml")
--	httpWriter := httptest.NewRecorder()
--	container.dispatch(httpWriter, httpRequest)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.go
-deleted file mode 100644
-index 3e77c2d..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--package restful
--
--import (
--	"fmt"
--	"io"
--	"testing"
--)
--
--var uris = []string{}
--
--func setup(container *Container) {
--	wsCount := 26
--	rtCount := 26
--
--	for i := 0; i < wsCount; i++ {
--		root := fmt.Sprintf("/%s/{%s}/", string(i+97), string(i+97))
--		ws := new(WebService).Path(root)
--		for j := 0; j < rtCount; j++ {
--			sub := fmt.Sprintf("/%s2/{%s2}", string(j+97), string(j+97))
--			ws.Route(ws.GET(sub).To(echo))
--		}
--		container.Add(ws)
--		for _, each := range ws.Routes() {
--			uris = append(uris, "http://bench.com"+each.Path)
--		}
--	}
--}
--
--func echo(req *Request, resp *Response) {
--	io.WriteString(resp.ResponseWriter, "echo")
--}
--
--func BenchmarkMany(b *testing.B) {
--	container := NewContainer()
--	setup(container)
--	b.ResetTimer()
--	for t := 0; t < b.N; t++ {
--		for _, each := range uris {
--			// println(each)
--			sendItTo(each, container)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.sh b/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.sh
-deleted file mode 100644
-index 47ffbe4..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/bench_test.sh
-+++ /dev/null
-@@ -1,10 +0,0 @@
--#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
--
--go test -c
--./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
--./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
--
--#go tool pprof go-restful.test tmp.prof
--go tool pprof go-restful.test curly.prof
--
--
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go
-deleted file mode 100644
-index c4dcca0..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"compress/gzip"
--	"compress/zlib"
--	"errors"
--	"io"
--	"net/http"
--	"strings"
--)
--
--// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
--var EnableContentEncoding = false
--
--// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
--type CompressingResponseWriter struct {
--	writer     http.ResponseWriter
--	compressor io.WriteCloser
--}
--
--// Header is part of http.ResponseWriter interface
--func (c *CompressingResponseWriter) Header() http.Header {
--	return c.writer.Header()
--}
--
--// WriteHeader is part of http.ResponseWriter interface
--func (c *CompressingResponseWriter) WriteHeader(status int) {
--	c.writer.WriteHeader(status)
--}
--
--// Write is part of http.ResponseWriter interface
--// It is passed through the compressor
--func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
--	return c.compressor.Write(bytes)
--}
--
--// CloseNotify is part of http.CloseNotifier interface
--func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
--	return c.writer.(http.CloseNotifier).CloseNotify()
--}
--
--// Close the underlying compressor
--func (c *CompressingResponseWriter) Close() {
--	c.compressor.Close()
--}
--
--// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
--func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
--	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
--	gi := strings.Index(header, ENCODING_GZIP)
--	zi := strings.Index(header, ENCODING_DEFLATE)
--	// use in order of appearance
--	if gi == -1 {
--		return zi != -1, ENCODING_DEFLATE
--	} else if zi == -1 {
--		return gi != -1, ENCODING_GZIP
--	} else {
--		if gi < zi {
--			return true, ENCODING_GZIP
--		}
--		return true, ENCODING_DEFLATE
--	}
--}
--
--// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
--func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
--	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
--	c := new(CompressingResponseWriter)
--	c.writer = httpWriter
--	var err error
--	if ENCODING_GZIP == encoding {
--		c.compressor, err = gzip.NewWriterLevel(httpWriter, gzip.BestSpeed)
--		if err != nil {
--			return nil, err
--		}
--	} else if ENCODING_DEFLATE == encoding {
--		c.compressor, err = zlib.NewWriterLevel(httpWriter, zlib.BestSpeed)
--		if err != nil {
--			return nil, err
--		}
--	} else {
--		return nil, errors.New("Unknown encoding:" + encoding)
--	}
--	return c, err
--}
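
The compress.go removed above negotiates gzip/deflate from the Accept-Encoding header and wraps the ResponseWriter in a compressor; applications normally switch this on per container rather than calling these types directly. A minimal sketch, using only calls present in the removed sources (port is illustrative, import path as vendored above):

    package main

    import (
        "net/http"

        "github.com/emicklei/go-restful"
    )

    func main() {
        // With content encoding enabled, the container wraps the response in a
        // CompressingResponseWriter whenever the client sends Accept-Encoding.
        restful.DefaultContainer.EnableContentEncoding(true)
        http.ListenAndServe(":8080", restful.DefaultContainer)
    }

When a client offers both gzip and deflate, wantsCompressedResponse picks whichever encoding appears first in the header, which is exactly what the two tests in compress_test.go directly below exercise.
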
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go
-deleted file mode 100644
-index 332fb22..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--package restful
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--func TestGzip(t *testing.T) {
--	EnableContentEncoding = true
--	httpRequest, _ := http.NewRequest("GET", "/test", nil)
--	httpRequest.Header.Set("Accept-Encoding", "gzip,deflate")
--	httpWriter := httptest.NewRecorder()
--	wanted, encoding := wantsCompressedResponse(httpRequest)
--	if !wanted {
--		t.Fatal("should accept gzip")
--	}
--	if encoding != "gzip" {
--		t.Fatal("expected gzip")
--	}
--	c, err := NewCompressingResponseWriter(httpWriter, encoding)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	c.Write([]byte("Hello World"))
--	c.Close()
--	if httpWriter.Header().Get("Content-Encoding") != "gzip" {
--		t.Fatal("Missing gzip header")
--	}
--}
--
--func TestDeflate(t *testing.T) {
--	EnableContentEncoding = true
--	httpRequest, _ := http.NewRequest("GET", "/test", nil)
--	httpRequest.Header.Set("Accept-Encoding", "deflate,gzip")
--	httpWriter := httptest.NewRecorder()
--	wanted, encoding := wantsCompressedResponse(httpRequest)
--	if !wanted {
--		t.Fatal("should accept deflate")
--	}
--	if encoding != "deflate" {
--		t.Fatal("expected deflate")
--	}
--	c, err := NewCompressingResponseWriter(httpWriter, encoding)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	c.Write([]byte("Hello World"))
--	c.Close()
--	if httpWriter.Header().Get("Content-Encoding") != "deflate" {
--		t.Fatal("Missing deflate header")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/constants.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/constants.go
-deleted file mode 100644
-index 5e564d0..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/constants.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--const (
--	MIME_XML  = "application/xml"  // Accept or Content-Type used in Consumes() and/or Produces()
--	MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
--
--	HEADER_Allow                         = "Allow"
--	HEADER_Accept                        = "Accept"
--	HEADER_Origin                        = "Origin"
--	HEADER_ContentType                   = "Content-Type"
--	HEADER_LastModified                  = "Last-Modified"
--	HEADER_AcceptEncoding                = "Accept-Encoding"
--	HEADER_ContentEncoding               = "Content-Encoding"
--	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
--	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
--	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
--	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
--	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
--	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
--	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
--	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"
--
--	ENCODING_GZIP    = "gzip"
--	ENCODING_DEFLATE = "deflate"
--)
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go
-deleted file mode 100644
-index 7aae565..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go
-+++ /dev/null
-@@ -1,261 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"bytes"
--	"fmt"
--	"log"
--	"net/http"
--	"runtime"
--	"strings"
--)
--
--// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
--// The requests are further dispatched to routes of WebServices using a RouteSelector
--type Container struct {
--	webServices            []*WebService
--	ServeMux               *http.ServeMux
--	isRegisteredOnRoot     bool
--	containerFilters       []FilterFunction
--	doNotRecover           bool // default is false
--	recoverHandleFunc      RecoverHandleFunction
--	router                 RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
--	contentEncodingEnabled bool          // default is false
--}
--
--// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
--func NewContainer() *Container {
--	return &Container{
--		webServices:            []*WebService{},
--		ServeMux:               http.NewServeMux(),
--		isRegisteredOnRoot:     false,
--		containerFilters:       []FilterFunction{},
--		doNotRecover:           false,
--		recoverHandleFunc:      logStackOnRecover,
--		router:                 RouterJSR311{},
--		contentEncodingEnabled: false}
--}
--
--// RecoverHandleFunction declares functions that can be used to handle a panic situation.
--// The first argument is what recover() returns. The second must be used to communicate an error response.
--type RecoverHandleFunction func(interface{}, http.ResponseWriter)
--
--// RecoverHandler changes the default function (logStackOnRecover) to be called
--// when a panic is detected. DoNotRecover must have its default value (=false).
--func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
--	c.recoverHandleFunc = handler
--}
--
--// DoNotRecover controls whether panics will be caught to return HTTP 500.
--// If set to true, Route functions are responsible for handling any error situation.
--// Default value is false = recover from panics. This has performance implications.
--func (c *Container) DoNotRecover(doNot bool) {
--	c.doNotRecover = doNot
--}
--
--// Router changes the default Router (currently RouterJSR311)
--func (c *Container) Router(aRouter RouteSelector) {
--	c.router = aRouter
--}
--
--// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
--func (c *Container) EnableContentEncoding(enabled bool) {
--	c.contentEncodingEnabled = enabled
--}
--
--// Add a WebService to the Container. It will detect duplicate root paths and log a fatal error in that case.
--func (c *Container) Add(service *WebService) *Container {
--	// If registered on root then no additional specific mapping is needed
--	if !c.isRegisteredOnRoot {
--		pattern := c.fixedPrefixPath(service.RootPath())
--		// check if root path registration is needed
--		if "/" == pattern || "" == pattern {
--			c.ServeMux.HandleFunc("/", c.dispatch)
--			c.isRegisteredOnRoot = true
--		} else {
--			// detect if registration already exists
--			alreadyMapped := false
--			for _, each := range c.webServices {
--				if each.RootPath() == service.RootPath() {
--					alreadyMapped = true
--					break
--				}
--			}
--			if !alreadyMapped {
--				c.ServeMux.HandleFunc(pattern, c.dispatch)
--				if !strings.HasSuffix(pattern, "/") {
--					c.ServeMux.HandleFunc(pattern+"/", c.dispatch)
--				}
--			}
--		}
--	}
--	// cannot have duplicate root paths
--	for _, each := range c.webServices {
--		if each.RootPath() == service.RootPath() {
--			log.Fatalf("[restful] WebService with duplicate root path detected:['%v']", each)
--		}
--	}
--	// if rootPath was not set then lazy initialize it
--	if len(service.rootPath) == 0 {
--		service.Path("/")
--	}
--	c.webServices = append(c.webServices, service)
--	return c
--}
--
--// logStackOnRecover is the default RecoverHandleFunction and is called
--// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
--// Default implementation logs the stacktrace and writes the stacktrace on the response.
--// This may be a security issue as it exposes sourcecode information.
--func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
--	var buffer bytes.Buffer
--	buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
--	for i := 2; ; i += 1 {
--		_, file, line, ok := runtime.Caller(i)
--		if !ok {
--			break
--		}
--		buffer.WriteString(fmt.Sprintf("    %s:%d\r\n", file, line))
--	}
--	log.Println(buffer.String())
--	httpWriter.WriteHeader(http.StatusInternalServerError)
--	httpWriter.Write(buffer.Bytes())
--}
--
--// Dispatch the incoming Http Request to a matching WebService.
--func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
--	// Install panic recovery unless told otherwise
--	if !c.doNotRecover { // catch all for 500 response
--		defer func() {
--			if r := recover(); r != nil {
--				c.recoverHandleFunc(r, httpWriter)
--				return
--			}
--		}()
--	}
--	// Install closing the request body (if any)
--	defer func() {
--		if nil != httpRequest.Body {
--			httpRequest.Body.Close()
--		}
--	}()
--
--	// Detect if compression is needed
--	// assume without compression, test for override
--	writer := httpWriter
--	if c.contentEncodingEnabled {
--		doCompress, encoding := wantsCompressedResponse(httpRequest)
--		if doCompress {
--			var err error
--			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
--			if err != nil {
--				log.Println("[restful] unable to install compressor:", err)
--				httpWriter.WriteHeader(http.StatusInternalServerError)
--				return
--			}
--			defer func() {
--				writer.(*CompressingResponseWriter).Close()
--			}()
--		}
--	}
--	// Find best match Route ; err is non nil if no match was found
--	webService, route, err := c.router.SelectRoute(
--		c.webServices,
--		httpRequest)
--	if err != nil {
--		// a non-200 response has already been written
--		// run container filters anyway ; they should not touch the response...
--		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
--			switch err.(type) {
--			case ServiceError:
--				ser := err.(ServiceError)
--				resp.WriteErrorString(ser.Code, ser.Message)
--			}
--			// TODO
--		}}
--		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
--		return
--	}
--	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
--	// pass through filters (if any)
--	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
--		// compose filter chain
--		allFilters := []FilterFunction{}
--		allFilters = append(allFilters, c.containerFilters...)
--		allFilters = append(allFilters, webService.filters...)
--		allFilters = append(allFilters, route.Filters...)
--		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
--			// handle request by route after passing all filters
--			route.Function(wrappedRequest, wrappedResponse)
--		}}
--		chain.ProcessFilter(wrappedRequest, wrappedResponse)
--	} else {
--		// no filters, handle request by route
--		route.Function(wrappedRequest, wrappedResponse)
--	}
--}
--
--// fixedPrefixPath returns the fixed prefix of the pathspec; it may include template vars {}
--func (c Container) fixedPrefixPath(pathspec string) string {
--	varBegin := strings.Index(pathspec, "{")
--	if -1 == varBegin {
--		return pathspec
--	}
--	return pathspec[:varBegin]
--}
--
--// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
--func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
--	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
--}
--
--// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
--func (c Container) Handle(pattern string, handler http.Handler) {
--	c.ServeMux.Handle(pattern, handler)
--}
--
--// Filter appends a container FilterFunction. These are called before dispatching
--// a http.Request to a WebService from the container
--func (c *Container) Filter(filter FilterFunction) {
--	c.containerFilters = append(c.containerFilters, filter)
--}
--
--// RegisteredWebServices returns the collections of added WebServices
--func (c Container) RegisteredWebServices() []*WebService {
--	return c.webServices
--}
--
--// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
--func (c Container) computeAllowedMethods(req *Request) []string {
--	// Go through all RegisteredWebServices() and all its Routes to collect the options
--	methods := []string{}
--	requestPath := req.Request.URL.Path
--	for _, ws := range c.RegisteredWebServices() {
--		matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
--		if matches != nil {
--			finalMatch := matches[len(matches)-1]
--			for _, rt := range ws.Routes() {
--				matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
--				if matches != nil {
--					lastMatch := matches[len(matches)-1]
--					if lastMatch == "" || lastMatch == "/" { // include only when the remaining match is empty or "/"
--						methods = append(methods, rt.Method)
--					}
--				}
--			}
--		}
--	}
--	// methods = append(methods, "OPTIONS")  not sure about this
--	return methods
--}
--
--// newBasicRequestResponse creates a pair of Request,Response from its http versions.
--// It is basic because no parameter or (produces) content-type information is given.
--func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
--	resp := NewResponse(httpWriter)
--	resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
--	return NewRequest(httpRequest), resp
--}
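
container.go is the dispatch core of the removed library: it maps WebServices onto a ServeMux, runs container/WebService/Route filters, optionally compresses, and recovers from panics unless DoNotRecover(true) is set. A short sketch of standing up a dedicated Container (handler body, path and port are illustrative; the calls are the ones defined above):

    package main

    import (
        "io"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    func hello(req *restful.Request, resp *restful.Response) {
        io.WriteString(resp.ResponseWriter, "hello")
    }

    func main() {
        ws := new(restful.WebService)
        ws.Path("/")
        ws.Route(ws.GET("/hello").To(hello))

        // A dedicated Container keeps this service off http.DefaultServeMux
        // and gets its own router, filters and recovery behaviour.
        container := restful.NewContainer()
        container.Add(ws)

        server := &http.Server{Addr: ":8081", Handler: container}
        server.ListenAndServe()
    }
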
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter.go
-deleted file mode 100644
-index ae166b6..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter.go
-+++ /dev/null
-@@ -1,170 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"strconv"
--	"strings"
--)
--
--// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
--// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
--// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
--//
--// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
--// http://enable-cors.org/server.html
--// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
--type CrossOriginResourceSharing struct {
--	ExposeHeaders  []string // list of Header names
--	AllowedHeaders []string // list of Header names
--	AllowedDomains []string // list of allowed values for Http Origin. If empty all are allowed.
--	AllowedMethods []string
--	MaxAge         int // number of seconds before requiring new Options request
--	CookiesAllowed bool
--	Container      *Container
--}
--
--// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
--// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
--func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
--	origin := req.Request.Header.Get(HEADER_Origin)
--	if len(origin) == 0 {
--		if trace {
--			traceLogger.Println("no Http header Origin set")
--		}
--		chain.ProcessFilter(req, resp)
--		return
--	}
--	if len(c.AllowedDomains) > 0 { // if provided then origin must be included
--		included := false
--		for _, each := range c.AllowedDomains {
--			if each == origin {
--				included = true
--				break
--			}
--		}
--		if !included {
--			if trace {
--				traceLogger.Printf("HTTP Origin:%s is not part of %v", origin, c.AllowedDomains)
--			}
--			chain.ProcessFilter(req, resp)
--			return
--		}
--	}
--	if req.Request.Method != "OPTIONS" {
--		c.doActualRequest(req, resp)
--		chain.ProcessFilter(req, resp)
--		return
--	}
--	if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
--		c.doPreflightRequest(req, resp)
--	} else {
--		c.doActualRequest(req, resp)
--	}
--}
--
--func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
--	c.setOptionsHeaders(req, resp)
--	// continue processing the response
--}
--
--func (c CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
--	if len(c.AllowedMethods) == 0 {
--		c.AllowedMethods = c.Container.computeAllowedMethods(req)
--	}
--
--	acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
--	if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
--		if trace {
--			traceLogger.Printf("Http header %s:%s is not in %v",
--				HEADER_AccessControlRequestMethod,
--				acrm,
--				c.AllowedMethods)
--		}
--		return
--	}
--	acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
--	if len(acrhs) > 0 {
--		for _, each := range strings.Split(acrhs, ",") {
--			if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
--				if trace {
--					traceLogger.Printf("Http header %s:%s is not in %v",
--						HEADER_AccessControlRequestHeaders,
--						acrhs,
--						c.AllowedHeaders)
--				}
--				return
--			}
--		}
--	}
--	resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
--	resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
--	c.setOptionsHeaders(req, resp)
--
--	// return http 200 response, no body
--}
--
--func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
--	c.checkAndSetExposeHeaders(resp)
--	c.setAllowOriginHeader(req, resp)
--	c.checkAndSetAllowCredentials(resp)
--	if c.MaxAge > 0 {
--		resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
--	}
--}
--
--func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
--	if len(origin) == 0 {
--		return false
--	}
--	if len(c.AllowedDomains) == 0 {
--		return true
--	}
--	allowed := false
--	for _, each := range c.AllowedDomains {
--		if each == origin {
--			allowed = true
--			break
--		}
--	}
--	return allowed
--}
--
--func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
--	origin := req.Request.Header.Get(HEADER_Origin)
--	if c.isOriginAllowed(origin) {
--		resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
--	}
--}
--
--func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
--	if len(c.ExposeHeaders) > 0 {
--		resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
--	}
--}
--
--func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
--	if c.CookiesAllowed {
--		resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
--	}
--}
--
--func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
--	for _, each := range allowedMethods {
--		if each == method {
--			return true
--		}
--	}
--	return false
--}
--
--func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
--	for _, each := range c.AllowedHeaders {
--		if strings.ToLower(each) == strings.ToLower(header) {
--			return true
--		}
--	}
--	return false
--}
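
cors_filter.go implements the preflight and actual-request branches of CORS as a container filter. A hedged sketch of installing it on the default container, mirroring the struct fields defined above (the header names and allowed origin are placeholders):

    package main

    import "github.com/emicklei/go-restful"

    func main() {
        cors := restful.CrossOriginResourceSharing{
            ExposeHeaders:  []string{"X-My-Header"},
            AllowedHeaders: []string{"Content-Type", "Accept"},
            AllowedDomains: []string{"http://api.bob.com"}, // empty slice means: any Origin is allowed
            CookiesAllowed: false,
            Container:      restful.DefaultContainer,
        }
        restful.Filter(cors.Filter)
        // ...register WebServices with restful.Add and start an http.Server as usual.
    }

Leaving AllowedMethods empty makes the filter compute the permitted methods from the registered routes, as doPreflightRequest above shows.
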
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter_test.go
-deleted file mode 100644
-index 9b47230..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/cors_filter_test.go
-+++ /dev/null
-@@ -1,125 +0,0 @@
--package restful
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--// go test -v -test.run TestCORSFilter_Preflight ...restful
--// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
--func TestCORSFilter_Preflight(t *testing.T) {
--	tearDown()
--	ws := new(WebService)
--	ws.Route(ws.PUT("/cors").To(dummy))
--	Add(ws)
--
--	cors := CrossOriginResourceSharing{
--		ExposeHeaders:  []string{"X-Custom-Header"},
--		AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"},
--		CookiesAllowed: true,
--		Container:      DefaultContainer}
--	Filter(cors.Filter)
--
--	// Preflight
--	httpRequest, _ := http.NewRequest("OPTIONS", "http://api.alice.com/cors", nil)
--	httpRequest.Method = "OPTIONS"
--	httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com")
--	httpRequest.Header.Set(HEADER_AccessControlRequestMethod, "PUT")
--	httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header")
--
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--
--	actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
--	if "http://api.bob.com" != actual {
--		t.Fatal("expected: http://api.bob.com but got:" + actual)
--	}
--	actual = httpWriter.Header().Get(HEADER_AccessControlAllowMethods)
--	if "PUT" != actual {
--		t.Fatal("expected: PUT but got:" + actual)
--	}
--	actual = httpWriter.Header().Get(HEADER_AccessControlAllowHeaders)
--	if "X-Custom-Header, X-Additional-Header" != actual {
--		t.Fatal("expected: X-Custom-Header, X-Additional-Header but got:" + actual)
--	}
--
--	if !cors.isOriginAllowed("somewhere") {
--		t.Fatal("origin expected to be allowed")
--	}
--	cors.AllowedDomains = []string{"overthere.com"}
--	if cors.isOriginAllowed("somewhere") {
--		t.Fatal("origin [somewhere] expected NOT to be allowed")
--	}
--	if !cors.isOriginAllowed("overthere.com") {
--		t.Fatal("origin [overthere] expected to be allowed")
--	}
--
--}
--
--// go test -v -test.run TestCORSFilter_Actual ...restful
--// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
--func TestCORSFilter_Actual(t *testing.T) {
--	tearDown()
--	ws := new(WebService)
--	ws.Route(ws.PUT("/cors").To(dummy))
--	Add(ws)
--
--	cors := CrossOriginResourceSharing{
--		ExposeHeaders:  []string{"X-Custom-Header"},
--		AllowedHeaders: []string{"X-Custom-Header", "X-Additional-Header"},
--		CookiesAllowed: true,
--		Container:      DefaultContainer}
--	Filter(cors.Filter)
--
--	// Actual
--	httpRequest, _ := http.NewRequest("PUT", "http://api.alice.com/cors", nil)
--	httpRequest.Header.Set(HEADER_Origin, "http://api.bob.com")
--	httpRequest.Header.Set("X-Custom-Header", "value")
--
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
--	if "http://api.bob.com" != actual {
--		t.Fatal("expected: http://api.bob.com but got:" + actual)
--	}
--	if httpWriter.Body.String() != "dummy" {
--		t.Fatal("expected: dummy but got:" + httpWriter.Body.String())
--	}
--}
--
--var allowedDomainInput = []struct {
--	domains  []string
--	origin   string
--	accepted bool
--}{
--	{[]string{}, "http://anything.com", true},
--}
--
--// go test -v -test.run TestCORSFilter_AllowedDomains ...restful
--func TestCORSFilter_AllowedDomains(t *testing.T) {
--	for _, each := range allowedDomainInput {
--		tearDown()
--		ws := new(WebService)
--		ws.Route(ws.PUT("/cors").To(dummy))
--		Add(ws)
--
--		cors := CrossOriginResourceSharing{
--			AllowedDomains: each.domains,
--			CookiesAllowed: true,
--			Container:      DefaultContainer}
--		Filter(cors.Filter)
--
--		httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil)
--		httpRequest.Header.Set(HEADER_Origin, each.origin)
--		httpWriter := httptest.NewRecorder()
--		DefaultContainer.dispatch(httpWriter, httpRequest)
--		actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
--		if actual != each.origin && each.accepted {
--			t.Fatal("expected to be accepted")
--		}
--		if actual == each.origin && !each.accepted {
--			t.Fatal("did not expect to be accepted")
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/coverage.sh b/Godeps/_workspace/src/github.com/emicklei/go-restful/coverage.sh
-deleted file mode 100644
-index e27dbf1..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/coverage.sh
-+++ /dev/null
-@@ -1,2 +0,0 @@
--go test -coverprofile=coverage.out
--go tool cover -html=coverage.out
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/curly.go
-deleted file mode 100644
-index ce284f7..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly.go
-+++ /dev/null
-@@ -1,162 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"net/http"
--	"regexp"
--	"sort"
--	"strings"
--)
--
--// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
--type CurlyRouter struct{}
--
--// SelectRoute is part of the Router interface and returns the best match
--// for the WebService and its Route for the given Request.
--func (c CurlyRouter) SelectRoute(
--	webServices []*WebService,
--	httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
--
--	requestTokens := tokenizePath(httpRequest.URL.Path)
--
--	detectedService := c.detectWebService(requestTokens, webServices)
--	if detectedService == nil {
--		if trace {
--			traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
--		}
--		return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
--	}
--	candidateRoutes := c.selectRoutes(detectedService, requestTokens)
--	if len(candidateRoutes) == 0 {
--		if trace {
--			traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
--		}
--		return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
--	}
--	selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
--	if selectedRoute == nil {
--		return detectedService, nil, err
--	}
--	return detectedService, selectedRoute, nil
--}
--
--// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
--func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) []Route {
--	candidates := &sortableCurlyRoutes{[]*curlyRoute{}}
--	for _, each := range ws.routes {
--		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
--		if matches {
--			candidates.add(&curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
--		}
--	}
--	sort.Sort(sort.Reverse(candidates))
--	return candidates.routes()
--}
--
--// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
--func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
--	if len(routeTokens) < len(requestTokens) {
--		// proceed in matching only if last routeToken is wildcard
--		count := len(routeTokens)
--		if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
--			return false, 0, 0
--		}
--		// proceed
--	}
--	for i, routeToken := range routeTokens {
--		if i == len(requestTokens) {
--			// reached end of request path
--			return false, 0, 0
--		}
--		requestToken := requestTokens[i]
--		if strings.HasPrefix(routeToken, "{") {
--			paramCount++
--			if colon := strings.Index(routeToken, ":"); colon != -1 {
--				// match by regex
--				matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
--				if !matchesToken {
--					return false, 0, 0
--				}
--				if matchesRemainder {
--					break
--				}
--			}
--		} else { // no { prefix
--			if requestToken != routeToken {
--				return false, 0, 0
--			}
--			staticCount++
--		}
--	}
--	return true, paramCount, staticCount
--}
--
--// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
--// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
--func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
--	regPart := routeToken[colon+1 : len(routeToken)-1]
--	if regPart == "*" {
--		if trace {
--			traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
--		}
--		return true, true
--	}
--	matched, err := regexp.MatchString(regPart, requestToken)
--	return (matched && err == nil), false
--}
--
--// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
--// headers of the Request. See also RouterJSR311 in jsr311.go
--func (c CurlyRouter) detectRoute(candidateRoutes []Route, httpRequest *http.Request) (*Route, error) {
--	// tracing is done inside detectRoute
--	return RouterJSR311{}.detectRoute(candidateRoutes, httpRequest)
--}
--
--// detectWebService returns the best matching webService given the list of path tokens.
--// see also computeWebserviceScore
--func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
--	var best *WebService
--	score := -1
--	for _, each := range webServices {
--		matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
--		if matches && (eachScore > score) {
--			best = each
--			score = eachScore
--		}
--	}
--	return best
--}
--
--// computeWebserviceScore returns whether tokens match and
--// the weighted score of the longest matching consecutive tokens from the beginning.
--func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
--	if len(tokens) > len(requestTokens) {
--		return false, 0
--	}
--	score := 0
--	for i := 0; i < len(tokens); i++ {
--		each := requestTokens[i]
--		other := tokens[i]
--		if len(each) == 0 && len(other) == 0 {
--			score++
--			continue
--		}
--		if len(other) > 0 && strings.HasPrefix(other, "{") {
--			// no empty match
--			if len(each) == 0 {
--				return false, score
--			}
--			score += 1
--		} else {
--			// not a parameter
--			if each != other {
--				return false, score
--			}
--			score += (len(tokens) - i) * 10 //fuzzy
--		}
--	}
--	return true, score
--}
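
curly.go scores candidate routes by the number of static versus parameter tokens and understands {name:regexp} as well as the tail-matching {name:*} form. A minimal sketch of using it (handler, path and port are illustrative):

    package main

    import (
        "io"
        "net/http"

        "github.com/emicklei/go-restful"
    )

    func serveStatic(req *restful.Request, resp *restful.Response) {
        // {path:*} captures the remainder of the URL, e.g. "css/site.css"
        io.WriteString(resp.ResponseWriter, req.PathParameter("path"))
    }

    func main() {
        // Switch the default container from the JSR311 router to the CurlyRouter.
        restful.DefaultContainer.Router(restful.CurlyRouter{})

        ws := new(restful.WebService)
        ws.Route(ws.GET("/static/{path:*}").To(serveStatic))
        restful.Add(ws)

        http.ListenAndServe(":8080", nil)
    }
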
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_route.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_route.go
-deleted file mode 100644
-index 3edab72..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_route.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
--type curlyRoute struct {
--	route       Route
--	paramCount  int
--	staticCount int
--}
--
--type sortableCurlyRoutes struct {
--	candidates []*curlyRoute
--}
--
--func (s *sortableCurlyRoutes) add(route *curlyRoute) {
--	s.candidates = append(s.candidates, route)
--}
--
--func (s *sortableCurlyRoutes) routes() (routes []Route) {
--	for _, each := range s.candidates {
--		routes = append(routes, each.route) // TODO change return type
--	}
--	return routes
--}
--
--func (s *sortableCurlyRoutes) Len() int {
--	return len(s.candidates)
--}
--func (s *sortableCurlyRoutes) Swap(i, j int) {
--	s.candidates[i], s.candidates[j] = s.candidates[j], s.candidates[i]
--}
--func (s *sortableCurlyRoutes) Less(i, j int) bool {
--	ci := s.candidates[i]
--	cj := s.candidates[j]
--
--	// primary key
--	if ci.staticCount < cj.staticCount {
--		return true
--	}
--	if ci.staticCount > cj.staticCount {
--		return false
--	}
--	// secondary key
--	if ci.paramCount < cj.paramCount {
--		return true
--	}
--	if ci.paramCount > cj.paramCount {
--		return false
--	}
--	return ci.route.Path < cj.route.Path
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_test.go
-deleted file mode 100644
-index 31d66dc..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/curly_test.go
-+++ /dev/null
-@@ -1,231 +0,0 @@
--package restful
--
--import (
--	"io"
--	"net/http"
--	"testing"
--)
--
--var requestPaths = []struct {
--	// url with path (1) is handled by service with root (2) and remainder has value final (3)
--	// url with path (1) is handled by service with root (2)
--}{
--	{"/", "/"},
--	{"/p", "/p"},
--	{"/p/x", "/p/{q}"},
--	{"/q/x", "/q"},
--	{"/p/x/", "/p/{q}"},
--	{"/p/x/y", "/p/{q}"},
--	{"/q/x/y", "/q"},
--	{"/z/q", "/{p}/q"},
--	{"/a/b/c/q", "/"},
--}
--
--// go test -v -test.run TestCurlyDetectWebService ...restful
--func TestCurlyDetectWebService(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	ws2 := new(WebService).Path("/p")
--	ws3 := new(WebService).Path("/q")
--	ws4 := new(WebService).Path("/p/q")
--	ws5 := new(WebService).Path("/p/{q}")
--	ws7 := new(WebService).Path("/{p}/q")
--	var wss = []*WebService{ws1, ws2, ws3, ws4, ws5, ws7}
--
--	for _, each := range wss {
--		t.Logf("path=%s,toks=%v\n", each.pathExpr.Source, each.pathExpr.tokens)
--	}
--
--	router := CurlyRouter{}
--
--	ok := true
--	for i, fixture := range requestPaths {
--		requestTokens := tokenizePath(fixture.path)
--		who := router.detectWebService(requestTokens, wss)
--		if who != nil && who.RootPath() != fixture.root {
--			t.Logf("[line:%v] Unexpected dispatcher, expected:%v, actual:%v", i, fixture.root, who.RootPath())
--			ok = false
--		}
--	}
--	if !ok {
--		t.Fail()
--	}
--}
--
--var serviceDetects = []struct {
--	path  string
--	found bool
--	root  string
--}{
--	{"/a/b", true, "/{p}/{q}/{r}"},
--	{"/p/q", true, "/p/q"},
--	{"/q/p", true, "/q"},
--	{"/", true, "/"},
--	{"/p/q/r", true, "/p/q"},
--}
--
--// go test -v -test.run Test_detectWebService ...restful
--func Test_detectWebService(t *testing.T) {
--	router := CurlyRouter{}
--	ws1 := new(WebService).Path("/")
--	ws2 := new(WebService).Path("/p")
--	ws3 := new(WebService).Path("/q")
--	ws4 := new(WebService).Path("/p/q")
--	ws5 := new(WebService).Path("/p/{q}")
--	ws6 := new(WebService).Path("/p/{q}/")
--	ws7 := new(WebService).Path("/{p}/q")
--	ws8 := new(WebService).Path("/{p}/{q}/{r}")
--	var wss = []*WebService{ws8, ws7, ws6, ws5, ws4, ws3, ws2, ws1}
--	for _, fix := range serviceDetects {
--		requestPath := fix.path
--		requestTokens := tokenizePath(requestPath)
--		for _, ws := range wss {
--			serviceTokens := ws.pathExpr.tokens
--			matches, score := router.computeWebserviceScore(requestTokens, serviceTokens)
--			t.Logf("req=%s,toks:%v,ws=%s,toks:%v,score=%d,matches=%v", requestPath, requestTokens, ws.RootPath(), serviceTokens, score, matches)
--		}
--		best := router.detectWebService(requestTokens, wss)
--		if best != nil {
--			if fix.found {
--				t.Logf("best=%s", best.RootPath())
--			} else {
--				t.Fatalf("should have found:%s", fix.root)
--			}
--		}
--	}
--}
--
--var routeMatchers = []struct {
--	route       string
--	path        string
--	matches     bool
--	paramCount  int
--	staticCount int
--}{
--	// route, request-path
--	{"/a", "/a", true, 0, 1},
--	{"/a", "/b", false, 0, 0},
--	{"/a", "/b", false, 0, 0},
--	{"/a/{b}/c/", "/a/2/c", true, 1, 2},
--	{"/{a}/{b}/{c}/", "/a/b", false, 0, 0},
--	{"/{x:*}", "/", false, 0, 0},
--	{"/{x:*}", "/a", true, 1, 0},
--	{"/{x:*}", "/a/b", true, 1, 0},
--	{"/a/{x:*}", "/a/b", true, 1, 1},
--	{"/a/{x:[A-Z][A-Z]}", "/a/ZX", true, 1, 1},
--	{"/basepath/{resource:*}", "/basepath/some/other/location/test.xml", true, 1, 1},
--}
--
--// clear && go test -v -test.run Test_matchesRouteByPathTokens ...restful
--func Test_matchesRouteByPathTokens(t *testing.T) {
--	router := CurlyRouter{}
--	for i, each := range routeMatchers {
--		routeToks := tokenizePath(each.route)
--		reqToks := tokenizePath(each.path)
--		matches, pCount, sCount := router.matchesRouteByPathTokens(routeToks, reqToks)
--		if matches != each.matches {
--			t.Fatalf("[%d] unexpected matches outcome route:%s, path:%s, matches:%v", i, each.route, each.path, matches)
--		}
--		if pCount != each.paramCount {
--			t.Fatalf("[%d] unexpected paramCount got:%d want:%d ", i, pCount, each.paramCount)
--		}
--		if sCount != each.staticCount {
--			t.Fatalf("[%d] unexpected staticCount got:%d want:%d ", i, sCount, each.staticCount)
--		}
--	}
--}
--
--// clear && go test -v -test.run TestExtractParameters_Wildcard1 ...restful
--func TestExtractParameters_Wildcard1(t *testing.T) {
--	params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remainder", t)
--	if params["var"] != "remainder" {
--		t.Errorf("parameter mismatch var: %s", params["var"])
--	}
--}
--
--// clear && go test -v -test.run TestExtractParameters_Wildcard2 ...restful
--func TestExtractParameters_Wildcard2(t *testing.T) {
--	params := doExtractParams("/fixed/{var:*}", 2, "/fixed/remain/der", t)
--	if params["var"] != "remain/der" {
--		t.Errorf("parameter mismatch var: %s", params["var"])
--	}
--}
--
--// clear && go test -v -test.run TestExtractParameters_Wildcard3 ...restful
--func TestExtractParameters_Wildcard3(t *testing.T) {
--	params := doExtractParams("/static/{var:*}", 2, "/static/test/sub/hi.html", t)
--	if params["var"] != "test/sub/hi.html" {
--		t.Errorf("parameter mismatch var: %s", params["var"])
--	}
--}
--
--// clear && go test -v -test.run TestCurly_ISSUE_34 ...restful
--func TestCurly_ISSUE_34(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
--	ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
--	routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
--	if len(routes) != 2 {
--		t.Fatal("expected 2 routes")
--	}
--	if routes[0].Path != "/network/{id}" {
--		t.Error("first is", routes[0].Path)
--	}
--}
--
--// clear && go test -v -test.run TestCurly_ISSUE_34_2 ...restful
--func TestCurly_ISSUE_34_2(t *testing.T) {
--	ws1 := new(WebService)
--	ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
--	ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
--	routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
--	if len(routes) != 2 {
--		t.Fatal("expected 2 routes")
--	}
--	if routes[0].Path != "/network/{id}" {
--		t.Error("first is", routes[0].Path)
--	}
--}
--
--// clear && go test -v -test.run TestCurly_JsonHtml ...restful
--func TestCurly_JsonHtml(t *testing.T) {
--	ws1 := new(WebService)
--	ws1.Path("/")
--	ws1.Route(ws1.GET("/some.html").To(curlyDummy).Consumes("*/*").Produces("text/html"))
--	req, _ := http.NewRequest("GET", "/some.html", nil)
--	req.Header.Set("Accept", "application/json")
--	_, route, err := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
--	if err == nil {
--		t.Error("error expected")
--	}
--	if route != nil {
--		t.Error("no route expected")
--	}
--}
--
--// go test -v -test.run TestCurly_ISSUE_137 ...restful
--func TestCurly_ISSUE_137(t *testing.T) {
--	ws1 := new(WebService)
--	ws1.Route(ws1.GET("/hello").To(curlyDummy))
--	ws1.Path("/")
--	req, _ := http.NewRequest("GET", "/", nil)
--	_, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
--	t.Log(route)
--	if route != nil {
--		t.Error("no route expected")
--	}
--}
--
--// go test -v -test.run TestCurly_ISSUE_137_2 ...restful
--func TestCurly_ISSUE_137_2(t *testing.T) {
--	ws1 := new(WebService)
--	ws1.Route(ws1.GET("/hello").To(curlyDummy))
--	ws1.Path("/")
--	req, _ := http.NewRequest("GET", "/hello/bob", nil)
--	_, route, _ := CurlyRouter{}.SelectRoute([]*WebService{ws1}, req)
--	t.Log(route)
--	if route != nil {
--		t.Errorf("no route expected, got %v", route)
--	}
--}
--
--func curlyDummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "curlyDummy") }
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/doc.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/doc.go
-deleted file mode 100644
-index c095566..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/doc.go
-+++ /dev/null
-@@ -1,184 +0,0 @@
--/*
--Package restful, a lean package for creating REST-style WebServices without magic.
--
--WebServices and Routes
--
--A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
--Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
--WebServices must be added to a container (see below) in order to handle Http requests from a server.
--
--A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
--This package has the logic to find the best matching Route and if found, call its Function.
--
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_JSON, restful.MIME_XML).
--		Produces(restful.MIME_JSON, restful.MIME_XML)
--
--	ws.Route(ws.GET("/{user-id}").To(u.findUser))  // u is a UserResource
--
--	...
--
--	// GET http://localhost:8080/users/1
--	func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
--		id := request.PathParameter("user-id")
--		...
--	}
--
--The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
--
--See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
--
--Regular expression matching Routes
--
--A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
--For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
--Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
--This feature requires the use of a CurlyRouter.
--
--Containers
--
--A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
--Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
--The Default container of go-restful uses the http.DefaultServeMux.
--You can create your own Container and create a new http.Server for that particular container.
--
--	container := restful.NewContainer()
--	server := &http.Server{Addr: ":8081", Handler: container}
--
--Filters
--
--A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
--You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
--In the restful package there are three hooks into the request,response flow where filters can be added.
--Each filter must define a FilterFunction:
--
--	func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
--
--Use the following statement to pass the request,response pair to the next filter or RouteFunction
--
--	chain.ProcessFilter(req, resp)
--
--Container Filters
--
--These are processed before any registered WebService.
--
--	// install a (global) filter for the default container (processed before any webservice)
--	restful.Filter(globalLogging)
--
--WebService Filters
--
--These are processed before any Route of a WebService.
--
--	// install a webservice filter (processed before any route)
--	ws.Filter(webserviceLogging).Filter(measureTime)
--
--
--Route Filters
--
--These are processed before calling the function associated with the Route.
--
--	// install 2 chained route filters (processed before calling findUser)
--	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
--
--See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
--
--Response Encoding
--
--Two encodings are supported: gzip and deflate. To enable this for all responses:
--
--	restful.DefaultContainer.EnableContentEncoding(true)
--
--If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
--Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
--
--See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
--
--OPTIONS support
--
--By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
--
--	Filter(OPTIONSFilter())
--
--CORS
--
--By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
--
--	cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
--	Filter(cors.Filter)
--
--Error Handling
--
--Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
--For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
--
--	400: Bad Request
--
--If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
--
--	404: Not Found
--
--Despite a valid URI, the resource requested may not be available
--
--	500: Internal Server Error
--
--If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
--
--	405: Method Not Allowed
--
--The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
--
--	406: Not Acceptable
--
--The request is missing an Accept header, or the one it carries is not supported for this operation.
--
--	415: Unsupported Media Type
--
--The request is missing a Content-Type header, or the one it carries is not supported for this operation.
--
--ServiceError
--
--In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
--
--Performance options
--
--This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
--
--	restful.DefaultContainer.Router(CurlyRouter{})
--
--The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
--However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
--The CurlyRouter implementation is a more lightweight alternative that also lets you use wildcards and expressions, but only if needed.
--
--	restful.DefaultContainer.DoNotRecover(true)
--
--DoNotRecover controls whether panics will be caught to return HTTP 500.
--If set to true, Route functions are responsible for handling any error situation.
--Default value is false; it will recover from panics. This has performance implications.
--
--	restful.SetCacheReadEntity(false)
--
--SetCacheReadEntity controls whether the request content ([]byte) is cached such that ReadEntity is repeatable.
--If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
--
--Trouble shooting
--
--This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
--Enabling this feature requires you to set a log.Logger instance such as:
--
--	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
--
--Resources
--
--[project]: https://github.com/emicklei/go-restful
--
--[examples]: https://github.com/emicklei/go-restful/blob/master/examples
--
--[design]:  http://ernestmicklei.com/2012/11/11/go-restful-api-design/
--
--[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
--
--(c) 2012-2014, http://ernestmicklei.com. MIT License
--*/
--package restful
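
The snippets in the removed doc.go show the pieces in isolation; stitched together (resource type, entity fields and port are illustrative) they amount to a program like:

    package main

    import (
        "net/http"

        "github.com/emicklei/go-restful"
    )

    type User struct {
        ID string `json:"id" xml:"id"`
    }

    type UserResource struct{}

    func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
        id := request.PathParameter("user-id")
        response.WriteEntity(User{ID: id})
    }

    func main() {
        u := UserResource{}
        ws := new(restful.WebService)
        ws.Path("/users").
            Consumes(restful.MIME_JSON, restful.MIME_XML).
            Produces(restful.MIME_JSON, restful.MIME_XML)
        ws.Route(ws.GET("/{user-id}").To(u.findUser))

        restful.Add(ws) // register on the default container (http.DefaultServeMux)
        http.ListenAndServe(":8080", nil)
    }
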
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/doc_examples_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/doc_examples_test.go
-deleted file mode 100644
-index e1a2f3b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/doc_examples_test.go
-+++ /dev/null
-@@ -1,35 +0,0 @@
--package restful
--
--import "net/http"
--
--func ExampleOPTIONSFilter() {
--	// Install the OPTIONS filter on the default Container
--	Filter(OPTIONSFilter())
--}
--func ExampleContainer_OPTIONSFilter() {
--	// Install the OPTIONS filter on a Container
--	myContainer := new(Container)
--	myContainer.Filter(myContainer.OPTIONSFilter)
--}
--
--func ExampleContainer() {
--	// The Default container of go-restful uses the http.DefaultServeMux.
--	// You can create your own Container using restful.NewContainer() and create a new http.Server for that particular container
--
--	ws := new(WebService)
--	wsContainer := NewContainer()
--	wsContainer.Add(ws)
--	server := &http.Server{Addr: ":8080", Handler: wsContainer}
--	server.ListenAndServe()
--}
--
--func ExampleCrossOriginResourceSharing() {
--	// To install this filter on the Default Container use:
--	cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
--	Filter(cors.Filter)
--}
--
--func ExampleServiceError() {
--	resp := new(Response)
--	resp.WriteEntity(NewError(http.StatusBadRequest, "Non-integer {id} path parameter"))
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/.goconvey b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/.goconvey
-deleted file mode 100644
-index 8485e98..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/.goconvey
-+++ /dev/null
-@@ -1 +0,0 @@
--ignore
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey
-deleted file mode 100644
-index 8485e98..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey
-+++ /dev/null
-@@ -1 +0,0 @@
--ignore
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml
-deleted file mode 100644
-index 362db6b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml
-+++ /dev/null
-@@ -1,20 +0,0 @@
--#
--# Include your application ID here
--#
--application: <your_app_id>
--version: 1
--runtime: go
--api_version: go1
--
--handlers:
--#
--# Regex for all swagger files to make as static content.
--# You should create the folder static/swagger and copy
--# swagger-ui into it.
--#
--- url: /apidocs/(.*?)/(.*\.(js|html|css))
--  static_files: static/swagger/\1/\2
--  upload: static/swagger/(.*?)/(.*\.(js|html|css))
--
--- url: /.*
--  script: _go_app
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey
-deleted file mode 100644
-index 8485e98..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey
-+++ /dev/null
-@@ -1 +0,0 @@
--ignore
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/app.yaml b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/app.yaml
-deleted file mode 100644
-index 33883d1..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/app.yaml
-+++ /dev/null
-@@ -1,18 +0,0 @@
--application: datastore-example
--version: 1
--runtime: go
--api_version: go1
--
--handlers:
--# Regex for all swagger files to make as static content.
--# You should create the folder static/swagger and copy
--# swagger-ui into it.
--#
--- url: /apidocs/(.*?)/(.*\.(js|html|css))
--  static_files: static/swagger/\1/\2
--  upload: static/swagger/(.*?)/(.*\.(js|html|css))
--
--# Catch all.
--- url: /.*
--  script: _go_app
--  login: required
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go
-deleted file mode 100644
-index cf832ef..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go
-+++ /dev/null
-@@ -1,266 +0,0 @@
--package main
--
--import (
--	"appengine"
--	"appengine/datastore"
--	"appengine/user"
--	"github.com/emicklei/go-restful"
--	"github.com/emicklei/go-restful/swagger"
--	"net/http"
--	"time"
--)
--
--// This example demonstrates a reasonably complete suite of RESTful operations backed
--// by DataStore on Google App Engine.
--
--// Our simple example struct.
--type Profile struct {
--	LastModified time.Time `json:"-" xml:"-"`
--	Email        string    `json:"-" xml:"-"`
--	FirstName    string    `json:"first_name" xml:"first-name"`
--	NickName     string    `json:"nick_name" xml:"nick-name"`
--	LastName     string    `json:"last_name" xml:"last-name"`
--}
--
--type ProfileApi struct {
--	Path string
--}
--
--func gaeUrl() string {
--	if appengine.IsDevAppServer() {
--		return "http://localhost:8080"
--	} else {
--		// Include your URL on App Engine here.
--		// I found no way to get the AppID without an appengine.Context, and that is always
--		// based on an http.Request.
--		return "http://federatedservices.appspot.com"
--	}
--}
--
--func init() {
--	u := ProfileApi{Path: "/profiles"}
--	u.register()
--
--	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API
--	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
--	// Place the Swagger UI files into a folder called static/swagger if you wish to use Swagger.
--	// Open <your_app_id>.appspot.com/apidocs and enter
--	// http://<your_app_id>.appspot.com/apidocs.json in the api input field.
--	// For testing, you can use http://localhost:8080/apidocs.json
--	config := swagger.Config{
--		// You control what services are visible
--		WebServices:    restful.RegisteredWebServices(),
--		WebServicesUrl: gaeUrl(),
--		ApiPath:        "/apidocs.json",
--
--		// Optionally, specify where the UI is located
--		SwaggerPath: "/apidocs/",
--
--		// GAE support static content which is configured in your app.yaml.
--		// This example expects the swagger-ui in static/swagger so you should place it there :)
--		SwaggerFilePath: "static/swagger"}
--	swagger.InstallSwaggerService(config)
--}
--
--func (u ProfileApi) register() {
--	ws := new(restful.WebService)
--
--	ws.
--		Path(u.Path).
--		// You can specify consumes and produces per route as well.
--		Consumes(restful.MIME_JSON, restful.MIME_XML).
--		Produces(restful.MIME_JSON, restful.MIME_XML)
--
--	ws.Route(ws.POST("").To(u.insert).
--		// Swagger documentation.
--		Doc("insert a new profile").
--		Param(ws.BodyParameter("Profile", "representation of a profile").DataType("main.Profile")).
--		Reads(Profile{}))
--
--	ws.Route(ws.GET("/{profile-id}").To(u.read).
--		// Swagger documentation.
--		Doc("read a profile").
--		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")).
--		Writes(Profile{}))
--
--	ws.Route(ws.PUT("/{profile-id}").To(u.update).
--		// Swagger documentation.
--		Doc("update an existing profile").
--		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")).
--		Param(ws.BodyParameter("Profile", "representation of a profile").DataType("main.Profile")).
--		Reads(Profile{}))
--
--	ws.Route(ws.DELETE("/{profile-id}").To(u.remove).
--		// Swagger documentation.
--		Doc("remove a profile").
--		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")))
--
--	restful.Add(ws)
--}
--
--// POST http://localhost:8080/profiles
--// {"first_name": "Ivan", "nick_name": "Socks", "last_name": "Hawkes"}
--//
--func (u *ProfileApi) insert(r *restful.Request, w *restful.Response) {
--	c := appengine.NewContext(r.Request)
--
--	// Unmarshal the entity from the request into a struct.
--	p := new(Profile)
--	err := r.ReadEntity(&p)
--	if err != nil {
--		w.WriteError(http.StatusNotAcceptable, err)
--		return
--	}
--
--	// Ensure we start with a sensible value for this field.
--	p.LastModified = time.Now()
--
--	// The profile belongs to this user.
--	p.Email = user.Current(c).String()
--
--	k, err := datastore.Put(c, datastore.NewIncompleteKey(c, "profiles", nil), p)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusInternalServerError)
--		return
--	}
--
--	// Let them know the location of the newly created resource.
--	// TODO: Use a safe Url path append function.
--	w.AddHeader("Location", u.Path+"/"+k.Encode())
--
--	// Return the resultant entity.
--	w.WriteHeader(http.StatusCreated)
--	w.WriteEntity(p)
--}
--
--// GET http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
--//
--func (u ProfileApi) read(r *restful.Request, w *restful.Response) {
--	c := appengine.NewContext(r.Request)
--
--	// Decode the request parameter to determine the key for the entity.
--	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusBadRequest)
--		return
--	}
--
--	// Retrieve the entity from the datastore.
--	p := Profile{}
--	if err := datastore.Get(c, k, &p); err != nil {
--		if err.Error() == "datastore: no such entity" {
--			http.Error(w, err.Error(), http.StatusNotFound)
--		} else {
--			http.Error(w, err.Error(), http.StatusInternalServerError)
--		}
--		return
--	}
--
--	// Check we own the profile before allowing them to view it.
--	// Optionally, return a 404 instead to help prevent guessing ids.
--	// TODO: Allow admins access.
--	if p.Email != user.Current(c).String() {
--		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
--		return
--	}
--
--	w.WriteEntity(p)
--}
--
--// PUT http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
--// {"first_name": "Ivan", "nick_name": "Socks", "last_name": "Hawkes"}
--//
--func (u *ProfileApi) update(r *restful.Request, w *restful.Response) {
--	c := appengine.NewContext(r.Request)
--
--	// Decode the request parameter to determine the key for the entity.
--	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusBadRequest)
--		return
--	}
--
--	// Unmarshal the entity from the request into a struct.
--	p := new(Profile)
--	err = r.ReadEntity(&p)
--	if err != nil {
--		w.WriteError(http.StatusNotAcceptable, err)
--		return
--	}
--
--	// Retrieve the old entity from the datastore.
--	old := Profile{}
--	if err := datastore.Get(c, k, &old); err != nil {
--		if err.Error() == "datastore: no such entity" {
--			http.Error(w, err.Error(), http.StatusNotFound)
--		} else {
--			http.Error(w, err.Error(), http.StatusInternalServerError)
--		}
--		return
--	}
--
--	// Check we own the profile before allowing them to update it.
--	// Optionally, return a 404 instead to help prevent guessing ids.
--	// TODO: Allow admins access.
--	if old.Email != user.Current(c).String() {
--		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
--		return
--	}
--
--	// Since the whole entity is re-written, we need to assign any invariant fields again
--	// e.g. the owner of the entity.
--	p.Email = user.Current(c).String()
--
--	// Keep track of the last modification date.
--	p.LastModified = time.Now()
--
--	// Attempt to overwrite the old entity.
--	_, err = datastore.Put(c, k, p)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusInternalServerError)
--		return
--	}
--
--	// Let them know it succeeded.
--	w.WriteHeader(http.StatusNoContent)
--}
--
--// DELETE http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
--//
--func (u *ProfileApi) remove(r *restful.Request, w *restful.Response) {
--	c := appengine.NewContext(r.Request)
--
--	// Decode the request parameter to determine the key for the entity.
--	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusBadRequest)
--		return
--	}
--
--	// Retrieve the old entity from the datastore.
--	old := Profile{}
--	if err := datastore.Get(c, k, &old); err != nil {
--		if err.Error() == "datastore: no such entity" {
--			http.Error(w, err.Error(), http.StatusNotFound)
--		} else {
--			http.Error(w, err.Error(), http.StatusInternalServerError)
--		}
--		return
--	}
--
--	// Check we own the profile before allowing them to delete it.
--	// Optionally, return a 404 instead to help prevent guessing ids.
--	// TODO: Allow admins access.
--	if old.Email != user.Current(c).String() {
--		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
--		return
--	}
--
--	// Delete the entity.
--	if err := datastore.Delete(c, k); err != nil {
--		http.Error(w, err.Error(), http.StatusInternalServerError)
--	}
--
--	// Success notification.
--	w.WriteHeader(http.StatusNoContent)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
-deleted file mode 100644
-index b3261ee..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"github.com/mjibson/appstats"
--)
--
--func stats(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	c := appstats.NewContext(req.Request)
--	chain.ProcessFilter(req, resp)
--	c.Stats.Status = resp.StatusCode()
--	c.Save()
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
-deleted file mode 100644
-index e97ba2e..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
-+++ /dev/null
-@@ -1,161 +0,0 @@
--package main
--
--import (
--	"appengine"
--	"appengine/memcache"
--	"github.com/emicklei/go-restful"
--	"github.com/emicklei/go-restful/swagger"
--	"net/http"
--)
--
--// This example is functionally the same as ../restful-user-service.go
--// but it's supposed to run on Google App Engine (GAE)
--//
--// contributed by ivanhawkes
--
--type User struct {
--	Id, Name string
--}
--
--type UserService struct {
--	// normally one would use DAO (data access object)
--	// but in this example we simply use memcache.
--}
--
--func (u UserService) Register() {
--	ws := new(restful.WebService)
--
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
--
--	ws.Route(ws.GET("/{user-id}").To(u.findUser).
--		// docs
--		Doc("get a user").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		Writes(User{})) // on the response
--
--	ws.Route(ws.PATCH("").To(u.updateUser).
--		// docs
--		Doc("update a user").
--		Reads(User{})) // from the request
--
--	ws.Route(ws.PUT("/{user-id}").To(u.createUser).
--		// docs
--		Doc("create a user").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		Reads(User{})) // from the request
--
--	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
--		// docs
--		Doc("delete a user").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
--
--	restful.Add(ws)
--}
--
--// GET http://localhost:8080/users/1
--//
--func (u UserService) findUser(request *restful.Request, response *restful.Response) {
--	c := appengine.NewContext(request.Request)
--	id := request.PathParameter("user-id")
--	usr := new(User)
--	_, err := memcache.Gob.Get(c, id, &usr)
--	if err != nil || len(usr.Id) == 0 {
--		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
--	} else {
--		response.WriteEntity(usr)
--	}
--}
--
--// PATCH http://localhost:8080/users
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--func (u *UserService) updateUser(request *restful.Request, response *restful.Response) {
--	c := appengine.NewContext(request.Request)
--	usr := new(User)
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		item := &memcache.Item{
--			Key:    usr.Id,
--			Object: &usr,
--		}
--		err = memcache.Gob.Set(c, item)
--		if err != nil {
--			response.WriteError(http.StatusInternalServerError, err)
--			return
--		}
--		response.WriteEntity(usr)
--	} else {
--		response.WriteError(http.StatusInternalServerError, err)
--	}
--}
--
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa</Name></User>
--//
--func (u *UserService) createUser(request *restful.Request, response *restful.Response) {
--	c := appengine.NewContext(request.Request)
--	usr := User{Id: request.PathParameter("user-id")}
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		item := &memcache.Item{
--			Key:    usr.Id,
--			Object: &usr,
--		}
--		err = memcache.Gob.Add(c, item)
--		if err != nil {
--			response.WriteError(http.StatusInternalServerError, err)
--			return
--		}
--		response.WriteHeader(http.StatusCreated)
--		response.WriteEntity(usr)
--	} else {
--		response.WriteError(http.StatusInternalServerError, err)
--	}
--}
--
--// DELETE http://localhost:8080/users/1
--//
--func (u *UserService) removeUser(request *restful.Request, response *restful.Response) {
--	c := appengine.NewContext(request.Request)
--	id := request.PathParameter("user-id")
--	err := memcache.Delete(c, id)
--	if err != nil {
--		response.WriteError(http.StatusInternalServerError, err)
--	}
--}
--
--func getGaeURL() string {
--	if appengine.IsDevAppServer() {
--		return "http://localhost:8080"
--	} else {
--		/**
--		 * Include your URL on App Engine here.
--		 * I found no way to get the AppID without an appengine.Context, and that is always
--		 * based on an http.Request.
--		 */
--		return "http://<your_app_id>.appspot.com"
--	}
--}
--
--func init() {
--	u := UserService{}
--	u.Register()
--
--	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API
--	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
--	// Open <your_app_id>.appspot.com/apidocs and enter http://<your_app_id>.appspot.com/apidocs.json in the api input field.
--	config := swagger.Config{
--		WebServices:    restful.RegisteredWebServices(), // you control what services are visible
--		WebServicesUrl: getGaeURL(),
--		ApiPath:        "/apidocs.json",
--
--		// Optionally, specify where the UI is located
--		SwaggerPath: "/apidocs/",
--		// GAE support static content which is configured in your app.yaml.
--		// This example expects the swagger-ui in static/swagger so you should place it there :)
--		SwaggerFilePath: "static/swagger"}
--	swagger.InstallSwaggerService(config)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/home.html b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/home.html
-deleted file mode 100644
-index e5d49b4..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/home.html
-+++ /dev/null
-@@ -1,7 +0,0 @@
--<!DOCTYPE html>
--
--<html>
--<body>
--	<h1>{{.Text}}</h1>
--</body>
--</html>
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-CORS-filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
-deleted file mode 100644
-index aacaa3d..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
-+++ /dev/null
-@@ -1,67 +0,0 @@
--package main
--
--import (
--	"io"
--	"log"
--	"net/http"
--
--	"github.com/emicklei/go-restful"
--)
--
--// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
--// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
--//
--// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
--// http://enable-cors.org/server.html
--//
--// GET http://localhost:8080/users
--//
--// GET http://localhost:8080/users/1
--//
--// PUT http://localhost:8080/users/1
--//
--// DELETE http://localhost:8080/users/1
--//
--// OPTIONS http://localhost:8080/users/1  with Header "Origin" set to some domain and
--
--type UserResource struct{}
--
--func (u UserResource) RegisterTo(container *restful.Container) {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes("*/*").
--		Produces("*/*")
--
--	ws.Route(ws.GET("/{user-id}").To(u.nop))
--	ws.Route(ws.POST("").To(u.nop))
--	ws.Route(ws.PUT("/{user-id}").To(u.nop))
--	ws.Route(ws.DELETE("/{user-id}").To(u.nop))
--
--	container.Add(ws)
--}
--
--func (u UserResource) nop(request *restful.Request, response *restful.Response) {
--	io.WriteString(response.ResponseWriter, "this would be a normal response")
--}
--
--func main() {
--	wsContainer := restful.NewContainer()
--	u := UserResource{}
--	u.RegisterTo(wsContainer)
--
--	// Add container filter to enable CORS
--	cors := restful.CrossOriginResourceSharing{
--		ExposeHeaders:  []string{"X-My-Header"},
--		AllowedHeaders: []string{"Content-Type"},
--		CookiesAllowed: false,
--		Container:      wsContainer}
--	wsContainer.Filter(cors.Filter)
--
--	// Add container filter to respond to OPTIONS
--	wsContainer.Filter(wsContainer.OPTIONSFilter)
--
--	log.Printf("start listening on localhost:8080")
--	server := &http.Server{Addr: ":8080", Handler: wsContainer}
--	log.Fatal(server.ListenAndServe())
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go
-deleted file mode 100644
-index 0cda50d..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"log"
--	"net/http"
--	"os"
--	"strings"
--	"time"
--)
--
--// This example shows how to create a filter that produces log lines
--// according to the Common Log Format, also known as the NCSA standard.
--//
--// kindly contributed by leehambley
--//
--// GET http://localhost:8080/ping
--
--var logger *log.Logger = log.New(os.Stdout, "", 0)
--
--func NCSACommonLogFormatLogger() restful.FilterFunction {
--	return func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--		var username = "-"
--		if req.Request.URL.User != nil {
--			if name := req.Request.URL.User.Username(); name != "" {
--				username = name
--			}
--		}
--		chain.ProcessFilter(req, resp)
--		logger.Printf("%s - %s [%s] \"%s %s %s\" %d %d",
--			strings.Split(req.Request.RemoteAddr, ":")[0],
--			username,
--			time.Now().Format("02/Jan/2006:15:04:05 -0700"),
--			req.Request.Method,
--			req.Request.URL.RequestURI(),
--			req.Request.Proto,
--			resp.StatusCode(),
--			resp.ContentLength(),
--		)
--	}
--}
--
--func main() {
--	ws := new(restful.WebService)
--	ws.Filter(NCSACommonLogFormatLogger())
--	ws.Route(ws.GET("/ping").To(hello))
--	restful.Add(ws)
--	http.ListenAndServe(":8080", nil)
--}
--
--func hello(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "pong")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-basic-authentication.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-basic-authentication.go
-deleted file mode 100644
-index 5dd3067..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-basic-authentication.go
-+++ /dev/null
-@@ -1,35 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"net/http"
--)
--
--// This example shows how to create a (Route) Filter that performs Basic Authentication on the Http request.
--//
--// GET http://localhost:8080/secret
--// and use admin,admin for the credentials
--
--func main() {
--	ws := new(restful.WebService)
--	ws.Route(ws.GET("/secret").Filter(basicAuthenticate).To(secret))
--	restful.Add(ws)
--	http.ListenAndServe(":8080", nil)
--}
--
--func basicAuthenticate(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	encoded := req.Request.Header.Get("Authorization")
--	// usr/pwd = admin/admin
--	// real code does some decoding
--	if len(encoded) == 0 || "Basic YWRtaW46YWRtaW4=" != encoded {
--		resp.AddHeader("WWW-Authenticate", "Basic realm=Protected Area")
--		resp.WriteErrorString(401, "401: Not Authorized")
--		return
--	}
--	chain.ProcessFilter(req, resp)
--}
--
--func secret(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "42")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go
-deleted file mode 100644
-index 9148213..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go
-+++ /dev/null
-@@ -1,65 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"log"
--	"os"
--	"runtime/pprof"
--)
--
--// ProfilingService is a WebService that can start/stop a CPU profile and write results to a file
--// 	GET /{rootPath}/start will activate CPU profiling
--//	GET /{rootPath}/stop will stop profiling
--//
--// NewProfileService("/profiler", "ace.prof").AddWebServiceTo(restful.DefaultContainer)
--//
--type ProfilingService struct {
--	rootPath   string   // the base (root) of the service, e.g. /profiler
--	cpuprofile string   // the output filename to write profile results, e.g. myservice.prof
--	cpufile    *os.File // if not nil, then profiling is active
--}
--
--func NewProfileService(rootPath string, outputFilename string) *ProfilingService {
--	ps := new(ProfilingService)
--	ps.rootPath = rootPath
--	ps.cpuprofile = outputFilename
--	return ps
--}
--
--// Add this ProfileService to a restful Container
--func (p ProfilingService) AddWebServiceTo(container *restful.Container) {
--	ws := new(restful.WebService)
--	ws.Path(p.rootPath).Consumes("*/*").Produces(restful.MIME_JSON)
--	ws.Route(ws.GET("/start").To(p.startProfiler))
--	ws.Route(ws.GET("/stop").To(p.stopProfiler))
--	container.Add(ws)
--}
--
--func (p *ProfilingService) startProfiler(req *restful.Request, resp *restful.Response) {
--	if p.cpufile != nil {
--		io.WriteString(resp.ResponseWriter, "[restful] CPU profiling already running")
--		return // error?
--	}
--	cpufile, err := os.Create(p.cpuprofile)
--	if err != nil {
--		log.Fatal(err)
--	}
--	// remember for close
--	p.cpufile = cpufile
--	pprof.StartCPUProfile(cpufile)
--	io.WriteString(resp.ResponseWriter, "[restful] CPU profiling started, writing on:"+p.cpuprofile)
--}
--
--func (p *ProfilingService) stopProfiler(req *restful.Request, resp *restful.Response) {
--	if p.cpufile == nil {
--		io.WriteString(resp.ResponseWriter, "[restful] CPU profiling not active")
--		return // error?
--	}
--	pprof.StopCPUProfile()
--	p.cpufile.Close()
--	p.cpufile = nil
--	io.WriteString(resp.ResponseWriter, "[restful] CPU profiling stopped, closing:"+p.cpuprofile)
--}
--
--func main() {} // exists for example compilation only
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-curly-router.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-curly-router.go
-deleted file mode 100644
-index 1b95dd0..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-curly-router.go
-+++ /dev/null
-@@ -1,107 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"log"
--	"net/http"
--)
--
--// This example has the same service definition as restful-user-resource
--// but uses a different router (CurlyRouter) that does not use regular expressions
--//
--// POST http://localhost:8080/users
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--// GET http://localhost:8080/users/1
--//
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa</Name></User>
--//
--// DELETE http://localhost:8080/users/1
--//
--
--type User struct {
--	Id, Name string
--}
--
--type UserResource struct {
--	// normally one would use DAO (data access object)
--	users map[string]User
--}
--
--func (u UserResource) Register(container *restful.Container) {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
--
--	ws.Route(ws.GET("/{user-id}").To(u.findUser))
--	ws.Route(ws.POST("").To(u.updateUser))
--	ws.Route(ws.PUT("/{user-id}").To(u.createUser))
--	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser))
--
--	container.Add(ws)
--}
--
--// GET http://localhost:8080/users/1
--//
--func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	usr := u.users[id]
--	if len(usr.Id) == 0 {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
--	} else {
--		response.WriteEntity(usr)
--	}
--}
--
--// POST http://localhost:8080/users
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {
--	usr := new(User)
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		u.users[usr.Id] = *usr
--		response.WriteEntity(usr)
--	} else {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusInternalServerError, err.Error())
--	}
--}
--
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa</Name></User>
--//
--func (u *UserResource) createUser(request *restful.Request, response *restful.Response) {
--	usr := User{Id: request.PathParameter("user-id")}
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		u.users[usr.Id] = usr
--		response.WriteHeader(http.StatusCreated)
--		response.WriteEntity(usr)
--	} else {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusInternalServerError, err.Error())
--	}
--}
--
--// DELETE http://localhost:8080/users/1
--//
--func (u *UserResource) removeUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	delete(u.users, id)
--}
--
--func main() {
--	wsContainer := restful.NewContainer()
--	wsContainer.Router(restful.CurlyRouter{})
--	u := UserResource{map[string]User{}}
--	u.Register(wsContainer)
--
--	log.Printf("start listening on localhost:8080")
--	server := &http.Server{Addr: ":8080", Handler: wsContainer}
--	log.Fatal(server.ListenAndServe())
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-encoding-filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
-deleted file mode 100644
-index 6094c49..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
-+++ /dev/null
-@@ -1,61 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"log"
--	"net/http"
--)
--
--type User struct {
--	Id, Name string
--}
--
--type UserList struct {
--	Users []User
--}
--
--//
--// This example shows how to use the CompressingResponseWriter from a Filter
--// so that encoding can be enabled per WebService or per Route (instead of per Container).
--// Using restful.DefaultContainer.EnableContentEncoding(true) will encode all responses served by WebServices in the DefaultContainer.
--//
--// Set Accept-Encoding to gzip or deflate
--// GET http://localhost:8080/users/42
--// and look at the response headers
--
--func main() {
--	restful.Add(NewUserService())
--	log.Printf("start listening on localhost:8080")
--	log.Fatal(http.ListenAndServe(":8080", nil))
--}
--
--func NewUserService() *restful.WebService {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML)
--
--	// install a response encoding filter
--	ws.Route(ws.GET("/{user-id}").Filter(encodingFilter).To(findUser))
--	return ws
--}
--
--// Route Filter (defines FilterFunction)
--func encodingFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	log.Printf("[encoding-filter] %s,%s\n", req.Request.Method, req.Request.URL)
--	// wrap responseWriter into a compressing one
--	compress, _ := restful.NewCompressingResponseWriter(resp.ResponseWriter, restful.ENCODING_GZIP)
--	resp.ResponseWriter = compress
--	defer func() {
--		compress.Close()
--	}()
--	chain.ProcessFilter(req, resp)
--}
--
--// GET http://localhost:8080/users/42
--//
--func findUser(request *restful.Request, response *restful.Response) {
--	log.Printf("findUser")
--	response.WriteEntity(User{"42", "Gandalf"})
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-filters.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-filters.go
-deleted file mode 100644
-index 47e1146..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-filters.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"log"
--	"net/http"
--	"time"
--)
--
--type User struct {
--	Id, Name string
--}
--
--type UserList struct {
--	Users []User
--}
--
--// This example shows how to create and use the three different Filters (Container, WebService and Route).
--// When applied to the restful.DefaultContainer, they are referred to as global filters.
--//
--// GET http://localhost:8080/users/42
--// and see the logging per filter (try repeating this request)
--
--func main() {
--	// install a global (=DefaultContainer) filter (processed before any webservice in the DefaultContainer)
--	restful.Filter(globalLogging)
--
--	restful.Add(NewUserService())
--	log.Printf("start listening on localhost:8080")
--	log.Fatal(http.ListenAndServe(":8080", nil))
--}
--
--func NewUserService() *restful.WebService {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML)
--
--	// install a webservice filter (processed before any route)
--	ws.Filter(webserviceLogging).Filter(measureTime)
--
--	// install a counter filter
--	ws.Route(ws.GET("").Filter(NewCountFilter().routeCounter).To(getAllUsers))
--
--	// install 2 chained route filters (processed before calling findUser)
--	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
--	return ws
--}
--
--// Global Filter
--func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	log.Printf("[global-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
--	chain.ProcessFilter(req, resp)
--}
--
--// WebService Filter
--func webserviceLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	log.Printf("[webservice-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
--	chain.ProcessFilter(req, resp)
--}
--
--// WebService (post-process) Filter (as a struct that defines a FilterFunction)
--func measureTime(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	now := time.Now()
--	chain.ProcessFilter(req, resp)
--	log.Printf("[webservice-filter (timer)] %v\n", time.Now().Sub(now))
--}
--
--// Route Filter (defines FilterFunction)
--func routeLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	log.Printf("[route-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
--	chain.ProcessFilter(req, resp)
--}
--
--// Route Filter (as a struct that defines a FilterFunction)
--// CountFilter implements a FilterFunction for counting requests.
--type CountFilter struct {
--	count   int
--	counter chan int // for go-routine safe count increments
--}
--
--// NewCountFilter creates and initializes a new CountFilter.
--func NewCountFilter() *CountFilter {
--	c := new(CountFilter)
--	c.counter = make(chan int)
--	go func() {
--		for {
--			c.count += <-c.counter
--		}
--	}()
--	return c
--}
--
--// routeCounter increments the count of the filter (through a channel)
--func (c *CountFilter) routeCounter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	c.counter <- 1
--	log.Printf("[route-filter (counter)] count:%d", c.count)
--	chain.ProcessFilter(req, resp)
--}
--
--// GET http://localhost:8080/users
--//
--func getAllUsers(request *restful.Request, response *restful.Response) {
--	log.Printf("getAllUsers")
--	response.WriteEntity(UserList{[]User{User{"42", "Gandalf"}, User{"3.14", "Pi"}}})
--}
--
--// GET http://localhost:8080/users/42
--//
--func findUser(request *restful.Request, response *restful.Response) {
--	log.Printf("findUser")
--	response.WriteEntity(User{"42", "Gandalf"})
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-form-handling.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-form-handling.go
-deleted file mode 100644
-index a83db44..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-form-handling.go
-+++ /dev/null
-@@ -1,62 +0,0 @@
--package main
--
--import (
--	"fmt"
--	"github.com/emicklei/go-restful"
--	"github.com/gorilla/schema"
--	"io"
--	"net/http"
--)
--
--// This example shows how to handle a POST of an HTML form that uses the standard x-www-form-urlencoded content type.
--// It uses the gorilla web toolkit schema package to decode the form data into a struct.
--//
--// GET http://localhost:8080/profiles
--//
--
--type Profile struct {
--	Name string
--	Age  int
--}
--
--var decoder *schema.Decoder
--
--func main() {
--	decoder = schema.NewDecoder()
--	ws := new(restful.WebService)
--	ws.Route(ws.POST("/profiles").Consumes("application/x-www-form-urlencoded").To(postAdddress))
--	ws.Route(ws.GET("/profiles").To(addresssForm))
--	restful.Add(ws)
--	http.ListenAndServe(":8080", nil)
--}
--
--func postAdddress(req *restful.Request, resp *restful.Response) {
--	err := req.Request.ParseForm()
--	if err != nil {
--		resp.WriteErrorString(http.StatusBadRequest, err.Error())
--		return
--	}
--	p := new(Profile)
--	err = decoder.Decode(p, req.Request.PostForm)
--	if err != nil {
--		resp.WriteErrorString(http.StatusBadRequest, err.Error())
--		return
--	}
--	io.WriteString(resp.ResponseWriter, fmt.Sprintf("<html><body>Name=%s, Age=%d</body></html>", p.Name, p.Age))
--}
--
--func addresssForm(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp.ResponseWriter,
--		`<html>
--		<body>
--		<h1>Enter Profile</h1>
--		<form method="post">
--		    <label>Name:</label>
--			<input type="text" name="Name"/>
--			<label>Age:</label>
--		    <input type="text" name="Age"/>
--			<input type="Submit" />
--		</form>
--		</body>
--		</html>`)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-hello-world.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-hello-world.go
-deleted file mode 100644
-index a21c2a6..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-hello-world.go
-+++ /dev/null
-@@ -1,22 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"net/http"
--)
--
--// This example shows the minimal code needed to get a restful.WebService working.
--//
--// GET http://localhost:8080/hello
--
--func main() {
--	ws := new(restful.WebService)
--	ws.Route(ws.GET("/hello").To(hello))
--	restful.Add(ws)
--	http.ListenAndServe(":8080", nil)
--}
--
--func hello(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "world")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-html-template.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-html-template.go
-deleted file mode 100644
-index de51c59..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-html-template.go
-+++ /dev/null
-@@ -1,35 +0,0 @@
--package main
--
--import (
--	"log"
--	"net/http"
--	"text/template"
--
--	"github.com/emicklei/go-restful"
--)
--
--// This example shows how to serve an HTML page using the standard Go template engine.
--//
--// GET http://localhost:8080/
--
--func main() {
--	ws := new(restful.WebService)
--	ws.Route(ws.GET("/").To(home))
--	restful.Add(ws)
--	print("open browser on http://localhost:8080/\n")
--	http.ListenAndServe(":8080", nil)
--}
--
--type Message struct {
--	Text string
--}
--
--func home(req *restful.Request, resp *restful.Response) {
--	p := &Message{"restful-html-template demo"}
--	// you might want to cache compiled templates
--	t, err := template.ParseFiles("home.html")
--	if err != nil {
--		log.Fatalf("Template gave: %s", err)
--	}
--	t.Execute(resp.ResponseWriter, p)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-multi-containers.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-multi-containers.go
-deleted file mode 100644
-index 3f1650b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-multi-containers.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"log"
--	"net/http"
--)
--
--// This example shows how to have a program with two WebService containers,
--// each having an http server listening on its own port.
--//
--// The first "hello" is added to the restful.DefaultContainer (and uses DefaultServeMux)
--// For the second "hello", a new container and ServeMux are created,
--// which requires a new http.Server with the container as its Handler.
--// The first server is spawned in its own go-routine so that the program can proceed to create the second.
--//
--// GET http://localhost:8080/hello
--// GET http://localhost:8081/hello
--
--func main() {
--	ws := new(restful.WebService)
--	ws.Route(ws.GET("/hello").To(hello))
--	restful.Add(ws)
--	go func() {
--		http.ListenAndServe(":8080", nil)
--	}()
--
--	container2 := restful.NewContainer()
--	ws2 := new(restful.WebService)
--	ws2.Route(ws2.GET("/hello").To(hello2))
--	container2.Add(ws2)
--	server := &http.Server{Addr: ":8081", Handler: container2}
--	log.Fatal(server.ListenAndServe())
--}
--
--func hello(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "default world")
--}
--
--func hello2(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "second world")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-options-filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-options-filter.go
-deleted file mode 100644
-index 73dc3cf..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-options-filter.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"log"
--	"net/http"
--)
--
--// This example shows how to use the OPTIONSFilter on a Container
--//
--// OPTIONS http://localhost:8080/users
--//
--// OPTIONS http://localhost:8080/users/1
--
--type UserResource struct{}
--
--func (u UserResource) RegisterTo(container *restful.Container) {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes("*/*").
--		Produces("*/*")
--
--	ws.Route(ws.GET("/{user-id}").To(u.nop))
--	ws.Route(ws.POST("").To(u.nop))
--	ws.Route(ws.PUT("/{user-id}").To(u.nop))
--	ws.Route(ws.DELETE("/{user-id}").To(u.nop))
--
--	container.Add(ws)
--}
--
--func (u UserResource) nop(request *restful.Request, response *restful.Response) {
--	io.WriteString(response.ResponseWriter, "this would be a normal response")
--}
--
--func main() {
--	wsContainer := restful.NewContainer()
--	u := UserResource{}
--	u.RegisterTo(wsContainer)
--
--	// Add container filter to respond to OPTIONS
--	wsContainer.Filter(wsContainer.OPTIONSFilter)
--
--	// For use on the default container, you can write
--	// restful.Filter(restful.OPTIONSFilter())
--
--	log.Printf("start listening on localhost:8080")
--	server := &http.Server{Addr: ":8080", Handler: wsContainer}
--	log.Fatal(server.ListenAndServe())
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-path-tail.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-path-tail.go
-deleted file mode 100644
-index 8488a23..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-path-tail.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package main
--
--import (
--	"io"
--	"net/http"
--	. "github.com/emicklei/go-restful"
--)
--
--// This example shows how to create a Route that matches the "tail" of a path.
--// Requires the use of a CurlyRouter and the star "*" path parameter pattern.
--//
--// GET http://localhost:8080/basepath/some/other/location/test.xml
--
--func main() {
--	DefaultContainer.Router(CurlyRouter{})
--	ws := new(WebService)
--	ws.Route(ws.GET("/basepath/{resource:*}").To(staticFromPathParam))
--	Add(ws)
--
--	println("[go-restful] serve path tails from http://localhost:8080/basepath")
--	http.ListenAndServe(":8080", nil)
--}
--
--func staticFromPathParam(req *Request, resp *Response) {
--	io.WriteString(resp, "Tail="+req.PathParameter("resource"))
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go
-deleted file mode 100644
-index 0b55f14..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go
-+++ /dev/null
-@@ -1,98 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"io"
--	"log"
--	"net/http"
--)
--
--// This example shows how the different types of filters are called in the request-response flow.
--// The call chain is logged on the console when sending an http request.
--//
--// GET http://localhost:8080/1
--// GET http://localhost:8080/2
--
--var indentLevel int
--
--func container_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	log.Printf("url path:%v\n", req.Request.URL)
--	trace("container_filter_A: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("container_filter_A: after", -1)
--}
--
--func container_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	trace("container_filter_B: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("container_filter_B: after", -1)
--}
--
--func service_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	trace("service_filter_A: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("service_filter_A: after", -1)
--}
--
--func service_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	trace("service_filter_B: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("service_filter_B: after", -1)
--}
--
--func route_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	trace("route_filter_A: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("route_filter_A: after", -1)
--}
--
--func route_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	trace("route_filter_B: before", 1)
--	chain.ProcessFilter(req, resp)
--	trace("route_filter_B: after", -1)
--}
--
--func trace(what string, delta int) {
--	indented := what
--	if delta < 0 {
--		indentLevel += delta
--	}
--	for t := 0; t < indentLevel; t++ {
--		indented = "." + indented
--	}
--	log.Printf("%s", indented)
--	if delta > 0 {
--		indentLevel += delta
--	}
--}
--
--func main() {
--	restful.Filter(container_filter_A)
--	restful.Filter(container_filter_B)
--
--	ws1 := new(restful.WebService)
--	ws1.Path("/1")
--	ws1.Filter(service_filter_A)
--	ws1.Filter(service_filter_B)
--	ws1.Route(ws1.GET("").To(doit1).Filter(route_filter_A).Filter(route_filter_B))
--
--	ws2 := new(restful.WebService)
--	ws2.Path("/2")
--	ws2.Filter(service_filter_A)
--	ws2.Filter(service_filter_B)
--	ws2.Route(ws2.GET("").To(doit2).Filter(route_filter_A).Filter(route_filter_B))
--
--	restful.Add(ws1)
--	restful.Add(ws2)
--
--	log.Print("go-restful example listening on http://localhost:8080/1 and http://localhost:8080/2")
--	log.Fatal(http.ListenAndServe(":8080", nil))
--}
--
--func doit1(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "nothing to see in 1")
--}
--
--func doit2(req *restful.Request, resp *restful.Response) {
--	io.WriteString(resp, "nothing to see in 2")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-resource-functions.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-resource-functions.go
-deleted file mode 100644
-index fb1012a..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-resource-functions.go
-+++ /dev/null
-@@ -1,63 +0,0 @@
--package main
--
--import (
--	"github.com/emicklei/go-restful"
--	"log"
--	"net/http"
--)
--
--// This example shows how to use methods as RouteFunctions for WebServices.
--// The ProductResource has a Register() method that creates and initializes
--// a WebService to expose its methods as REST operations.
--// The WebService is added to the restful.DefaultContainer.
--// A ProductResource is typically created using some data access object.
--//
--// GET http://localhost:8080/products/1
--// POST http://localhost:8080/products
--// <Product><Id>1</Id><Title>The First</Title></Product>
--
--type Product struct {
--	Id, Title string
--}
--
--type ProductResource struct {
--	// typically reference a DAO (data-access-object)
--}
--
--func (p ProductResource) getOne(req *restful.Request, resp *restful.Response) {
--	id := req.PathParameter("id")
--	log.Println("getting product with id:" + id)
--	resp.WriteEntity(Product{Id: id, Title: "test"})
--}
--
--func (p ProductResource) postOne(req *restful.Request, resp *restful.Response) {
--	updatedProduct := new(Product)
--	err := req.ReadEntity(updatedProduct)
--	if err != nil { // bad request
--		resp.WriteErrorString(http.StatusBadRequest, err.Error())
--		return
--	}
--	log.Println("updating product with id:" + updatedProduct.Id)
--}
--
--func (p ProductResource) Register() {
--	ws := new(restful.WebService)
--	ws.Path("/products")
--	ws.Consumes(restful.MIME_XML)
--	ws.Produces(restful.MIME_XML)
--
--	ws.Route(ws.GET("/{id}").To(p.getOne).
--		Doc("get the product by its id").
--		Param(ws.PathParameter("id", "identifier of the product").DataType("string")))
--
--	ws.Route(ws.POST("").To(p.postOne).
--		Doc("update or create a product").
--		Param(ws.BodyParameter("Product", "a Product (XML)").DataType("main.Product")))
--
--	restful.Add(ws)
--}
--
--func main() {
--	ProductResource{}.Register()
--	http.ListenAndServe(":8080", nil)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-route_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-route_test.go
-deleted file mode 100644
-index 20c366b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-route_test.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package main
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"strings"
--	"testing"
--
--	"github.com/emicklei/go-restful"
--)
--
--var (
--	Result string
--)
--
--func TestRouteExtractParameter(t *testing.T) {
--	// setup service
--	ws := new(restful.WebService)
--	ws.Consumes(restful.MIME_XML)
--	ws.Route(ws.GET("/test/{param}").To(DummyHandler))
--	restful.Add(ws)
--
--	// setup request + writer
--	bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
--	httpRequest, _ := http.NewRequest("GET", "/test/THIS", bodyReader)
--	httpRequest.Header.Set("Content-Type", restful.MIME_XML)
--	httpWriter := httptest.NewRecorder()
--
--	// run
--	restful.DefaultContainer.ServeHTTP(httpWriter, httpRequest)
--
--	if Result != "THIS" {
--		t.Fatalf("Result is actually: %s", Result)
--	}
--}
--
--func DummyHandler(rq *restful.Request, rp *restful.Response) {
--	Result = rq.PathParameter("param")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-routefunction_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-routefunction_test.go
-deleted file mode 100644
-index 6d61c5c..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-routefunction_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package main
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"testing"
--
--	"github.com/emicklei/go-restful"
--)
--
--// This example shows how to test one particular RouteFunction (getIt)
--// It uses the httptest.ResponseRecorder to capture output
--
--func getIt(req *restful.Request, resp *restful.Response) {
--	resp.WriteHeader(404)
--}
--
--func TestCallFunction(t *testing.T) {
--	httpReq, _ := http.NewRequest("GET", "/", nil)
--	req := restful.NewRequest(httpReq)
--
--	recorder := new(httptest.ResponseRecorder)
--	resp := restful.NewResponse(recorder)
--
--	getIt(req, resp)
--	if recorder.Code != 404 {
--		t.Logf("Missing or wrong status code:%d", recorder.Code)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-serve-static.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-serve-static.go
-deleted file mode 100644
-index 8cb7848..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-serve-static.go
-+++ /dev/null
-@@ -1,47 +0,0 @@
--package main
--
--import (
--	"fmt"
--	"net/http"
--	"path"
--
--	"github.com/emicklei/go-restful"
--)
--
--// This example shows how to define methods that serve static files
--// It uses the standard http.ServeFile method
--//
--// GET http://localhost:8080/static/test.xml
--// GET http://localhost:8080/static/
--//
--// GET http://localhost:8080/static?resource=subdir/test.xml
--
--var rootdir = "/tmp"
--
--func main() {
--	restful.DefaultContainer.Router(restful.CurlyRouter{})
--
--	ws := new(restful.WebService)
--	ws.Route(ws.GET("/static/{subpath:*}").To(staticFromPathParam))
--	ws.Route(ws.GET("/static").To(staticFromQueryParam))
--	restful.Add(ws)
--
--	println("[go-restful] serving files on http://localhost:8080/static from local /tmp")
--	http.ListenAndServe(":8080", nil)
--}
--
--func staticFromPathParam(req *restful.Request, resp *restful.Response) {
--	actual := path.Join(rootdir, req.PathParameter("subpath"))
--	fmt.Printf("serving %s ... (from %s)\n", actual, req.PathParameter("subpath"))
--	http.ServeFile(
--		resp.ResponseWriter,
--		req.Request,
--		actual)
--}
--
--func staticFromQueryParam(req *restful.Request, resp *restful.Response) {
--	http.ServeFile(
--		resp.ResponseWriter,
--		req.Request,
--		path.Join(rootdir, req.QueryParameter("resource")))
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-resource.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-resource.go
-deleted file mode 100644
-index f6ec988..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-resource.go
-+++ /dev/null
-@@ -1,153 +0,0 @@
--package main
--
--import (
--	"log"
--	"net/http"
--	"strconv"
--
--	"github.com/emicklei/go-restful"
--	"github.com/emicklei/go-restful/swagger"
--)
--
--// This example shows a complete (GET, PUT, POST, DELETE) conventional example of
--// a REST Resource including documentation to be served by e.g. a Swagger UI
--// It is recommended to create a Resource struct (UserResource) that can encapsulate
--// an object that provides domain access (a DAO)
--// It has a Register method including the complete Route mapping to methods together
--// with all the appropriate documentation
--//
--// POST http://localhost:8080/users
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--// GET http://localhost:8080/users/1
--//
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa</Name></User>
--//
--// DELETE http://localhost:8080/users/1
--//
--
--type User struct {
--	Id, Name string
--}
--
--type UserResource struct {
--	// normally one would use DAO (data access object)
--	users map[string]User
--}
--
--func (u UserResource) Register(container *restful.Container) {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Doc("Manage Users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
--
--	ws.Route(ws.GET("/{user-id}").To(u.findUser).
--		// docs
--		Doc("get a user").
--		Operation("findUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		Writes(User{})) // on the response
--
--	ws.Route(ws.PUT("/{user-id}").To(u.updateUser).
--		// docs
--		Doc("update a user").
--		Operation("updateUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		ReturnsError(409, "duplicate user-id", nil).
--		Reads(User{})) // from the request
--
--	ws.Route(ws.POST("").To(u.createUser).
--		// docs
--		Doc("create a user").
--		Operation("createUser").
--		Reads(User{})) // from the request
--
--	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
--		// docs
--		Doc("delete a user").
--		Operation("removeUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
--
--	container.Add(ws)
--}
--
--// GET http://localhost:8080/users/1
--//
--func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	usr := u.users[id]
--	if len(usr.Id) == 0 {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusNotFound, "404: User could not be found.")
--		return
--	}
--	response.WriteEntity(usr)
--}
--
--// POST http://localhost:8080/users
--// <User><Name>Melissa</Name></User>
--//
--func (u *UserResource) createUser(request *restful.Request, response *restful.Response) {
--	usr := new(User)
--	err := request.ReadEntity(usr)
--	if err != nil {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusInternalServerError, err.Error())
--		return
--	}
--	usr.Id = strconv.Itoa(len(u.users) + 1) // simple id generation
--	u.users[usr.Id] = *usr
--	response.WriteHeader(http.StatusCreated)
--	response.WriteEntity(usr)
--}
--
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {
--	usr := new(User)
--	err := request.ReadEntity(&usr)
--	if err != nil {
--		response.AddHeader("Content-Type", "text/plain")
--		response.WriteErrorString(http.StatusInternalServerError, err.Error())
--		return
--	}
--	u.users[usr.Id] = *usr
--	response.WriteEntity(usr)
--}
--
--// DELETE http://localhost:8080/users/1
--//
--func (u *UserResource) removeUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	delete(u.users, id)
--}
--
--func main() {
--	// to see what happens in the package, uncomment the following
--	//restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
--
--	wsContainer := restful.NewContainer()
--	u := UserResource{map[string]User{}}
--	u.Register(wsContainer)
--
--	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API
--	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
--	// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
--	config := swagger.Config{
--		WebServices:    wsContainer.RegisteredWebServices(), // you control what services are visible
--		WebServicesUrl: "http://localhost:8080",
--		ApiPath:        "/apidocs.json",
--
--		// Optionally, specify where the UI is located
--		SwaggerPath:     "/apidocs/",
--		SwaggerFilePath: "/Users/emicklei/xProjects/swagger-ui/dist"}
--	swagger.RegisterSwaggerService(config, wsContainer)
--
--	log.Printf("start listening on localhost:8080")
--	server := &http.Server{Addr: ":8080", Handler: wsContainer}
--	log.Fatal(server.ListenAndServe())
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-service.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-service.go
-deleted file mode 100644
-index d0d9872..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-user-service.go
-+++ /dev/null
-@@ -1,138 +0,0 @@
--package main
--
--import (
--	"log"
--	"net/http"
--
--	"github.com/emicklei/go-restful"
--	"github.com/emicklei/go-restful/swagger"
--)
--
--// This example is functionally the same as the example in restful-user-resource.go
--// with the only difference that it is served using the restful.DefaultContainer
--
--type User struct {
--	Id, Name string
--}
--
--type UserService struct {
--	// normally one would use DAO (data access object)
--	users map[string]User
--}
--
--func (u UserService) Register() {
--	ws := new(restful.WebService)
--	ws.
--		Path("/users").
--		Consumes(restful.MIME_XML, restful.MIME_JSON).
--		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
--
--	ws.Route(ws.GET("/").To(u.findAllUsers).
--		// docs
--		Doc("get all users").
--		Operation("findAllUsers").
--		Returns(200, "OK", []User{}))
--
--	ws.Route(ws.GET("/{user-id}").To(u.findUser).
--		// docs
--		Doc("get a user").
--		Operation("findUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		Writes(User{})) // on the response
--
--	ws.Route(ws.PUT("/{user-id}").To(u.updateUser).
--		// docs
--		Doc("update a user").
--		Operation("updateUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
--		Reads(User{})) // from the request
--
--	ws.Route(ws.PUT("").To(u.createUser).
--		// docs
--		Doc("create a user").
--		Operation("createUser").
--		Reads(User{})) // from the request
--
--	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
--		// docs
--		Doc("delete a user").
--		Operation("removeUser").
--		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
--
--	restful.Add(ws)
--}
--
--// GET http://localhost:8080/users
--//
--func (u UserService) findAllUsers(request *restful.Request, response *restful.Response) {
--	response.WriteEntity(u.users)
--}
--
--// GET http://localhost:8080/users/1
--//
--func (u UserService) findUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	usr := u.users[id]
--	if len(usr.Id) == 0 {
--		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
--	} else {
--		response.WriteEntity(usr)
--	}
--}
--
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
--//
--func (u *UserService) updateUser(request *restful.Request, response *restful.Response) {
--	usr := new(User)
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		u.users[usr.Id] = *usr
--		response.WriteEntity(usr)
--	} else {
--		response.WriteError(http.StatusInternalServerError, err)
--	}
--}
--
--// PUT http://localhost:8080/users/1
--// <User><Id>1</Id><Name>Melissa</Name></User>
--//
--func (u *UserService) createUser(request *restful.Request, response *restful.Response) {
--	usr := User{Id: request.PathParameter("user-id")}
--	err := request.ReadEntity(&usr)
--	if err == nil {
--		u.users[usr.Id] = usr
--		response.WriteHeader(http.StatusCreated)
--		response.WriteEntity(usr)
--	} else {
--		response.WriteError(http.StatusInternalServerError, err)
--	}
--}
--
--// DELETE http://localhost:8080/users/1
--//
--func (u *UserService) removeUser(request *restful.Request, response *restful.Response) {
--	id := request.PathParameter("user-id")
--	delete(u.users, id)
--}
--
--func main() {
--	u := UserService{map[string]User{}}
--	u.Register()
--
--	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API
--	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
--	// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
--	config := swagger.Config{
--		WebServices:    restful.RegisteredWebServices(), // you control what services are visible
--		WebServicesUrl: "http://localhost:8080",
--		ApiPath:        "/apidocs.json",
--
--		// Optionally, specify where the UI is located
--		SwaggerPath:     "/apidocs/",
--		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
--	swagger.InstallSwaggerService(config)
--
--	log.Printf("start listening on localhost:8080")
--	log.Fatal(http.ListenAndServe(":8080", nil))
--}
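
Both example programs above expose the same /users API on localhost:8080. For reference, a minimal client sketch (not part of the patch itself) that exercises the PUT /users/{user-id} and GET /users/{user-id} routes shared by both examples; it assumes one of the examples is running locally:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// Create (or replace) user 1 through the PUT /users/{user-id} route.
	body := strings.NewReader(`{"Id":"1","Name":"Melissa"}`)
	putReq, _ := http.NewRequest("PUT", "http://localhost:8080/users/1", body)
	putReq.Header.Set("Content-Type", "application/json")
	putReq.Header.Set("Accept", "application/json")
	putResp, err := http.DefaultClient.Do(putReq)
	if err != nil {
		fmt.Println("PUT failed:", err)
		return
	}
	putResp.Body.Close()

	// Read the user back through GET /users/{user-id}.
	getReq, _ := http.NewRequest("GET", "http://localhost:8080/users/1", nil)
	getReq.Header.Set("Accept", "application/json")
	getResp, err := http.DefaultClient.Do(getReq)
	if err != nil {
		fmt.Println("GET failed:", err)
		return
	}
	defer getResp.Body.Close()
	data, _ := ioutil.ReadAll(getResp.Body)
	fmt.Printf("status=%d body=%s\n", getResp.StatusCode, data)
}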
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/filter.go
-deleted file mode 100644
-index 4b86656..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/filter.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
--type FilterChain struct {
--	Filters []FilterFunction // ordered list of FilterFunction
--	Index   int              // index into filters that is currently in progress
--	Target  RouteFunction    // function to call after passing all filters
--}
--
--// ProcessFilter passes the request,response pair through the next of Filters.
--// Each filter can decide to proceed to the next Filter or handle the Response itself.
--func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
--	if f.Index < len(f.Filters) {
--		f.Index++
--		f.Filters[f.Index-1](request, response, f)
--	} else {
--		f.Target(request, response)
--	}
--}
--
--// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
--type FilterFunction func(*Request, *Response, *FilterChain)
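
The FilterChain and FilterFunction types removed above are consumed by registering filters on a container or web service. A minimal sketch (not in the patch itself) of a container-level filter, using only the restful.Filter, restful.Add and ProcessFilter calls that appear elsewhere in this patch:

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

// logFilter runs before the matched RouteFunction and then hands control on.
func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL.Path)
	chain.ProcessFilter(req, resp) // not calling this would short-circuit the request
}

func main() {
	ws := new(restful.WebService).Path("/ping")
	ws.Route(ws.GET("").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp.ResponseWriter, "pong")
	}))

	restful.Filter(logFilter) // container-level filter on the default container
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}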
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/filter_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/filter_test.go
-deleted file mode 100644
-index fadfb57..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/filter_test.go
-+++ /dev/null
-@@ -1,141 +0,0 @@
--package restful
--
--import (
--	"io"
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--func setupServices(addGlobalFilter bool, addServiceFilter bool, addRouteFilter bool) {
--	if addGlobalFilter {
--		Filter(globalFilter)
--	}
--	Add(newTestService(addServiceFilter, addRouteFilter))
--}
--
--func tearDown() {
--	DefaultContainer.webServices = []*WebService{}
--	DefaultContainer.isRegisteredOnRoot = true // this allows for setupServices multiple times
--	DefaultContainer.containerFilters = []FilterFunction{}
--}
--
--func newTestService(addServiceFilter bool, addRouteFilter bool) *WebService {
--	ws := new(WebService).Path("")
--	if addServiceFilter {
--		ws.Filter(serviceFilter)
--	}
--	rb := ws.GET("/foo").To(foo)
--	if addRouteFilter {
--		rb.Filter(routeFilter)
--	}
--	ws.Route(rb)
--	ws.Route(ws.GET("/bar").To(bar))
--	return ws
--}
--
--func foo(req *Request, resp *Response) {
--	io.WriteString(resp.ResponseWriter, "foo")
--}
--
--func bar(req *Request, resp *Response) {
--	io.WriteString(resp.ResponseWriter, "bar")
--}
--
--func fail(req *Request, resp *Response) {
--	http.Error(resp.ResponseWriter, "something failed", http.StatusInternalServerError)
--}
--
--func globalFilter(req *Request, resp *Response, chain *FilterChain) {
--	io.WriteString(resp.ResponseWriter, "global-")
--	chain.ProcessFilter(req, resp)
--}
--
--func serviceFilter(req *Request, resp *Response, chain *FilterChain) {
--	io.WriteString(resp.ResponseWriter, "service-")
--	chain.ProcessFilter(req, resp)
--}
--
--func routeFilter(req *Request, resp *Response, chain *FilterChain) {
--	io.WriteString(resp.ResponseWriter, "route-")
--	chain.ProcessFilter(req, resp)
--}
--
--func TestNoFilter(t *testing.T) {
--	tearDown()
--	setupServices(false, false, false)
--	actual := sendIt("http://example.com/foo")
--	if "foo" != actual {
--		t.Fatal("expected: foo but got:" + actual)
--	}
--}
--
--func TestGlobalFilter(t *testing.T) {
--	tearDown()
--	setupServices(true, false, false)
--	actual := sendIt("http://example.com/foo")
--	if "global-foo" != actual {
--		t.Fatal("expected: global-foo but got:" + actual)
--	}
--}
--
--func TestWebServiceFilter(t *testing.T) {
--	tearDown()
--	setupServices(true, true, false)
--	actual := sendIt("http://example.com/foo")
--	if "global-service-foo" != actual {
--		t.Fatal("expected: global-service-foo but got:" + actual)
--	}
--}
--
--func TestRouteFilter(t *testing.T) {
--	tearDown()
--	setupServices(true, true, true)
--	actual := sendIt("http://example.com/foo")
--	if "global-service-route-foo" != actual {
--		t.Fatal("expected: global-service-route-foo but got:" + actual)
--	}
--}
--
--func TestRouteFilterOnly(t *testing.T) {
--	tearDown()
--	setupServices(false, false, true)
--	actual := sendIt("http://example.com/foo")
--	if "route-foo" != actual {
--		t.Fatal("expected: route-foo but got:" + actual)
--	}
--}
--
--func TestBar(t *testing.T) {
--	tearDown()
--	setupServices(false, true, false)
--	actual := sendIt("http://example.com/bar")
--	if "service-bar" != actual {
--		t.Fatal("expected: service-bar but got:" + actual)
--	}
--}
--
--func TestAllFiltersBar(t *testing.T) {
--	tearDown()
--	setupServices(true, true, true)
--	actual := sendIt("http://example.com/bar")
--	if "global-service-bar" != actual {
--		t.Fatal("expected: global-service-bar but got:" + actual)
--	}
--}
--
--func sendIt(address string) string {
--	httpRequest, _ := http.NewRequest("GET", address, nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	return httpWriter.Body.String()
--}
--
--func sendItTo(address string, container *Container) string {
--	httpRequest, _ := http.NewRequest("GET", address, nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	container.dispatch(httpWriter, httpRequest)
--	return httpWriter.Body.String()
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/install.sh b/Godeps/_workspace/src/github.com/emicklei/go-restful/install.sh
-deleted file mode 100644
-index b5de8a2..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/install.sh
-+++ /dev/null
-@@ -1,9 +0,0 @@
--cd examples
--	ls *.go | xargs -I {} go build {}
--	cd ..
--go fmt ...swagger && \
--go test -test.v ...swagger && \
--go install ...swagger && \
--go fmt ...restful && \
--go test -test.v ...restful && \
--go install ...restful
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311.go
-deleted file mode 100644
-index 3d9d7d8..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311.go
-+++ /dev/null
-@@ -1,247 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"errors"
--	"fmt"
--	"net/http"
--	"sort"
--)
--
--// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
--// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
--// RouterJSR311 implements the Router interface.
--// Concept of locators is not implemented.
--type RouterJSR311 struct{}
--
--// SelectRoute is part of the Router interface and returns the best match
--// for the WebService and its Route for the given Request.
--func (r RouterJSR311) SelectRoute(
--	webServices []*WebService,
--	httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
--
--	// Identify the root resource class (WebService)
--	dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
--	if err != nil {
--		return nil, nil, NewError(http.StatusNotFound, "")
--	}
--	// Obtain the set of candidate methods (Routes)
--	routes := r.selectRoutes(dispatcher, finalMatch)
--	if len(routes) == 0 {
--		return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
--	}
--
--	// Identify the method (Route) that will handle the request
--	route, ok := r.detectRoute(routes, httpRequest)
--	return dispatcher, route, ok
--}
--
--// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
--func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
--	// http method
--	methodOk := []Route{}
--	for _, each := range routes {
--		if httpRequest.Method == each.Method {
--			methodOk = append(methodOk, each)
--		}
--	}
--	if len(methodOk) == 0 {
--		if trace {
--			traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
--		}
--		return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
--	}
--	inputMediaOk := methodOk
--	// content-type
--	contentType := httpRequest.Header.Get(HEADER_ContentType)
--	if httpRequest.ContentLength > 0 {
--		inputMediaOk = []Route{}
--		for _, each := range methodOk {
--			if each.matchesContentType(contentType) {
--				inputMediaOk = append(inputMediaOk, each)
--			}
--		}
--		if len(inputMediaOk) == 0 {
--			if trace {
--				traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
--			}
--			return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
--		}
--	}
--	// accept
--	outputMediaOk := []Route{}
--	accept := httpRequest.Header.Get(HEADER_Accept)
--	if accept == "" {
--		accept = "*/*"
--	}
--	for _, each := range inputMediaOk {
--		if each.matchesAccept(accept) {
--			outputMediaOk = append(outputMediaOk, each)
--		}
--	}
--	if len(outputMediaOk) == 0 {
--		if trace {
--			traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
--		}
--		return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
--	}
--	return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
--}
--
--// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
--// n/m > n/* > */*
--func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
--	// TODO
--	return &routes[0]
--}
--
--// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2  (step 2)
--func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
--	filtered := &sortableRouteCandidates{}
--	for _, each := range dispatcher.Routes() {
--		pathExpr := each.pathExpr
--		matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
--		if matches != nil {
--			lastMatch := matches[len(matches)-1]
--			if len(lastMatch) == 0 || lastMatch == "/" { // include only if the remainder is empty or "/"
--				filtered.candidates = append(filtered.candidates,
--					routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
--			}
--		}
--	}
--	if len(filtered.candidates) == 0 {
--		if trace {
--			traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
--		}
--		return []Route{}
--	}
--	sort.Sort(sort.Reverse(filtered))
--
--	// select other routes from candidates whose expression matches the path remainder
--	matchingRoutes := []Route{filtered.candidates[0].route}
--	for c := 1; c < len(filtered.candidates); c++ {
--		each := filtered.candidates[c]
--		if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
--			matchingRoutes = append(matchingRoutes, each.route)
--		}
--	}
--	return matchingRoutes
--}
--
--// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
--func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
--	filtered := &sortableDispatcherCandidates{}
--	for _, each := range dispatchers {
--		matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
--		if matches != nil {
--			filtered.candidates = append(filtered.candidates,
--				dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
--		}
--	}
--	if len(filtered.candidates) == 0 {
--		if trace {
--			traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
--		}
--		return nil, "", errors.New("not found")
--	}
--	sort.Sort(sort.Reverse(filtered))
--	return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
--}
--
--// Types and functions to support the sorting of Routes
--
--type routeCandidate struct {
--	route           Route
--	matchesCount    int // the number of capturing groups
--	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
--	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not "([^/]+?)")
--}
--
--func (r routeCandidate) expressionToMatch() string {
--	return r.route.pathExpr.Source
--}
--
--func (r routeCandidate) String() string {
--	return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
--}
--
--type sortableRouteCandidates struct {
--	candidates []routeCandidate
--}
--
--func (rcs *sortableRouteCandidates) Len() int {
--	return len(rcs.candidates)
--}
--func (rcs *sortableRouteCandidates) Swap(i, j int) {
--	rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
--}
--func (rcs *sortableRouteCandidates) Less(i, j int) bool {
--	ci := rcs.candidates[i]
--	cj := rcs.candidates[j]
--	// primary key
--	if ci.literalCount < cj.literalCount {
--		return true
--	}
--	if ci.literalCount > cj.literalCount {
--		return false
--	}
--	// secondary key
--	if ci.matchesCount < cj.matchesCount {
--		return true
--	}
--	if ci.matchesCount > cj.matchesCount {
--		return false
--	}
--	// tertiary key
--	if ci.nonDefaultCount < cj.nonDefaultCount {
--		return true
--	}
--	if ci.nonDefaultCount > cj.nonDefaultCount {
--		return false
--	}
--	// quaternary key ("source" is interpreted as Path)
--	return ci.route.Path < cj.route.Path
--}
--
--// Types and functions to support the sorting of Dispatchers
--
--type dispatcherCandidate struct {
--	dispatcher      *WebService
--	finalMatch      string
--	matchesCount    int // the number of capturing groups
--	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
--	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not "([^/]+?)")
--}
--type sortableDispatcherCandidates struct {
--	candidates []dispatcherCandidate
--}
--
--func (dc *sortableDispatcherCandidates) Len() int {
--	return len(dc.candidates)
--}
--func (dc *sortableDispatcherCandidates) Swap(i, j int) {
--	dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
--}
--func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
--	ci := dc.candidates[i]
--	cj := dc.candidates[j]
--	// primary key
--	if ci.matchesCount < cj.matchesCount {
--		return true
--	}
--	if ci.matchesCount > cj.matchesCount {
--		return false
--	}
--	// secondary key
--	if ci.literalCount < cj.literalCount {
--		return true
--	}
--	if ci.literalCount > cj.literalCount {
--		return false
--	}
--	// tertiary key
--	return ci.nonDefaultCount < cj.nonDefaultCount
--}
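
The RouterJSR311 type removed above is what the container uses to resolve a request to a WebService and Route. A minimal standalone sketch (not in the patch itself) that calls the exported SelectRoute directly, using only the WebService, Route and RouterJSR311 APIs shown in this patch:

package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService).Path("/users")
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp.ResponseWriter, req.PathParameter("id"))
	}))

	httpReq, _ := http.NewRequest("GET", "http://example.com/users/42", nil)
	httpReq.Header.Set("Accept", "*/*")

	// Resolve the WebService and Route exactly as container dispatch would.
	svc, route, err := restful.RouterJSR311{}.SelectRoute([]*restful.WebService{ws}, httpReq)
	if err != nil {
		fmt.Println("no match:", err)
		return
	}
	fmt.Printf("matched %s -> %s %s\n", svc.RootPath(), route.Method, route.Path)
}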
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311_test.go
-deleted file mode 100644
-index b531b7f..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/jsr311_test.go
-+++ /dev/null
-@@ -1,236 +0,0 @@
--package restful
--
--import (
--	"io"
--	"sort"
--	"testing"
--)
--
--//
--// Step 1 tests
--//
--var paths = []struct {
--	// url with path (1) is handled by service with root (2) and last capturing group has value final (3)
--	path, root, final string
--}{
--	{"/", "/", "/"},
--	{"/p", "/p", ""},
--	{"/p/x", "/p/{q}", ""},
--	{"/q/x", "/q", "/x"},
--	{"/p/x/", "/p/{q}", "/"},
--	{"/p/x/y", "/p/{q}", "/y"},
--	{"/q/x/y", "/q", "/x/y"},
--	{"/z/q", "/{p}/q", ""},
--	{"/a/b/c/q", "/", "/a/b/c/q"},
--}
--
--func TestDetectDispatcher(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	ws2 := new(WebService).Path("/p")
--	ws3 := new(WebService).Path("/q")
--	ws4 := new(WebService).Path("/p/q")
--	ws5 := new(WebService).Path("/p/{q}")
--	ws6 := new(WebService).Path("/p/{q}/")
--	ws7 := new(WebService).Path("/{p}/q")
--	var dispatchers = []*WebService{ws1, ws2, ws3, ws4, ws5, ws6, ws7}
--
--	wc := NewContainer()
--	for _, each := range dispatchers {
--		wc.Add(each)
--	}
--
--	router := RouterJSR311{}
--
--	ok := true
--	for i, fixture := range paths {
--		who, final, err := router.detectDispatcher(fixture.path, dispatchers)
--		if err != nil {
--			t.Logf("error in detection:%v", err)
--			ok = false
--		}
--		if who.RootPath() != fixture.root {
--			t.Logf("[line:%v] Unexpected dispatcher, expected:%v, actual:%v", i, fixture.root, who.RootPath())
--			ok = false
--		}
--		if final != fixture.final {
--			t.Logf("[line:%v] Unexpected final, expected:%v, actual:%v", i, fixture.final, final)
--			ok = false
--		}
--	}
--	if !ok {
--		t.Fail()
--	}
--}
--
--//
--// Step 2 tests
--//
--
--// go test -v -test.run TestISSUE_30 ...restful
--func TestISSUE_30(t *testing.T) {
--	ws1 := new(WebService).Path("/users")
--	ws1.Route(ws1.GET("/{id}").To(dummy))
--	ws1.Route(ws1.POST("/login").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/login")
--	if len(routes) != 2 {
--		t.Fatal("expected 2 routes")
--	}
--	if routes[0].Path != "/users/login" {
--		t.Error("first is", routes[0].Path)
--		t.Logf("routes:%v", routes)
--	}
--}
--
--// go test -v -test.run TestISSUE_34 ...restful
--func TestISSUE_34(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	ws1.Route(ws1.GET("/{type}/{id}").To(dummy))
--	ws1.Route(ws1.GET("/network/{id}").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/network/12")
--	if len(routes) != 2 {
--		t.Fatal("expected 2 routes")
--	}
--	if routes[0].Path != "/network/{id}" {
--		t.Error("first is", routes[0].Path)
--		t.Logf("routes:%v", routes)
--	}
--}
--
--// go test -v -test.run TestISSUE_34_2 ...restful
--func TestISSUE_34_2(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	// change the registration order
--	ws1.Route(ws1.GET("/network/{id}").To(dummy))
--	ws1.Route(ws1.GET("/{type}/{id}").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/network/12")
--	if len(routes) != 2 {
--		t.Fatal("expected 2 routes")
--	}
--	if routes[0].Path != "/network/{id}" {
--		t.Error("first is", routes[0].Path)
--	}
--}
--
--// go test -v -test.run TestISSUE_137 ...restful
--func TestISSUE_137(t *testing.T) {
--	ws1 := new(WebService)
--	ws1.Route(ws1.GET("/hello").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/")
--	t.Log(routes)
--	if len(routes) > 0 {
--		t.Error("no route expected")
--	}
--}
--
--func TestSelectRoutesSlash(t *testing.T) {
--	ws1 := new(WebService).Path("/")
--	ws1.Route(ws1.GET("").To(dummy))
--	ws1.Route(ws1.GET("/").To(dummy))
--	ws1.Route(ws1.GET("/u").To(dummy))
--	ws1.Route(ws1.POST("/u").To(dummy))
--	ws1.Route(ws1.POST("/u/v").To(dummy))
--	ws1.Route(ws1.POST("/u/{w}").To(dummy))
--	ws1.Route(ws1.POST("/u/{w}/z").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/u")
--	checkRoutesContains(routes, "/u", t)
--	checkRoutesContainsNo(routes, "/u/v", t)
--	checkRoutesContainsNo(routes, "/", t)
--	checkRoutesContainsNo(routes, "/u/{w}/z", t)
--}
--func TestSelectRoutesU(t *testing.T) {
--	ws1 := new(WebService).Path("/u")
--	ws1.Route(ws1.GET("").To(dummy))
--	ws1.Route(ws1.GET("/").To(dummy))
--	ws1.Route(ws1.GET("/v").To(dummy))
--	ws1.Route(ws1.POST("/{w}").To(dummy))
--	ws1.Route(ws1.POST("/{w}/z").To(dummy))          // so full path = /u/{w}/z
--	routes := RouterJSR311{}.selectRoutes(ws1, "/v") // test against /u/v
--	checkRoutesContains(routes, "/u/{w}", t)
--}
--
--func TestSelectRoutesUsers1(t *testing.T) {
--	ws1 := new(WebService).Path("/users")
--	ws1.Route(ws1.POST("").To(dummy))
--	ws1.Route(ws1.POST("/").To(dummy))
--	ws1.Route(ws1.PUT("/{id}").To(dummy))
--	routes := RouterJSR311{}.selectRoutes(ws1, "/1")
--	checkRoutesContains(routes, "/users/{id}", t)
--}
--func checkRoutesContains(routes []Route, path string, t *testing.T) {
--	if !containsRoutePath(routes, path, t) {
--		for _, r := range routes {
--			t.Logf("route %v %v", r.Method, r.Path)
--		}
--		t.Fatalf("routes should include [%v]:", path)
--	}
--}
--func checkRoutesContainsNo(routes []Route, path string, t *testing.T) {
--	if containsRoutePath(routes, path, t) {
--		for _, r := range routes {
--			t.Logf("route %v %v", r.Method, r.Path)
--		}
--		t.Fatalf("routes should not include [%v]:", path)
--	}
--}
--func containsRoutePath(routes []Route, path string, t *testing.T) bool {
--	for _, each := range routes {
--		if each.Path == path {
--			return true
--		}
--	}
--	return false
--}
--
--var tempregexs = []struct {
--	template, regex        string
--	literalCount, varCount int
--}{
--	{"", "^(/.*)?$", 0, 0},
--	{"/a/{b}/c/", "^/a/([^/]+?)/c(/.*)?$", 2, 1},
--	{"/{a}/{b}/{c-d-e}/", "^/([^/]+?)/([^/]+?)/([^/]+?)(/.*)?$", 0, 3},
--	{"/{p}/abcde", "^/([^/]+?)/abcde(/.*)?$", 5, 1},
--}
--
--func TestTemplateToRegularExpression(t *testing.T) {
--	ok := true
--	for i, fixture := range tempregexs {
--		actual, lCount, vCount, _ := templateToRegularExpression(fixture.template)
--		if actual != fixture.regex {
--			t.Logf("regex mismatch, expected:%v , actual:%v, line:%v\n", fixture.regex, actual, i) // 11 = where the data starts
--			ok = false
--		}
--		if lCount != fixture.literalCount {
--			t.Logf("literal count mismatch, expected:%v , actual:%v, line:%v\n", fixture.literalCount, lCount, i)
--			ok = false
--		}
--		if vCount != fixture.varCount {
--			t.Logf("variable count mismatch, expected:%v , actual:%v, line:%v\n", fixture.varCount, vCount, i)
--			ok = false
--		}
--	}
--	if !ok {
--		t.Fatal("one or more expression did not match")
--	}
--}
--
--// go test -v -test.run TestSortableRouteCandidates ...restful
--func TestSortableRouteCandidates(t *testing.T) {
--	fixture := &sortableRouteCandidates{}
--	r1 := routeCandidate{matchesCount: 0, literalCount: 0, nonDefaultCount: 0}
--	r2 := routeCandidate{matchesCount: 0, literalCount: 0, nonDefaultCount: 1}
--	r3 := routeCandidate{matchesCount: 0, literalCount: 1, nonDefaultCount: 1}
--	r4 := routeCandidate{matchesCount: 1, literalCount: 1, nonDefaultCount: 0}
--	r5 := routeCandidate{matchesCount: 1, literalCount: 0, nonDefaultCount: 0}
--	fixture.candidates = append(fixture.candidates, r5, r4, r3, r2, r1)
--	sort.Sort(sort.Reverse(fixture))
--	first := fixture.candidates[0]
--	if first.matchesCount != 1 && first.literalCount != 1 && first.nonDefaultCount != 0 {
--		t.Fatal("expected r4")
--	}
--	last := fixture.candidates[len(fixture.candidates)-1]
--	if last.matchesCount != 0 && last.literalCount != 0 && last.nonDefaultCount != 0 {
--		t.Fatal("expected r1")
--	}
--}
--
--func dummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "dummy") }
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/logger.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/logger.go
-deleted file mode 100644
-index 25d6a8e..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/logger.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package restful
--
--import "log"
--
--// Copyright 2014 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--var trace bool = false
--var traceLogger *log.Logger
--
--// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set.
--func TraceLogger(logger *log.Logger) {
--	traceLogger = logger
--	trace = logger != nil
--}
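
Enabling the trace logging removed above only requires a standard library *log.Logger, mirroring the commented-out line in restful-user-resource.go earlier in this patch; a minimal sketch:

package main

import (
	"log"
	"os"

	"github.com/emicklei/go-restful"
)

func main() {
	// Send detailed request-matching and filter traces to stdout.
	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
}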
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter.go
-deleted file mode 100644
-index bd5d0c2..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter.go
-+++ /dev/null
-@@ -1,24 +0,0 @@
--package restful
--
--import "strings"
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
--// and provides the response with a set of allowed methods for the request URL Path.
--// As for any filter, you can also install it for a particular WebService within a Container
--func (c Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
--	if "OPTIONS" != req.Request.Method {
--		chain.ProcessFilter(req, resp)
--		return
--	}
--	resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
--}
--
--// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
--// and provides the response with a set of allowed methods for the request URL Path.
--func OPTIONSFilter() FilterFunction {
--	return DefaultContainer.OPTIONSFilter
--}
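
A minimal sketch (not in the patch itself) of installing the OPTIONSFilter removed above on the default container; it mirrors the usage in options_filter_test.go below:

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/candy/{kind}").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp.ResponseWriter, req.PathParameter("kind"))
	}))
	ws.Route(ws.DELETE("/candy/{kind}").To(func(req *restful.Request, resp *restful.Response) {}))
	restful.Add(ws)

	// An OPTIONS request to /candy/gum now gets "Allow: GET,DELETE" instead of an error status.
	restful.Filter(restful.OPTIONSFilter())
	log.Fatal(http.ListenAndServe(":8080", nil))
}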
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter_test.go
-deleted file mode 100644
-index f0fceb8..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/options_filter_test.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package restful
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--// go test -v -test.run TestOptionsFilter ...restful
--func TestOptionsFilter(t *testing.T) {
--	tearDown()
--	ws := new(WebService)
--	ws.Route(ws.GET("/candy/{kind}").To(dummy))
--	ws.Route(ws.DELETE("/candy/{kind}").To(dummy))
--	ws.Route(ws.POST("/candies").To(dummy))
--	Add(ws)
--	Filter(OPTIONSFilter())
--
--	httpRequest, _ := http.NewRequest("OPTIONS", "http://here.io/candy/gum", nil)
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	actual := httpWriter.Header().Get(HEADER_Allow)
--	if "GET,DELETE" != actual {
--		t.Fatal("expected: GET,DELETE but got:" + actual)
--	}
--
--	httpRequest, _ = http.NewRequest("OPTIONS", "http://here.io/candies", nil)
--	httpWriter = httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	actual = httpWriter.Header().Get(HEADER_Allow)
--	if "POST" != actual {
--		t.Fatal("expected: POST but got:" + actual)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go
-deleted file mode 100644
-index 7f38a0a..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go
-+++ /dev/null
-@@ -1,95 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--const (
--	// PathParameterKind = indicator of Request parameter type "path"
--	PathParameterKind = iota
--
--	// QueryParameterKind = indicator of Request parameter type "query"
--	QueryParameterKind
--
--	// BodyParameterKind = indicator of Request parameter type "body"
--	BodyParameterKind
--
--	// HeaderParameterKind = indicator of Request parameter type "header"
--	HeaderParameterKind
--
--	// FormParameterKind = indicator of Request parameter type "form"
--	FormParameterKind
--)
--
--// Parameter is for documenting the parameter used in an Http Request
--// ParameterData kinds are Path,Query and Body
--type Parameter struct {
--	data *ParameterData
--}
--
--// ParameterData represents the state of a Parameter.
--// It is made public to make it accessible to e.g. the Swagger package.
--type ParameterData struct {
--	Name, Description, DataType string
--	Kind                        int
--	Required                    bool
--	AllowableValues             map[string]string
--	AllowMultiple               bool
--}
--
--// Data returns the state of the Parameter
--func (p *Parameter) Data() ParameterData {
--	return *p.data
--}
--
--// Kind returns the parameter type indicator (see const for valid values)
--func (p *Parameter) Kind() int {
--	return p.data.Kind
--}
--
--func (p *Parameter) bePath() *Parameter {
--	p.data.Kind = PathParameterKind
--	return p
--}
--func (p *Parameter) beQuery() *Parameter {
--	p.data.Kind = QueryParameterKind
--	return p
--}
--func (p *Parameter) beBody() *Parameter {
--	p.data.Kind = BodyParameterKind
--	return p
--}
--
--func (p *Parameter) beHeader() *Parameter {
--	p.data.Kind = HeaderParameterKind
--	return p
--}
--
--func (p *Parameter) beForm() *Parameter {
--	p.data.Kind = FormParameterKind
--	return p
--}
--
--// Required sets the required field and return the receiver
--func (p *Parameter) Required(required bool) *Parameter {
--	p.data.Required = required
--	return p
--}
--
--// AllowMultiple sets the allowMultiple field and return the receiver
--func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
--	p.data.AllowMultiple = multiple
--	return p
--}
--
--// AllowableValues sets the allowableValues field and return the receiver
--func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
--	p.data.AllowableValues = values
--	return p
--}
--
--// DataType sets the dataType field and return the receiver
--func (p *Parameter) DataType(typeName string) *Parameter {
--	p.data.DataType = typeName
--	return p
--}
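
The Parameter builder removed above is normally obtained from a WebService helper such as PathParameter (used in the examples earlier in this patch) or QueryParameter. A minimal sketch (not in the patch itself) of chaining its setters; it assumes WebService.QueryParameter exists alongside the PathParameter helper shown above:

package main

import (
	"fmt"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)

	// Each setter returns the receiver, so the parameter documentation can be chained.
	p := ws.QueryParameter("page", "page number to return").
		DataType("int").
		Required(false).
		AllowMultiple(false)

	fmt.Printf("kind=%d type=%s required=%v\n", p.Kind(), p.Data().DataType, p.Data().Required)
}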
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/path_expression.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/path_expression.go
-deleted file mode 100644
-index 8749cb5..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/path_expression.go
-+++ /dev/null
-@@ -1,56 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"bytes"
--	"regexp"
--	"strings"
--)
--
--// PathExpression holds a compiled path expression (RegExp) needed to match against
--// Http request paths and to extract path parameter values.
--type pathExpression struct {
--	LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
--	VarCount     int // the number of named parameters (enclosed by {}) in the path
--	Matcher      *regexp.Regexp
--	Source       string // Path as defined by the RouteBuilder
--	tokens       []string
--}
--
--// NewPathExpression creates a PathExpression from the input URL path.
--// Returns an error if the path is invalid.
--func newPathExpression(path string) (*pathExpression, error) {
--	expression, literalCount, varCount, tokens := templateToRegularExpression(path)
--	compiled, err := regexp.Compile(expression)
--	if err != nil {
--		return nil, err
--	}
--	return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
--}
--
--// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
--func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
--	var buffer bytes.Buffer
--	buffer.WriteString("^")
--	//tokens = strings.Split(template, "/")
--	tokens = tokenizePath(template)
--	for _, each := range tokens {
--		if each == "" {
--			continue
--		}
--		buffer.WriteString("/")
--		if strings.HasPrefix(each, "{") {
--			// ignore var spec
--			varCount += 1
--			buffer.WriteString("([^/]+?)")
--		} else {
--			literalCount += len(each)
--			encoded := each // TODO URI encode
--			buffer.WriteString(regexp.QuoteMeta(encoded))
--		}
--	}
--	return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go
-deleted file mode 100644
-index 00a069f..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go
-+++ /dev/null
-@@ -1,135 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"bytes"
--	"encoding/json"
--	"encoding/xml"
--	"io"
--	"io/ioutil"
--	"net/http"
--	"strings"
--)
--
--var defaultRequestContentType string
--
--var doCacheReadEntityBytes = true
--
--// Request is a wrapper for a http Request that provides convenience methods
--type Request struct {
--	Request           *http.Request
--	bodyContent       *[]byte // to cache the request body for multiple reads of ReadEntity
--	pathParameters    map[string]string
--	attributes        map[string]interface{} // for storing request-scoped values
--	selectedRoutePath string                 // root path + route path that matched the request, e.g. /meetings/{id}/attendees
--}
--
--func NewRequest(httpRequest *http.Request) *Request {
--	return &Request{
--		Request:        httpRequest,
--		pathParameters: map[string]string{},
--		attributes:     map[string]interface{}{},
--	} // empty parameters, attributes
--}
--
--// If ContentType is missing or */* is given then fall back to this type, otherwise
--// a "Unable to unmarshal content of type:" response is returned.
--// Valid values are restful.MIME_JSON and restful.MIME_XML
--// Example:
--// 	restful.DefaultRequestContentType(restful.MIME_JSON)
--func DefaultRequestContentType(mime string) {
--	defaultRequestContentType = mime
--}
--
--// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
--// Default is true (for backward compatibility). For better performance, you should set it to false if you don't need it.
--func SetCacheReadEntity(doCache bool) {
--	doCacheReadEntityBytes = doCache
--}
--
--// PathParameter accesses the Path parameter value by its name
--func (r *Request) PathParameter(name string) string {
--	return r.pathParameters[name]
--}
--
--// PathParameters accesses the Path parameter values
--func (r *Request) PathParameters() map[string]string {
--	return r.pathParameters
--}
--
--// QueryParameter returns the (first) Query parameter value by its name
--func (r *Request) QueryParameter(name string) string {
--	return r.Request.FormValue(name)
--}
--
--// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
--func (r *Request) BodyParameter(name string) (string, error) {
--	err := r.Request.ParseForm()
--	if err != nil {
--		return "", err
--	}
--	return r.Request.PostFormValue(name), nil
--}
--
--// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
--func (r *Request) HeaderParameter(name string) string {
--	return r.Request.Header.Get(name)
--}
--
--// ReadEntity checks the Content-Type header and reads the content into the entityPointer
--// May be called multiple times in the request-response flow
--func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
--	contentType := r.Request.Header.Get(HEADER_ContentType)
--	if doCacheReadEntityBytes {
--		return r.cachingReadEntity(contentType, entityPointer)
--	}
--	// unmarshal directly from the request Body
--	return r.decodeEntity(r.Request.Body, contentType, entityPointer)
--}
--
--func (r *Request) cachingReadEntity(contentType string, entityPointer interface{}) (err error) {
--	var buffer []byte
--	if r.bodyContent != nil {
--		buffer = *r.bodyContent
--	} else {
--		buffer, err = ioutil.ReadAll(r.Request.Body)
--		if err != nil {
--			return err
--		}
--		r.bodyContent = &buffer
--	}
--	return r.decodeEntity(bytes.NewReader(buffer), contentType, entityPointer)
--}
--
--func (r *Request) decodeEntity(reader io.Reader, contentType string, entityPointer interface{}) (err error) {
--	if strings.Contains(contentType, MIME_XML) {
--		return xml.NewDecoder(reader).Decode(entityPointer)
--	}
--	if strings.Contains(contentType, MIME_JSON) || MIME_JSON == defaultRequestContentType {
--		decoder := json.NewDecoder(reader)
--		decoder.UseNumber()
--		return decoder.Decode(entityPointer)
--	}
--	if MIME_XML == defaultRequestContentType {
--		return xml.NewDecoder(reader).Decode(entityPointer)
--	}
--	return NewError(400, "Unable to unmarshal content of type:"+contentType)
--}
--
--// SetAttribute adds or replaces the attribute with the given value.
--func (r *Request) SetAttribute(name string, value interface{}) {
--	r.attributes[name] = value
--}
--
--// Attribute returns the value associated to the given name. Returns nil if absent.
--func (r Request) Attribute(name string) interface{} {
--	return r.attributes[name]
--}
--
--// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
--func (r Request) SelectedRoutePath() string {
--	return r.selectedRoutePath
--}
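
A minimal sketch (not in the patch itself) of decoding a JSON body through the ReadEntity method removed above, outside of a container; it assumes only NewRequest and ReadEntity from this patch, and request_test.go below exercises the same path:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/emicklei/go-restful"
)

type Sample struct {
	Value string
}

func main() {
	body := strings.NewReader(`{"Value":"42"}`)
	httpReq, _ := http.NewRequest("POST", "/samples", body)
	httpReq.Header.Set("Content-Type", "application/json")

	req := restful.NewRequest(httpReq) // wrap the raw *http.Request
	sample := new(Sample)
	if err := req.ReadEntity(sample); err != nil { // decoder is picked from the Content-Type header
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("decoded Value:", sample.Value)
}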
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/request_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/request_test.go
-deleted file mode 100644
-index 6e7c55b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/request_test.go
-+++ /dev/null
-@@ -1,204 +0,0 @@
--package restful
--
--import (
--	"encoding/json"
--	"net/http"
--	"net/url"
--	"strconv"
--	"strings"
--	"testing"
--)
--
--func TestQueryParameter(t *testing.T) {
--	hreq := http.Request{Method: "GET"}
--	hreq.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar")
--	rreq := Request{Request: &hreq}
--	if rreq.QueryParameter("q") != "foo" {
--		t.Errorf("q!=foo %#v", rreq)
--	}
--}
--
--type Anything map[string]interface{}
--
--type Number struct {
--	ValueFloat float64
--	ValueInt   int64
--}
--
--type Sample struct {
--	Value string
--}
--
--func TestReadEntityXml(t *testing.T) {
--	SetCacheReadEntity(true)
--	bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/xml")
--	request := &Request{Request: httpRequest}
--	sam := new(Sample)
--	request.ReadEntity(sam)
--	if sam.Value != "42" {
--		t.Fatal("read failed")
--	}
--	if request.bodyContent == nil {
--		t.Fatal("no expected cached bytes found")
--	}
--}
--
--func TestReadEntityXmlNonCached(t *testing.T) {
--	SetCacheReadEntity(false)
--	bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/xml")
--	request := &Request{Request: httpRequest}
--	sam := new(Sample)
--	request.ReadEntity(sam)
--	if sam.Value != "42" {
--		t.Fatal("read failed")
--	}
--	if request.bodyContent != nil {
--		t.Fatal("unexpected cached bytes found")
--	}
--}
--
--func TestReadEntityJson(t *testing.T) {
--	bodyReader := strings.NewReader(`{"Value" : "42"}`)
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/json")
--	request := &Request{Request: httpRequest}
--	sam := new(Sample)
--	request.ReadEntity(sam)
--	if sam.Value != "42" {
--		t.Fatal("read failed")
--	}
--}
--
--func TestReadEntityJsonCharset(t *testing.T) {
--	bodyReader := strings.NewReader(`{"Value" : "42"}`)
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/json; charset=UTF-8")
--	request := NewRequest(httpRequest)
--	sam := new(Sample)
--	request.ReadEntity(sam)
--	if sam.Value != "42" {
--		t.Fatal("read failed")
--	}
--}
--
--func TestReadEntityJsonNumber(t *testing.T) {
--	SetCacheReadEntity(true)
--	bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/json")
--	request := &Request{Request: httpRequest}
--	any := make(Anything)
--	request.ReadEntity(&any)
--	number, ok := any["Value"].(json.Number)
--	if !ok {
--		t.Fatal("read failed")
--	}
--	vint, err := number.Int64()
--	if err != nil {
--		t.Fatal("convert failed")
--	}
--	if vint != 4899710515899924123 {
--		t.Fatal("read failed")
--	}
--	vfloat, err := number.Float64()
--	if err != nil {
--		t.Fatal("convert failed")
--	}
--	// match the default behaviour
--	vstring := strconv.FormatFloat(vfloat, 'e', 15, 64)
--	if vstring != "4.899710515899924e+18" {
--		t.Fatal("convert float64 failed")
--	}
--}
--
--func TestReadEntityJsonNumberNonCached(t *testing.T) {
--	SetCacheReadEntity(false)
--	bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/json")
--	request := &Request{Request: httpRequest}
--	any := make(Anything)
--	request.ReadEntity(&any)
--	number, ok := any["Value"].(json.Number)
--	if !ok {
--		t.Fatal("read failed")
--	}
--	vint, err := number.Int64()
--	if err != nil {
--		t.Fatal("convert failed")
--	}
--	if vint != 4899710515899924123 {
--		t.Fatal("read failed")
--	}
--	vfloat, err := number.Float64()
--	if err != nil {
--		t.Fatal("convert failed")
--	}
--	// match the default behaviour
--	vstring := strconv.FormatFloat(vfloat, 'e', 15, 64)
--	if vstring != "4.899710515899924e+18" {
--		t.Fatal("convert float64 failed")
--	}
--}
--
--func TestReadEntityJsonLong(t *testing.T) {
--	bodyReader := strings.NewReader(`{"ValueFloat" : 4899710515899924123, "ValueInt": 4899710515899924123}`)
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/json")
--	request := &Request{Request: httpRequest}
--	number := new(Number)
--	request.ReadEntity(&number)
--	if number.ValueInt != 4899710515899924123 {
--		t.Fatal("read failed")
--	}
--	// match the default behaviour
--	vstring := strconv.FormatFloat(number.ValueFloat, 'e', 15, 64)
--	if vstring != "4.899710515899924e+18" {
--		t.Fatal("convert float64 failed")
--	}
--}
--
--func TestBodyParameter(t *testing.T) {
--	bodyReader := strings.NewReader(`value1=42&value2=43`)
--	httpRequest, _ := http.NewRequest("POST", "/test?value1=44", bodyReader) // POST and PUT body parameters take precedence over URL query string
--	httpRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
--	request := NewRequest(httpRequest)
--	v1, err := request.BodyParameter("value1")
--	if err != nil {
--		t.Error(err)
--	}
--	v2, err := request.BodyParameter("value2")
--	if err != nil {
--		t.Error(err)
--	}
--	if v1 != "42" || v2 != "43" {
--		t.Fatal("read failed")
--	}
--}
--
--func TestReadEntityUnkown(t *testing.T) {
--	bodyReader := strings.NewReader("?")
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	httpRequest.Header.Set("Content-Type", "application/rubbish")
--	request := NewRequest(httpRequest)
--	sam := new(Sample)
--	err := request.ReadEntity(sam)
--	if err == nil {
--		t.Fatal("read should be in error")
--	}
--}
--
--func TestSetAttribute(t *testing.T) {
--	bodyReader := strings.NewReader("?")
--	httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
--	request := NewRequest(httpRequest)
--	request.SetAttribute("go", "there")
--	there := request.Attribute("go")
--	if there != "there" {
--		t.Fatalf("missing request attribute:%v", there)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go
-deleted file mode 100644
-index 0a3cbd5..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go
-+++ /dev/null
-@@ -1,241 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"encoding/json"
--	"encoding/xml"
--	"net/http"
--	"strings"
--)
--
--// DEPRECATED, use DefaultResponseContentType(mime)
--var DefaultResponseMimeType string
--
--//PrettyPrintResponses controls the indentation feature of XML and JSON
--//serialization in the response methods WriteEntity, WriteAsJson, and
--//WriteAsXml.
--var PrettyPrintResponses = true
--
--// Response is a wrapper on the actual http ResponseWriter
--// It provides several convenience methods to prepare and write response content.
--type Response struct {
--	http.ResponseWriter
--	requestAccept string   // mime-type what the Http Request says it wants to receive
--	routeProduces []string // mime-types what the Route says it can produce
--	statusCode    int      // HTTP status code that has been written explicitly (if zero then net/http has written 200)
--	contentLength int      // number of bytes written for the response body
--	prettyPrint   bool     // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
--}
--
--// Creates a new response based on a http ResponseWriter.
--func NewResponse(httpWriter http.ResponseWriter) *Response {
--	return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses} // empty content-types
--}
--
--// If Accept header matching fails, fall back to this type, otherwise
--// a "406: Not Acceptable" response is returned.
--// Valid values are restful.MIME_JSON and restful.MIME_XML
--// Example:
--// 	restful.DefaultResponseContentType(restful.MIME_JSON)
--func DefaultResponseContentType(mime string) {
--	DefaultResponseMimeType = mime
--}
--
--// InternalServerError writes the StatusInternalServerError header.
--// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
--func (r Response) InternalServerError() Response {
--	r.WriteHeader(http.StatusInternalServerError)
--	return r
--}
--
--// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
--func (r *Response) PrettyPrint(bePretty bool) {
--	r.prettyPrint = bePretty
--}
--
--// AddHeader is a shortcut for .Header().Add(header,value)
--func (r Response) AddHeader(header string, value string) Response {
--	r.Header().Add(header, value)
--	return r
--}
--
--// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
--func (r *Response) SetRequestAccepts(mime string) {
--	r.requestAccept = mime
--}
--
--// WriteEntity marshals the value using the representation denoted by the Accept Header (XML or JSON)
--// If no Accept header is specified (or */*) then return the Content-Type as specified by the first in the Route.Produces.
--// If an Accept header is specified then return the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
--// If the value is nil then nothing is written. You may want to call WriteHeader(http.StatusNotFound) instead.
--// Current implementation ignores any q-parameters in the Accept Header.
--func (r *Response) WriteEntity(value interface{}) error {
--	if value == nil { // do not write a nil representation
--		return nil
--	}
--	for _, qualifiedMime := range strings.Split(r.requestAccept, ",") {
--		mime := strings.Trim(strings.Split(qualifiedMime, ";")[0], " ")
--		if 0 == len(mime) || mime == "*/*" {
--			for _, each := range r.routeProduces {
--				if MIME_JSON == each {
--					return r.WriteAsJson(value)
--				}
--				if MIME_XML == each {
--					return r.WriteAsXml(value)
--				}
--			}
--		} else { // mime is not blank; see if we have a match in Produces
--			for _, each := range r.routeProduces {
--				if mime == each {
--					if MIME_JSON == each {
--						return r.WriteAsJson(value)
--					}
--					if MIME_XML == each {
--						return r.WriteAsXml(value)
--					}
--				}
--			}
--		}
--	}
--	if DefaultResponseMimeType == MIME_JSON {
--		return r.WriteAsJson(value)
--	} else if DefaultResponseMimeType == MIME_XML {
--		return r.WriteAsXml(value)
--	} else {
--		if trace {
--			traceLogger.Printf("mismatch in mime-types and no defaults; (http)Accept=%v,(route)Produces=%v\n", r.requestAccept, r.routeProduces)
--		}
--		r.WriteHeader(http.StatusNotAcceptable) // for recording only
--		r.ResponseWriter.WriteHeader(http.StatusNotAcceptable)
--		if _, err := r.Write([]byte("406: Not Acceptable")); err != nil {
--			return err
--		}
--	}
--	return nil
--}
--
--// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
--func (r *Response) WriteAsXml(value interface{}) error {
--	var output []byte
--	var err error
--
--	if value == nil { // do not write a nil representation
--		return nil
--	}
--	if r.prettyPrint {
--		output, err = xml.MarshalIndent(value, " ", " ")
--	} else {
--		output, err = xml.Marshal(value)
--	}
--
--	if err != nil {
--		return r.WriteError(http.StatusInternalServerError, err)
--	}
--	r.Header().Set(HEADER_ContentType, MIME_XML)
--	if r.statusCode > 0 { // a WriteHeader was intercepted
--		r.ResponseWriter.WriteHeader(r.statusCode)
--	}
--	_, err = r.Write([]byte(xml.Header))
--	if err != nil {
--		return err
--	}
--	if _, err = r.Write(output); err != nil {
--		return err
--	}
--	return nil
--}
--
--// WriteAsJson is a convenience method for writing a value in json
--func (r *Response) WriteAsJson(value interface{}) error {
--	var output []byte
--	var err error
--
--	if value == nil { // do not write a nil representation
--		return nil
--	}
--	if r.prettyPrint {
--		output, err = json.MarshalIndent(value, " ", " ")
--	} else {
--		output, err = json.Marshal(value)
--	}
--
--	if err != nil {
--		return r.WriteErrorString(http.StatusInternalServerError, err.Error())
--	}
--	r.Header().Set(HEADER_ContentType, MIME_JSON)
--	if r.statusCode > 0 { // a WriteHeader was intercepted
--		r.ResponseWriter.WriteHeader(r.statusCode)
--	}
--	if _, err = r.Write(output); err != nil {
--		return err
--	}
--	return nil
--}
--
--// WriteError write the http status and the error string on the response.
--func (r *Response) WriteError(httpStatus int, err error) error {
--	return r.WriteErrorString(httpStatus, err.Error())
--}
--
--// WriteServiceError is a convenience method for responding with a ServiceError and a status
--func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
--	r.WriteHeader(httpStatus) // for recording only
--	return r.WriteEntity(err)
--}
--
--// WriteErrorString is a convenience method for an error status with the actual error
--func (r *Response) WriteErrorString(status int, errorReason string) error {
--	r.statusCode = status // for recording only
--	r.ResponseWriter.WriteHeader(status)
--	if _, err := r.Write([]byte(errorReason)); err != nil {
--		return err
--	}
--	return nil
--}
--
--// WriteHeader is overridden to remember the Status Code that has been written.
--// Note that using this method, the status value is only written when
--// - calling WriteEntity,
--// - or directly calling WriteAsXml or WriteAsJson,
--// - or if the status is one for which no response is allowed (i.e.,
--//   204 (http.StatusNoContent) or 304 (http.StatusNotModified))
--func (r *Response) WriteHeader(httpStatus int) {
--	r.statusCode = httpStatus
--	// if 204 then WriteEntity will not be called so we need to pass this code
--	if http.StatusNoContent == httpStatus ||
--		http.StatusNotModified == httpStatus {
--		r.ResponseWriter.WriteHeader(httpStatus)
--	}
--}
--
--// StatusCode returns the code that has been written using WriteHeader.
--func (r Response) StatusCode() int {
--	if 0 == r.statusCode {
--		// no status code has been written yet; assume OK
--		return http.StatusOK
--	}
--	return r.statusCode
--}
--
--// Write writes the data to the connection as part of an HTTP reply.
--// Write is part of http.ResponseWriter interface.
--func (r *Response) Write(bytes []byte) (int, error) {
--	written, err := r.ResponseWriter.Write(bytes)
--	r.contentLength += written
--	return written, err
--}
--
--// ContentLength returns the number of bytes written for the response content.
--// Note that this value is only correct if all data is written through the Response using its Write* methods.
--// Data written directly using the underlying http.ResponseWriter is not accounted for.
--func (r Response) ContentLength() int {
--	return r.contentLength
--}
--
--// CloseNotify is part of http.CloseNotifier interface
--func (r Response) CloseNotify() <-chan bool {
--	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
--}
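
A minimal sketch (not in the patch itself) of driving the Response type removed above against an httptest recorder, with JSON as the fallback content type; it assumes only NewResponse, DefaultResponseContentType and WriteEntity from this patch:

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/emicklei/go-restful"
)

type Food struct {
	Kind string
}

func main() {
	// With no Accept header and no route Produces information, fall back to JSON.
	restful.DefaultResponseContentType(restful.MIME_JSON)

	recorder := httptest.NewRecorder()
	resp := restful.NewResponse(recorder)
	if err := resp.WriteEntity(Food{Kind: "apple"}); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Printf("content-type=%s length=%d body=%s\n",
		recorder.Header().Get("Content-Type"), resp.ContentLength(), recorder.Body.String())
}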
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go
-deleted file mode 100644
-index ee9a74f..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go
-+++ /dev/null
-@@ -1,157 +0,0 @@
--package restful
--
--import (
--	"errors"
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--func TestWriteHeader(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true}
--	resp.WriteHeader(123)
--	if resp.StatusCode() != 123 {
--		t.Errorf("Unexpected status code:%d", resp.StatusCode())
--	}
--}
--
--func TestNoWriteHeader(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true}
--	if resp.StatusCode() != http.StatusOK {
--		t.Errorf("Unexpected status code:%d", resp.StatusCode())
--	}
--}
--
--type food struct {
--	Kind string
--}
--
--// go test -v -test.run TestMeasureContentLengthXml ...restful
--func TestMeasureContentLengthXml(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true}
--	resp.WriteAsXml(food{"apple"})
--	if resp.ContentLength() != 76 {
--		t.Errorf("Incorrect measured length:%d", resp.ContentLength())
--	}
--}
--
--// go test -v -test.run TestMeasureContentLengthJson ...restful
--func TestMeasureContentLengthJson(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true}
--	resp.WriteAsJson(food{"apple"})
--	if resp.ContentLength() != 22 {
--		t.Errorf("Incorrect measured length:%d", resp.ContentLength())
--	}
--}
--
--// go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful
--func TestMeasureContentLengthJsonNotPretty(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, false}
--	resp.WriteAsJson(food{"apple"})
--	if resp.ContentLength() != 16 {
--		t.Errorf("Incorrect measured length:%d", resp.ContentLength())
--	}
--}
--
--// go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful
--func TestMeasureContentLengthWriteErrorString(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true}
--	resp.WriteErrorString(404, "Invalid")
--	if resp.ContentLength() != len("Invalid") {
--		t.Errorf("Incorrect measured length:%d", resp.ContentLength())
--	}
--}
--
--// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful
--func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true}
--	resp.WriteHeader(201)
--	resp.WriteAsJson(food{"Juicy"})
--	if httpWriter.HeaderMap.Get("Content-Type") != "application/json" {
--		t.Errorf("Expected content type json but got:%d", httpWriter.HeaderMap.Get("Content-Type"))
--	}
--	if httpWriter.Code != 201 {
--		t.Errorf("Expected status 201 but got:%d", httpWriter.Code)
--	}
--}
--
--type errorOnWriteRecorder struct {
--	*httptest.ResponseRecorder
--}
--
--func (e errorOnWriteRecorder) Write(bytes []byte) (int, error) {
--	return 0, errors.New("fail")
--}
--
--// go test -v -test.run TestLastWriteErrorCaught ...restful
--func TestLastWriteErrorCaught(t *testing.T) {
--	httpWriter := errorOnWriteRecorder{httptest.NewRecorder()}
--	resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true}
--	err := resp.WriteAsJson(food{"Juicy"})
--	if err.Error() != "fail" {
--		t.Errorf("Unexpected error message:%v", err)
--	}
--}
--
--// go test -v -test.run TestAcceptStarStar_Issue83 ...restful
--func TestAcceptStarStar_Issue83(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	//								Accept									Produces
--	resp := Response{httpWriter, "application/bogus,*/*;q=0.8", []string{"application/json"}, 0, 0, true}
--	resp.WriteEntity(food{"Juicy"})
--	ct := httpWriter.Header().Get("Content-Type")
--	if "application/json" != ct {
--		t.Errorf("Unexpected content type:%s", ct)
--	}
--}
--
--// go test -v -test.run TestAcceptSkipStarStar_Issue83 ...restful
--func TestAcceptSkipStarStar_Issue83(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	//								Accept									Produces
--	resp := Response{httpWriter, " application/xml ,*/* ; q=0.8", []string{"application/json", "application/xml"}, 0, 0, true}
--	resp.WriteEntity(food{"Juicy"})
--	ct := httpWriter.Header().Get("Content-Type")
--	if "application/xml" != ct {
--		t.Errorf("Unexpected content type:%s", ct)
--	}
--}
--
--// go test -v -test.run TestAcceptXmlBeforeStarStar_Issue83 ...restful
--func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	//								Accept									Produces
--	resp := Response{httpWriter, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", []string{"application/json"}, 0, 0, true}
--	resp.WriteEntity(food{"Juicy"})
--	ct := httpWriter.Header().Get("Content-Type")
--	if "application/json" != ct {
--		t.Errorf("Unexpected content type:%s", ct)
--	}
--}
--
--// go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful
--func TestWriteHeaderNoContent_Issue124(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "text/plain", []string{"text/plain"}, 0, 0, true}
--	resp.WriteHeader(http.StatusNoContent)
--	if httpWriter.Code != http.StatusNoContent {
--		t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent)
--	}
--}
--
--// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful
--func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) {
--	httpWriter := httptest.NewRecorder()
--	resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true}
--	resp.WriteHeader(http.StatusNotModified)
--	if httpWriter.Code != http.StatusNotModified {
--		t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/route.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/route.go
-deleted file mode 100644
-index 59d6e23..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/route.go
-+++ /dev/null
-@@ -1,166 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"bytes"
--	"net/http"
--	"strings"
--)
--
--// RouteFunction declares the signature of a function that can be bound to a Route.
--type RouteFunction func(*Request, *Response)
--
--// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
--type Route struct {
--	Method   string
--	Produces []string
--	Consumes []string
--	Path     string // webservice root path + described path
--	Function RouteFunction
--	Filters  []FilterFunction
--
--	// cached values for dispatching
--	relativePath string
--	pathParts    []string
--	pathExpr     *pathExpression // cached compilation of relativePath as RegExp
--
--	// documentation
--	Doc                     string
--	Operation               string
--	ParameterDocs           []*Parameter
--	ResponseErrors          map[int]ResponseError
--	ReadSample, WriteSample interface{} // structs that model an example request or response payload
--}
--
--// Initialize for Route
--func (r *Route) postBuild() {
--	r.pathParts = tokenizePath(r.Path)
--}
--
--// Create Request and Response from their http versions
--func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
--	params := r.extractParameters(httpRequest.URL.Path)
--	wrappedRequest := NewRequest(httpRequest)
--	wrappedRequest.pathParameters = params
--	wrappedRequest.selectedRoutePath = r.Path
--	wrappedResponse := NewResponse(httpWriter)
--	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
--	wrappedResponse.routeProduces = r.Produces
--	return wrappedRequest, wrappedResponse
--}
--
--// dispatchWithFilters call the function after passing through its own filters
--func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
--	if len(r.Filters) > 0 {
--		chain := FilterChain{Filters: r.Filters, Target: r.Function}
--		chain.ProcessFilter(wrappedRequest, wrappedResponse)
--	} else {
--		// unfiltered
--		r.Function(wrappedRequest, wrappedResponse)
--	}
--}
--
--// Return whether the mimeType matches what this Route can produce.
--func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
--	parts := strings.Split(mimeTypesWithQuality, ",")
--	for _, each := range parts {
--		var withoutQuality string
--		if strings.Contains(each, ";") {
--			withoutQuality = strings.Split(each, ";")[0]
--		} else {
--			withoutQuality = each
--		}
--		// trim before compare
--		withoutQuality = strings.Trim(withoutQuality, " ")
--		if withoutQuality == "*/*" {
--			return true
--		}
--		for _, other := range r.Produces {
--			if other == withoutQuality {
--				return true
--			}
--		}
--	}
--	return false
--}
--
--// Return whether the mimeType matches what this Route can consume.
--func (r Route) matchesContentType(mimeTypes string) bool {
--	parts := strings.Split(mimeTypes, ",")
--	for _, each := range parts {
--		var contentType string
--		if strings.Contains(each, ";") {
--			contentType = strings.Split(each, ";")[0]
--		} else {
--			contentType = each
--		}
--		// trim before compare
--		contentType = strings.Trim(contentType, " ")
--		for _, other := range r.Consumes {
--			if other == "*/*" || other == contentType {
--				return true
--			}
--		}
--	}
--	return false
--}
--
--// Extract the parameters from the request url path
--func (r Route) extractParameters(urlPath string) map[string]string {
--	urlParts := tokenizePath(urlPath)
--	pathParameters := map[string]string{}
--	for i, key := range r.pathParts {
--		var value string
--		if i >= len(urlParts) {
--			value = ""
--		} else {
--			value = urlParts[i]
--		}
--		if strings.HasPrefix(key, "{") { // path-parameter
--			if colon := strings.Index(key, ":"); colon != -1 {
--				// extract by regex
--				regPart := key[colon+1 : len(key)-1]
--				keyPart := key[1:colon]
--				if regPart == "*" {
--					pathParameters[keyPart] = untokenizePath(i, urlParts)
--					break
--				} else {
--					pathParameters[keyPart] = value
--				}
--			} else {
--				// without enclosing {}
--				pathParameters[key[1:len(key)-1]] = value
--			}
--		}
--	}
--	return pathParameters
--}
--
--// Untokenize back into a URL path using the slash separator
--func untokenizePath(offset int, parts []string) string {
--	var buffer bytes.Buffer
--	for p := offset; p < len(parts); p++ {
--		buffer.WriteString(parts[p])
--		// do not end
--		if p < len(parts)-1 {
--			buffer.WriteString("/")
--		}
--	}
--	return buffer.String()
--}
--
--// Tokenize a URL path using the slash separator ; the result does not have empty tokens
--func tokenizePath(path string) []string {
--	if "/" == path {
--		return []string{}
--	}
--	return strings.Split(strings.Trim(path, "/"), "/")
--}
--
--// for debugging
--func (r Route) String() string {
--	return r.Method + " " + r.Path
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder.go
-deleted file mode 100644
-index 8f46619..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder.go
-+++ /dev/null
-@@ -1,208 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"log"
--	"reflect"
--	"strings"
--)
--
--// RouteBuilder is a helper to construct Routes.
--type RouteBuilder struct {
--	rootPath    string
--	currentPath string
--	produces    []string
--	consumes    []string
--	httpMethod  string        // required
--	function    RouteFunction // required
--	filters     []FilterFunction
--	// documentation
--	doc                     string
--	operation               string
--	readSample, writeSample interface{}
--	parameters              []*Parameter
--	errorMap                map[int]ResponseError
--}
--
--// Do evaluates each argument with the RouteBuilder itself.
--// This allows you to follow DRY principles without breaking the fluent programming style.
--// Example:
--// 		ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
--//
--//		func Returns500(b *RouteBuilder) {
--//			b.Returns(500, "Internal Server Error", restful.ServiceError{})
--//		}
--func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
--	for _, each := range oneArgBlocks {
--		each(b)
--	}
--	return b
--}
--
--// To bind the route to a function.
--// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
--func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
--	b.function = function
--	return b
--}
--
--// Method specifies what HTTP method to match. Required.
--func (b *RouteBuilder) Method(method string) *RouteBuilder {
--	b.httpMethod = method
--	return b
--}
--
--// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
--func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
--	b.produces = mimeTypes
--	return b
--}
--
--// Consumes specifies what MIME types can be consumed ; the request Content-Type Http header must match one of these
--func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
--	b.consumes = mimeTypes
--	return b
--}
--
--// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
--func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
--	b.currentPath = subPath
--	return b
--}
--
--// Doc tells what this route is all about. Optional.
--func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
--	b.doc = documentation
--	return b
--}
--
--// Reads tells what resource type will be read from the request payload. Optional.
--// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
--func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
--	b.readSample = sample
--	typeAsName := reflect.TypeOf(sample).String()
--	bodyParameter := &Parameter{&ParameterData{Name: "body"}}
--	bodyParameter.beBody()
--	bodyParameter.Required(true)
--	bodyParameter.DataType(typeAsName)
--	b.Param(bodyParameter)
--	return b
--}
--
--// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not.
--// Use this to modify or extend information for the Parameter (through its Data()).
--func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
--	for _, each := range b.parameters {
--		if each.Data().Name == name {
--			return each
--		}
--	}
--	return p
--}
--
--// Writes tells what resource type will be written as the response payload. Optional.
--func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
--	b.writeSample = sample
--	return b
--}
--
--// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
--func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
--	if b.parameters == nil {
--		b.parameters = []*Parameter{}
--	}
--	b.parameters = append(b.parameters, parameter)
--	return b
--}
--
--// Operation allows you to document the actual method/function call of the Route.
--func (b *RouteBuilder) Operation(name string) *RouteBuilder {
--	b.operation = name
--	return b
--}
--
--// ReturnsError is deprecated, use Returns instead.
--func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
--	log.Println("ReturnsError is deprecated, use Returns instead.")
--	return b.Returns(code, message, model)
--}
--
--// Returns allows you to document what responses (errors or regular) can be expected.
--// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
--func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
--	err := ResponseError{
--		Code:    code,
--		Message: message,
--		Model:   model,
--	}
--	// lazy init because there is no NewRouteBuilder (yet)
--	if b.errorMap == nil {
--		b.errorMap = map[int]ResponseError{}
--	}
--	b.errorMap[code] = err
--	return b
--}
--
--type ResponseError struct {
--	Code    int
--	Message string
--	Model   interface{}
--}
--
--func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
--	b.rootPath = path
--	return b
--}
--
--// Filter appends a FilterFunction to the end of filters for this Route to build.
--func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
--	b.filters = append(b.filters, filter)
--	return b
--}
--
--// If no specific Route path then set to rootPath
--// If no specific Produces then set to rootProduces
--// If no specific Consumes then set to rootConsumes
--func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
--	if len(b.produces) == 0 {
--		b.produces = rootProduces
--	}
--	if len(b.consumes) == 0 {
--		b.consumes = rootConsumes
--	}
--}
--
--// Build creates a new Route using the specification details collected by the RouteBuilder
--func (b *RouteBuilder) Build() Route {
--	pathExpr, err := newPathExpression(b.currentPath)
--	if err != nil {
--		log.Fatalf("[restful] Invalid path:%s because:%v", b.currentPath, err)
--	}
--	if b.function == nil {
--		log.Fatalf("[restful] No function specified for route:" + b.currentPath)
--	}
--	route := Route{
--		Method:         b.httpMethod,
--		Path:           concatPath(b.rootPath, b.currentPath),
--		Produces:       b.produces,
--		Consumes:       b.consumes,
--		Function:       b.function,
--		Filters:        b.filters,
--		relativePath:   b.currentPath,
--		pathExpr:       pathExpr,
--		Doc:            b.doc,
--		Operation:      b.operation,
--		ParameterDocs:  b.parameters,
--		ResponseErrors: b.errorMap,
--		ReadSample:     b.readSample,
--		WriteSample:    b.writeSample}
--	route.postBuild()
--	return route
--}
--
--func concatPath(path1, path2 string) string {
--	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder_test.go
-deleted file mode 100644
-index 42ec689..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_builder_test.go
-+++ /dev/null
-@@ -1,55 +0,0 @@
--package restful
--
--import (
--	"testing"
--)
--
--func TestRouteBuilder_PathParameter(t *testing.T) {
--	p := &Parameter{&ParameterData{Name: "name", Description: "desc"}}
--	p.AllowMultiple(true)
--	p.DataType("int")
--	p.Required(true)
--	values := map[string]string{"a": "b"}
--	p.AllowableValues(values)
--	p.bePath()
--
--	b := new(RouteBuilder)
--	b.function = dummy
--	b.Param(p)
--	r := b.Build()
--	if !r.ParameterDocs[0].Data().AllowMultiple {
--		t.Error("AllowMultiple invalid")
--	}
--	if r.ParameterDocs[0].Data().DataType != "int" {
--		t.Error("dataType invalid")
--	}
--	if !r.ParameterDocs[0].Data().Required {
--		t.Error("required invalid")
--	}
--	if r.ParameterDocs[0].Data().Kind != PathParameterKind {
--		t.Error("kind invalid")
--	}
--	if r.ParameterDocs[0].Data().AllowableValues["a"] != "b" {
--		t.Error("allowableValues invalid")
--	}
--	if b.ParameterNamed("name") == nil {
--		t.Error("access to parameter failed")
--	}
--}
--
--func TestRouteBuilder(t *testing.T) {
--	json := "application/json"
--	b := new(RouteBuilder)
--	b.To(dummy)
--	b.Path("/routes").Method("HEAD").Consumes(json).Produces(json)
--	r := b.Build()
--	if r.Path != "/routes" {
--		t.Error("path invalid")
--	}
--	if r.Produces[0] != json {
--		t.Error("produces invalid")
--	}
--	if r.Consumes[0] != json {
--		t.Error("consumes invalid")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go
-deleted file mode 100644
-index a416576..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go
-+++ /dev/null
-@@ -1,108 +0,0 @@
--package restful
--
--import (
--	"testing"
--)
--
--// accept should match produces
--func TestMatchesAcceptStar(t *testing.T) {
--	r := Route{Produces: []string{"application/xml"}}
--	if !r.matchesAccept("*/*") {
--		t.Errorf("accept should match star")
--	}
--}
--
--// accept should match produces
--func TestMatchesAcceptIE(t *testing.T) {
--	r := Route{Produces: []string{"application/xml"}}
--	if !r.matchesAccept("text/html, application/xhtml+xml, */*") {
--		t.Errorf("accept should match star")
--	}
--}
--
--// accept should match produces
--func TestMatchesAcceptXml(t *testing.T) {
--	r := Route{Produces: []string{"application/xml"}}
--	if r.matchesAccept("application/json") {
--		t.Errorf("accept should not match json")
--	}
--	if !r.matchesAccept("application/xml") {
--		t.Errorf("accept should match xml")
--	}
--}
--
--// content type should match consumes
--func TestMatchesContentTypeXml(t *testing.T) {
--	r := Route{Consumes: []string{"application/xml"}}
--	if r.matchesContentType("application/json") {
--		t.Errorf("accept should not match json")
--	}
--	if !r.matchesContentType("application/xml") {
--		t.Errorf("accept should match xml")
--	}
--}
--
--// content type should match consumes
--func TestMatchesContentTypeCharsetInformation(t *testing.T) {
--	r := Route{Consumes: []string{"application/json"}}
--	if !r.matchesContentType("application/json; charset=UTF-8") {
--		t.Errorf("matchesContentType should ignore charset information")
--	}
--}
--
--func TestMatchesPath_OneParam(t *testing.T) {
--	params := doExtractParams("/from/{source}", 2, "/from/here", t)
--	if params["source"] != "here" {
--		t.Errorf("parameter mismatch here")
--	}
--}
--
--func TestMatchesPath_Slash(t *testing.T) {
--	params := doExtractParams("/", 0, "/", t)
--	if len(params) != 0 {
--		t.Errorf("expected empty parameters")
--	}
--}
--
--func TestMatchesPath_SlashNonVar(t *testing.T) {
--	params := doExtractParams("/any", 1, "/any", t)
--	if len(params) != 0 {
--		t.Errorf("expected empty parameters")
--	}
--}
--
--func TestMatchesPath_TwoVars(t *testing.T) {
--	params := doExtractParams("/from/{source}/to/{destination}", 4, "/from/AMS/to/NY", t)
--	if params["source"] != "AMS" {
--		t.Errorf("parameter mismatch AMS")
--	}
--}
--
--func TestMatchesPath_VarOnFront(t *testing.T) {
--	params := doExtractParams("{what}/from/{source}/", 3, "who/from/SOS/", t)
--	if params["source"] != "SOS" {
--		t.Errorf("parameter mismatch SOS")
--	}
--}
--
--func TestExtractParameters_EmptyValue(t *testing.T) {
--	params := doExtractParams("/fixed/{var}", 2, "/fixed/", t)
--	if params["var"] != "" {
--		t.Errorf("parameter mismatch var")
--	}
--}
--
--func TestTokenizePath(t *testing.T) {
--	if len(tokenizePath("/")) != 0 {
--		t.Errorf("not empty path tokens")
--	}
--}
--
--func doExtractParams(routePath string, size int, urlPath string, t *testing.T) map[string]string {
--	r := Route{Path: routePath}
--	r.postBuild()
--	if len(r.pathParts) != size {
--		t.Fatalf("len not %v %v, but %v", size, r.pathParts, len(r.pathParts))
--	}
--	return r.extractParameters(urlPath)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/router.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/router.go
-deleted file mode 100644
-index 9b32fb6..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/router.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import "net/http"
--
--// A RouteSelector finds the best matching Route given the input HTTP Request
--type RouteSelector interface {
--
--	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
--	// It returns a selected Route and its containing WebService or an error indicating
--	// a problem.
--	SelectRoute(
--		webServices []*WebService,
--		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/service_error.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/service_error.go
-deleted file mode 100644
-index 62d1108..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/service_error.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import "fmt"
--
--// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
--type ServiceError struct {
--	Code    int
--	Message string
--}
--
--// NewError returns a ServiceError using the code and reason
--func NewError(code int, message string) ServiceError {
--	return ServiceError{Code: code, Message: message}
--}
--
--// Error returns a text representation of the service error
--func (s ServiceError) Error() string {
--	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md
-deleted file mode 100644
-index 9980f2f..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md
-+++ /dev/null
-@@ -1,27 +0,0 @@
--Change history of swagger
--=
--
--2014-11-14
--- operation parameters are now sorted using ordering path,query,form,header,body
--
--2014-11-12
--- respect omitempty tag value for embedded structs
--- expose ApiVersion of WebService to Swagger ApiDeclaration
--
--2014-05-29
--- (api add) Ability to define custom http.Handler to serve swagger-ui static files
--
--2014-05-04
--- (fix) include model for array element type of response
--
--2014-01-03
--- (fix) do not add primitive type to the Api models
--
--2013-11-27
--- (fix) make Swagger work for WebServices with root ("/" or "") paths
--
--2013-10-29
--- (api add) package variable LogInfo to customize logging function
--
--2013-10-15
--- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md
-deleted file mode 100644
-index 2efe8f3..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md
-+++ /dev/null
-@@ -1,28 +0,0 @@
--How to use Swagger UI with go-restful
--=
--
--Get the Swagger UI sources (version 1.2 only)
--
--	git clone https://github.com/wordnik/swagger-ui.git
--	
--The project contains a "dist" folder.
--Its contents have all the Swagger UI files you need.
--
--The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
--You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
--
--Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
--
--	config := swagger.Config{
--		WebServices:    restful.RegisteredWebServices(),
--		ApiPath:        "/apidocs.json",
--		SwaggerPath:     "/apidocs/",
--		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
--	swagger.InstallSwaggerService(config)		
--	
--	
--Notes
----
--- Use RouteBuilder.Operation(..) to set the Nickname field of the API spec
--- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
--- Use tag "description" to annotate a struct field with a description to show in the UI
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go
-deleted file mode 100644
-index 4fca0fa..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go
-+++ /dev/null
-@@ -1,25 +0,0 @@
--package swagger
--
--import (
--	"net/http"
--
--	"github.com/emicklei/go-restful"
--)
--
--type Config struct {
--	// url where the services are available, e.g. http://localhost:8080
--	// if left empty then the basePath of Swagger is taken from the actual request
--	WebServicesUrl string
--	// path where the JSON api is available, e.g. /apidocs
--	ApiPath string
--	// [optional] path where the swagger UI will be served, e.g. /swagger
--	SwaggerPath string
--	// [optional] location of folder containing Swagger HTML5 application index.html
--	SwaggerFilePath string
--	// api listing is constructed from this list of restful WebServices.
--	WebServices []*restful.WebService
--	// will serve all static content (scripts,pages,images)
--	StaticHandler http.Handler
--	// [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
--	DisableCORS bool
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go
-deleted file mode 100644
-index 68524ec..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go
-+++ /dev/null
-@@ -1,277 +0,0 @@
--package swagger
--
--import (
--	"encoding/json"
--	"reflect"
--	"strings"
--)
--
--type modelBuilder struct {
--	Models map[string]Model
--}
--
--func (b modelBuilder) addModel(st reflect.Type, nameOverride string) {
--	modelName := b.keyFrom(st)
--	if nameOverride != "" {
--		modelName = nameOverride
--	}
--	// no models needed for primitive types
--	if b.isPrimitiveType(modelName) {
--		return
--	}
--	// see if we already have visited this model
--	if _, ok := b.Models[modelName]; ok {
--		return
--	}
--	sm := Model{
--		Id:         modelName,
--		Required:   []string{},
--		Properties: map[string]ModelProperty{}}
--
--	// reference the model before further initializing (enables recursive structs)
--	b.Models[modelName] = sm
--
--	// check for slice or array
--	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
--		b.addModel(st.Elem(), "")
--		return
--	}
--	// check for structure or primitive type
--	if st.Kind() != reflect.Struct {
--		return
--	}
--	for i := 0; i < st.NumField(); i++ {
--		field := st.Field(i)
--		jsonName, prop := b.buildProperty(field, &sm, modelName)
--		if descTag := field.Tag.Get("description"); descTag != "" {
--			prop.Description = descTag
--		}
--		// add if not omitted
--		if len(jsonName) != 0 {
--			// update Required
--			if b.isPropertyRequired(field) {
--				sm.Required = append(sm.Required, jsonName)
--			}
--			sm.Properties[jsonName] = prop
--		}
--	}
--
--	// update model builder with completed model
--	b.Models[modelName] = sm
--}
--
--func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
--	required := true
--	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
--		s := strings.Split(jsonTag, ",")
--		if len(s) > 1 && s[1] == "omitempty" {
--			return false
--		}
--	}
--	return required
--}
--
--func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName string, prop ModelProperty) {
--	jsonName = b.jsonNameOfField(field)
--	if len(jsonName) == 0 {
--		// empty name signals skip property
--		return "", prop
--	}
--	fieldType := field.Type
--	fieldKind := fieldType.Kind()
--
--	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
--		s := strings.Split(jsonTag, ",")
--		if len(s) > 1 && s[1] == "string" {
--			fieldType = reflect.TypeOf("")
--		}
--	}
--
--	var pType = b.jsonSchemaType(fieldType.String()) // may include pkg path
--	prop.Type = &pType
--	if b.isPrimitiveType(fieldType.String()) {
--		prop.Format = b.jsonSchemaFormat(fieldType.String())
--		return jsonName, prop
--	}
--
--	marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
--	if fieldType.Implements(marshalerType) {
--		var pType = "string"
--		prop.Type = &pType
--		return jsonName, prop
--	}
--
--	if fieldKind == reflect.Struct {
--		return b.buildStructTypeProperty(field, jsonName, model)
--	}
--
--	if fieldKind == reflect.Slice || fieldKind == reflect.Array {
--		return b.buildArrayTypeProperty(field, jsonName, modelName)
--	}
--
--	if fieldKind == reflect.Ptr {
--		return b.buildPointerTypeProperty(field, jsonName, modelName)
--	}
--
--	if fieldType.Name() == "" { // override type of anonymous structs
--		nestedTypeName := modelName + "." + jsonName
--		var pType = nestedTypeName
--		prop.Type = &pType
--		b.addModel(fieldType, nestedTypeName)
--	}
--	return jsonName, prop
--}
--
--func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
--	fieldType := field.Type
--	// check for anonymous
--	if len(fieldType.Name()) == 0 {
--		// anonymous
--		anonType := model.Id + "." + jsonName
--		b.addModel(fieldType, anonType)
--		prop.Type = &anonType
--		return jsonName, prop
--	}
--	if field.Name == fieldType.Name() && field.Anonymous {
--		// embedded struct
--		sub := modelBuilder{map[string]Model{}}
--		sub.addModel(fieldType, "")
--		subKey := sub.keyFrom(fieldType)
--		// merge properties from sub
--		subModel := sub.Models[subKey]
--		for k, v := range subModel.Properties {
--			model.Properties[k] = v
--			// if subModel says this property is required then include it
--			required := false
--			for _, each := range subModel.Required {
--				if k == each {
--					required = true
--					break
--				}
--			}
--			if required {
--				model.Required = append(model.Required, k)
--			}
--		}
--		// empty name signals skip property
--		return "", prop
--	}
--	// simple struct
--	b.addModel(fieldType, "")
--	var pType = fieldType.String()
--	prop.Type = &pType
--	return jsonName, prop
--}
--
--func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
--	fieldType := field.Type
--	var pType = "array"
--	prop.Type = &pType
--	elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
--	prop.Items = []Item{Item{Ref: &elemName}}
--	// add|overwrite model for element type
--	b.addModel(fieldType.Elem(), elemName)
--	return jsonName, prop
--}
--
--func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
--	fieldType := field.Type
--
--	// override type of pointer to list-likes
--	if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
--		var pType = "array"
--		prop.Type = &pType
--		elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
--		prop.Items = []Item{Item{Ref: &elemName}}
--		// add|overwrite model for element type
--		b.addModel(fieldType.Elem().Elem(), elemName)
--	} else {
--		// non-array, pointer type
--		var pType = fieldType.String()[1:] // no star, include pkg path
--		prop.Type = &pType
--		elemName := ""
--		if fieldType.Elem().Name() == "" {
--			elemName = modelName + "." + jsonName
--			prop.Type = &elemName
--		}
--		b.addModel(fieldType.Elem(), elemName)
--	}
--	return jsonName, prop
--}
--
--func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
--	if t.Name() == "" {
--		return modelName + "." + jsonName
--	}
--	if b.isPrimitiveType(t.Name()) {
--		return b.jsonSchemaType(t.Name())
--	}
--	return b.keyFrom(t)
--}
--
--func (b modelBuilder) keyFrom(st reflect.Type) string {
--	key := st.String()
--	if len(st.Name()) == 0 { // unnamed type
--		// Swagger UI has special meaning for [
--		key = strings.Replace(key, "[]", "||", -1)
--	}
--	return key
--}
--
--func (b modelBuilder) isPrimitiveType(modelName string) bool {
--	return strings.Contains("uint8 int int32 int64 float32 float64 bool string byte time.Time", modelName)
--}
--
--// jsonNameOfField returns the name of the field as it should appear in JSON format
--// An empty string indicates that this field is not part of the JSON representation
--func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
--	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
--		s := strings.Split(jsonTag, ",")
--		if s[0] == "-" {
--			// empty name signals skip property
--			return ""
--		} else if s[0] != "" {
--			return s[0]
--		}
--	}
--	return field.Name
--}
--
--func (b modelBuilder) jsonSchemaType(modelName string) string {
--	schemaMap := map[string]string{
--		"uint8":     "integer",
--		"int":       "integer",
--		"int32":     "integer",
--		"int64":     "integer",
--		"byte":      "string",
--		"float64":   "number",
--		"float32":   "number",
--		"bool":      "boolean",
--		"time.Time": "string",
--	}
--	mapped, ok := schemaMap[modelName]
--	if ok {
--		return mapped
--	} else {
--		return modelName // use as is (custom or struct)
--	}
--}
--
--func (b modelBuilder) jsonSchemaFormat(modelName string) string {
--	schemaMap := map[string]string{
--		"int":       "int32",
--		"int32":     "int32",
--		"int64":     "int64",
--		"byte":      "byte",
--		"uint8":     "byte",
--		"float64":   "double",
--		"float32":   "float",
--		"time.Time": "date-time",
--	}
--	mapped, ok := schemaMap[modelName]
--	if ok {
--		return mapped
--	} else {
--		return "" // no format
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder_test.go
-deleted file mode 100644
-index dd966bd..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder_test.go
-+++ /dev/null
-@@ -1,729 +0,0 @@
--package swagger
--
--import (
--	"testing"
--	"time"
--)
--
--type YesNo bool
--
--func (y YesNo) MarshalJSON() ([]byte, error) {
--	if y {
--		return []byte("yes"), nil
--	}
--	return []byte("no"), nil
--}
--
--// clear && go test -v -test.run TestCustomMarshaller_Issue96 ...swagger
--func TestCustomMarshaller_Issue96(t *testing.T) {
--	type Vote struct {
--		What YesNo
--	}
--	testJsonFromStruct(t, Vote{}, `{
--  "swagger.Vote": {
--   "id": "swagger.Vote",
--   "required": [
--    "What"
--   ],
--   "properties": {
--    "What": {
--     "type": "string"
--    }
--   }
--  }
-- }`)
--}
--
--// clear && go test -v -test.run TestPrimitiveTypes ...swagger
--func TestPrimitiveTypes(t *testing.T) {
--	type Prims struct {
--		f float64
--		t time.Time
--	}
--	testJsonFromStruct(t, Prims{}, `{
--  "swagger.Prims": {
--   "id": "swagger.Prims",
--   "required": [
--    "f",
--    "t"
--   ],
--   "properties": {
--    "f": {
--     "type": "number",
--     "format": "double"
--    },
--    "t": {
--     "type": "string",
--     "format": "date-time"
--    }
--   }
--  }
-- }`)
--}
--
--// clear && go test -v -test.run TestS1 ...swagger
--func TestS1(t *testing.T) {
--	type S1 struct {
--		Id string
--	}
--	testJsonFromStruct(t, S1{}, `{
--  "swagger.S1": {
--   "id": "swagger.S1",
--   "required": [
--    "Id"
--   ],
--   "properties": {
--    "Id": {
--     "type": "string"
--    }
--   }
--  }
-- }`)
--}
--
--// clear && go test -v -test.run TestS2 ...swagger
--func TestS2(t *testing.T) {
--	type S2 struct {
--		Ids []string
--	}
--	testJsonFromStruct(t, S2{}, `{
--  "swagger.S2": {
--   "id": "swagger.S2",
--   "required": [
--    "Ids"
--   ],
--   "properties": {
--    "Ids": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "string"
--      }
--     ]
--    }
--   }
--  }
-- }`)
--}
--
--// clear && go test -v -test.run TestS3 ...swagger
--func TestS3(t *testing.T) {
--	type NestedS3 struct {
--		Id string
--	}
--	type S3 struct {
--		Nested NestedS3
--	}
--	testJsonFromStruct(t, S3{}, `{
--  "swagger.NestedS3": {
--   "id": "swagger.NestedS3",
--   "required": [
--    "Id"
--   ],
--   "properties": {
--    "Id": {
--     "type": "string"
--    }
--   }
--  },
--  "swagger.S3": {
--   "id": "swagger.S3",
--   "required": [
--    "Nested"
--   ],
--   "properties": {
--    "Nested": {
--     "type": "swagger.NestedS3"
--    }
--   }
--  }
-- }`)
--}
--
--type sample struct {
--	id       string `swagger:"required"` // TODO
--	items    []item
--	rootItem item `json:"root" description:"root desc"`
--}
--
--type item struct {
--	itemName string `json:"name"`
--}
--
--// clear && go test -v -test.run TestSampleToModelAsJson ...swagger
--func TestSampleToModelAsJson(t *testing.T) {
--	testJsonFromStruct(t, sample{items: []item{}}, `{
--  "swagger.item": {
--   "id": "swagger.item",
--   "required": [
--    "name"
--   ],
--   "properties": {
--    "name": {
--     "type": "string"
--    }
--   }
--  },
--  "swagger.sample": {
--   "id": "swagger.sample",
--   "required": [
--    "id",
--    "items",
--    "root"
--   ],
--   "properties": {
--    "id": {
--     "type": "string"
--    },
--    "items": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.item"
--      }
--     ]
--    },
--    "root": {
--     "type": "swagger.item",
--     "description": "root desc"
--    }
--   }
--  }
-- }`)
--}
--
--func TestJsonTags(t *testing.T) {
--	type X struct {
--		A string
--		B string `json:"-"`
--		C int    `json:",string"`
--		D int    `json:","`
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A",
--    "C",
--    "D"
--   ],
--   "properties": {
--    "A": {
--     "type": "string"
--    },
--    "C": {
--     "type": "string"
--    },
--    "D": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestJsonTagOmitempty(t *testing.T) {
--	type X struct {
--		A int `json:",omitempty"`
--		B int `json:"C,omitempty"`
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "properties": {
--    "A": {
--     "type": "integer",
--     "format": "int32"
--    },
--    "C": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestJsonTagName(t *testing.T) {
--	type X struct {
--		A string `json:"B"`
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "string"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestAnonymousStruct(t *testing.T) {
--	type X struct {
--		A struct {
--			B int
--		}
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A"
--   ],
--   "properties": {
--    "A": {
--     "type": "swagger.X.A"
--    }
--   }
--  },
--  "swagger.X.A": {
--   "id": "swagger.X.A",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestAnonymousPtrStruct(t *testing.T) {
--	type X struct {
--		A *struct {
--			B int
--		}
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A"
--   ],
--   "properties": {
--    "A": {
--     "type": "swagger.X.A"
--    }
--   }
--  },
--  "swagger.X.A": {
--   "id": "swagger.X.A",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestAnonymousArrayStruct(t *testing.T) {
--	type X struct {
--		A []struct {
--			B int
--		}
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A"
--   ],
--   "properties": {
--    "A": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.X.A"
--      }
--     ]
--    }
--   }
--  },
--  "swagger.X.A": {
--   "id": "swagger.X.A",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--func TestAnonymousPtrArrayStruct(t *testing.T) {
--	type X struct {
--		A *[]struct {
--			B int
--		}
--	}
--
--	expected := `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A"
--   ],
--   "properties": {
--    "A": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.X.A"
--      }
--     ]
--    }
--   }
--  },
--  "swagger.X.A": {
--   "id": "swagger.X.A",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`
--
--	testJsonFromStruct(t, X{}, expected)
--}
--
--// go test -v -test.run TestEmbeddedStruct_Issue98 ...swagger
--func TestEmbeddedStruct_Issue98(t *testing.T) {
--	type Y struct {
--		A int
--	}
--	type X struct {
--		Y
--	}
--	testJsonFromStruct(t, X{}, `{
--  "swagger.X": {
--   "id": "swagger.X",
--   "required": [
--    "A"
--   ],
--   "properties": {
--    "A": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`)
--}
--
--type Dataset struct {
--	Names []string
--}
--
--// clear && go test -v -test.run TestIssue85 ...swagger
--func TestIssue85(t *testing.T) {
--	anon := struct{ Datasets []Dataset }{}
--	testJsonFromStruct(t, anon, `{
--  "struct { Datasets ||swagger.Dataset }": {
--   "id": "struct { Datasets ||swagger.Dataset }",
--   "required": [
--    "Datasets"
--   ],
--   "properties": {
--    "Datasets": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.Dataset"
--      }
--     ]
--    }
--   }
--  },
--  "swagger.Dataset": {
--   "id": "swagger.Dataset",
--   "required": [
--    "Names"
--   ],
--   "properties": {
--    "Names": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "string"
--      }
--     ]
--    }
--   }
--  }
-- }`)
--}
--
--type File struct {
--	History     []File
--	HistoryPtrs []*File
--}
--
--// go test -v -test.run TestRecursiveStructure ...swagger
--func TestRecursiveStructure(t *testing.T) {
--	testJsonFromStruct(t, File{}, `{
--  "swagger.File": {
--   "id": "swagger.File",
--   "required": [
--    "History",
--    "HistoryPtrs"
--   ],
--   "properties": {
--    "History": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.File"
--      }
--     ]
--    },
--    "HistoryPtrs": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "swagger.File.HistoryPtrs"
--      }
--     ]
--    }
--   }
--  },
--  "swagger.File.HistoryPtrs": {
--   "id": "swagger.File.HistoryPtrs",
--   "properties": {}
--  }
-- }`)
--}
--
--type A1 struct {
--	B struct {
--		Id      int
--		Comment string `json:"comment,omitempty"`
--	}
--}
--
--// go test -v -test.run TestEmbeddedStructA1 ...swagger
--func TestEmbeddedStructA1(t *testing.T) {
--	testJsonFromStruct(t, A1{}, `{
--  "swagger.A1": {
--   "id": "swagger.A1",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "swagger.A1.B"
--    }
--   }
--  },
--  "swagger.A1.B": {
--   "id": "swagger.A1.B",
--   "required": [
--    "Id"
--   ],
--   "properties": {
--    "Id": {
--     "type": "integer",
--     "format": "int32"
--    },
--    "comment": {
--     "type": "string"
--    }
--   }
--  }
-- }`)
--}
--
--type A2 struct {
--	C
--}
--type C struct {
--	Id      int    `json:"B"`
--	Comment string `json:"comment,omitempty"`
--	Secure  bool   `json:"secure"`
--}
--
--// go test -v -test.run TestEmbeddedStructA2 ...swagger
--func TestEmbeddedStructA2(t *testing.T) {
--	testJsonFromStruct(t, A2{}, `{
--  "swagger.A2": {
--   "id": "swagger.A2",
--   "required": [
--    "B",
--    "secure"
--   ],
--   "properties": {
--    "B": {
--     "type": "integer",
--     "format": "int32"
--    },
--    "comment": {
--     "type": "string"
--    },
--    "secure": {
--     "type": "boolean"
--    }
--   }
--  }
-- }`)
--}
--
--type A3 struct {
--	B D
--}
--
--type D struct {
--	Id int
--}
--
--// clear && go test -v -test.run TestStructA3 ...swagger
--func TestStructA3(t *testing.T) {
--	testJsonFromStruct(t, A3{}, `{
--  "swagger.A3": {
--   "id": "swagger.A3",
--   "required": [
--    "B"
--   ],
--   "properties": {
--    "B": {
--     "type": "swagger.D"
--    }
--   }
--  },
--  "swagger.D": {
--   "id": "swagger.D",
--   "required": [
--    "Id"
--   ],
--   "properties": {
--    "Id": {
--     "type": "integer",
--     "format": "int32"
--    }
--   }
--  }
-- }`)
--}
--
--type ObjectId []byte
--
--type Region struct {
--	Id   ObjectId `bson:"_id" json:"id"`
--	Name string   `bson:"name" json:"name"`
--	Type string   `bson:"type" json:"type"`
--}
--
--// clear && go test -v -test.run TestRegion_Issue113 ...swagger
--func TestRegion_Issue113(t *testing.T) {
--	testJsonFromStruct(t, []Region{}, `{
--  "integer": {
--   "id": "integer",
--   "properties": {}
--  },
--  "swagger.Region": {
--   "id": "swagger.Region",
--   "required": [
--    "id",
--    "name",
--    "type"
--   ],
--   "properties": {
--    "id": {
--     "type": "array",
--     "items": [
--      {
--       "$ref": "integer"
--      }
--     ]
--    },
--    "name": {
--     "type": "string"
--    },
--    "type": {
--     "type": "string"
--    }
--   }
--  },
--  "||swagger.Region": {
--   "id": "||swagger.Region",
--   "properties": {}
--  }
-- }`)
--}
--
--// clear && go test -v -test.run TestIssue158 ...swagger
--func TestIssue158(t *testing.T) {
--	type Address struct {
--		Country string `json:"country,omitempty"`
--	}
--
--	type Customer struct {
--		Name    string  `json:"name"`
--		Address Address `json:"address"`
--	}
--	expected := `{
--  "swagger.Address": {
--   "id": "swagger.Address",
--   "properties": {
--    "country": {
--     "type": "string"
--    }
--   }
--  },
--  "swagger.Customer": {
--   "id": "swagger.Customer",
--   "required": [
--    "name",
--    "address"
--   ],
--   "properties": {
--    "address": {
--     "type": "swagger.Address"
--    },
--    "name": {
--     "type": "string"
--    }
--   }
--  }
-- }`
--	testJsonFromStruct(t, Customer{}, expected)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go
-deleted file mode 100644
-index 813007b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package swagger
--
--// Copyright 2014 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--type ParameterSorter []Parameter
--
--func (s ParameterSorter) Len() int {
--	return len(s)
--}
--func (s ParameterSorter) Swap(i, j int) {
--	s[i], s[j] = s[j], s[i]
--}
--
--var typeToSortKey = map[string]string{
--	"path":   "A",
--	"query":  "B",
--	"form":   "C",
--	"header": "D",
--	"body":   "E",
--}
--
--func (s ParameterSorter) Less(i, j int) bool {
--	// use ordering path,query,form,header,body
--	pi := s[i]
--	pj := s[j]
--	return typeToSortKey[pi.ParamType]+pi.Name < typeToSortKey[pj.ParamType]+pj.Name
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go
-deleted file mode 100644
-index ef6d9eb..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go
-+++ /dev/null
-@@ -1,52 +0,0 @@
--package swagger
--
--import (
--	"bytes"
--	"sort"
--	"testing"
--)
--
--func TestSortParameters(t *testing.T) {
--	unsorted := []Parameter{
--		Parameter{
--			Name:      "form2",
--			ParamType: "form",
--		},
--		Parameter{
--			Name:      "header1",
--			ParamType: "header",
--		},
--		Parameter{
--			Name:      "path2",
--			ParamType: "path",
--		},
--		Parameter{
--			Name:      "body",
--			ParamType: "body",
--		},
--		Parameter{
--			Name:      "path1",
--			ParamType: "path",
--		},
--		Parameter{
--			Name:      "form1",
--			ParamType: "form",
--		},
--		Parameter{
--			Name:      "query2",
--			ParamType: "query",
--		},
--		Parameter{
--			Name:      "query1",
--			ParamType: "query",
--		},
--	}
--	sort.Sort(ParameterSorter(unsorted))
--	var b bytes.Buffer
--	for _, p := range unsorted {
--		b.WriteString(p.Name + ".")
--	}
--	if "path1.path2.query1.query2.form1.form2.header1.body." != b.String() {
--		t.Fatal("sorting has changed:" + b.String())
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go
-deleted file mode 100644
-index 9f2fe4b..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go
-+++ /dev/null
-@@ -1,184 +0,0 @@
--// Package swagger implements the structures of the Swagger
--// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
--package swagger
--
--const swaggerVersion = "1.2"
--
--// 4.3.3 Data Type Fields
--type DataTypeFields struct {
--	Type         *string  `json:"type,omitempty"` // if Ref not used
--	Ref          *string  `json:"$ref,omitempty"` // if Type not used
--	Format       string   `json:"format,omitempty"`
--	DefaultValue Special  `json:"defaultValue,omitempty"`
--	Enum         []string `json:"enum,omitempty"`
--	Minimum      string   `json:"minimum,omitempty"`
--	Maximum      string   `json:"maximum,omitempty"`
--	Items        []Item   `json:"items,omitempty"`
--	UniqueItems  *bool    `json:"uniqueItems,omitempty"`
--}
--
--type Special string
--
--// 4.3.4 Items Object
--type Item struct {
--	Type   *string `json:"type,omitempty"`
--	Ref    *string `json:"$ref,omitempty"`
--	Format string  `json:"format,omitempty"`
--}
--
--// 5.1 Resource Listing
--type ResourceListing struct {
--	SwaggerVersion string          `json:"swaggerVersion"` // e.g 1.2
--	Apis           []Resource      `json:"apis"`
--	ApiVersion     string          `json:"apiVersion"`
--	Info           Info            `json:"info"`
--	Authorizations []Authorization `json:"authorizations,omitempty"`
--}
--
--// 5.1.2 Resource Object
--type Resource struct {
--	Path        string `json:"path"` // relative or absolute, must start with /
--	Description string `json:"description"`
--}
--
--// 5.1.3 Info Object
--type Info struct {
--	Title             string `json:"title"`
--	Description       string `json:"description"`
--	TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
--	Contact           string `json:"contact,omitempty"`
--	License           string `json:"license,omitempty"`
--	LicensUrl         string `json:"licensUrl,omitempty"`
--}
--
--// 5.1.5
--type Authorization struct {
--	Type       string      `json:"type"`
--	PassAs     string      `json:"passAs"`
--	Keyname    string      `json:"keyname"`
--	Scopes     []Scope     `json:"scopes"`
--	GrantTypes []GrantType `json:"grandTypes"`
--}
--
--// 5.1.6, 5.2.11
--type Scope struct {
--	// Required. The name of the scope.
--	Scope string `json:"scope"`
--	// Recommended. A short description of the scope.
--	Description string `json:"description"`
--}
--
--// 5.1.7
--type GrantType struct {
--	Implicit          Implicit          `json:"implicit"`
--	AuthorizationCode AuthorizationCode `json:"authorization_code"`
--}
--
--// 5.1.8 Implicit Object
--type Implicit struct {
--	// Required. The login endpoint definition.
--	loginEndpoint LoginEndpoint `json:"loginEndpoint"`
--	// An optional alternative name to standard "access_token" OAuth2 parameter.
--	TokenName string `json:"tokenName"`
--}
--
--// 5.1.9 Authorization Code Object
--type AuthorizationCode struct {
--	TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
--	TokenEndpoint        TokenEndpoint        `json:"tokenEndpoint"`
--}
--
--// 5.1.10 Login Endpoint Object
--type LoginEndpoint struct {
--	// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
--	Url string `json:"url"`
--}
--
--// 5.1.11 Token Request Endpoint Object
--type TokenRequestEndpoint struct {
--	// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
--	Url string `json:"url"`
--	// An optional alternative name to standard "client_id" OAuth2 parameter.
--	ClientIdName string `json:"clientIdName"`
--	// An optional alternative name to the standard "client_secret" OAuth2 parameter.
--	ClientSecretName string `json:"clientSecretName"`
--}
--
--// 5.1.12 Token Endpoint Object
--type TokenEndpoint struct {
--	// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
--	Url string `json:"url"`
--	// An optional alternative name to standard "access_token" OAuth2 parameter.
--	TokenName string `json:"tokenName"`
--}
--
--// 5.2 API Declaration
--type ApiDeclaration struct {
--	SwaggerVersion string           `json:"swaggerVersion"`
--	ApiVersion     string           `json:"apiVersion"`
--	BasePath       string           `json:"basePath"`
--	ResourcePath   string           `json:"resourcePath"` // must start with /
--	Apis           []Api            `json:"apis,omitempty"`
--	Models         map[string]Model `json:"models,omitempty"`
--	Produces       []string         `json:"produces,omitempty"`
--	Consumes       []string         `json:"consumes,omitempty"`
--	Authorizations []Authorization  `json:"authorizations,omitempty"`
--}
--
--// 5.2.2 API Object
--type Api struct {
--	Path        string      `json:"path"` // relative or absolute, must start with /
--	Description string      `json:"description"`
--	Operations  []Operation `json:"operations,omitempty"`
--}
--
--// 5.2.3 Operation Object
--type Operation struct {
--	Type             string            `json:"type"`
--	Method           string            `json:"method"`
--	Summary          string            `json:"summary,omitempty"`
--	Notes            string            `json:"notes,omitempty"`
--	Nickname         string            `json:"nickname"`
--	Authorizations   []Authorization   `json:"authorizations,omitempty"`
--	Parameters       []Parameter       `json:"parameters"`
--	ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
--	Produces         []string          `json:"produces,omitempty"`
--	Consumes         []string          `json:"consumes,omitempty"`
--	Deprecated       string            `json:"deprecated,omitempty"`
--}
--
--// 5.2.4 Parameter Object
--type Parameter struct {
--	DataTypeFields
--	ParamType     string `json:"paramType"` // path,query,body,header,form
--	Name          string `json:"name"`
--	Description   string `json:"description"`
--	Required      bool   `json:"required"`
--	AllowMultiple bool   `json:"allowMultiple"`
--}
--
--// 5.2.5 Response Message Object
--type ResponseMessage struct {
--	Code          int    `json:"code"`
--	Message       string `json:"message"`
--	ResponseModel string `json:"responseModel,omitempty"`
--}
--
--// 5.2.6, 5.2.7 Models Object
--type Model struct {
--	Id            string                   `json:"id"`
--	Description   string                   `json:"description,omitempty"`
--	Required      []string                 `json:"required,omitempty"`
--	Properties    map[string]ModelProperty `json:"properties"`
--	SubTypes      []string                 `json:"subTypes,omitempty"`
--	Discriminator string                   `json:"discriminator,omitempty"`
--}
--
--// 5.2.8 Properties Object
--type ModelProperty struct {
--	DataTypeFields
--	Description string `json:"description,omitempty"`
--}
--
--// 5.2.10
--type Authorizations map[string]Authorization
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go
-deleted file mode 100644
-index 81e72f6..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go
-+++ /dev/null
-@@ -1,116 +0,0 @@
--package swagger
--
--import (
--	"encoding/json"
--	"fmt"
--	"testing"
--
--	"github.com/emicklei/go-restful"
--)
--
--// go test -v -test.run TestApi ...swagger
--func TestApi(t *testing.T) {
--	value := Api{Path: "/", Description: "Some Path", Operations: []Operation{}}
--	compareJson(t, true, value, `{"path":"/","description":"Some Path"}`)
--}
--
--// go test -v -test.run TestServiceToApi ...swagger
--func TestServiceToApi(t *testing.T) {
--	ws := new(restful.WebService)
--	ws.Path("/tests")
--	ws.Consumes(restful.MIME_JSON)
--	ws.Produces(restful.MIME_XML)
--	ws.Route(ws.GET("/all").To(dummy).Writes(sample{}))
--	ws.ApiVersion("1.2.3")
--	cfg := Config{
--		WebServicesUrl: "http://here.com",
--		ApiPath:        "/apipath",
--		WebServices:    []*restful.WebService{ws}}
--	sws := newSwaggerService(cfg)
--	decl := sws.composeDeclaration(ws, "/tests")
--	data, err := json.MarshalIndent(decl, " ", " ")
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	// for visual inspection only
--	fmt.Println(string(data))
--}
--
--func dummy(i *restful.Request, o *restful.Response) {}
--
--// go test -v -test.run TestIssue78 ...swagger
--type Response struct {
--	Code  int
--	Users *[]User
--	Items *[]TestItem
--}
--type User struct {
--	Id, Name string
--}
--type TestItem struct {
--	Id, Name string
--}
--
--// clear && go test -v -test.run TestComposeResponseMessages ...swagger
--func TestComposeResponseMessages(t *testing.T) {
--	responseErrors := map[int]restful.ResponseError{}
--	responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}}
--	route := restful.Route{ResponseErrors: responseErrors}
--	decl := new(ApiDeclaration)
--	decl.Models = map[string]Model{}
--	msgs := composeResponseMessages(route, decl)
--	if msgs[0].ResponseModel != "swagger.TestItem" {
--		t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
--	}
--}
--
--// clear && go test -v -test.run TestComposeResponseMessageArray ...swagger
--func TestComposeResponseMessageArray(t *testing.T) {
--	responseErrors := map[int]restful.ResponseError{}
--	responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: []TestItem{}}
--	route := restful.Route{ResponseErrors: responseErrors}
--	decl := new(ApiDeclaration)
--	decl.Models = map[string]Model{}
--	msgs := composeResponseMessages(route, decl)
--	if msgs[0].ResponseModel != "array[swagger.TestItem]" {
--		t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
--	}
--}
--
--func TestIssue78(t *testing.T) {
--	sws := newSwaggerService(Config{})
--	models := map[string]Model{}
--	sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models)
--	model, ok := models["swagger.Response"]
--	if !ok {
--		t.Fatal("missing response model")
--	}
--	if "swagger.Response" != model.Id {
--		t.Fatal("wrong model id:" + model.Id)
--	}
--	code, ok := model.Properties["Code"]
--	if !ok {
--		t.Fatal("missing code")
--	}
--	if "integer" != *code.Type {
--		t.Fatal("wrong code type:" + *code.Type)
--	}
--	items, ok := model.Properties["Items"]
--	if !ok {
--		t.Fatal("missing items")
--	}
--	if "array" != *items.Type {
--		t.Fatal("wrong items type:" + *items.Type)
--	}
--	items_items := items.Items
--	if len(items_items) == 0 {
--		t.Fatal("missing items->items")
--	}
--	ref := items_items[0].Ref
--	if ref == nil {
--		t.Fatal("missing $ref")
--	}
--	if *ref != "swagger.TestItem" {
--		t.Fatal("wrong $ref:" + *ref)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go
-deleted file mode 100644
-index 04da0a1..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go
-+++ /dev/null
-@@ -1,353 +0,0 @@
--package swagger
--
--import (
--	"fmt"
--	"github.com/emicklei/go-restful"
--	// "github.com/emicklei/hopwatch"
--	"log"
--	"net/http"
--	"reflect"
--	"sort"
--	"strings"
--)
--
--type SwaggerService struct {
--	config            Config
--	apiDeclarationMap map[string]ApiDeclaration
--}
--
--func newSwaggerService(config Config) *SwaggerService {
--	return &SwaggerService{
--		config:            config,
--		apiDeclarationMap: map[string]ApiDeclaration{}}
--}
--
--// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
--var LogInfo = log.Printf
--
--// InstallSwaggerService add the WebService that provides the API documentation of all services
--// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki).
--func InstallSwaggerService(aSwaggerConfig Config) {
--	RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
--}
--
--// RegisterSwaggerService adds the WebService that provides the API documentation of all services
--// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
--func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
--	sws := newSwaggerService(config)
--	ws := new(restful.WebService)
--	ws.Path(config.ApiPath)
--	ws.Produces(restful.MIME_JSON)
--	if config.DisableCORS {
--		ws.Filter(enableCORS)
--	}
--	ws.Route(ws.GET("/").To(sws.getListing))
--	ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
--	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
--	LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
--	wsContainer.Add(ws)
--
--	// Build all ApiDeclarations
--	for _, each := range config.WebServices {
--		rootPath := each.RootPath()
--		// skip the api service itself
--		if rootPath != config.ApiPath {
--			if rootPath == "" || rootPath == "/" {
--				// use routes
--				for _, route := range each.Routes() {
--					entry := staticPathFromRoute(route)
--					_, exists := sws.apiDeclarationMap[entry]
--					if !exists {
--						sws.apiDeclarationMap[entry] = sws.composeDeclaration(each, entry)
--					}
--				}
--			} else { // use root path
--				sws.apiDeclarationMap[each.RootPath()] = sws.composeDeclaration(each, each.RootPath())
--			}
--		}
--	}
--
--	// Check paths for UI serving
--	if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
--		swaggerPathSlash := config.SwaggerPath
--		// path must end with slash /
--		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
--			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
--			swaggerPathSlash += "/"
--		}
--
--		LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
--		wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
--
--		//if we define a custom static handler use it
--	} else if config.StaticHandler != nil && config.SwaggerPath != "" {
--		swaggerPathSlash := config.SwaggerPath
--		// path must end with slash /
--		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
--			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
--			swaggerPathSlash += "/"
--
--		}
--		LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
--		wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
--
--	} else {
--		LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
--	}
--}
--
--func staticPathFromRoute(r restful.Route) string {
--	static := r.Path
--	bracket := strings.Index(static, "{")
--	if bracket <= 1 { // result cannot be empty
--		return static
--	}
--	if bracket != -1 {
--		static = r.Path[:bracket]
--	}
--	if strings.HasSuffix(static, "/") {
--		return static[:len(static)-1]
--	} else {
--		return static
--	}
--}
--
--func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
--	if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
--		// prevent duplicate header
--		if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
--			resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
--		}
--	}
--	chain.ProcessFilter(req, resp)
--}
--
--func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
--	listing := ResourceListing{SwaggerVersion: swaggerVersion}
--	for k, v := range sws.apiDeclarationMap {
--		ref := Resource{Path: k}
--		if len(v.Apis) > 0 { // use description of first (could still be empty)
--			ref.Description = v.Apis[0].Description
--		}
--		listing.Apis = append(listing.Apis, ref)
--	}
--	resp.WriteAsJson(listing)
--}
--
--func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
--	decl := sws.apiDeclarationMap[composeRootPath(req)]
--	// unless WebServicesUrl is given
--	if len(sws.config.WebServicesUrl) == 0 {
--		// update base path from the actual request
--		// TODO how to detect https? assume http for now
--		(&decl).BasePath = fmt.Sprintf("http://%s", req.Request.Host)
--	}
--	resp.WriteAsJson(decl)
--}
--
--func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
--	decl := ApiDeclaration{
--		SwaggerVersion: swaggerVersion,
--		BasePath:       sws.config.WebServicesUrl,
--		ResourcePath:   ws.RootPath(),
--		Models:         map[string]Model{},
--		ApiVersion:     ws.Version()}
--
--	// collect any path parameters
--	rootParams := []Parameter{}
--	for _, param := range ws.PathParameters() {
--		rootParams = append(rootParams, asSwaggerParameter(param.Data()))
--	}
--	// aggregate by path
--	pathToRoutes := map[string][]restful.Route{}
--	for _, other := range ws.Routes() {
--		if strings.HasPrefix(other.Path, pathPrefix) {
--			routes := pathToRoutes[other.Path]
--			pathToRoutes[other.Path] = append(routes, other)
--		}
--	}
--	for path, routes := range pathToRoutes {
--		api := Api{Path: strings.TrimSuffix(path, "/"), Description: ws.Documentation()}
--		for _, route := range routes {
--			operation := Operation{
--				Method:           route.Method,
--				Summary:          route.Doc,
--				Type:             asDataType(route.WriteSample),
--				Parameters:       []Parameter{},
--				Nickname:         route.Operation,
--				ResponseMessages: composeResponseMessages(route, &decl)}
--
--			operation.Consumes = route.Consumes
--			operation.Produces = route.Produces
--
--			// share root params if any
--			for _, swparam := range rootParams {
--				operation.Parameters = append(operation.Parameters, swparam)
--			}
--			// route specific params
--			for _, param := range route.ParameterDocs {
--				operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
--			}
--			// sort parameters
--			sort.Sort(ParameterSorter(operation.Parameters))
--
--			sws.addModelsFromRouteTo(&operation, route, &decl)
--			api.Operations = append(api.Operations, operation)
--		}
--		decl.Apis = append(decl.Apis, api)
--	}
--	return decl
--}
--
--// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
--func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (messages []ResponseMessage) {
--	if route.ResponseErrors == nil {
--		return messages
--	}
--	// sort by code
--	codes := sort.IntSlice{}
--	for code, _ := range route.ResponseErrors {
--		codes = append(codes, code)
--	}
--	codes.Sort()
--	for _, code := range codes {
--		each := route.ResponseErrors[code]
--		message := ResponseMessage{
--			Code:    code,
--			Message: each.Message,
--		}
--		if each.Model != nil {
--			st := reflect.TypeOf(each.Model)
--			isCollection, st := detectCollectionType(st)
--			modelName := modelBuilder{}.keyFrom(st)
--			if isCollection {
--				modelName = "array[" + modelName + "]"
--			}
--			modelBuilder{decl.Models}.addModel(st, "")
--			// reference the model
--			message.ResponseModel = modelName
--		}
--		messages = append(messages, message)
--	}
--	return
--}
--
--// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it.
--func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
--	if route.ReadSample != nil {
--		sws.addModelFromSampleTo(operation, false, route.ReadSample, decl.Models)
--	}
--	if route.WriteSample != nil {
--		sws.addModelFromSampleTo(operation, true, route.WriteSample, decl.Models)
--	}
--}
--
--func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
--	isCollection := false
--	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
--		st = st.Elem()
--		isCollection = true
--	} else {
--		if st.Kind() == reflect.Ptr {
--			if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
--				st = st.Elem().Elem()
--				isCollection = true
--			}
--		}
--	}
--	return isCollection, st
--}
--
--// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
--func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models map[string]Model) {
--	st := reflect.TypeOf(sample)
--	isCollection, st := detectCollectionType(st)
--	modelName := modelBuilder{}.keyFrom(st)
--	if isResponse {
--		if isCollection {
--			modelName = "array[" + modelName + "]"
--		}
--		operation.Type = modelName
--	}
--	modelBuilder{models}.addModel(reflect.TypeOf(sample), "")
--}
--
--func asSwaggerParameter(param restful.ParameterData) Parameter {
--	return Parameter{
--		DataTypeFields: DataTypeFields{
--			Type:   &param.DataType,
--			Format: asFormat(param.DataType),
--		},
--		Name:        param.Name,
--		Description: param.Description,
--		ParamType:   asParamType(param.Kind),
--
--		Required: param.Required}
--}
--
--// Between 1 and 7 path parameters are supported
--func composeRootPath(req *restful.Request) string {
--	path := "/" + req.PathParameter("a")
--	b := req.PathParameter("b")
--	if b == "" {
--		return path
--	}
--	path = path + "/" + b
--	c := req.PathParameter("c")
--	if c == "" {
--		return path
--	}
--	path = path + "/" + c
--	d := req.PathParameter("d")
--	if d == "" {
--		return path
--	}
--	path = path + "/" + d
--	e := req.PathParameter("e")
--	if e == "" {
--		return path
--	}
--	path = path + "/" + e
--	f := req.PathParameter("f")
--	if f == "" {
--		return path
--	}
--	path = path + "/" + f
--	g := req.PathParameter("g")
--	if g == "" {
--		return path
--	}
--	return path + "/" + g
--}
--
--func asFormat(name string) string {
--	return "" // TODO
--}
--
--func asParamType(kind int) string {
--	switch {
--	case kind == restful.PathParameterKind:
--		return "path"
--	case kind == restful.QueryParameterKind:
--		return "query"
--	case kind == restful.BodyParameterKind:
--		return "body"
--	case kind == restful.HeaderParameterKind:
--		return "header"
--	case kind == restful.FormParameterKind:
--		return "form"
--	}
--	return ""
--}
--
--func asDataType(any interface{}) string {
--	if any == nil {
--		return "void"
--	}
--	return reflect.TypeOf(any).Name()
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go
-deleted file mode 100644
-index 6127bd5..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go
-+++ /dev/null
-@@ -1,70 +0,0 @@
--package swagger
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"reflect"
--	"strings"
--	"testing"
--)
--
--func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) {
--	compareJson(t, false, modelsFromStruct(sample), expectedJson)
--}
--
--func modelsFromStruct(sample interface{}) map[string]Model {
--	models := map[string]Model{}
--	builder := modelBuilder{models}
--	builder.addModel(reflect.TypeOf(sample), "")
--	return models
--}
--
--func compareJson(t *testing.T, flatCompare bool, value interface{}, expectedJsonAsString string) {
--	var output []byte
--	var err error
--	if flatCompare {
--		output, err = json.Marshal(value)
--	} else {
--		output, err = json.MarshalIndent(value, " ", " ")
--	}
--	if err != nil {
--		t.Error(err.Error())
--		return
--	}
--	actual := string(output)
--	if actual != expectedJsonAsString {
--		t.Errorf("First mismatch JSON doc at line:%d", indexOfNonMatchingLine(actual, expectedJsonAsString))
--		// Use simple fmt to create a pastable output :-)
--		fmt.Println("---- expected -----")
--		fmt.Println(withLineNumbers(expectedJsonAsString))
--		fmt.Println("---- actual -----")
--		fmt.Println(withLineNumbers(actual))
--		fmt.Println("---- raw -----")
--		fmt.Println(actual)
--	}
--}
--
--func indexOfNonMatchingLine(actual, expected string) int {
--	a := strings.Split(actual, "\n")
--	e := strings.Split(expected, "\n")
--	size := len(a)
--	if len(e) < len(a) {
--		size = len(e)
--	}
--	for i := 0; i < size; i++ {
--		if a[i] != e[i] {
--			return i
--		}
--	}
--	return -1
--}
--
--func withLineNumbers(content string) string {
--	var buffer bytes.Buffer
--	lines := strings.Split(content, "\n")
--	for i, each := range lines {
--		buffer.WriteString(fmt.Sprintf("%d:%s\n", i, each))
--	}
--	return buffer.String()
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service.go
-deleted file mode 100644
-index f3c10e0..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service.go
-+++ /dev/null
-@@ -1,190 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import "log"
--
--// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
--type WebService struct {
--	rootPath       string
--	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
--	routes         []Route
--	produces       []string
--	consumes       []string
--	pathParameters []*Parameter
--	filters        []FilterFunction
--	documentation  string
--	apiVersion     string
--}
--
--// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
--func (w *WebService) compilePathExpression() {
--	if len(w.rootPath) == 0 {
--		w.Path("/") // lazy initialize path
--	}
--	compiled, err := newPathExpression(w.rootPath)
--	if err != nil {
--		log.Fatalf("[restful] invalid path:%s because:%v", w.rootPath, err)
--	}
--	w.pathExpr = compiled
--}
--
--// ApiVersion sets the API version for documentation purposes.
--func (w *WebService) ApiVersion(apiVersion string) *WebService {
--	w.apiVersion = apiVersion
--	return w
--}
--
--// Version returns the API version for documentation purposes.
--func (w WebService) Version() string { return w.apiVersion }
--
--// Path specifies the root URL template path of the WebService.
--// All Routes will be relative to this path.
--func (w *WebService) Path(root string) *WebService {
--	w.rootPath = root
--	w.compilePathExpression()
--	return w
--}
--
--// Param adds a PathParameter to document parameters used in the root path.
--func (w *WebService) Param(parameter *Parameter) *WebService {
--	if w.pathParameters == nil {
--		w.pathParameters = []*Parameter{}
--	}
--	w.pathParameters = append(w.pathParameters, parameter)
--	return w
--}
--
--// PathParameter creates a new Parameter of kind Path for documentation purposes.
--// It is initialized as required with string as its DataType.
--func (w *WebService) PathParameter(name, description string) *Parameter {
--	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
--	p.bePath()
--	return p
--}
--
--// QueryParameter creates a new Parameter of kind Query for documentation purposes.
--// It is initialized as not required with string as its DataType.
--func (w *WebService) QueryParameter(name, description string) *Parameter {
--	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
--	p.beQuery()
--	return p
--}
--
--// BodyParameter creates a new Parameter of kind Body for documentation purposes.
--// It is initialized as required without a DataType.
--func (w *WebService) BodyParameter(name, description string) *Parameter {
--	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
--	p.beBody()
--	return p
--}
--
--// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
--// It is initialized as not required with string as its DataType.
--func (w *WebService) HeaderParameter(name, description string) *Parameter {
--	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
--	p.beHeader()
--	return p
--}
--
--// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
--// It is initialized as required with string as its DataType.
--func (w *WebService) FormParameter(name, description string) *Parameter {
--	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
--	p.beForm()
--	return p
--}
--
--// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
--func (w *WebService) Route(builder *RouteBuilder) *WebService {
--	builder.copyDefaults(w.produces, w.consumes)
--	w.routes = append(w.routes, builder.Build())
--	return w
--}
--
--// Method creates a new RouteBuilder and initialize its http method
--func (w *WebService) Method(httpMethod string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
--}
--
--// Produces specifies that this WebService can produce one or more MIME types.
--// Http requests must have one of these values set for the Accept header.
--func (w *WebService) Produces(contentTypes ...string) *WebService {
--	w.produces = contentTypes
--	return w
--}
--
--// Consumes specifies that this WebService can consume one or more MIME types.
--// Http requests must have one of these values set for the Content-Type header.
--func (w *WebService) Consumes(accepts ...string) *WebService {
--	w.consumes = accepts
--	return w
--}
--
--// Routes returns the Routes associated with this WebService
--func (w WebService) Routes() []Route {
--	return w.routes
--}
--
--// RootPath returns the RootPath associated with this WebService. Default "/"
--func (w WebService) RootPath() string {
--	return w.rootPath
--}
--
--// PathParameters returns the path parameters (shared among its Routes)
--func (w WebService) PathParameters() []*Parameter {
--	return w.pathParameters
--}
--
--// Filter adds a filter function to the chain of filters applicable to all its Routes
--func (w *WebService) Filter(filter FilterFunction) *WebService {
--	w.filters = append(w.filters, filter)
--	return w
--}
--
--// Doc is used to set the documentation of this service.
--func (w *WebService) Doc(plainText string) *WebService {
--	w.documentation = plainText
--	return w
--}
--
--// Documentation returns the documentation of this service.
--func (w WebService) Documentation() string {
--	return w.documentation
--}
--
--/*
--	Convenience methods
--*/
--
--// HEAD is a shortcut for .Method("HEAD").Path(subPath)
--func (w *WebService) HEAD(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
--}
--
--// GET is a shortcut for .Method("GET").Path(subPath)
--func (w *WebService) GET(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
--}
--
--// POST is a shortcut for .Method("POST").Path(subPath)
--func (w *WebService) POST(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
--}
--
--// PUT is a shortcut for .Method("PUT").Path(subPath)
--func (w *WebService) PUT(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
--}
--
--// PATCH is a shortcut for .Method("PATCH").Path(subPath)
--func (w *WebService) PATCH(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
--}
--
--// DELETE is a shortcut for .Method("DELETE").Path(subPath)
--func (w *WebService) DELETE(subPath string) *RouteBuilder {
--	return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_container.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_container.go
-deleted file mode 100644
-index c9d31b0..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_container.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package restful
--
--// Copyright 2013 Ernest Micklei. All rights reserved.
--// Use of this source code is governed by a license
--// that can be found in the LICENSE file.
--
--import (
--	"net/http"
--)
--
--// DefaultContainer is a restful.Container that uses http.DefaultServeMux
--var DefaultContainer *Container
--
--func init() {
--	DefaultContainer = NewContainer()
--	DefaultContainer.ServeMux = http.DefaultServeMux
--}
--
--// If set to true then panics will not be caught to return HTTP 500.
--// In that case, Route functions are responsible for handling any error situation.
--// Default value is false = recover from panics. This has performance implications.
--// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
--var DoNotRecover = false
--
--// Add registers a new WebService and adds it to the DefaultContainer.
--func Add(service *WebService) {
--	DefaultContainer.Add(service)
--}
--
--// Filter appends a container FilterFunction to the DefaultContainer.
--// These are called before dispatching a http.Request to a WebService.
--func Filter(filter FilterFunction) {
--	DefaultContainer.Filter(filter)
--}
--
--// RegisteredWebServices returns the collections of WebServices from the DefaultContainer
--func RegisteredWebServices() []*WebService {
--	return DefaultContainer.RegisteredWebServices()
--}
-diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go
-deleted file mode 100644
-index e200979..0000000
---- a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go
-+++ /dev/null
-@@ -1,115 +0,0 @@
--package restful
--
--import (
--	"net/http"
--	"net/http/httptest"
--	"testing"
--)
--
--const (
--	pathGetFriends = "/get/{userId}/friends"
--)
--
--func TestParameter(t *testing.T) {
--	p := &Parameter{&ParameterData{Name: "name", Description: "desc"}}
--	p.AllowMultiple(true)
--	p.DataType("int")
--	p.Required(true)
--	values := map[string]string{"a": "b"}
--	p.AllowableValues(values)
--	p.bePath()
--
--	ws := new(WebService)
--	ws.Param(p)
--	if ws.pathParameters[0].Data().Name != "name" {
--		t.Error("path parameter (or name) invalid")
--	}
--}
--func TestWebService_CanCreateParameterKinds(t *testing.T) {
--	ws := new(WebService)
--	if ws.BodyParameter("b", "b").Kind() != BodyParameterKind {
--		t.Error("body parameter expected")
--	}
--	if ws.PathParameter("p", "p").Kind() != PathParameterKind {
--		t.Error("path parameter expected")
--	}
--	if ws.QueryParameter("q", "q").Kind() != QueryParameterKind {
--		t.Error("query parameter expected")
--	}
--}
--
--func TestCapturePanic(t *testing.T) {
--	tearDown()
--	Add(newPanicingService())
--	httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	if 500 != httpWriter.Code {
--		t.Error("500 expected on fire")
--	}
--}
--
--func TestNotFound(t *testing.T) {
--	tearDown()
--	httpRequest, _ := http.NewRequest("GET", "http://here.com/missing", nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	if 404 != httpWriter.Code {
--		t.Error("404 expected on missing")
--	}
--}
--
--func TestMethodNotAllowed(t *testing.T) {
--	tearDown()
--	Add(newGetOnlyService())
--	httpRequest, _ := http.NewRequest("POST", "http://here.com/get", nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	if 405 != httpWriter.Code {
--		t.Error("405 expected method not allowed")
--	}
--}
--
--func TestSelectedRoutePath_Issue100(t *testing.T) {
--	tearDown()
--	Add(newSelectedRouteTestingService())
--	httpRequest, _ := http.NewRequest("GET", "http://here.com/get/232452/friends", nil)
--	httpRequest.Header.Set("Accept", "*/*")
--	httpWriter := httptest.NewRecorder()
--	DefaultContainer.dispatch(httpWriter, httpRequest)
--	if http.StatusOK != httpWriter.Code {
--		t.Error(http.StatusOK, "expected,", httpWriter.Code, "received.")
--	}
--}
--
--func newPanicingService() *WebService {
--	ws := new(WebService).Path("")
--	ws.Route(ws.GET("/fire").To(doPanic))
--	return ws
--}
--
--func newGetOnlyService() *WebService {
--	ws := new(WebService).Path("")
--	ws.Route(ws.GET("/get").To(doPanic))
--	return ws
--}
--
--func newSelectedRouteTestingService() *WebService {
--	ws := new(WebService).Path("")
--	ws.Route(ws.GET(pathGetFriends).To(selectedRouteChecker))
--	return ws
--}
--
--func selectedRouteChecker(req *Request, resp *Response) {
--	if req.SelectedRoutePath() != pathGetFriends {
--		resp.InternalServerError()
--	}
--}
--
--func doPanic(req *Request, resp *Response) {
--	println("lightning...")
--	panic("fire")
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
-deleted file mode 100644
-index 5a19fae..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
-+++ /dev/null
-@@ -1,13 +0,0 @@
--language: go
--go:
--  - 1.2.2
--  - 1.3.1
--  - tip
--env:
--  - GOARCH=amd64
--  - GOARCH=386
--install:
--  - go get -d ./...
--script:
--  - go test ./...
--  - ./testing/bin/fmtpolice
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
-deleted file mode 100644
-index 1470c64..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
-+++ /dev/null
-@@ -1,50 +0,0 @@
--# This is the official list of go-dockerclient authors for copyright purposes.
--
--Aldrin Leal <aldrin at leal.eng.br>
--Andreas Jaekle <andreas at jaekle.net>
--Andrews Medina <andrewsmedina at gmail.com>
--Andy Goldstein <andy.goldstein at redhat.com>
--Ben McCann <benmccann.com>
--Carlos Diaz-Padron <cpadron at mozilla.com>
--Cezar Sa Espinola <cezar.sa at corp.globo.com>
--Cheah Chu Yeow <chuyeow at gmail.com>
--cheneydeng <cheneydeng at qq.com>
--CMGS <ilskdw at gmail.com>
--Daniel, Dao Quang Minh <dqminh89 at gmail.com>
--David Huie <dahuie at gmail.com>
--Dawn Chen <dawnchen at google.com>
--Ed <edrocksit at gmail.com>
--Eric Anderson <anderson at copperegg.com>
--Fabio Rehm <fgrehm at gmail.com>
--Flavia Missi <flaviamissi at gmail.com>
--Francisco Souza <f at souza.cc>
--Jari Kolehmainen <jari.kolehmainen at digia.com>
--Jason Wilder <jwilder at litl.com>
--Jawher Moussa <jawher.moussa at gmail.com>
--Jean-Baptiste Dalido <jeanbaptiste at appgratis.com>
--Jeff Mitchell <jeffrey.mitchell at gmail.com>
--Jeffrey Hulten <jhulten at gmail.com>
--Johan Euphrosine <proppy at google.com>
--Karan Misra <kidoman at gmail.com>
--Kim, Hirokuni <hirokuni.kim at kvh.co.jp>
--Lucas Clemente <lucas at clemente.io>
--Máximo Cuadros Ortiz <mcuadros at gmail.com>
--Mike Dillon <mike.dillon at synctree.com>
--Omeid Matten <public at omeid.me>
--Paul Morie <pmorie at gmail.com>
--Peter Jihoon Kim <raingrove at gmail.com>
--Philippe Lafoucrière <philippe.lafoucriere at tech-angels.com>
--Rafe Colton <rafael.colton at gmail.com>
--Rob Miller <rob at kalistra.com>
--Robert Williamson <williamson.robert at gmail.com>
--Salvador Gironès <salvadorgirones at gmail.com>
--Simon Eskildsen <sirup at sirupsen.com>
--Simon Menke <simon.menke at gmail.com>
--Skolos <skolos at gopherlab.com>
--Soulou <leo at unbekandt.eu>
--Sridhar Ratnakumar <sridharr at activestate.com>
--Summer Mousa <smousa at zenoss.com>
--Tarsis Azevedo <tarsis at corp.globo.com>
--Tim Schindler <tim at catalyst-zero.com>
--Wiliam Souza <wiliamsouza83 at gmail.com>
--Ye Yin <eyniy at qq.com>
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
-deleted file mode 100644
-index f4130a5..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
-+++ /dev/null
-@@ -1,6 +0,0 @@
--                                 Apache License
--                           Version 2.0, January 2004
--                        http://www.apache.org/licenses/
--
--You can find the Docker license at the following link:
--https://raw2.github.com/dotcloud/docker/master/LICENSE
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE
-deleted file mode 100644
-index 7a6d8bb..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE
-+++ /dev/null
-@@ -1,22 +0,0 @@
--Copyright (c) 2014, go-dockerclient authors
--All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are met:
--
--  * Redistributions of source code must retain the above copyright notice,
--this list of conditions and the following disclaimer.
--  * Redistributions in binary form must reproduce the above copyright notice,
--this list of conditions and the following disclaimer in the documentation
--and/or other materials provided with the distribution.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
--ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
--WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
--DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
--FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
--DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
--SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
--CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
--OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
-deleted file mode 100644
-index 0f95d1f..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
-+++ /dev/null
-@@ -1,42 +0,0 @@
--# go-dockerclient
--
--[![Build Status](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
--[![Build Status](https://travis-ci.org/fsouza/go-dockerclient.png)](https://travis-ci.org/fsouza/go-dockerclient)
--
--[![GoDoc](http://godoc.org/github.com/fsouza/go-dockerclient?status.png)](http://godoc.org/github.com/fsouza/go-dockerclient)
--
--This package presents a client for the Docker remote API.
--
--For more details, check the [remote API documentation](http://docs.docker.io/en/latest/reference/api/docker_remote_api/).
--
--## Example
--
--```go
--package main
--
--import (
--        "fmt"
--        "github.com/fsouza/go-dockerclient"
--)
--
--func main() {
--        endpoint := "unix:///var/run/docker.sock"
--        client, _ := docker.NewClient(endpoint)
--        imgs, _ := client.ListImages(true)
--        for _, img := range imgs {
--                fmt.Println("ID: ", img.ID)
--                fmt.Println("RepoTags: ", img.RepoTags)
--                fmt.Println("Created: ", img.Created)
--                fmt.Println("Size: ", img.Size)
--                fmt.Println("VirtualSize: ", img.VirtualSize)
--                fmt.Println("ParentId: ", img.ParentID)
--        }
--}
--```
--
--## Developing
--
--You can run the tests with:
--
--    go get -d ./...
--    go test ./...
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go
-deleted file mode 100644
-index ecd9885..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go
-+++ /dev/null
-@@ -1,144 +0,0 @@
--package docker
--
--import (
--	"bytes"
--	"io"
--	"io/ioutil"
--	"net/http"
--	"os"
--	"reflect"
--	"testing"
--
--	"github.com/docker/docker/pkg/archive"
--)
--
--func TestBuildImageMultipleContextsError(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:                "testImage",
--		NoCache:             true,
--		SuppressOutput:      true,
--		RmTmpContainer:      true,
--		ForceRmTmpContainer: true,
--		InputStream:         &buf,
--		OutputStream:        &buf,
--		ContextDir:          "testing/data",
--	}
--	err := client.BuildImage(opts)
--	if err != ErrMultipleContexts {
--		t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error")
--	}
--}
--
--func TestBuildImageContextDirDockerignoreParsing(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:                "testImage",
--		NoCache:             true,
--		SuppressOutput:      true,
--		RmTmpContainer:      true,
--		ForceRmTmpContainer: true,
--		OutputStream:        &buf,
--		ContextDir:          "testing/data",
--	}
--	err := client.BuildImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	reqBody := fakeRT.requests[0].Body
--	tmpdir, err := unpackBodyTarball(reqBody)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	defer func() {
--		if err := os.RemoveAll(tmpdir); err != nil {
--			t.Fatal(err)
--		}
--	}()
--
--	files, err := ioutil.ReadDir(tmpdir)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	foundFiles := []string{}
--	for _, file := range files {
--		foundFiles = append(foundFiles, file.Name())
--	}
--
--	expectedFiles := []string{
--		".dockerignore",
--		"Dockerfile",
--		"barfile",
--		"ca.pem",
--		"cert.pem",
--		"key.pem",
--		"server.pem",
--		"serverkey.pem",
--		"symlink",
--	}
--
--	if !reflect.DeepEqual(expectedFiles, foundFiles) {
--		t.Errorf(
--			"BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v",
--			expectedFiles, foundFiles,
--		)
--	}
--}
--
--func TestBuildImageSendXRegistryConfig(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:                "testImage",
--		NoCache:             true,
--		SuppressOutput:      true,
--		RmTmpContainer:      true,
--		ForceRmTmpContainer: true,
--		OutputStream:        &buf,
--		ContextDir:          "testing/data",
--		AuthConfigs: AuthConfigurations{
--			Configs: map[string]AuthConfiguration{
--				"quay.io": {
--					Username:      "foo",
--					Password:      "bar",
--					Email:         "baz",
--					ServerAddress: "quay.io",
--				},
--			},
--		},
--	}
--
--	encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg=="
--
--	if err := client.BuildImage(opts); err != nil {
--		t.Fatal(err)
--	}
--
--	xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0]
--	if xRegistryConfig != encodedConfig {
--		t.Errorf(
--			"BuildImage: X-Registry-Config not set correctly: expected %q, got %q",
--			encodedConfig,
--			xRegistryConfig,
--		)
--	}
--}
--
--func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) {
--	tmpdir, err = ioutil.TempDir("", "go-dockerclient-test")
--	if err != nil {
--		return
--	}
--	err = archive.Untar(req, tmpdir, &archive.TarOptions{
--		Compression: archive.Uncompressed,
--		NoLchown:    true,
--	})
--	return
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go
-deleted file mode 100644
-index e7b056c..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import "fmt"
--
--// ChangeType is a type for constants indicating the type of change
--// in a container
--type ChangeType int
--
--const (
--	// ChangeModify is the ChangeType for container modifications
--	ChangeModify ChangeType = iota
--
--	// ChangeAdd is the ChangeType for additions to a container
--	ChangeAdd
--
--	// ChangeDelete is the ChangeType for deletions from a container
--	ChangeDelete
--)
--
--// Change represents a change in a container.
--//
--// See http://goo.gl/QkW9sH for more details.
--type Change struct {
--	Path string
--	Kind ChangeType
--}
--
--func (change *Change) String() string {
--	var kind string
--	switch change.Kind {
--	case ChangeModify:
--		kind = "C"
--	case ChangeAdd:
--		kind = "A"
--	case ChangeDelete:
--		kind = "D"
--	}
--	return fmt.Sprintf("%s %s", kind, change.Path)
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go
-deleted file mode 100644
-index 7c2ec30..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"testing"
--)
--
--func TestChangeString(t *testing.T) {
--	var tests = []struct {
--		change   Change
--		expected string
--	}{
--		{Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"},
--		{Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"},
--		{Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"},
--		{Change{"/etc/passwd", 33}, " /etc/passwd"},
--	}
--	for _, tt := range tests {
--		if got := tt.change.String(); got != tt.expected {
--			t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
-deleted file mode 100644
-index 3d86ff2..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
-+++ /dev/null
-@@ -1,636 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package docker provides a client for the Docker remote API.
--//
--// See http://goo.gl/G3plxW for more details on the remote API.
--package docker
--
--import (
--	"bytes"
--	"crypto/tls"
--	"crypto/x509"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"net"
--	"net/http"
--	"net/http/httputil"
--	"net/url"
--	"reflect"
--	"strconv"
--	"strings"
--)
--
--const userAgent = "go-dockerclient"
--
--var (
--	// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
--	ErrInvalidEndpoint = errors.New("invalid endpoint")
--
--	// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
--	ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
--
--	apiVersion1_12, _ = NewAPIVersion("1.12")
--)
--
--// APIVersion is an internal representation of a version of the Remote API.
--type APIVersion []int
--
--// NewAPIVersion returns an instance of APIVersion for the given string.
--//
--// The given string must be in the form <major>.<minor>.<patch>, where <major>,
--// <minor> and <patch> are integer numbers.
--func NewAPIVersion(input string) (APIVersion, error) {
--	if !strings.Contains(input, ".") {
--		return nil, fmt.Errorf("Unable to parse version %q", input)
--	}
--	arr := strings.Split(input, ".")
--	ret := make(APIVersion, len(arr))
--	var err error
--	for i, val := range arr {
--		ret[i], err = strconv.Atoi(val)
--		if err != nil {
--			return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
--		}
--	}
--	return ret, nil
--}
--
--func (version APIVersion) String() string {
--	var str string
--	for i, val := range version {
--		str += strconv.Itoa(val)
--		if i < len(version)-1 {
--			str += "."
--		}
--	}
--	return str
--}
--
--// LessThan is a function for comparing APIVersion structs
--func (version APIVersion) LessThan(other APIVersion) bool {
--	return version.compare(other) < 0
--}
--
--// LessThanOrEqualTo is a function for comparing APIVersion structs
--func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
--	return version.compare(other) <= 0
--}
--
--// GreaterThan is a function for comparing APIVersion structs
--func (version APIVersion) GreaterThan(other APIVersion) bool {
--	return version.compare(other) > 0
--}
--
--// GreaterThanOrEqualTo is a function for comparing APIVersion structs
--func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
--	return version.compare(other) >= 0
--}
--
--func (version APIVersion) compare(other APIVersion) int {
--	for i, v := range version {
--		if i <= len(other)-1 {
--			otherVersion := other[i]
--
--			if v < otherVersion {
--				return -1
--			} else if v > otherVersion {
--				return 1
--			}
--		}
--	}
--	if len(version) > len(other) {
--		return 1
--	}
--	if len(version) < len(other) {
--		return -1
--	}
--	return 0
--}
--
--// Client is the basic type of this package. It provides methods for
--// interaction with the API.
--type Client struct {
--	SkipServerVersionCheck bool
--	HTTPClient             *http.Client
--	TLSConfig              *tls.Config
--
--	endpoint            string
--	endpointURL         *url.URL
--	eventMonitor        *eventMonitoringState
--	requestedAPIVersion APIVersion
--	serverAPIVersion    APIVersion
--	expectedAPIVersion  APIVersion
--}
--
--// NewClient returns a Client instance ready for communication with the given
--// server endpoint. It will use the latest remote API version available in the
--// server.
--func NewClient(endpoint string) (*Client, error) {
--	client, err := NewVersionedClient(endpoint, "")
--	if err != nil {
--		return nil, err
--	}
--	client.SkipServerVersionCheck = true
--	return client, nil
--}
--
--// NewTLSClient returns a Client instance ready for TLS communications with the givens
--// server endpoint, key and certificates . It will use the latest remote API version
--// available in the server.
--func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
--	client, err := NewVersionnedTLSClient(endpoint, cert, key, ca, "")
--	if err != nil {
--		return nil, err
--	}
--	client.SkipServerVersionCheck = true
--	return client, nil
--}
--
--// NewVersionedClient returns a Client instance ready for communication with
--// the given server endpoint, using a specific remote API version.
--func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
--	u, err := parseEndpoint(endpoint)
--	if err != nil {
--		return nil, err
--	}
--	var requestedAPIVersion APIVersion
--	if strings.Contains(apiVersionString, ".") {
--		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
--		if err != nil {
--			return nil, err
--		}
--	}
--	return &Client{
--		HTTPClient:          http.DefaultClient,
--		endpoint:            endpoint,
--		endpointURL:         u,
--		eventMonitor:        new(eventMonitoringState),
--		requestedAPIVersion: requestedAPIVersion,
--	}, nil
--}
--
--// NewVersionnedTLSClient returns a Client instance ready for TLS communications with the givens
--// server endpoint, key and certificates, using a specific remote API version.
--func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
--	u, err := parseEndpoint(endpoint)
--	if err != nil {
--		return nil, err
--	}
--	var requestedAPIVersion APIVersion
--	if strings.Contains(apiVersionString, ".") {
--		requestedAPIVersion, err = NewAPIVersion(apiVersionString)
--		if err != nil {
--			return nil, err
--		}
--	}
--	if cert == "" || key == "" {
--		return nil, errors.New("Both cert and key path are required")
--	}
--	tlsCert, err := tls.LoadX509KeyPair(cert, key)
--	if err != nil {
--		return nil, err
--	}
--	tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
--	if ca == "" {
--		tlsConfig.InsecureSkipVerify = true
--	} else {
--		cert, err := ioutil.ReadFile(ca)
--		if err != nil {
--			return nil, err
--		}
--		caPool := x509.NewCertPool()
--		if !caPool.AppendCertsFromPEM(cert) {
--			return nil, errors.New("Could not add RootCA pem")
--		}
--		tlsConfig.RootCAs = caPool
--	}
--	tr := &http.Transport{
--		TLSClientConfig: tlsConfig,
--	}
--	if err != nil {
--		return nil, err
--	}
--	return &Client{
--		HTTPClient:          &http.Client{Transport: tr},
--		TLSConfig:           tlsConfig,
--		endpoint:            endpoint,
--		endpointURL:         u,
--		eventMonitor:        new(eventMonitoringState),
--		requestedAPIVersion: requestedAPIVersion,
--	}, nil
--}
--
--func (c *Client) checkAPIVersion() error {
--	serverAPIVersionString, err := c.getServerAPIVersionString()
--	if err != nil {
--		return err
--	}
--	c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
--	if err != nil {
--		return err
--	}
--	if c.requestedAPIVersion == nil {
--		c.expectedAPIVersion = c.serverAPIVersion
--	} else {
--		c.expectedAPIVersion = c.requestedAPIVersion
--	}
--	return nil
--}
--
--// Ping pings the docker server
--//
--// See http://goo.gl/stJENm for more details.
--func (c *Client) Ping() error {
--	path := "/_ping"
--	body, status, err := c.do("GET", path, nil)
--	if err != nil {
--		return err
--	}
--	if status != http.StatusOK {
--		return newError(status, body)
--	}
--	return nil
--}
--
--func (c *Client) getServerAPIVersionString() (version string, err error) {
--	body, status, err := c.do("GET", "/version", nil)
--	if err != nil {
--		return "", err
--	}
--	if status != http.StatusOK {
--		return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status)
--	}
--	var versionResponse map[string]string
--	err = json.Unmarshal(body, &versionResponse)
--	if err != nil {
--		return "", err
--	}
--	version = versionResponse["ApiVersion"]
--	return version, nil
--}
--
--func (c *Client) do(method, path string, data interface{}) ([]byte, int, error) {
--	var params io.Reader
--	if data != nil {
--		buf, err := json.Marshal(data)
--		if err != nil {
--			return nil, -1, err
--		}
--		params = bytes.NewBuffer(buf)
--	}
--	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
--		err := c.checkAPIVersion()
--		if err != nil {
--			return nil, -1, err
--		}
--	}
--	req, err := http.NewRequest(method, c.getURL(path), params)
--	if err != nil {
--		return nil, -1, err
--	}
--	req.Header.Set("User-Agent", userAgent)
--	if data != nil {
--		req.Header.Set("Content-Type", "application/json")
--	} else if method == "POST" {
--		req.Header.Set("Content-Type", "plain/text")
--	}
--	var resp *http.Response
--	protocol := c.endpointURL.Scheme
--	address := c.endpointURL.Path
--	if protocol == "unix" {
--		dial, err := net.Dial(protocol, address)
--		if err != nil {
--			return nil, -1, err
--		}
--		defer dial.Close()
--		clientconn := httputil.NewClientConn(dial, nil)
--		resp, err = clientconn.Do(req)
--		if err != nil {
--			return nil, -1, err
--		}
--		defer clientconn.Close()
--	} else {
--		resp, err = c.HTTPClient.Do(req)
--	}
--	if err != nil {
--		if strings.Contains(err.Error(), "connection refused") {
--			return nil, -1, ErrConnectionRefused
--		}
--		return nil, -1, err
--	}
--	defer resp.Body.Close()
--	body, err := ioutil.ReadAll(resp.Body)
--	if err != nil {
--		return nil, -1, err
--	}
--	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
--		return nil, resp.StatusCode, newError(resp.StatusCode, body)
--	}
--	return body, resp.StatusCode, nil
--}
--
--func (c *Client) stream(method, path string, setRawTerminal, rawJSONStream bool, headers map[string]string, in io.Reader, stdout, stderr io.Writer) error {
--	if (method == "POST" || method == "PUT") && in == nil {
--		in = bytes.NewReader(nil)
--	}
--	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
--		err := c.checkAPIVersion()
--		if err != nil {
--			return err
--		}
--	}
--	req, err := http.NewRequest(method, c.getURL(path), in)
--	if err != nil {
--		return err
--	}
--	req.Header.Set("User-Agent", userAgent)
--	if method == "POST" {
--		req.Header.Set("Content-Type", "plain/text")
--	}
--	for key, val := range headers {
--		req.Header.Set(key, val)
--	}
--	var resp *http.Response
--	protocol := c.endpointURL.Scheme
--	address := c.endpointURL.Path
--	if stdout == nil {
--		stdout = ioutil.Discard
--	}
--	if stderr == nil {
--		stderr = ioutil.Discard
--	}
--	if protocol == "unix" {
--		dial, err := net.Dial(protocol, address)
--		if err != nil {
--			return err
--		}
--		clientconn := httputil.NewClientConn(dial, nil)
--		resp, err = clientconn.Do(req)
--		defer clientconn.Close()
--	} else {
--		resp, err = c.HTTPClient.Do(req)
--	}
--	if err != nil {
--		if strings.Contains(err.Error(), "connection refused") {
--			return ErrConnectionRefused
--		}
--		return err
--	}
--	defer resp.Body.Close()
--	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
--		body, err := ioutil.ReadAll(resp.Body)
--		if err != nil {
--			return err
--		}
--		return newError(resp.StatusCode, body)
--	}
--	if resp.Header.Get("Content-Type") == "application/json" {
--		// if we want to get raw json stream, just copy it back to output
--		// without decoding it
--		if rawJSONStream {
--			_, err = io.Copy(stdout, resp.Body)
--			return err
--		}
--		dec := json.NewDecoder(resp.Body)
--		for {
--			var m jsonMessage
--			if err := dec.Decode(&m); err == io.EOF {
--				break
--			} else if err != nil {
--				return err
--			}
--			if m.Stream != "" {
--				fmt.Fprint(stdout, m.Stream)
--			} else if m.Progress != "" {
--				fmt.Fprintf(stdout, "%s %s\r", m.Status, m.Progress)
--			} else if m.Error != "" {
--				return errors.New(m.Error)
--			}
--			if m.Status != "" {
--				fmt.Fprintln(stdout, m.Status)
--			}
--		}
--	} else {
--		if setRawTerminal {
--			_, err = io.Copy(stdout, resp.Body)
--		} else {
--			_, err = stdCopy(stdout, stderr, resp.Body)
--		}
--		return err
--	}
--	return nil
--}
--
--func (c *Client) hijack(method, path string, success chan struct{}, setRawTerminal bool, in io.Reader, stderr, stdout io.Writer, data interface{}) error {
--	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
--		err := c.checkAPIVersion()
--		if err != nil {
--			return err
--		}
--	}
--
--	var params io.Reader
--	if data != nil {
--		buf, err := json.Marshal(data)
--		if err != nil {
--			return err
--		}
--		params = bytes.NewBuffer(buf)
--	}
--
--	if stdout == nil {
--		stdout = ioutil.Discard
--	}
--	if stderr == nil {
--		stderr = ioutil.Discard
--	}
--	req, err := http.NewRequest(method, c.getURL(path), params)
--	if err != nil {
--		return err
--	}
--	req.Header.Set("Content-Type", "plain/text")
--	protocol := c.endpointURL.Scheme
--	address := c.endpointURL.Path
--	if protocol != "unix" {
--		protocol = "tcp"
--		address = c.endpointURL.Host
--	}
--	dial, err := net.Dial(protocol, address)
--	if err != nil {
--		return err
--	}
--	defer dial.Close()
--	clientconn := httputil.NewClientConn(dial, nil)
--	clientconn.Do(req)
--	if success != nil {
--		success <- struct{}{}
--		<-success
--	}
--	rwc, br := clientconn.Hijack()
--	errs := make(chan error, 2)
--	exit := make(chan bool)
--	go func() {
--		defer close(exit)
--		var err error
--		if setRawTerminal {
--			_, err = io.Copy(stdout, br)
--		} else {
--			_, err = stdCopy(stdout, stderr, br)
--		}
--		errs <- err
--	}()
--	go func() {
--		var err error
--		if in != nil {
--			_, err = io.Copy(rwc, in)
--		}
--		rwc.(interface {
--			CloseWrite() error
--		}).CloseWrite()
--		errs <- err
--	}()
--	<-exit
--	return <-errs
--}
--
--func (c *Client) getURL(path string) string {
--	urlStr := strings.TrimRight(c.endpointURL.String(), "/")
--	if c.endpointURL.Scheme == "unix" {
--		urlStr = ""
--	}
--
--	if c.requestedAPIVersion != nil {
--		return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
--	}
--	return fmt.Sprintf("%s%s", urlStr, path)
--}
--
--type jsonMessage struct {
--	Status   string `json:"status,omitempty"`
--	Progress string `json:"progress,omitempty"`
--	Error    string `json:"error,omitempty"`
--	Stream   string `json:"stream,omitempty"`
--}
--
--func queryString(opts interface{}) string {
--	if opts == nil {
--		return ""
--	}
--	value := reflect.ValueOf(opts)
--	if value.Kind() == reflect.Ptr {
--		value = value.Elem()
--	}
--	if value.Kind() != reflect.Struct {
--		return ""
--	}
--	items := url.Values(map[string][]string{})
--	for i := 0; i < value.NumField(); i++ {
--		field := value.Type().Field(i)
--		if field.PkgPath != "" {
--			continue
--		}
--		key := field.Tag.Get("qs")
--		if key == "" {
--			key = strings.ToLower(field.Name)
--		} else if key == "-" {
--			continue
--		}
--		v := value.Field(i)
--		switch v.Kind() {
--		case reflect.Bool:
--			if v.Bool() {
--				items.Add(key, "1")
--			}
--		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--			if v.Int() > 0 {
--				items.Add(key, strconv.FormatInt(v.Int(), 10))
--			}
--		case reflect.Float32, reflect.Float64:
--			if v.Float() > 0 {
--				items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
--			}
--		case reflect.String:
--			if v.String() != "" {
--				items.Add(key, v.String())
--			}
--		case reflect.Ptr:
--			if !v.IsNil() {
--				if b, err := json.Marshal(v.Interface()); err == nil {
--					items.Add(key, string(b))
--				}
--			}
--		case reflect.Map:
--			if len(v.MapKeys()) > 0 {
--				if b, err := json.Marshal(v.Interface()); err == nil {
--					items.Add(key, string(b))
--				}
--			}
--		}
--	}
--	return items.Encode()
--}
--
--// Error represents a failure returned by the Docker API.
--type Error struct {
--	Status  int
--	Message string
--}
--
--func newError(status int, body []byte) *Error {
--	return &Error{Status: status, Message: string(body)}
--}
--
--func (e *Error) Error() string {
--	return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
--}
--
--func parseEndpoint(endpoint string) (*url.URL, error) {
--	u, err := url.Parse(endpoint)
--	if err != nil {
--		return nil, ErrInvalidEndpoint
--	}
--	if u.Scheme == "tcp" {
--		_, port, err := net.SplitHostPort(u.Host)
--		if err != nil {
--			if e, ok := err.(*net.AddrError); ok {
--				if e.Err == "missing port in address" {
--					return u, nil
--				}
--			}
--			return nil, ErrInvalidEndpoint
--		}
--
--		number, err := strconv.ParseInt(port, 10, 64)
--		if err == nil && number == 2376 {
--			u.Scheme = "https"
--		} else {
--			u.Scheme = "http"
--		}
--	}
--	if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" {
--		return nil, ErrInvalidEndpoint
--	}
--	if u.Scheme != "unix" {
--		_, port, err := net.SplitHostPort(u.Host)
--		if err != nil {
--			if e, ok := err.(*net.AddrError); ok {
--				if e.Err == "missing port in address" {
--					return u, nil
--				}
--			}
--			return nil, ErrInvalidEndpoint
--		}
--		number, err := strconv.ParseInt(port, 10, 64)
--		if err == nil && number > 0 && number < 65536 {
--			return u, nil
--		}
--	} else {
--		return u, nil // we don't need port when using a unix socket
--	}
--	return nil, ErrInvalidEndpoint
--}
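
The file removed above is go-dockerclient's core client plumbing (endpoint parsing, the do/stream/hijack request helpers, and the qs-tag query encoding). For orientation only, here is a minimal usage sketch against the exported API that file defines (NewClient and Ping, as exercised by the deleted tests below); the unix-socket endpoint is just an illustrative default, and the import path assumes the upstream github.com/fsouza/go-dockerclient package rather than the vendored Godeps copy:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// parseEndpoint (above) accepts tcp://, http(s):// and unix:// endpoints;
	// tcp://host:2376 is upgraded to https, other tcp ports fall back to http.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Ping issues GET /_ping and wraps any non-200 status in *docker.Error.
	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("docker daemon is reachable")
}
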
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go
-deleted file mode 100644
-index 34543b4..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go
-+++ /dev/null
-@@ -1,368 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"fmt"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"reflect"
--	"strconv"
--	"strings"
--	"testing"
--)
--
--func TestNewAPIClient(t *testing.T) {
--	endpoint := "http://localhost:4243"
--	client, err := NewClient(endpoint)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if client.endpoint != endpoint {
--		t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
--	}
--	if client.HTTPClient != http.DefaultClient {
--		t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient)
--	}
--	// test unix socket endpoints
--	endpoint = "unix:///var/run/docker.sock"
--	client, err = NewClient(endpoint)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if client.endpoint != endpoint {
--		t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
--	}
--	if !client.SkipServerVersionCheck {
--		t.Error("Expected SkipServerVersionCheck to be true, got false")
--	}
--	if client.requestedAPIVersion != nil {
--		t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
--	}
--}
--
--func newTLSClient(endpoint string) (*Client, error) {
--	return NewTLSClient(endpoint,
--		"testing/data/cert.pem",
--		"testing/data/key.pem",
--		"testing/data/ca.pem")
--}
--
--func TestNewTSLAPIClient(t *testing.T) {
--	endpoint := "https://localhost:4243"
--	client, err := newTLSClient(endpoint)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if client.endpoint != endpoint {
--		t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
--	}
--	if !client.SkipServerVersionCheck {
--		t.Error("Expected SkipServerVersionCheck to be true, got false")
--	}
--	if client.requestedAPIVersion != nil {
--		t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
--	}
--}
--
--func TestNewVersionedClient(t *testing.T) {
--	endpoint := "http://localhost:4243"
--	client, err := NewVersionedClient(endpoint, "1.12")
--	if err != nil {
--		t.Fatal(err)
--	}
--	if client.endpoint != endpoint {
--		t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
--	}
--	if client.HTTPClient != http.DefaultClient {
--		t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient)
--	}
--	if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" {
--		t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion)
--	}
--	if client.SkipServerVersionCheck {
--		t.Error("Expected SkipServerVersionCheck to be false, got true")
--	}
--}
--
--func TestNewTLSVersionedClient(t *testing.T) {
--	certPath := "testing/data/cert.pem"
--	keyPath := "testing/data/key.pem"
--	caPath := "testing/data/ca.pem"
--	endpoint := "https://localhost:4243"
--	client, err := NewVersionnedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
--	if err != nil {
--		t.Fatal(err)
--	}
--	if client.endpoint != endpoint {
--		t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
--	}
--	if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" {
--		t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion)
--	}
--	if client.SkipServerVersionCheck {
--		t.Error("Expected SkipServerVersionCheck to be false, got true")
--	}
--}
--
--func TestNewTLSVersionedClientInvalidCA(t *testing.T) {
--	certPath := "testing/data/cert.pem"
--	keyPath := "testing/data/key.pem"
--	caPath := "testing/data/key.pem"
--	endpoint := "https://localhost:4243"
--	_, err := NewVersionnedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
--	if err == nil {
--		t.Errorf("Expected invalid ca at %s", caPath)
--	}
--}
--
--func TestNewClientInvalidEndpoint(t *testing.T) {
--	cases := []string{
--		"htp://localhost:3243", "http://localhost:a", "localhost:8080",
--		"", "localhost", "http://localhost:8080:8383", "http://localhost:65536",
--		"https://localhost:-20",
--	}
--	for _, c := range cases {
--		client, err := NewClient(c)
--		if client != nil {
--			t.Errorf("Want <nil> client for invalid endpoint, got %#v.", client)
--		}
--		if !reflect.DeepEqual(err, ErrInvalidEndpoint) {
--			t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err)
--		}
--	}
--}
--
--func TestNewTLSClient2376(t *testing.T) {
--	var tests = []struct {
--		endpoint string
--		expected string
--	}{
--		{"tcp://localhost:2376", "https"},
--		{"tcp://localhost:2375", "http"},
--		{"tcp://localhost:4000", "http"},
--	}
--
--	for _, tt := range tests {
--		client, err := newTLSClient(tt.endpoint)
--		if err != nil {
--			t.Error(err)
--		}
--		got := client.endpointURL.Scheme
--		if got != tt.expected {
--			t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected)
--		}
--	}
--}
--
--func TestGetURL(t *testing.T) {
--	var tests = []struct {
--		endpoint string
--		path     string
--		expected string
--	}{
--		{"http://localhost:4243/", "/", "http://localhost:4243/"},
--		{"http://localhost:4243", "/", "http://localhost:4243/"},
--		{"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
--		{"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
--		{"http://localhost:4243/////", "/", "http://localhost:4243/"},
--		{"unix:///var/run/docker.socket", "/containers", "/containers"},
--	}
--	for _, tt := range tests {
--		client, _ := NewClient(tt.endpoint)
--		client.endpoint = tt.endpoint
--		client.SkipServerVersionCheck = true
--		got := client.getURL(tt.path)
--		if got != tt.expected {
--			t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
--		}
--	}
--}
--
--func TestError(t *testing.T) {
--	err := newError(400, []byte("bad parameter"))
--	expected := Error{Status: 400, Message: "bad parameter"}
--	if !reflect.DeepEqual(expected, *err) {
--		t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err)
--	}
--	message := "API error (400): bad parameter"
--	if err.Error() != message {
--		t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error())
--	}
--}
--
--func TestQueryString(t *testing.T) {
--	v := float32(2.4)
--	f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64))
--	jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`)
--	var tests = []struct {
--		input interface{}
--		want  string
--	}{
--		{&ListContainersOptions{All: true}, "all=1"},
--		{ListContainersOptions{All: true}, "all=1"},
--		{ListContainersOptions{Before: "something"}, "before=something"},
--		{ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"},
--		{ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"},
--		{dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"},
--		{dumb{W: v, X: 10, Y: 10.35000}, f32QueryString},
--		{dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"},
--		{dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"},
--		{dumb{T: 10, Y: 10.35000}, "y=10.35"},
--		{dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson},
--		{nil, ""},
--		{10, ""},
--		{"not_a_struct", ""},
--	}
--	for _, tt := range tests {
--		got := queryString(tt.input)
--		if got != tt.want {
--			t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got)
--		}
--	}
--}
--
--func TestNewAPIVersionFailures(t *testing.T) {
--	var tests = []struct {
--		input         string
--		expectedError string
--	}{
--		{"1-0", `Unable to parse version "1-0"`},
--		{"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`},
--	}
--	for _, tt := range tests {
--		v, err := NewAPIVersion(tt.input)
--		if v != nil {
--			t.Errorf("Expected <nil> version, got %v.", v)
--		}
--		if err.Error() != tt.expectedError {
--			t.Errorf("NewAPIVersion(%q): wrong error. Want %q. Got %q", tt.input, tt.expectedError, err.Error())
--		}
--	}
--}
--
--func TestAPIVersions(t *testing.T) {
--	var tests = []struct {
--		a                              string
--		b                              string
--		expectedALessThanB             bool
--		expectedALessThanOrEqualToB    bool
--		expectedAGreaterThanB          bool
--		expectedAGreaterThanOrEqualToB bool
--	}{
--		{"1.11", "1.11", false, true, false, true},
--		{"1.10", "1.11", true, true, false, false},
--		{"1.11", "1.10", false, false, true, true},
--
--		{"1.9", "1.11", true, true, false, false},
--		{"1.11", "1.9", false, false, true, true},
--
--		{"1.1.1", "1.1", false, false, true, true},
--		{"1.1", "1.1.1", true, true, false, false},
--
--		{"2.1", "1.1.1", false, false, true, true},
--		{"2.1", "1.3.1", false, false, true, true},
--		{"1.1.1", "2.1", true, true, false, false},
--		{"1.3.1", "2.1", true, true, false, false},
--	}
--
--	for _, tt := range tests {
--		a, _ := NewAPIVersion(tt.a)
--		b, _ := NewAPIVersion(tt.b)
--
--		if tt.expectedALessThanB && !a.LessThan(b) {
--			t.Errorf("Expected %#v < %#v", a, b)
--		}
--		if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) {
--			t.Errorf("Expected %#v <= %#v", a, b)
--		}
--		if tt.expectedAGreaterThanB && !a.GreaterThan(b) {
--			t.Errorf("Expected %#v > %#v", a, b)
--		}
--		if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) {
--			t.Errorf("Expected %#v >= %#v", a, b)
--		}
--	}
--}
--
--func TestPing(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	err := client.Ping()
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestPingFailing(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError}
--	client := newTestClient(fakeRT)
--	err := client.Ping()
--	if err == nil {
--		t.Fatal("Expected non nil error, got nil")
--	}
--	expectedErrMsg := "API error (500): "
--	if err.Error() != expectedErrMsg {
--		t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
--	}
--}
--
--func TestPingFailingWrongStatus(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted}
--	client := newTestClient(fakeRT)
--	err := client.Ping()
--	if err == nil {
--		t.Fatal("Expected non nil error, got nil")
--	}
--	expectedErrMsg := "API error (202): "
--	if err.Error() != expectedErrMsg {
--		t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
--	}
--}
--
--type FakeRoundTripper struct {
--	message  string
--	status   int
--	header   map[string]string
--	requests []*http.Request
--}
--
--func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
--	body := strings.NewReader(rt.message)
--	rt.requests = append(rt.requests, r)
--	res := &http.Response{
--		StatusCode: rt.status,
--		Body:       ioutil.NopCloser(body),
--		Header:     make(http.Header),
--	}
--	for k, v := range rt.header {
--		res.Header.Set(k, v)
--	}
--	return res, nil
--}
--
--func (rt *FakeRoundTripper) Reset() {
--	rt.requests = nil
--}
--
--type person struct {
--	Name string
--	Age  int `json:"age"`
--}
--
--type dumb struct {
--	T      int `qs:"-"`
--	v      int
--	W      float32
--	X      int
--	Y      float64
--	Z      int     `qs:"zee"`
--	Person *person `qs:"p"`
--}
--
--type fakeEndpointURL struct {
--	Scheme string
--}
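
The tests removed above never talk to a real daemon; they inject a FakeRoundTripper that records each request and answers with a canned status and body. A self-contained sketch of the same stub-transport pattern, using illustrative names that are not part of the removed code:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// cannedTransport plays the role of the FakeRoundTripper above: it records
// every request and answers with a fixed status and body.
type cannedTransport struct {
	status   int
	body     string
	requests []*http.Request
}

func (t *cannedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	t.requests = append(t.requests, r)
	return &http.Response{
		StatusCode: t.status,
		Body:       ioutil.NopCloser(strings.NewReader(t.body)),
		Header:     make(http.Header),
	}, nil
}

func main() {
	rt := &cannedTransport{status: http.StatusOK, body: `{"ApiVersion":"1.15"}`}
	client := &http.Client{Transport: rt}
	resp, err := client.Get("http://example.invalid/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// No network traffic happens; the stub transport answers directly.
	fmt.Printf("status=%d body=%s requests=%d\n", resp.StatusCode, body, len(rt.requests))
}
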
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
-deleted file mode 100644
-index c600c84..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
-+++ /dev/null
-@@ -1,760 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"io"
--	"net/http"
--	"net/url"
--	"strconv"
--	"strings"
--	"time"
--)
--
--// ListContainersOptions specify parameters to the ListContainers function.
--//
--// See http://goo.gl/6Y4Gz7 for more details.
--type ListContainersOptions struct {
--	All     bool
--	Size    bool
--	Limit   int
--	Since   string
--	Before  string
--	Filters map[string][]string
--}
--
--// APIPort is a type that represents a port mapping returned by the Docker API
--type APIPort struct {
--	PrivatePort int64  `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"`
--	PublicPort  int64  `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"`
--	Type        string `json:"Type,omitempty" yaml:"Type,omitempty"`
--	IP          string `json:"IP,omitempty" yaml:"IP,omitempty"`
--}
--
--// APIContainers represents a container.
--//
--// See http://goo.gl/QeFH7U for more details.
--type APIContainers struct {
--	ID         string    `json:"Id" yaml:"Id"`
--	Image      string    `json:"Image,omitempty" yaml:"Image,omitempty"`
--	Command    string    `json:"Command,omitempty" yaml:"Command,omitempty"`
--	Created    int64     `json:"Created,omitempty" yaml:"Created,omitempty"`
--	Status     string    `json:"Status,omitempty" yaml:"Status,omitempty"`
--	Ports      []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
--	SizeRw     int64     `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
--	SizeRootFs int64     `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
--	Names      []string  `json:"Names,omitempty" yaml:"Names,omitempty"`
--}
--
--// ListContainers returns a slice of containers matching the given criteria.
--//
--// See http://goo.gl/6Y4Gz7 for more details.
--func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
--	path := "/containers/json?" + queryString(opts)
--	body, _, err := c.do("GET", path, nil)
--	if err != nil {
--		return nil, err
--	}
--	var containers []APIContainers
--	err = json.Unmarshal(body, &containers)
--	if err != nil {
--		return nil, err
--	}
--	return containers, nil
--}
--
--// Port represents the port number and the protocol, in the form
--// <number>/<protocol>. For example: 80/tcp.
--type Port string
--
--// Port returns the number of the port.
--func (p Port) Port() string {
--	return strings.Split(string(p), "/")[0]
--}
--
--// Proto returns the name of the protocol.
--func (p Port) Proto() string {
--	parts := strings.Split(string(p), "/")
--	if len(parts) == 1 {
--		return "tcp"
--	}
--	return parts[1]
--}
--
--// State represents the state of a container.
--type State struct {
--	Running    bool      `json:"Running,omitempty" yaml:"Running,omitempty"`
--	Paused     bool      `json:"Paused,omitempty" yaml:"Paused,omitempty"`
--	OOMKilled  bool      `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"`
--	Pid        int       `json:"Pid,omitempty" yaml:"Pid,omitempty"`
--	ExitCode   int       `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
--	Error      string    `json:"Error,omitempty" yaml:"Error,omitempty"`
--	StartedAt  time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
--	FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
--}
--
--// String returns the string representation of a state.
--func (s *State) String() string {
--	if s.Running {
--		if s.Paused {
--			return "paused"
--		}
--		return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt))
--	}
--	return fmt.Sprintf("Exit %d", s.ExitCode)
--}
--
--// PortBinding represents the host/container port mapping as returned in the
--// `docker inspect` json
--type PortBinding struct {
--	HostIP   string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"`
--	HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"`
--}
--
--// PortMapping represents a deprecated field in the `docker inspect` output,
--// and its value as found in NetworkSettings should always be nil
--type PortMapping map[string]string
--
--// NetworkSettings contains network-related information about a container
--type NetworkSettings struct {
--	IPAddress   string                 `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
--	IPPrefixLen int                    `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
--	Gateway     string                 `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
--	Bridge      string                 `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
--	PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
--	Ports       map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
--}
--
--// PortMappingAPI translates the port mappings as contained in NetworkSettings
--// into the format in which they would appear when returned by the API
--func (settings *NetworkSettings) PortMappingAPI() []APIPort {
--	var mapping []APIPort
--	for port, bindings := range settings.Ports {
--		p, _ := parsePort(port.Port())
--		if len(bindings) == 0 {
--			mapping = append(mapping, APIPort{
--				PublicPort: int64(p),
--				Type:       port.Proto(),
--			})
--			continue
--		}
--		for _, binding := range bindings {
--			p, _ := parsePort(port.Port())
--			h, _ := parsePort(binding.HostPort)
--			mapping = append(mapping, APIPort{
--				PrivatePort: int64(p),
--				PublicPort:  int64(h),
--				Type:        port.Proto(),
--				IP:          binding.HostIP,
--			})
--		}
--	}
--	return mapping
--}
--
--func parsePort(rawPort string) (int, error) {
--	port, err := strconv.ParseUint(rawPort, 10, 16)
--	if err != nil {
--		return 0, err
--	}
--	return int(port), nil
--}
--
--// Config is the list of configuration options used when creating a container.
--// Config does not contain the options that are specific to starting a
--// container on a given host. Those are contained in HostConfig.
--type Config struct {
--	Hostname        string              `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
--	Domainname      string              `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
--	User            string              `json:"User,omitempty" yaml:"User,omitempty"`
--	Memory          int64               `json:"Memory,omitempty" yaml:"Memory,omitempty"`
--	MemorySwap      int64               `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
--	CPUShares       int64               `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
--	CPUSet          string              `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
--	AttachStdin     bool                `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
--	AttachStdout    bool                `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
--	AttachStderr    bool                `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
--	PortSpecs       []string            `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
--	ExposedPorts    map[Port]struct{}   `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
--	Tty             bool                `json:"Tty,omitempty" yaml:"Tty,omitempty"`
--	OpenStdin       bool                `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
--	StdinOnce       bool                `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
--	Env             []string            `json:"Env,omitempty" yaml:"Env,omitempty"`
--	Cmd             []string            `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
--	DNS             []string            `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
--	Image           string              `json:"Image,omitempty" yaml:"Image,omitempty"`
--	Volumes         map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
--	VolumesFrom     string              `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
--	WorkingDir      string              `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
--	Entrypoint      []string            `json:"Entrypoint,omitempty" yaml:"Entrypoint,omitempty"`
--	NetworkDisabled bool                `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
--}
--
--// Container is the type encompassing everything about a container - its config,
--// hostconfig, etc.
--type Container struct {
--	ID string `json:"Id" yaml:"Id"`
--
--	Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
--
--	Path string   `json:"Path,omitempty" yaml:"Path,omitempty"`
--	Args []string `json:"Args,omitempty" yaml:"Args,omitempty"`
--
--	Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
--	State  State   `json:"State,omitempty" yaml:"State,omitempty"`
--	Image  string  `json:"Image,omitempty" yaml:"Image,omitempty"`
--
--	NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
--
--	SysInitPath    string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"`
--	ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
--	HostnamePath   string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
--	HostsPath      string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
--	Name           string `json:"Name,omitempty" yaml:"Name,omitempty"`
--	Driver         string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
--
--	Volumes    map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
--	VolumesRW  map[string]bool   `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
--	HostConfig *HostConfig       `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
--}
--
--// InspectContainer returns information about a container by its ID.
--//
--// See http://goo.gl/CxVuJ5 for more details.
--func (c *Client) InspectContainer(id string) (*Container, error) {
--	path := "/containers/" + id + "/json"
--	body, status, err := c.do("GET", path, nil)
--	if status == http.StatusNotFound {
--		return nil, &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return nil, err
--	}
--	var container Container
--	err = json.Unmarshal(body, &container)
--	if err != nil {
--		return nil, err
--	}
--	return &container, nil
--}
--
--// ContainerChanges returns changes in the filesystem of the given container.
--//
--// See http://goo.gl/QkW9sH for more details.
--func (c *Client) ContainerChanges(id string) ([]Change, error) {
--	path := "/containers/" + id + "/changes"
--	body, status, err := c.do("GET", path, nil)
--	if status == http.StatusNotFound {
--		return nil, &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return nil, err
--	}
--	var changes []Change
--	err = json.Unmarshal(body, &changes)
--	if err != nil {
--		return nil, err
--	}
--	return changes, nil
--}
--
--// CreateContainerOptions specify parameters to the CreateContainer function.
--//
--// See http://goo.gl/2xxQQK for more details.
--type CreateContainerOptions struct {
--	Name       string
--	Config     *Config `qs:"-"`
--	HostConfig *HostConfig
--}
--
--// CreateContainer creates a new container, returning the container instance,
--// or an error in case of failure.
--//
--// See http://goo.gl/mErxNp for more details.
--func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
--	path := "/containers/create?" + queryString(opts)
--	body, status, err := c.do("POST", path, struct {
--		*Config
--		HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
--	}{
--		opts.Config,
--		opts.HostConfig,
--	})
--
--	if status == http.StatusNotFound {
--		return nil, ErrNoSuchImage
--	}
--	if err != nil {
--		return nil, err
--	}
--	var container Container
--	err = json.Unmarshal(body, &container)
--	if err != nil {
--		return nil, err
--	}
--
--	container.Name = opts.Name
--
--	return &container, nil
--}
--
--// KeyValuePair is a type for generic key/value pairs as used in the Lxc
--// configuration
--type KeyValuePair struct {
--	Key   string `json:"Key,omitempty" yaml:"Key,omitempty"`
--	Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
--}
--
--// RestartPolicy represents the policy for automatically restarting a container.
--//
--// Possible values are:
--//
--//   - always: the docker daemon will always restart the container
--//   - on-failure: the docker daemon will restart the container on failures, at
--//                 most MaximumRetryCount times
--//   - no: the docker daemon will not restart the container automatically
--type RestartPolicy struct {
--	Name              string `json:"Name,omitempty" yaml:"Name,omitempty"`
--	MaximumRetryCount int    `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"`
--}
--
--// AlwaysRestart returns a restart policy that tells the Docker daemon to
--// always restart the container.
--func AlwaysRestart() RestartPolicy {
--	return RestartPolicy{Name: "always"}
--}
--
--// RestartOnFailure returns a restart policy that tells the Docker daemon to
--// restart the container on failures, trying at most maxRetry times.
--func RestartOnFailure(maxRetry int) RestartPolicy {
--	return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
--}
--
--// NeverRestart returns a restart policy that tells the Docker daemon to never
--// restart the container on failures.
--func NeverRestart() RestartPolicy {
--	return RestartPolicy{Name: "no"}
--}
--
--// HostConfig contains the container options related to starting a container on
--// a given host
--type HostConfig struct {
--	Binds           []string               `json:"Binds,omitempty" yaml:"Binds,omitempty"`
--	CapAdd          []string               `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
--	CapDrop         []string               `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
--	ContainerIDFile string                 `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
--	LxcConf         []KeyValuePair         `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
--	Privileged      bool                   `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
--	PortBindings    map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
--	Links           []string               `json:"Links,omitempty" yaml:"Links,omitempty"`
--	PublishAllPorts bool                   `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
--	DNS             []string               `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
--	DNSSearch       []string               `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
--	ExtraHosts      []string               `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
--	VolumesFrom     []string               `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
--	NetworkMode     string                 `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
--	RestartPolicy   RestartPolicy          `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
--}
--
--// StartContainer starts a container, returning an error in case of failure.
--//
--// See http://goo.gl/iM5GYs for more details.
--func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
--	if hostConfig == nil {
--		hostConfig = &HostConfig{}
--	}
--	path := "/containers/" + id + "/start"
--	_, status, err := c.do("POST", path, hostConfig)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: id}
--	}
--	if status == http.StatusNotModified {
--		return &ContainerAlreadyRunning{ID: id}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// StopContainer stops a container, killing it after the given timeout (in
--// seconds).
--//
--// See http://goo.gl/EbcpXt for more details.
--func (c *Client) StopContainer(id string, timeout uint) error {
--	path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
--	_, status, err := c.do("POST", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: id}
--	}
--	if status == http.StatusNotModified {
--		return &ContainerNotRunning{ID: id}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// RestartContainer stops a container, killing it after the given timeout (in
--// seconds), and then starts it again.
--//
--// See http://goo.gl/VOzR2n for more details.
--func (c *Client) RestartContainer(id string, timeout uint) error {
--	path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
--	_, status, err := c.do("POST", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// PauseContainer pauses the given container.
--//
--// See http://goo.gl/AM5t42 for more details.
--func (c *Client) PauseContainer(id string) error {
--	path := fmt.Sprintf("/containers/%s/pause", id)
--	_, status, err := c.do("POST", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// UnpauseContainer unpauses the given container.
--//
--// See http://goo.gl/eBrNSL for more details.
--func (c *Client) UnpauseContainer(id string) error {
--	path := fmt.Sprintf("/containers/%s/unpause", id)
--	_, status, err := c.do("POST", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// TopResult represents the list of processes running in a container, as
--// returned by /containers/<id>/top.
--//
--// See http://goo.gl/qu4gse for more details.
--type TopResult struct {
--	Titles    []string
--	Processes [][]string
--}
--
--// TopContainer returns processes running inside a container
--//
--// See http://goo.gl/qu4gse for more details.
--func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
--	var args string
--	var result TopResult
--	if psArgs != "" {
--		args = fmt.Sprintf("?ps_args=%s", psArgs)
--	}
--	path := fmt.Sprintf("/containers/%s/top%s", id, args)
--	body, status, err := c.do("GET", path, nil)
--	if status == http.StatusNotFound {
--		return result, &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return result, err
--	}
--	err = json.Unmarshal(body, &result)
--	if err != nil {
--		return result, err
--	}
--	return result, nil
--}
--
--// KillContainerOptions represents the set of options that can be used in a
--// call to KillContainer.
--//
--// See http://goo.gl/TFkECx for more details.
--type KillContainerOptions struct {
--	// The ID of the container.
--	ID string `qs:"-"`
--
--	// The signal to send to the container. When omitted, Docker server
--	// will assume SIGKILL.
--	Signal Signal
--}
--
--// KillContainer kills a container, returning an error in case of failure.
--//
--// See http://goo.gl/TFkECx for more details.
--func (c *Client) KillContainer(opts KillContainerOptions) error {
--	path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
--	_, status, err := c.do("POST", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: opts.ID}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// RemoveContainerOptions encapsulates options to remove a container.
--//
--// See http://goo.gl/ZB83ji for more details.
--type RemoveContainerOptions struct {
--	// The ID of the container.
--	ID string `qs:"-"`
--
--	// A flag that indicates whether Docker should remove the volumes
--	// associated to the container.
--	RemoveVolumes bool `qs:"v"`
--
--	// A flag that indicates whether Docker should remove the container
--	// even if it is currently running.
--	Force bool
--}
--
--// RemoveContainer removes a container, returning an error in case of failure.
--//
--// See http://goo.gl/ZB83ji for more details.
--func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
--	path := "/containers/" + opts.ID + "?" + queryString(opts)
--	_, status, err := c.do("DELETE", path, nil)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: opts.ID}
--	}
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// CopyFromContainerOptions is the set of options that can be used when copying
--// files or folders from a container.
--//
--// See http://goo.gl/rINMlw for more details.
--type CopyFromContainerOptions struct {
--	OutputStream io.Writer `json:"-"`
--	Container    string    `json:"-"`
--	Resource     string
--}
--
--// CopyFromContainer copies files or folders from a container, using a given
--// resource.
--//
--// See http://goo.gl/rINMlw for more details.
--func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
--	if opts.Container == "" {
--		return &NoSuchContainer{ID: opts.Container}
--	}
--	url := fmt.Sprintf("/containers/%s/copy", opts.Container)
--	body, status, err := c.do("POST", url, opts)
--	if status == http.StatusNotFound {
--		return &NoSuchContainer{ID: opts.Container}
--	}
--	if err != nil {
--		return err
--	}
--	io.Copy(opts.OutputStream, bytes.NewBuffer(body))
--	return nil
--}
--
--// WaitContainer blocks until the given container stops, returning the exit
--// code of the container.
--//
--// See http://goo.gl/J88DHU for more details.
--func (c *Client) WaitContainer(id string) (int, error) {
--	body, status, err := c.do("POST", "/containers/"+id+"/wait", nil)
--	if status == http.StatusNotFound {
--		return 0, &NoSuchContainer{ID: id}
--	}
--	if err != nil {
--		return 0, err
--	}
--	var r struct{ StatusCode int }
--	err = json.Unmarshal(body, &r)
--	if err != nil {
--		return 0, err
--	}
--	return r.StatusCode, nil
--}
--
--// CommitContainerOptions aggregates parameters to the CommitContainer method.
--//
--// See http://goo.gl/Jn8pe8 for more details.
--type CommitContainerOptions struct {
--	Container  string
--	Repository string `qs:"repo"`
--	Tag        string
--	Message    string `qs:"m"`
--	Author     string
--	Run        *Config `qs:"-"`
--}
--
--// CommitContainer creates a new image from a container's changes.
--//
--// See http://goo.gl/Jn8pe8 for more details.
--func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
--	path := "/commit?" + queryString(opts)
--	body, status, err := c.do("POST", path, opts.Run)
--	if status == http.StatusNotFound {
--		return nil, &NoSuchContainer{ID: opts.Container}
--	}
--	if err != nil {
--		return nil, err
--	}
--	var image Image
--	err = json.Unmarshal(body, &image)
--	if err != nil {
--		return nil, err
--	}
--	return &image, nil
--}
--
--// AttachToContainerOptions is the set of options that can be used when
--// attaching to a container.
--//
--// See http://goo.gl/RRAhws for more details.
--type AttachToContainerOptions struct {
--	Container    string    `qs:"-"`
--	InputStream  io.Reader `qs:"-"`
--	OutputStream io.Writer `qs:"-"`
--	ErrorStream  io.Writer `qs:"-"`
--
--	// Get container logs, sending them to OutputStream.
--	Logs bool
--
--	// Stream the response?
--	Stream bool
--
--	// Attach to stdin, and use InputStream.
--	Stdin bool
--
--	// Attach to stdout, and use OutputStream.
--	Stdout bool
--
--	// Attach to stderr, and use ErrorStream.
--	Stderr bool
--
--	// If set, after a successful connect, a sentinel will be sent and then the
--	// client will block on receive before continuing.
--	//
--	// It must be an unbuffered channel. Using a buffered channel can lead
--	// to unexpected behavior.
--	Success chan struct{}
--
--	// Use raw terminal? Usually true when the container contains a TTY.
--	RawTerminal bool `qs:"-"`
--}
--
--// AttachToContainer attaches to a container, using the given options.
--//
--// See http://goo.gl/RRAhws for more details.
--func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
--	if opts.Container == "" {
--		return &NoSuchContainer{ID: opts.Container}
--	}
--	path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
--	return c.hijack("POST", path, opts.Success, opts.RawTerminal, opts.InputStream, opts.ErrorStream, opts.OutputStream, nil)
--}
--
--// LogsOptions represents the set of options used when getting logs from a
--// container.
--//
--// See http://goo.gl/rLhKSU for more details.
--type LogsOptions struct {
--	Container    string    `qs:"-"`
--	OutputStream io.Writer `qs:"-"`
--	ErrorStream  io.Writer `qs:"-"`
--	Follow       bool
--	Stdout       bool
--	Stderr       bool
--	Timestamps   bool
--	Tail         string
--
--	// Use raw terminal? Usually true when the container contains a TTY.
--	RawTerminal bool `qs:"-"`
--}
--
--// Logs gets stdout and stderr logs from the specified container.
--//
--// See http://goo.gl/rLhKSU for more details.
--func (c *Client) Logs(opts LogsOptions) error {
--	if opts.Container == "" {
--		return &NoSuchContainer{ID: opts.Container}
--	}
--	if opts.Tail == "" {
--		opts.Tail = "all"
--	}
--	path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
--	return c.stream("GET", path, opts.RawTerminal, false, nil, nil, opts.OutputStream, opts.ErrorStream)
--}
--
--// ResizeContainerTTY resizes the terminal to the given height and width.
--func (c *Client) ResizeContainerTTY(id string, height, width int) error {
--	params := make(url.Values)
--	params.Set("h", strconv.Itoa(height))
--	params.Set("w", strconv.Itoa(width))
--	_, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), nil)
--	return err
--}
--
--// ExportContainerOptions is the set of parameters to the ExportContainer
--// method.
--//
--// See http://goo.gl/hnzE62 for more details.
--type ExportContainerOptions struct {
--	ID           string
--	OutputStream io.Writer
--}
--
--// ExportContainer exports the contents of the container with the given ID as
--// a tar archive, streaming it to the given OutputStream.
--//
--// See http://goo.gl/hnzE62 for more details.
--func (c *Client) ExportContainer(opts ExportContainerOptions) error {
--	if opts.ID == "" {
--		return &NoSuchContainer{ID: opts.ID}
--	}
--	url := fmt.Sprintf("/containers/%s/export", opts.ID)
--	return c.stream("GET", url, true, false, nil, nil, opts.OutputStream, nil)
--}
--
--// NoSuchContainer is the error returned when a given container does not exist.
--type NoSuchContainer struct {
--	ID string
--}
--
--func (err *NoSuchContainer) Error() string {
--	return "No such container: " + err.ID
--}
--
--// ContainerAlreadyRunning is the error returned when a given container is
--// already running.
--type ContainerAlreadyRunning struct {
--	ID string
--}
--
--func (err *ContainerAlreadyRunning) Error() string {
--	return "Container already running: " + err.ID
--}
--
--// ContainerNotRunning is the error returned when a given container is not
--// running.
--type ContainerNotRunning struct {
--	ID string
--}
--
--func (err *ContainerNotRunning) Error() string {
--	return "Container not running: " + err.ID
--}
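
container.go above carries the container-facing API surface (ListContainersOptions, ListContainers, InspectContainer, Create/Start/Stop/RemoveContainer and friends). A short usage sketch against those exported names; the endpoint is a placeholder and the import path again assumes the upstream package rather than the vendored copy:

package main

import (
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// ListContainers maps to GET /containers/json; All=true also returns
	// stopped containers, mirroring `docker ps -a`.
	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		log.Printf("%s  %s  %s", c.ID, c.Image, c.Status)
	}
}
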
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go
-deleted file mode 100644
-index bfb1119..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go
-+++ /dev/null
-@@ -1,1524 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/json"
--	"io/ioutil"
--	"net"
--	"net/http"
--	"net/http/httptest"
--	"net/url"
--	"os"
--	"reflect"
--	"regexp"
--	"runtime"
--	"strconv"
--	"strings"
--	"testing"
--	"time"
--)
--
--func TestStateString(t *testing.T) {
--	started := time.Now().Add(-3 * time.Hour)
--	var tests = []struct {
--		input    State
--		expected string
--	}{
--		{State{Running: true, Paused: true}, "^paused$"},
--		{State{Running: true, StartedAt: started}, "^Up 3h.*$"},
--		{State{Running: false, ExitCode: 7}, "^Exit 7$"},
--	}
--	for _, tt := range tests {
--		re := regexp.MustCompile(tt.expected)
--		if got := tt.input.String(); !re.MatchString(got) {
--			t.Errorf("State.String(): wrong result. Want %q. Got %q.", tt.expected, got)
--		}
--	}
--}
--
--func TestListContainers(t *testing.T) {
--	jsonContainers := `[
--     {
--             "Id": "8dfafdbc3a40",
--             "Image": "base:latest",
--             "Command": "echo 1",
--             "Created": 1367854155,
--             "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
--             "Status": "Exit 0"
--     },
--     {
--             "Id": "9cd87474be90",
--             "Image": "base:latest",
--             "Command": "echo 222222",
--             "Created": 1367854155,
--             "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
--             "Status": "Exit 0"
--     },
--     {
--             "Id": "3176a2479c92",
--             "Image": "base:latest",
--             "Command": "echo 3333333333333333",
--             "Created": 1367854154,
--             "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}],
--             "Status": "Exit 0"
--     },
--     {
--             "Id": "4cb07b47f9fb",
--             "Image": "base:latest",
--             "Command": "echo 444444444444444444444444444444444",
--             "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}],
--             "Created": 1367854152,
--             "Status": "Exit 0"
--     }
--]`
--	var expected []APIContainers
--	err := json.Unmarshal([]byte(jsonContainers), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK})
--	containers, err := client.ListContainers(ListContainersOptions{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(containers, expected) {
--		t.Errorf("ListContainers: Expected %#v. Got %#v.", expected, containers)
--	}
--}
--
--func TestListContainersParams(t *testing.T) {
--	var tests = []struct {
--		input  ListContainersOptions
--		params map[string][]string
--	}{
--		{ListContainersOptions{}, map[string][]string{}},
--		{ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}},
--		{ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}},
--		{
--			ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"},
--			map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}},
--		},
--		{
--			ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}},
--			map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}},
--		},
--		{
--			ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}},
--			map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}},
--		},
--	}
--	fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	u, _ := url.Parse(client.getURL("/containers/json"))
--	for _, tt := range tests {
--		client.ListContainers(tt.input)
--		got := map[string][]string(fakeRT.requests[0].URL.Query())
--		if !reflect.DeepEqual(got, tt.params) {
--			t.Errorf("Expected %#v, got %#v.", tt.params, got)
--		}
--		if path := fakeRT.requests[0].URL.Path; path != u.Path {
--			t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path)
--		}
--		if meth := fakeRT.requests[0].Method; meth != "GET" {
--			t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth)
--		}
--		fakeRT.Reset()
--	}
--}
--
--func TestListContainersFailure(t *testing.T) {
--	var tests = []struct {
--		status  int
--		message string
--	}{
--		{400, "bad parameter"},
--		{500, "internal server error"},
--	}
--	for _, tt := range tests {
--		client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status})
--		expected := Error{Status: tt.status, Message: tt.message}
--		containers, err := client.ListContainers(ListContainersOptions{})
--		if !reflect.DeepEqual(expected, *err.(*Error)) {
--			t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err)
--		}
--		if len(containers) > 0 {
--			t.Errorf("ListContainers failure. Expected empty list. Got %#v.", containers)
--		}
--	}
--}
--
--func TestInspectContainer(t *testing.T) {
--	jsonContainer := `{
--             "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
--             "Created": "2013-05-07T14:51:42.087658+02:00",
--             "Path": "date",
--             "Args": [],
--             "Config": {
--                     "Hostname": "4fa6e0f0c678",
--                     "User": "",
--                     "Memory": 17179869184,
--                     "MemorySwap": 34359738368,
--                     "AttachStdin": false,
--                     "AttachStdout": true,
--                     "AttachStderr": true,
--                     "PortSpecs": null,
--                     "Tty": false,
--                     "OpenStdin": false,
--                     "StdinOnce": false,
--                     "Env": null,
--                     "Cmd": [
--                             "date"
--                     ],
--                     "Image": "base",
--                     "Volumes": {},
--                     "VolumesFrom": ""
--             },
--             "State": {
--                     "Running": false,
--                     "Pid": 0,
--                     "ExitCode": 0,
--                     "StartedAt": "2013-05-07T14:51:42.087658+02:00",
--                     "Ghost": false
--             },
--             "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
--             "NetworkSettings": {
--                     "IpAddress": "",
--                     "IpPrefixLen": 0,
--                     "Gateway": "",
--                     "Bridge": "",
--                     "PortMapping": null
--             },
--             "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
--             "ResolvConfPath": "/etc/resolv.conf",
--             "Volumes": {},
--             "HostConfig": {
--               "Binds": null,
--               "ContainerIDFile": "",
--               "LxcConf": [],
--               "Privileged": false,
--               "PortBindings": {
--                 "80/tcp": [
--                   {
--                     "HostIp": "0.0.0.0",
--                     "HostPort": "49153"
--                   }
--                 ]
--               },
--               "Links": null,
--               "PublishAllPorts": false
--             }
--}`
--	var expected Container
--	err := json.Unmarshal([]byte(jsonContainer), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c678"
--	container, err := client.InspectContainer(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(*container, expected) {
--		t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json"))
--	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestInspectContainerNegativeSwap(t *testing.T) {
--	jsonContainer := `{
--             "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
--             "Created": "2013-05-07T14:51:42.087658+02:00",
--             "Path": "date",
--             "Args": [],
--             "Config": {
--                     "Hostname": "4fa6e0f0c678",
--                     "User": "",
--                     "Memory": 17179869184,
--                     "MemorySwap": -1,
--                     "AttachStdin": false,
--                     "AttachStdout": true,
--                     "AttachStderr": true,
--                     "PortSpecs": null,
--                     "Tty": false,
--                     "OpenStdin": false,
--                     "StdinOnce": false,
--                     "Env": null,
--                     "Cmd": [
--                             "date"
--                     ],
--                     "Image": "base",
--                     "Volumes": {},
--                     "VolumesFrom": ""
--             },
--             "State": {
--                     "Running": false,
--                     "Pid": 0,
--                     "ExitCode": 0,
--                     "StartedAt": "2013-05-07T14:51:42.087658+02:00",
--                     "Ghost": false
--             },
--             "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
--             "NetworkSettings": {
--                     "IpAddress": "",
--                     "IpPrefixLen": 0,
--                     "Gateway": "",
--                     "Bridge": "",
--                     "PortMapping": null
--             },
--             "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
--             "ResolvConfPath": "/etc/resolv.conf",
--             "Volumes": {},
--             "HostConfig": {
--               "Binds": null,
--               "ContainerIDFile": "",
--               "LxcConf": [],
--               "Privileged": false,
--               "PortBindings": {
--                 "80/tcp": [
--                   {
--                     "HostIp": "0.0.0.0",
--                     "HostPort": "49153"
--                   }
--                 ]
--               },
--               "Links": null,
--               "PublishAllPorts": false
--             }
--}`
--	var expected Container
--	err := json.Unmarshal([]byte(jsonContainer), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c678"
--	container, err := client.InspectContainer(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(*container, expected) {
--		t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json"))
--	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestInspectContainerFailure(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
--	expected := Error{Status: 500, Message: "server error"}
--	container, err := client.InspectContainer("abe033")
--	if container != nil {
--		t.Errorf("InspectContainer: Expected <nil> container, got %#v", container)
--	}
--	if !reflect.DeepEqual(expected, *err.(*Error)) {
--		t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestInspectContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
--	container, err := client.InspectContainer("abe033")
--	if container != nil {
--		t.Errorf("InspectContainer: Expected <nil> container, got %#v", container)
--	}
--	expected := &NoSuchContainer{ID: "abe033"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestContainerChanges(t *testing.T) {
--	jsonChanges := `[
--     {
--             "Path":"/dev",
--             "Kind":0
--     },
--     {
--             "Path":"/dev/kmsg",
--             "Kind":1
--     },
--     {
--             "Path":"/test",
--             "Kind":1
--     }
--]`
--	var expected []Change
--	err := json.Unmarshal([]byte(jsonChanges), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c678"
--	changes, err := client.ContainerChanges(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(changes, expected) {
--		t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes"))
--	if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestContainerChangesFailure(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
--	expected := Error{Status: 500, Message: "server error"}
--	changes, err := client.ContainerChanges("abe033")
--	if changes != nil {
--		t.Errorf("ContainerChanges: Expected <nil> changes, got %#v", changes)
--	}
--	if !reflect.DeepEqual(expected, *err.(*Error)) {
--		t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestContainerChangesNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
--	changes, err := client.ContainerChanges("abe033")
--	if changes != nil {
--		t.Errorf("ContainerChanges: Expected <nil> changes, got %#v", changes)
--	}
--	expected := &NoSuchContainer{ID: "abe033"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestCreateContainer(t *testing.T) {
--	jsonContainer := `{
--             "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
--	     "Warnings": []
--}`
--	var expected Container
--	err := json.Unmarshal([]byte(jsonContainer), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	config := Config{AttachStdout: true, AttachStdin: true}
--	opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config}
--	container, err := client.CreateContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	if container.ID != id {
--		t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/create"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("CreateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
--	}
--	var gotBody Config
--	err = json.NewDecoder(req.Body).Decode(&gotBody)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestCreateContainerImageNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound})
--	config := Config{AttachStdout: true, AttachStdin: true}
--	container, err := client.CreateContainer(CreateContainerOptions{Config: &config})
--	if container != nil {
--		t.Errorf("CreateContainer: expected <nil> container, got %#v.", container)
--	}
--	if !reflect.DeepEqual(err, ErrNoSuchImage) {
--		t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err)
--	}
--}
--
--func TestCreateContainerWithHostConfig(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	config := Config{}
--	hostConfig := HostConfig{PublishAllPorts: true}
--	opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig}
--	_, err := client.CreateContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	var gotBody map[string]interface{}
--	err = json.NewDecoder(req.Body).Decode(&gotBody)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if _, ok := gotBody["HostConfig"]; !ok {
--		t.Errorf("CreateContainer: wrong body. HostConfig was not serialized")
--	}
--}
--
--func TestStartContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.StartContainer(id, &HostConfig{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--	expectedContentType := "application/json"
--	if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType {
--		t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType)
--	}
--}
--
--func TestStartContainerNilHostConfig(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.StartContainer(id, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--	expectedContentType := "application/json"
--	if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType {
--		t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType)
--	}
--}
--
--func TestStartContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.StartContainer("a2344", &HostConfig{})
--	expected := &NoSuchContainer{ID: "a2344"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestStartContainerAlreadyRunning(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified})
--	err := client.StartContainer("a2334", &HostConfig{})
--	expected := &ContainerAlreadyRunning{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestStopContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.StopContainer(id, 10)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestStopContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.StopContainer("a2334", 10)
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestStopContainerNotRunning(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified})
--	err := client.StopContainer("a2334", 10)
--	expected := &ContainerNotRunning{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestRestartContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.RestartContainer(id, 10)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestRestartContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.RestartContainer("a2334", 10)
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestPauseContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.PauseContainer(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestPauseContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.PauseContainer("a2334")
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestUnpauseContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.UnpauseContainer(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("UnpauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("UnpauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestUnpauseContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.UnpauseContainer("a2334")
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("UnpauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestKillContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.KillContainer(KillContainerOptions{ID: id})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestKillContainerSignal(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	if signal := req.URL.Query().Get("signal"); signal != "15" {
--		t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal)
--	}
--}
--
--func TestKillContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.KillContainer(KillContainerOptions{ID: "a2334"})
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestRemoveContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	opts := RemoveContainerOptions{ID: id}
--	err := client.RemoveContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "DELETE" {
--		t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestRemoveContainerRemoveVolumes(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	opts := RemoveContainerOptions{ID: id, RemoveVolumes: true}
--	err := client.RemoveContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	params := map[string][]string(req.URL.Query())
--	expected := map[string][]string{"v": {"1"}}
--	if !reflect.DeepEqual(params, expected) {
--		t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. Got %#v.", id, expected, params)
--	}
--}
--
--func TestRemoveContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"})
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestResizeContainerTTY(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	err := client.ResizeContainerTTY(id, 40, 80)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--	got := map[string][]string(req.URL.Query())
--	expectedParams := map[string][]string{
--		"w": {"80"},
--		"h": {"40"},
--	}
--	if !reflect.DeepEqual(got, expectedParams) {
--		t.Errorf("Expected %#v, got %#v.", expectedParams, got)
--	}
--}
--
--func TestWaitContainer(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	status, err := client.WaitContainer(id)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if status != 56 {
--		t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
--	}
--}
--
--func TestWaitContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	_, err := client.WaitContainer("a2334")
--	expected := &NoSuchContainer{ID: "a2334"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestCommitContainer(t *testing.T) {
--	response := `{"Id":"596069db4bf5"}`
--	client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK})
--	id := "596069db4bf5"
--	image, err := client.CommitContainer(CommitContainerOptions{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if image.ID != id {
--		t.Errorf("CommitContainer: Wrong image id. Want %q. Got %q.", id, image.ID)
--	}
--}
--
--func TestCommitContainerParams(t *testing.T) {
--	cfg := Config{Memory: 67108864}
--	json, _ := json.Marshal(&cfg)
--	var tests = []struct {
--		input  CommitContainerOptions
--		params map[string][]string
--		body   []byte
--	}{
--		{CommitContainerOptions{}, map[string][]string{}, nil},
--		{CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil},
--		{
--			CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"},
--			map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}},
--			nil,
--		},
--		{
--			CommitContainerOptions{Container: "44c004db4b17", Run: &cfg},
--			map[string][]string{"container": {"44c004db4b17"}},
--			json,
--		},
--	}
--	fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	u, _ := url.Parse(client.getURL("/commit"))
--	for _, tt := range tests {
--		client.CommitContainer(tt.input)
--		got := map[string][]string(fakeRT.requests[0].URL.Query())
--		if !reflect.DeepEqual(got, tt.params) {
--			t.Errorf("Expected %#v, got %#v.", tt.params, got)
--		}
--		if path := fakeRT.requests[0].URL.Path; path != u.Path {
--			t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path)
--		}
--		if meth := fakeRT.requests[0].Method; meth != "POST" {
--			t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth)
--		}
--		if tt.body != nil {
--			if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil {
--				if !bytes.Equal(requestBody, tt.body) {
--					t.Errorf("Expected body %#v, got %#v", tt.body, requestBody)
--				}
--			} else {
--				t.Errorf("Error reading request body: %#v", err)
--			}
--		}
--		fakeRT.Reset()
--	}
--}
--
--func TestCommitContainerFailure(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError})
--	_, err := client.CommitContainer(CommitContainerOptions{})
--	if err == nil {
--		t.Error("Expected non-nil error, got <nil>.")
--	}
--}
--
--func TestCommitContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	_, err := client.CommitContainer(CommitContainerOptions{})
--	expected := &NoSuchContainer{ID: ""}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestAttachToContainerLogs(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19})
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var buf bytes.Buffer
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: &buf,
--		Stdout:       true,
--		Stderr:       true,
--		Logs:         true,
--	}
--	err := client.AttachToContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "something happened!"
--	if buf.String() != expected {
--		t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--	if req.Method != "POST" {
--		t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/containers/a123456/attach"))
--	if req.URL.Path != u.Path {
--		t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--	expectedQs := map[string][]string{
--		"logs":   {"1"},
--		"stdout": {"1"},
--		"stderr": {"1"},
--	}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expectedQs) {
--		t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got)
--	}
--}
--
--func TestAttachToContainer(t *testing.T) {
--	var reader = strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stdout, stderr bytes.Buffer
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: &stdout,
--		ErrorStream:  &stderr,
--		InputStream:  reader,
--		Stdin:        true,
--		Stdout:       true,
--		Stderr:       true,
--		Stream:       true,
--		RawTerminal:  true,
--	}
--	err := client.AttachToContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := map[string][]string{
--		"stdin":  {"1"},
--		"stdout": {"1"},
--		"stderr": {"1"},
--		"stream": {"1"},
--	}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestAttachToContainerSentinel(t *testing.T) {
--	var reader = strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stdout, stderr bytes.Buffer
--	success := make(chan struct{})
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: &stdout,
--		ErrorStream:  &stderr,
--		InputStream:  reader,
--		Stdin:        true,
--		Stdout:       true,
--		Stderr:       true,
--		Stream:       true,
--		RawTerminal:  true,
--		Success:      success,
--	}
--	go client.AttachToContainer(opts)
--	success <- <-success
--}
--
--func TestAttachToContainerNilStdout(t *testing.T) {
--	var reader = strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stderr bytes.Buffer
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: nil,
--		ErrorStream:  &stderr,
--		InputStream:  reader,
--		Stdin:        true,
--		Stdout:       true,
--		Stderr:       true,
--		Stream:       true,
--		RawTerminal:  true,
--	}
--	err := client.AttachToContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestAttachToContainerNilStderr(t *testing.T) {
--	var reader = strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stdout bytes.Buffer
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: &stdout,
--		InputStream:  reader,
--		Stdin:        true,
--		Stdout:       true,
--		Stderr:       true,
--		Stream:       true,
--		RawTerminal:  true,
--	}
--	err := client.AttachToContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestAttachToContainerRawTerminalFalse(t *testing.T) {
--	input := strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		prefix := []byte{1, 0, 0, 0, 0, 0, 0, 5}
--		w.Write(prefix)
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stdout, stderr bytes.Buffer
--	opts := AttachToContainerOptions{
--		Container:    "a123456",
--		OutputStream: &stdout,
--		ErrorStream:  &stderr,
--		InputStream:  input,
--		Stdin:        true,
--		Stdout:       true,
--		Stderr:       true,
--		Stream:       true,
--		RawTerminal:  false,
--	}
--	err := client.AttachToContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := map[string][]string{
--		"stdin":  {"1"},
--		"stdout": {"1"},
--		"stderr": {"1"},
--		"stream": {"1"},
--	}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--	t.Log(stderr.String())
--	t.Log(stdout.String())
--	if stdout.String() != "hello" {
--		t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String())
--	}
--}
--
--func TestAttachToContainerWithoutContainer(t *testing.T) {
--	var client Client
--	err := client.AttachToContainer(AttachToContainerOptions{})
--	expected := &NoSuchContainer{ID: ""}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestLogs(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19}
--		w.Write(prefix)
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var buf bytes.Buffer
--	opts := LogsOptions{
--		Container:    "a123456",
--		OutputStream: &buf,
--		Follow:       true,
--		Stdout:       true,
--		Stderr:       true,
--		Timestamps:   true,
--	}
--	err := client.Logs(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "something happened!"
--	if buf.String() != expected {
--		t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--	if req.Method != "GET" {
--		t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/containers/a123456/logs"))
--	if req.URL.Path != u.Path {
--		t.Errorf("Logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--	expectedQs := map[string][]string{
--		"follow":     {"1"},
--		"stdout":     {"1"},
--		"stderr":     {"1"},
--		"timestamps": {"1"},
--		"tail":       {"all"},
--	}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expectedQs) {
--		t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got)
--	}
--}
--
--func TestLogsNilStdoutDoesntFail(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19}
--		w.Write(prefix)
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	opts := LogsOptions{
--		Container:  "a123456",
--		Follow:     true,
--		Stdout:     true,
--		Stderr:     true,
--		Timestamps: true,
--	}
--	err := client.Logs(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestLogsNilStderrDoesntFail(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19}
--		w.Write(prefix)
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	opts := LogsOptions{
--		Container:  "a123456",
--		Follow:     true,
--		Stdout:     true,
--		Stderr:     true,
--		Timestamps: true,
--	}
--	err := client.Logs(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestLogsSpecifyingTail(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19}
--		w.Write(prefix)
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var buf bytes.Buffer
--	opts := LogsOptions{
--		Container:    "a123456",
--		OutputStream: &buf,
--		Follow:       true,
--		Stdout:       true,
--		Stderr:       true,
--		Timestamps:   true,
--		Tail:         "100",
--	}
--	err := client.Logs(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "something happened!"
--	if buf.String() != expected {
--		t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--	if req.Method != "GET" {
--		t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/containers/a123456/logs"))
--	if req.URL.Path != u.Path {
--		t.Errorf("Logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--	expectedQs := map[string][]string{
--		"follow":     {"1"},
--		"stdout":     {"1"},
--		"stderr":     {"1"},
--		"timestamps": {"1"},
--		"tail":       {"100"},
--	}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expectedQs) {
--		t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got)
--	}
--}
--
--func TestLogsRawTerminal(t *testing.T) {
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte("something happened!"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var buf bytes.Buffer
--	opts := LogsOptions{
--		Container:    "a123456",
--		OutputStream: &buf,
--		Follow:       true,
--		RawTerminal:  true,
--		Stdout:       true,
--		Stderr:       true,
--		Timestamps:   true,
--		Tail:         "100",
--	}
--	err := client.Logs(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "something happened!"
--	if buf.String() != expected {
--		t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--}
--
--func TestLogsNoContainer(t *testing.T) {
--	var client Client
--	err := client.Logs(LogsOptions{})
--	expected := &NoSuchContainer{ID: ""}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("Logs: wrong error. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestNoSuchContainerError(t *testing.T) {
--	var err = &NoSuchContainer{ID: "i345"}
--	expected := "No such container: i345"
--	if got := err.Error(); got != expected {
--		t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got)
--	}
--}
--
--func TestExportContainer(t *testing.T) {
--	content := "exported container tar content"
--	out := stdoutMock{bytes.NewBufferString(content)}
--	client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
--	opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out}
--	err := client.ExportContainer(opts)
--	if err != nil {
--		t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error())
--	}
--	if out.String() != content {
--		t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
--	}
--}
--
--func TestExportContainerViaUnixSocket(t *testing.T) {
--	if runtime.GOOS != "darwin" {
--		t.Skipf("skipping test on %q", runtime.GOOS)
--	}
--	content := "exported container tar content"
--	var buf []byte
--	out := bytes.NewBuffer(buf)
--	tempSocket := tempfile("export_socket")
--	defer os.Remove(tempSocket)
--	endpoint := "unix://" + tempSocket
--	u, _ := parseEndpoint(endpoint)
--	client := Client{
--		HTTPClient:             http.DefaultClient,
--		endpoint:               endpoint,
--		endpointURL:            u,
--		SkipServerVersionCheck: true,
--	}
--	listening := make(chan string)
--	done := make(chan int)
--	go runStreamConnServer(t, "unix", tempSocket, listening, done)
--	<-listening // wait for server to start
--	opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out}
--	err := client.ExportContainer(opts)
--	<-done // make sure server stopped
--	if err != nil {
--		t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error())
--	}
--	if out.String() != content {
--		t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
--	}
--}
--
--func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) {
--	defer close(done)
--	l, err := net.Listen(network, laddr)
--	if err != nil {
--		t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err)
--		listening <- "<nil>"
--		return
--	}
--	defer l.Close()
--	listening <- l.Addr().String()
--	c, err := l.Accept()
--	if err != nil {
--		t.Logf("Accept failed: %v", err)
--		return
--	}
--	c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content"))
--	c.Close()
--}
--
--func tempfile(filename string) string {
--	return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid())
--}
--
--func TestExportContainerNoId(t *testing.T) {
--	client := Client{}
--	out := stdoutMock{bytes.NewBufferString("")}
--	err := client.ExportContainer(ExportContainerOptions{OutputStream: out})
--	e, ok := err.(*NoSuchContainer)
--	if !ok {
--		t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", err)
--	}
--	if e.ID != "" {
--		t.Errorf("ExportContainer: wrong ID. Want %q. Got %q", "", e.ID)
--	}
--}
--
--func TestCopyFromContainer(t *testing.T) {
--	content := "File content"
--	out := stdoutMock{bytes.NewBufferString(content)}
--	client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
--	opts := CopyFromContainerOptions{
--		Container:    "a123456",
--		OutputStream: out,
--	}
--	err := client.CopyFromContainer(opts)
--	if err != nil {
--		t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error())
--	}
--	if out.String() != content {
--		t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
--	}
--}
--
--func TestCopyFromContainerEmptyContainer(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
--	err := client.CopyFromContainer(CopyFromContainerOptions{})
--	_, ok := err.(*NoSuchContainer)
--	if !ok {
--		t.Errorf("CopyFromContainer: invalid error returned. Want NoSuchContainer, got %#v.", err)
--	}
--}
--
--func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) {
--	jsonContainer := `{
--             "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
--	     "Warnings": []
--}`
--	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	config := Config{AttachStdout: true, AttachStdin: true}
--	opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config}
--	container, err := client.CreateContainer(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if container.Name != "TestCreateContainer" {
--		t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name)
--	}
--}
--
--func TestAlwaysRestart(t *testing.T) {
--	policy := AlwaysRestart()
--	if policy.Name != "always" {
--		t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name)
--	}
--	if policy.MaximumRetryCount != 0 {
--		t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount)
--	}
--}
--
--func TestRestartOnFailure(t *testing.T) {
--	const retry = 5
--	policy := RestartOnFailure(retry)
--	if policy.Name != "on-failure" {
--		t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. Got %q", retry, "on-failure", policy.Name)
--	}
--	if policy.MaximumRetryCount != retry {
--		t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount)
--	}
--}
--
--func TestNeverRestart(t *testing.T) {
--	policy := NeverRestart()
--	if policy.Name != "no" {
--		t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "no", policy.Name)
--	}
--	if policy.MaximumRetryCount != 0 {
--		t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount)
--	}
--}
--
--func TestTopContainer(t *testing.T) {
--	jsonTop := `{
--  "Processes": [
--    [
--      "ubuntu",
--      "3087",
--      "815",
--      "0",
--      "01:44",
--      "?",
--      "00:00:00",
--      "cmd1"
--    ],
--    [
--      "root",
--      "3158",
--      "3087",
--      "0",
--      "01:44",
--      "?",
--      "00:00:01",
--      "cmd2"
--    ]
--  ],
--  "Titles": [
--    "UID",
--    "PID",
--    "PPID",
--    "C",
--    "STIME",
--    "TTY",
--    "TIME",
--    "CMD"
--  ]
--}`
--	var expected TopResult
--	err := json.Unmarshal([]byte(jsonTop), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	id := "4fa6e0f0"
--	fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	processes, err := client.TopContainer(id, "")
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(processes, expected) {
--		t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes)
--	}
--	if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 ||
--		processes.Processes[0][7] != "cmd1" {
--		t.Errorf("TopContainer: expected process list to include cmd1. Got %#v.", processes)
--	}
--	expectedURI := "/containers/" + id + "/top"
--	if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) {
--		t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String())
--	}
--}
--
--func TestTopContainerNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
--	_, err := client.TopContainer("abef348", "")
--	expected := &NoSuchContainer{ID: "abef348"}
--	if !reflect.DeepEqual(err, expected) {
--		t.Errorf("TopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
--	}
--}
--
--func TestTopContainerWithPsArgs(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound}
--	client := newTestClient(fakeRT)
--	client.TopContainer("abef348", "aux")
--	expectedURI := "/containers/abef348/top?ps_args=aux"
--	if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) {
--		t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String())
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
-deleted file mode 100644
-index c54b0b0..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
-+++ /dev/null
-@@ -1,168 +0,0 @@
--// Copyright 2014 Docker authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the DOCKER-LICENSE file.
--
--package docker
--
--import (
--	"encoding/json"
--	"fmt"
--	"io"
--	"strconv"
--	"strings"
--)
--
--// Env represents a list of key-value pairs in the form KEY=VALUE.
--type Env []string
--
--// Get returns the string value of the given key.
--func (env *Env) Get(key string) (value string) {
--	return env.Map()[key]
--}
--
--// Exists checks whether the given key is defined in the internal Env
--// representation.
--func (env *Env) Exists(key string) bool {
--	_, exists := env.Map()[key]
--	return exists
--}
--
--// GetBool returns a boolean representation of the given key. The key is false
--// whenever its value is 0, no, false, none or an empty string. Any other value
--// will be interpreted as true.
--func (env *Env) GetBool(key string) (value bool) {
--	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
--	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
--		return false
--	}
--	return true
--}
--
--// SetBool defines a boolean value to the given key.
--func (env *Env) SetBool(key string, value bool) {
--	if value {
--		env.Set(key, "1")
--	} else {
--		env.Set(key, "0")
--	}
--}
--
--// GetInt returns the value of the provided key, converted to int.
--//
--// If the value cannot be represented as an integer, it returns -1.
--func (env *Env) GetInt(key string) int {
--	return int(env.GetInt64(key))
--}
--
--// SetInt defines an integer value to the given key.
--func (env *Env) SetInt(key string, value int) {
--	env.Set(key, strconv.Itoa(value))
--}
--
--// GetInt64 returns the value of the provided key, converted to int64.
--//
--// If the value cannot be represented as an integer, it returns -1.
--func (env *Env) GetInt64(key string) int64 {
--	s := strings.Trim(env.Get(key), " \t")
--	val, err := strconv.ParseInt(s, 10, 64)
--	if err != nil {
--		return -1
--	}
--	return val
--}
--
--// SetInt64 defines an integer (64-bit wide) value to the given key.
--func (env *Env) SetInt64(key string, value int64) {
--	env.Set(key, strconv.FormatInt(value, 10))
--}
--
--// GetJSON unmarshals the value of the provided key in the provided iface.
--//
--// iface is a value that can be provided to the json.Unmarshal function.
--func (env *Env) GetJSON(key string, iface interface{}) error {
--	sval := env.Get(key)
--	if sval == "" {
--		return nil
--	}
--	return json.Unmarshal([]byte(sval), iface)
--}
--
--// SetJSON marshals the given value to JSON format and stores it using the
--// provided key.
--func (env *Env) SetJSON(key string, value interface{}) error {
--	sval, err := json.Marshal(value)
--	if err != nil {
--		return err
--	}
--	env.Set(key, string(sval))
--	return nil
--}
--
--// GetList returns a list of strings matching the provided key. It handles the
--// list as a JSON representation of a list of strings.
--//
--// If the given key matches to a single string, it will return a list
--// containing only the value that matches the key.
--func (env *Env) GetList(key string) []string {
--	sval := env.Get(key)
--	if sval == "" {
--		return nil
--	}
--	var l []string
--	if err := json.Unmarshal([]byte(sval), &l); err != nil {
--		l = append(l, sval)
--	}
--	return l
--}
--
--// SetList stores the given list in the provided key, after serializing it to
--// JSON format.
--func (env *Env) SetList(key string, value []string) error {
--	return env.SetJSON(key, value)
--}
--
--// Set defines the value of a key to the given string.
--func (env *Env) Set(key, value string) {
--	*env = append(*env, key+"="+value)
--}
--
--// Decode decodes `src` as a json dictionary, and adds each decoded key-value
--// pair to the environment.
--//
--// If `src` cannot be decoded as a json dictionary, an error is returned.
--func (env *Env) Decode(src io.Reader) error {
--	m := make(map[string]interface{})
--	if err := json.NewDecoder(src).Decode(&m); err != nil {
--		return err
--	}
--	for k, v := range m {
--		env.SetAuto(k, v)
--	}
--	return nil
--}
--
--// SetAuto will try to define the Set* method to call based on the given value.
--func (env *Env) SetAuto(key string, value interface{}) {
--	if fval, ok := value.(float64); ok {
--		env.SetInt64(key, int64(fval))
--	} else if sval, ok := value.(string); ok {
--		env.Set(key, sval)
--	} else if val, err := json.Marshal(value); err == nil {
--		env.Set(key, string(val))
--	} else {
--		env.Set(key, fmt.Sprintf("%v", value))
--	}
--}
--
--// Map returns the map representation of the env.
--func (env *Env) Map() map[string]string {
--	if len(*env) == 0 {
--		return nil
--	}
--	m := make(map[string]string)
--	for _, kv := range *env {
--		parts := strings.SplitN(kv, "=", 2)
--		m[parts[0]] = parts[1]
--	}
--	return m
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go
-deleted file mode 100644
-index 6d03d7b..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go
-+++ /dev/null
-@@ -1,349 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the DOCKER-LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"errors"
--	"reflect"
--	"sort"
--	"testing"
--)
--
--func TestGet(t *testing.T) {
--	var tests = []struct {
--		input    []string
--		query    string
--		expected string
--	}{
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"},
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"},
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""},
--		{[]string{"WAT="}, "WAT", ""},
--	}
--	for _, tt := range tests {
--		env := Env(tt.input)
--		got := env.Get(tt.query)
--		if got != tt.expected {
--			t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got)
--		}
--	}
--}
--
--func TestExists(t *testing.T) {
--	var tests = []struct {
--		input    []string
--		query    string
--		expected bool
--	}{
--		{[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true},
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true},
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false},
--	}
--	for _, tt := range tests {
--		env := Env(tt.input)
--		got := env.Exists(tt.query)
--		if got != tt.expected {
--			t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got)
--		}
--	}
--}
--
--func TestGetBool(t *testing.T) {
--	var tests = []struct {
--		input    string
--		expected bool
--	}{
--		{"EMPTY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false},
--		{"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true},
--		{"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false},
--	}
--	env := Env([]string{
--		"EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false",
--		"NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin",
--		"ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t",
--	})
--	for _, tt := range tests {
--		got := env.GetBool(tt.input)
--		if got != tt.expected {
--			t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestSetBool(t *testing.T) {
--	var tests = []struct {
--		input    bool
--		expected string
--	}{
--		{true, "1"}, {false, "0"},
--	}
--	for _, tt := range tests {
--		var env Env
--		env.SetBool("SOME", tt.input)
--		if got := env.Get("SOME"); got != tt.expected {
--			t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestGetInt(t *testing.T) {
--	var tests = []struct {
--		input    string
--		expected int
--	}{
--		{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
--	}
--	env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
--	for _, tt := range tests {
--		got := env.GetInt(tt.input)
--		if got != tt.expected {
--			t.Errorf("Env.GetInt(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestSetInt(t *testing.T) {
--	var tests = []struct {
--		input    int
--		expected string
--	}{
--		{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
--		{0, "0"}, {-34, "-34"},
--	}
--	for _, tt := range tests {
--		var env Env
--		env.SetInt("SOME", tt.input)
--		if got := env.Get("SOME"); got != tt.expected {
--			t.Errorf("Env.SetInt(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestGetInt64(t *testing.T) {
--	var tests = []struct {
--		input    string
--		expected int64
--	}{
--		{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
--	}
--	env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
--	for _, tt := range tests {
--		got := env.GetInt64(tt.input)
--		if got != tt.expected {
--			t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestSetInt64(t *testing.T) {
--	var tests = []struct {
--		input    int64
--		expected string
--	}{
--		{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
--		{0, "0"}, {-34, "-34"},
--	}
--	for _, tt := range tests {
--		var env Env
--		env.SetInt64("SOME", tt.input)
--		if got := env.Get("SOME"); got != tt.expected {
--			t.Errorf("Env.SetInt64(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestGetJSON(t *testing.T) {
--	var p struct {
--		Name string `json:"name"`
--		Age  int    `json:"age"`
--	}
--	var env Env
--	env.Set("person", `{"name":"Gopher","age":5}`)
--	err := env.GetJSON("person", &p)
--	if err != nil {
--		t.Error(err)
--	}
--	if p.Name != "Gopher" {
--		t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name)
--	}
--	if p.Age != 5 {
--		t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age)
--	}
--}
--
--func TestGetJSONAbsent(t *testing.T) {
--	var l []string
--	var env Env
--	err := env.GetJSON("person", &l)
--	if err != nil {
--		t.Error(err)
--	}
--	if l != nil {
--		t.Errorf("Env.GetJSON(): get unexpected list %v", l)
--	}
--}
--
--func TestGetJSONFailure(t *testing.T) {
--	var p []string
--	var env Env
--	env.Set("list-person", `{"name":"Gopher","age":5}`)
--	err := env.GetJSON("list-person", &p)
--	if err == nil {
--		t.Errorf("Env.GetJSON(%q): got unexpected <nil> error.", "list-person")
--	}
--}
--
--func TestSetJSON(t *testing.T) {
--	var p1 = struct {
--		Name string `json:"name"`
--		Age  int    `json:"age"`
--	}{Name: "Gopher", Age: 5}
--	var env Env
--	err := env.SetJSON("person", p1)
--	if err != nil {
--		t.Error(err)
--	}
--	var p2 struct {
--		Name string `json:"name"`
--		Age  int    `json:"age"`
--	}
--	err = env.GetJSON("person", &p2)
--	if err != nil {
--		t.Error(err)
--	}
--	if !reflect.DeepEqual(p1, p2) {
--		t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2)
--	}
--}
--
--func TestSetJSONFailure(t *testing.T) {
--	var env Env
--	err := env.SetJSON("person", unmarshable{})
--	if err == nil {
--		t.Error("Env.SetJSON(): got unexpected <nil> error")
--	}
--	if env.Exists("person") {
--		t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person")
--	}
--}
--
--func TestGetList(t *testing.T) {
--	var tests = []struct {
--		input    string
--		expected []string
--	}{
--		{"WAT=wat", []string{"wat"}},
--		{`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}},
--		{"WAT=", nil},
--	}
--	for _, tt := range tests {
--		env := Env([]string{tt.input})
--		got := env.GetList("WAT")
--		if !reflect.DeepEqual(got, tt.expected) {
--			t.Errorf("Env.GetList(%q): wrong result. Want %v. Got %v", "WAT", tt.expected, got)
--		}
--	}
--}
--
--func TestSetList(t *testing.T) {
--	list := []string{"a", "b", "c"}
--	var env Env
--	env.SetList("SOME", list)
--	if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) {
--		t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got)
--	}
--}
--
--func TestSet(t *testing.T) {
--	var env Env
--	env.Set("PATH", "/home/bin:/bin")
--	env.Set("SOMETHING", "/usr/bin")
--	env.Set("PATH", "/bin")
--	if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected {
--		t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
--	}
--	if expected, got := "/bin", env.Get("PATH"); got != expected {
--		t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
--	}
--}
--
--func TestDecode(t *testing.T) {
--	var tests = []struct {
--		input       string
--		expectedOut []string
--		expectedErr string
--	}{
--		{
--			`{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`,
--			[]string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`},
--			"",
--		},
--		{"}}", nil, "invalid character '}' looking for beginning of value"},
--		{`{}`, nil, ""},
--	}
--	for _, tt := range tests {
--		var env Env
--		err := env.Decode(bytes.NewBufferString(tt.input))
--		if tt.expectedErr == "" {
--			if err != nil {
--				t.Error(err)
--			}
--		} else if tt.expectedErr != err.Error() {
--			t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err)
--		}
--		got := []string(env)
--		sort.Strings(got)
--		sort.Strings(tt.expectedOut)
--		if !reflect.DeepEqual(got, tt.expectedOut) {
--			t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got)
--		}
--	}
--}
--
--func TestSetAuto(t *testing.T) {
--	buf := bytes.NewBufferString("oi")
--	var tests = []struct {
--		input    interface{}
--		expected string
--	}{
--		{10, "10"},
--		{10.3, "10"},
--		{"oi", "oi"},
--		{buf, "{}"},
--		{unmarshable{}, "{}"},
--	}
--	for _, tt := range tests {
--		var env Env
--		env.SetAuto("SOME", tt.input)
--		if got := env.Get("SOME"); got != tt.expected {
--			t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
--		}
--	}
--}
--
--func TestMap(t *testing.T) {
--	var tests = []struct {
--		input    []string
--		expected map[string]string
--	}{
--		{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}},
--		{nil, nil},
--	}
--	for _, tt := range tests {
--		env := Env(tt.input)
--		got := env.Map()
--		if !reflect.DeepEqual(got, tt.expected) {
--			t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got)
--		}
--	}
--}
--
--type unmarshable struct {
--}
--
--func (unmarshable) MarshalJSON() ([]byte, error) {
--	return nil, errors.New("cannot marshal")
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
-deleted file mode 100644
-index 7c055c5..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
-+++ /dev/null
-@@ -1,309 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"crypto/tls"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"math"
--	"net"
--	"net/http"
--	"net/http/httputil"
--	"sync"
--	"sync/atomic"
--	"time"
--)
--
--// APIEvents represents an event returned by the API.
--type APIEvents struct {
--	Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
--	ID     string `json:"ID,omitempty" yaml:"ID,omitempty"`
--	From   string `json:"From,omitempty" yaml:"From,omitempty"`
--	Time   int64  `json:"Time,omitempty" yaml:"Time,omitempty"`
--}
--
--type eventMonitoringState struct {
--	sync.RWMutex
--	sync.WaitGroup
--	enabled   bool
--	lastSeen  *int64
--	C         chan *APIEvents
--	errC      chan error
--	listeners []chan<- *APIEvents
--}
--
--const (
--	maxMonitorConnRetries = 5
--	retryInitialWaitTime  = 10.
--)
--
--var (
--	// ErrNoListeners is the error returned when no listeners are available
--	// to receive an event.
--	ErrNoListeners = errors.New("no listeners present to receive event")
--
--	// ErrListenerAlreadyExists is the error returned when the listener already
--	// exists.
--	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
--
--	// EOFEvent is sent when the event listener receives an EOF error.
--	EOFEvent = &APIEvents{
--		Status: "EOF",
--	}
--)
--
--// AddEventListener adds a new listener to container events in the Docker API.
--//
--// The parameter is a channel through which events will be sent.
--func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
--	var err error
--	if !c.eventMonitor.isEnabled() {
--		err = c.eventMonitor.enableEventMonitoring(c)
--		if err != nil {
--			return err
--		}
--	}
--	err = c.eventMonitor.addListener(listener)
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// RemoveEventListener removes a listener from the monitor.
--func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
--	err := c.eventMonitor.removeListener(listener)
--	if err != nil {
--		return err
--	}
--	if len(c.eventMonitor.listeners) == 0 {
--		err = c.eventMonitor.disableEventMonitoring()
--		if err != nil {
--			return err
--		}
--	}
--	return nil
--}
--
--func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
--	eventState.Lock()
--	defer eventState.Unlock()
--	if listenerExists(listener, &eventState.listeners) {
--		return ErrListenerAlreadyExists
--	}
--	eventState.Add(1)
--	eventState.listeners = append(eventState.listeners, listener)
--	return nil
--}
--
--func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
--	eventState.Lock()
--	defer eventState.Unlock()
--	if listenerExists(listener, &eventState.listeners) {
--		var newListeners []chan<- *APIEvents
--		for _, l := range eventState.listeners {
--			if l != listener {
--				newListeners = append(newListeners, l)
--			}
--		}
--		eventState.listeners = newListeners
--		eventState.Add(-1)
--	}
--	return nil
--}
--
--func (eventState *eventMonitoringState) closeListeners() {
--	eventState.Lock()
--	defer eventState.Unlock()
--	for _, l := range eventState.listeners {
--		close(l)
--		eventState.Add(-1)
--	}
--	eventState.listeners = nil
--}
--
--func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
--	for _, b := range *list {
--		if b == a {
--			return true
--		}
--	}
--	return false
--}
--
--func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
--	eventState.Lock()
--	defer eventState.Unlock()
--	if !eventState.enabled {
--		eventState.enabled = true
--		var lastSeenDefault = int64(0)
--		eventState.lastSeen = &lastSeenDefault
--		eventState.C = make(chan *APIEvents, 100)
--		eventState.errC = make(chan error, 1)
--		go eventState.monitorEvents(c)
--	}
--	return nil
--}
--
--func (eventState *eventMonitoringState) disableEventMonitoring() error {
--	eventState.Wait()
--	eventState.Lock()
--	defer eventState.Unlock()
--	if eventState.enabled {
--		eventState.enabled = false
--		close(eventState.C)
--		close(eventState.errC)
--	}
--	return nil
--}
--
--func (eventState *eventMonitoringState) monitorEvents(c *Client) {
--	var err error
--	for eventState.noListeners() {
--		time.Sleep(10 * time.Millisecond)
--	}
--	if err = eventState.connectWithRetry(c); err != nil {
--		eventState.terminate()
--	}
--	for eventState.isEnabled() {
--		timeout := time.After(100 * time.Millisecond)
--		select {
--		case ev, ok := <-eventState.C:
--			if !ok {
--				return
--			}
--			if ev == EOFEvent {
--				eventState.closeListeners()
--				eventState.terminate()
--				return
--			}
--			go eventState.sendEvent(ev)
--			go eventState.updateLastSeen(ev)
--		case err = <-eventState.errC:
--			if err == ErrNoListeners {
--				eventState.terminate()
--				return
--			} else if err != nil {
--				defer func() { go eventState.monitorEvents(c) }()
--				return
--			}
--		case <-timeout:
--			continue
--		}
--	}
--}
--
--func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
--	var retries int
--	var err error
--	for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
--		waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
--		time.Sleep(time.Duration(waitTime) * time.Millisecond)
--		err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
--	}
--	return err
--}
--
--func (eventState *eventMonitoringState) noListeners() bool {
--	eventState.RLock()
--	defer eventState.RUnlock()
--	return len(eventState.listeners) == 0
--}
--
--func (eventState *eventMonitoringState) isEnabled() bool {
--	eventState.RLock()
--	defer eventState.RUnlock()
--	return eventState.enabled
--}
--
--func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
--	eventState.RLock()
--	defer eventState.RUnlock()
--	eventState.Add(1)
--	defer eventState.Done()
--	if eventState.isEnabled() {
--		if eventState.noListeners() {
--			eventState.errC <- ErrNoListeners
--			return
--		}
--
--		for _, listener := range eventState.listeners {
--			listener <- event
--		}
--	}
--}
--
--func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
--	eventState.Lock()
--	defer eventState.Unlock()
--	if atomic.LoadInt64(eventState.lastSeen) < e.Time {
--		atomic.StoreInt64(eventState.lastSeen, e.Time)
--	}
--}
--
--func (eventState *eventMonitoringState) terminate() {
--	eventState.disableEventMonitoring()
--}
--
--func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
--	uri := "/events"
--	if startTime != 0 {
--		uri += fmt.Sprintf("?since=%d", startTime)
--	}
--	protocol := c.endpointURL.Scheme
--	address := c.endpointURL.Path
--	if protocol != "unix" {
--		protocol = "tcp"
--		address = c.endpointURL.Host
--	}
--	var dial net.Conn
--	var err error
--	if c.TLSConfig == nil {
--		dial, err = net.Dial(protocol, address)
--	} else {
--		dial, err = tls.Dial(protocol, address, c.TLSConfig)
--	}
--	if err != nil {
--		return err
--	}
--	conn := httputil.NewClientConn(dial, nil)
--	req, err := http.NewRequest("GET", uri, nil)
--	if err != nil {
--		return err
--	}
--	res, err := conn.Do(req)
--	if err != nil {
--		return err
--	}
--	go func(res *http.Response, conn *httputil.ClientConn) {
--		defer conn.Close()
--		defer res.Body.Close()
--		decoder := json.NewDecoder(res.Body)
--		for {
--			var event APIEvents
--			if err = decoder.Decode(&event); err != nil {
--				if err == io.EOF || err == io.ErrUnexpectedEOF {
--					if c.eventMonitor.isEnabled() {
--						// Signal that we're exiting.
--						eventChan <- EOFEvent
--					}
--					break
--				}
--				errChan <- err
--			}
--			if event.Time == 0 {
--				continue
--			}
--			if !c.eventMonitor.isEnabled() {
--				return
--			}
--			eventChan <- &event
--		}
--	}(res, conn)
--	return nil
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go
-deleted file mode 100644
-index 0de0474..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go
-+++ /dev/null
-@@ -1,129 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bufio"
--	"crypto/tls"
--	"crypto/x509"
--	"fmt"
--	"io/ioutil"
--	"net/http"
--	"net/http/httptest"
--	"strings"
--	"testing"
--	"time"
--)
--
--func TestEventListeners(t *testing.T) {
--	testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient)
--}
--
--func TestTLSEventListeners(t *testing.T) {
--	testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server {
--		server := httptest.NewUnstartedServer(handler)
--
--		cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem")
--		if err != nil {
--			t.Fatalf("Error loading server key pair: %s", err)
--		}
--
--		caCert, err := ioutil.ReadFile("testing/data/ca.pem")
--		if err != nil {
--			t.Fatalf("Error loading ca certificate: %s", err)
--		}
--		caPool := x509.NewCertPool()
--		if !caPool.AppendCertsFromPEM(caCert) {
--			t.Fatalf("Could not add ca certificate")
--		}
--
--		server.TLS = &tls.Config{
--			Certificates: []tls.Certificate{cert},
--			RootCAs:      caPool,
--		}
--		server.StartTLS()
--		return server
--	}, func(url string) (*Client, error) {
--		return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem")
--	})
--}
--
--func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) {
--	response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
--{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
--{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
--{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
--`
--
--	var req http.Request
--	server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		rsc := bufio.NewScanner(strings.NewReader(response))
--		for rsc.Scan() {
--			w.Write([]byte(rsc.Text()))
--			w.(http.Flusher).Flush()
--			time.Sleep(10 * time.Millisecond)
--		}
--		req = *r
--	}))
--	defer server.Close()
--
--	client, err := buildClient(server.URL)
--	if err != nil {
--		t.Errorf("Failed to create client: %s", err)
--	}
--	client.SkipServerVersionCheck = true
--
--	listener := make(chan *APIEvents, 10)
--	defer func() { time.Sleep(10 * time.Millisecond); client.RemoveEventListener(listener) }()
--
--	err = client.AddEventListener(listener)
--	if err != nil {
--		t.Errorf("Failed to add event listener: %s", err)
--	}
--
--	timeout := time.After(1 * time.Second)
--	var count int
--
--	for {
--		select {
--		case msg := <-listener:
--			t.Logf("Received: %s", *msg)
--			count++
--			err = checkEvent(count, msg)
--			if err != nil {
--				t.Fatalf("Check event failed: %s", err)
--			}
--			if count == 4 {
--				return
--			}
--		case <-timeout:
--			t.Fatalf("%s timed out waiting on events", testName)
--		}
--	}
--}
--
--func checkEvent(index int, event *APIEvents) error {
--	if event.ID != "dfdf82bd3881" {
--		return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID)
--	}
--	if event.From != "base:latest" {
--		return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From)
--	}
--	var status string
--	switch index {
--	case 1:
--		status = "create"
--	case 2:
--		status = "start"
--	case 3:
--		status = "stop"
--	case 4:
--		status = "destroy"
--	}
--	if event.Status != status {
--		return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status)
--	}
--	return nil
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go
-deleted file mode 100644
-index 8c2c719..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go
-+++ /dev/null
-@@ -1,168 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker_test
--
--import (
--	"archive/tar"
--	"bytes"
--	"fmt"
--	"io"
--	"log"
--	"time"
--
--	"github.com/fsouza/go-dockerclient"
--)
--
--func ExampleClient_AttachToContainer() {
--	client, err := docker.NewClient("http://localhost:4243")
--	if err != nil {
--		log.Fatal(err)
--	}
--	client.SkipServerVersionCheck = true
--	// Reading logs from container a84849 and sending them to buf.
--	var buf bytes.Buffer
--	err = client.AttachToContainer(docker.AttachToContainerOptions{
--		Container:    "a84849",
--		OutputStream: &buf,
--		Logs:         true,
--		Stdout:       true,
--		Stderr:       true,
--	})
--	if err != nil {
--		log.Fatal(err)
--	}
--	log.Println(buf.String())
--	buf.Reset()
--	err = client.AttachToContainer(docker.AttachToContainerOptions{
--		Container:    "a84849",
--		OutputStream: &buf,
--		Stdout:       true,
--		Stream:       true,
--	})
--	if err != nil {
--		log.Fatal(err)
--	}
--	log.Println(buf.String())
--}
--
--func ExampleClient_CopyFromContainer() {
--	client, err := docker.NewClient("http://localhost:4243")
--	if err != nil {
--		log.Fatal(err)
--	}
--	cid := "a84849"
--	var buf bytes.Buffer
--	filename := "/tmp/output.txt"
--	err = client.CopyFromContainer(docker.CopyFromContainerOptions{
--		Container:    cid,
--		Resource:     filename,
--		OutputStream: &buf,
--	})
--	if err != nil {
--		log.Fatalf("Error while copying from %s: %s\n", cid, err)
--	}
--	content := new(bytes.Buffer)
--	r := bytes.NewReader(buf.Bytes())
--	tr := tar.NewReader(r)
--	tr.Next()
--	if err != nil && err != io.EOF {
--		log.Fatal(err)
--	}
--	if _, err := io.Copy(content, tr); err != nil {
--		log.Fatal(err)
--	}
--	log.Println(buf.String())
--}
--
--func ExampleClient_BuildImage() {
--	client, err := docker.NewClient("http://localhost:4243")
--	if err != nil {
--		log.Fatal(err)
--	}
--
--	t := time.Now()
--	inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
--	tr := tar.NewWriter(inputbuf)
--	tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t})
--	tr.Write([]byte("FROM base\n"))
--	tr.Close()
--	opts := docker.BuildImageOptions{
--		Name:         "test",
--		InputStream:  inputbuf,
--		OutputStream: outputbuf,
--	}
--	if err := client.BuildImage(opts); err != nil {
--		log.Fatal(err)
--	}
--}
--
--func ExampleClient_ListenEvents() {
--	client, err := docker.NewClient("http://localhost:4243")
--	if err != nil {
--		log.Fatal(err)
--	}
--
--	listener := make(chan *docker.APIEvents)
--	err = client.AddEventListener(listener)
--	if err != nil {
--		log.Fatal(err)
--	}
--
--	defer func() {
--
--		err = client.RemoveEventListener(listener)
--		if err != nil {
--			log.Fatal(err)
--		}
--
--	}()
--
--	timeout := time.After(1 * time.Second)
--
--	for {
--		select {
--		case msg := <-listener:
--			log.Println(msg)
--		case <-timeout:
--			break
--		}
--	}
--
--}
--
--func ExampleEnv_Map() {
--	e := docker.Env([]string{"A=1", "B=2", "C=3"})
--	envs := e.Map()
--	for k, v := range envs {
--		fmt.Printf("%s=%q\n", k, v)
--	}
--}
--
--func ExampleEnv_SetJSON() {
--	type Person struct {
--		Name string
--		Age  int
--	}
--	p := Person{Name: "Gopher", Age: 4}
--	var e docker.Env
--	err := e.SetJSON("person", p)
--	if err != nil {
--		log.Fatal(err)
--	}
--}
--
--func ExampleEnv_GetJSON() {
--	type Person struct {
--		Name string
--		Age  int
--	}
--	p := Person{Name: "Gopher", Age: 4}
--	var e docker.Env
--	e.Set("person", `{"name":"Gopher","age":4}`)
--	err := e.GetJSON("person", &p)
--	if err != nil {
--		log.Fatal(err)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
-deleted file mode 100644
-index 9ce7b44..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
-+++ /dev/null
-@@ -1,129 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Docs can currently be found at https://github.com/docker/docker/blob/master/docs/sources/reference/api/docker_remote_api_v1.15.md#exec-create
--
--package docker
--
--import (
--	"encoding/json"
--	"fmt"
--	"io"
--	"net/http"
--	"net/url"
--	"strconv"
--)
--
--// CreateExecOptions specify parameters to the CreateExecContainer function.
--//
--// See http://goo.gl/8izrzI for more details
--type CreateExecOptions struct {
--	AttachStdin  bool     `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
--	AttachStdout bool     `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
--	AttachStderr bool     `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
--	Tty          bool     `json:"Tty,omitempty" yaml:"Tty,omitempty"`
--	Cmd          []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
--	Container    string   `json:"Container,omitempty" yaml:"Container,omitempty"`
--}
--
--// StartExecOptions specify parameters to the StartExecContainer function.
--//
--// See http://goo.gl/JW8Lxl for more details
--type StartExecOptions struct {
--	Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
--
--	Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
--
--	InputStream  io.Reader `qs:"-"`
--	OutputStream io.Writer `qs:"-"`
--	ErrorStream  io.Writer `qs:"-"`
--
--	// Use raw terminal? Usually true when the container contains a TTY.
--	RawTerminal bool `qs:"-"`
--
--	// If set, after a successful connect, a sentinel will be sent and then the
--	// client will block on receive before continuing.
--	//
--	// It must be an unbuffered channel. Using a buffered channel can lead
--	// to unexpected behavior.
--	Success chan struct{} `json:"-"`
--}
--
--// Exec is the type representing a `docker exec` instance and containing the
--// instance ID
--type Exec struct {
--	ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
--}
--
--// CreateExec sets up an exec instance in a running container `id`, returning the exec
--// instance, or an error in case of failure.
--//
--// See http://goo.gl/8izrzI for more details
--func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
--	path := fmt.Sprintf("/containers/%s/exec", opts.Container)
--	body, status, err := c.do("POST", path, opts)
--	if status == http.StatusNotFound {
--		return nil, &NoSuchContainer{ID: opts.Container}
--	}
--	if err != nil {
--		return nil, err
--	}
--	var exec Exec
--	err = json.Unmarshal(body, &exec)
--	if err != nil {
--		return nil, err
--	}
--
--	return &exec, nil
--}
--
--// StartExec starts a previously set up exec instance id. If opts.Detach is
--// true, it returns after starting the exec command. Otherwise, it sets up an
--// interactive session with the exec command.
--//
--// See http://goo.gl/JW8Lxl for more details
--func (c *Client) StartExec(id string, opts StartExecOptions) error {
--	if id == "" {
--		return &NoSuchExec{ID: id}
--	}
--
--	path := fmt.Sprintf("/exec/%s/start", id)
--
--	if opts.Detach {
--		_, status, err := c.do("POST", path, opts)
--		if status == http.StatusNotFound {
--			return &NoSuchExec{ID: id}
--		}
--		if err != nil {
--			return err
--		}
--		return nil
--	}
--
--	return c.hijack("POST", path, opts.Success, opts.RawTerminal, opts.InputStream, opts.ErrorStream, opts.OutputStream, opts)
--}
--
--// ResizeExecTTY resizes the tty session used by the exec command id. This API
--// is valid only if Tty was specified as part of creating and starting the exec
--// command.
--//
--// See http://goo.gl/YDSx1f for more details
--func (c *Client) ResizeExecTTY(id string, height, width int) error {
--	params := make(url.Values)
--	params.Set("h", strconv.Itoa(height))
--	params.Set("w", strconv.Itoa(width))
--
--	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
--	_, _, err := c.do("POST", path, nil)
--	return err
--}
--
--// NoSuchExec is the error returned when a given exec instance does not exist.
--type NoSuchExec struct {
--	ID string
--}
--
--func (err *NoSuchExec) Error() string {
--	return "No such exec instance: " + err.ID
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
-deleted file mode 100644
-index 31de162..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
-+++ /dev/null
-@@ -1,128 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/json"
--	"net/http"
--	"net/http/httptest"
--	"net/url"
--	"strings"
--	"testing"
--)
--
--func TestExecCreate(t *testing.T) {
--	jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}`
--	var expected struct{ ID string }
--	err := json.Unmarshal([]byte(jsonContainer), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	config := CreateExecOptions{
--		Container:    "test",
--		AttachStdin:  true,
--		AttachStdout: true,
--		AttachStderr: false,
--		Tty:          false,
--		Cmd:          []string{"touch", "/tmp/file"},
--	}
--	execObj, err := client.CreateExec(config)
--	if err != nil {
--		t.Fatal(err)
--	}
--	expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	if execObj.ID != expectedID {
--		t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/containers/test/exec"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
--	}
--	var gotBody struct{ ID string }
--	err = json.NewDecoder(req.Body).Decode(&gotBody)
--	if err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestExecStartDetached(t *testing.T) {
--	execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	fakeRT := &FakeRoundTripper{status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	config := StartExecOptions{
--		Detach: true,
--	}
--	err := client.StartExec(execID, config)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start"))
--	if gotPath := req.URL.Path; gotPath != expectedURL.Path {
--		t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
--	}
--	t.Log(req.Body)
--	var gotBody struct{ Detach bool }
--	err = json.NewDecoder(req.Body).Decode(&gotBody)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !gotBody.Detach {
--		t.Fatal("Expected Detach in StartExecOptions to be true")
--	}
--}
--
--func TestExecStartAndAttach(t *testing.T) {
--	var reader = strings.NewReader("send value")
--	var req http.Request
--	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
--		w.Write([]byte("hello"))
--		req = *r
--	}))
--	defer server.Close()
--	client, _ := NewClient(server.URL)
--	client.SkipServerVersionCheck = true
--	var stdout, stderr bytes.Buffer
--	success := make(chan struct{})
--	execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	opts := StartExecOptions{
--		OutputStream: &stdout,
--		ErrorStream:  &stderr,
--		InputStream:  reader,
--		RawTerminal:  true,
--		Success:      success,
--	}
--	go client.StartExec(execID, opts)
--	<-success
--}
--
--func TestExecResize(t *testing.T) {
--	execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
--	fakeRT := &FakeRoundTripper{status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	err := client.ResizeExecTTY(execID, 10, 20)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
--	}
--	expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20"))
--	if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() {
--		t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go
-deleted file mode 100644
-index 3d55155..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go
-+++ /dev/null
-@@ -1,458 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/base64"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"os"
--	"time"
--)
--
--// APIImages represent an image returned in the ListImages call.
--type APIImages struct {
--	ID          string   `json:"Id" yaml:"Id"`
--	RepoTags    []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
--	Created     int64    `json:"Created,omitempty" yaml:"Created,omitempty"`
--	Size        int64    `json:"Size,omitempty" yaml:"Size,omitempty"`
--	VirtualSize int64    `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
--	ParentID    string   `json:"ParentId,omitempty" yaml:"ParentId,omitempty"`
--}
--
--// Image is the type representing a docker image and its various properties
--type Image struct {
--	ID              string    `json:"Id" yaml:"Id"`
--	Parent          string    `json:"Parent,omitempty" yaml:"Parent,omitempty"`
--	Comment         string    `json:"Comment,omitempty" yaml:"Comment,omitempty"`
--	Created         time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
--	Container       string    `json:"Container,omitempty" yaml:"Container,omitempty"`
--	ContainerConfig Config    `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"`
--	DockerVersion   string    `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"`
--	Author          string    `json:"Author,omitempty" yaml:"Author,omitempty"`
--	Config          *Config   `json:"Config,omitempty" yaml:"Config,omitempty"`
--	Architecture    string    `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
--	Size            int64     `json:"Size,omitempty" yaml:"Size,omitempty"`
--}
--
--// ImageHistory represent a layer in an image's history returned by the
--// ImageHistory call.
--type ImageHistory struct {
--	ID        string   `json:"Id" yaml:"Id"`
--	Tags      []string `json:"Tags,omitempty" yaml:"Tags,omitempty"`
--	Created   int64    `json:"Created,omitempty" yaml:"Created,omitempty"`
--	CreatedBy string   `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"`
--	Size      int64    `json:"Size,omitempty" yaml:"Size,omitempty"`
--}
--
--// ImagePre012 serves the same purpose as the Image type except that it is for
--// earlier versions of the Docker API (pre-012 to be specific)
--type ImagePre012 struct {
--	ID              string    `json:"id"`
--	Parent          string    `json:"parent,omitempty"`
--	Comment         string    `json:"comment,omitempty"`
--	Created         time.Time `json:"created"`
--	Container       string    `json:"container,omitempty"`
--	ContainerConfig Config    `json:"container_config,omitempty"`
--	DockerVersion   string    `json:"docker_version,omitempty"`
--	Author          string    `json:"author,omitempty"`
--	Config          *Config   `json:"config,omitempty"`
--	Architecture    string    `json:"architecture,omitempty"`
--	Size            int64     `json:"size,omitempty"`
--}
--
--// ListImagesOptions specify parameters to the ListImages function.
--//
--// See http://goo.gl/2rOLFF for more details.
--type ListImagesOptions struct {
--	All     bool
--	Filters map[string][]string
--}
--
--var (
--	// ErrNoSuchImage is the error returned when the image does not exist.
--	ErrNoSuchImage = errors.New("no such image")
--
--	// ErrMissingRepo is the error returned when the remote repository is
--	// missing.
--	ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
--
--	// ErrMissingOutputStream is the error returned when no output stream
--	// is provided to some calls, like BuildImage.
--	ErrMissingOutputStream = errors.New("missing output stream")
--
--	// ErrMultipleContexts is the error returned when both a ContextDir and
--	// InputStream are provided in BuildImageOptions
--	ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
--)
--
--// ListImages returns the list of available images in the server.
--//
--// See http://goo.gl/2rOLFF for more details.
--func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
--	path := "/images/json?" + queryString(opts)
--	body, _, err := c.do("GET", path, nil)
--	if err != nil {
--		return nil, err
--	}
--	var images []APIImages
--	err = json.Unmarshal(body, &images)
--	if err != nil {
--		return nil, err
--	}
--	return images, nil
--}
--
--// ImageHistory returns the history of the image by its name or ID.
--//
--// See http://goo.gl/2oJmNs for more details.
--func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
--	body, status, err := c.do("GET", "/images/"+name+"/history", nil)
--	if status == http.StatusNotFound {
--		return nil, ErrNoSuchImage
--	}
--	if err != nil {
--		return nil, err
--	}
--	var history []ImageHistory
--	err = json.Unmarshal(body, &history)
--	if err != nil {
--		return nil, err
--	}
--	return history, nil
--}
--
--// RemoveImage removes an image by its name or ID.
--//
--// See http://goo.gl/znj0wM for more details.
--func (c *Client) RemoveImage(name string) error {
--	_, status, err := c.do("DELETE", "/images/"+name, nil)
--	if status == http.StatusNotFound {
--		return ErrNoSuchImage
--	}
--	return err
--}
--
--// InspectImage returns an image by its name or ID.
--//
--// See http://goo.gl/Q112NY for more details.
--func (c *Client) InspectImage(name string) (*Image, error) {
--	body, status, err := c.do("GET", "/images/"+name+"/json", nil)
--	if status == http.StatusNotFound {
--		return nil, ErrNoSuchImage
--	}
--	if err != nil {
--		return nil, err
--	}
--
--	var image Image
--
--	// if the caller elected to skip checking the server's version, assume it's the latest
--	if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion1_12) {
--		err = json.Unmarshal(body, &image)
--		if err != nil {
--			return nil, err
--		}
--	} else {
--		var imagePre012 ImagePre012
--		err = json.Unmarshal(body, &imagePre012)
--		if err != nil {
--			return nil, err
--		}
--
--		image.ID = imagePre012.ID
--		image.Parent = imagePre012.Parent
--		image.Comment = imagePre012.Comment
--		image.Created = imagePre012.Created
--		image.Container = imagePre012.Container
--		image.ContainerConfig = imagePre012.ContainerConfig
--		image.DockerVersion = imagePre012.DockerVersion
--		image.Author = imagePre012.Author
--		image.Config = imagePre012.Config
--		image.Architecture = imagePre012.Architecture
--		image.Size = imagePre012.Size
--	}
--
--	return &image, nil
--}
--
--// PushImageOptions represents options to use in the PushImage method.
--//
--// See http://goo.gl/pN8A3P for more details.
--type PushImageOptions struct {
--	// Name of the image
--	Name string
--
--	// Tag of the image
--	Tag string
--
--	// Registry server to push the image
--	Registry string
--
--	OutputStream  io.Writer `qs:"-"`
--	RawJSONStream bool      `qs:"-"`
--}
--
--// AuthConfiguration represents authentication options to use in the PushImage
--// method. It represents the authentication in the Docker index server.
--type AuthConfiguration struct {
--	Username      string `json:"username,omitempty"`
--	Password      string `json:"password,omitempty"`
--	Email         string `json:"email,omitempty"`
--	ServerAddress string `json:"serveraddress,omitempty"`
--}
--
--// AuthConfigurations represents authentication options to use for the
--// PushImage method accommodating the new X-Registry-Config header
--type AuthConfigurations struct {
--	Configs map[string]AuthConfiguration `json:"configs"`
--}
--
--// PushImage pushes an image to a remote registry, logging progress to w.
--//
--// An empty instance of AuthConfiguration may be used for unauthenticated
--// pushes.
--//
--// See http://goo.gl/pN8A3P for more details.
--func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
--	if opts.Name == "" {
--		return ErrNoSuchImage
--	}
--	name := opts.Name
--	opts.Name = ""
--	path := "/images/" + name + "/push?" + queryString(&opts)
--	headers := headersWithAuth(auth)
--	return c.stream("POST", path, true, opts.RawJSONStream, headers, nil, opts.OutputStream, nil)
--}
--
--// PullImageOptions present the set of options available for pulling an image
--// from a registry.
--//
--// See http://goo.gl/ACyYNS for more details.
--type PullImageOptions struct {
--	Repository    string `qs:"fromImage"`
--	Registry      string
--	Tag           string
--	OutputStream  io.Writer `qs:"-"`
--	RawJSONStream bool      `qs:"-"`
--}
--
--// PullImage pulls an image from a remote registry, logging progress to w.
--//
--// See http://goo.gl/ACyYNS for more details.
--func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
--	if opts.Repository == "" {
--		return ErrNoSuchImage
--	}
--
--	headers := headersWithAuth(auth)
--	return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream)
--}
--
--func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error {
--	path := "/images/create?" + qs
--	return c.stream("POST", path, true, rawJSONStream, headers, in, w, nil)
--}
--
--// LoadImageOptions represents the options for LoadImage Docker API Call
--//
--// See http://goo.gl/Y8NNCq for more details.
--type LoadImageOptions struct {
--	InputStream io.Reader
--}
--
--// LoadImage imports a tarball docker image
--//
--// See http://goo.gl/Y8NNCq for more details.
--func (c *Client) LoadImage(opts LoadImageOptions) error {
--	return c.stream("POST", "/images/load", true, false, nil, opts.InputStream, nil, nil)
--}
--
--// ExportImageOptions represent the options for ExportImage Docker API call
--//
--// See http://goo.gl/mi6kvk for more details.
--type ExportImageOptions struct {
--	Name         string
--	OutputStream io.Writer
--}
--
--// ExportImage exports an image (as a tar file) into the stream
--//
--// See http://goo.gl/mi6kvk for more details.
--func (c *Client) ExportImage(opts ExportImageOptions) error {
--	return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), true, false, nil, nil, opts.OutputStream, nil)
--}
--
--// ImportImageOptions presents the set of information available for importing
--// an image from a source file or the stdin.
--//
--// See http://goo.gl/PhBKnS for more details.
--type ImportImageOptions struct {
--	Repository string `qs:"repo"`
--	Source     string `qs:"fromSrc"`
--	Tag        string `qs:"tag"`
--
--	InputStream  io.Reader `qs:"-"`
--	OutputStream io.Writer `qs:"-"`
--}
--
--// ImportImage imports an image from a url, a file or stdin
--//
--// See http://goo.gl/PhBKnS for more details.
--func (c *Client) ImportImage(opts ImportImageOptions) error {
--	if opts.Repository == "" {
--		return ErrNoSuchImage
--	}
--	if opts.Source != "-" {
--		opts.InputStream = nil
--	}
--	if opts.Source != "-" && !isURL(opts.Source) {
--		f, err := os.Open(opts.Source)
--		if err != nil {
--			return err
--		}
--		b, err := ioutil.ReadAll(f)
--		opts.InputStream = bytes.NewBuffer(b)
--		opts.Source = "-"
--	}
--	return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, false)
--}
--
--// BuildImageOptions presents the set of information available for building an
--// image from a tarfile with a Dockerfile in it.
--//
--// For more details about the Docker building process, see
--// http://goo.gl/tlPXPu.
--type BuildImageOptions struct {
--	Name                string             `qs:"t"`
--	NoCache             bool               `qs:"nocache"`
--	SuppressOutput      bool               `qs:"q"`
--	RmTmpContainer      bool               `qs:"rm"`
--	ForceRmTmpContainer bool               `qs:"forcerm"`
--	InputStream         io.Reader          `qs:"-"`
--	OutputStream        io.Writer          `qs:"-"`
--	RawJSONStream       bool               `qs:"-"`
--	Remote              string             `qs:"remote"`
--	Auth                AuthConfiguration  `qs:"-"` // for older docker X-Registry-Auth header
--	AuthConfigs         AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
--	ContextDir          string             `qs:"-"`
--}
--
--// BuildImage builds an image from a tarball's url or a Dockerfile in the input
--// stream.
--//
--// See http://goo.gl/wRsW76 for more details.
--func (c *Client) BuildImage(opts BuildImageOptions) error {
--	if opts.OutputStream == nil {
--		return ErrMissingOutputStream
--	}
--	var headers = headersWithAuth(opts.Auth, opts.AuthConfigs)
--
--	if opts.Remote != "" && opts.Name == "" {
--		opts.Name = opts.Remote
--	}
--	if opts.InputStream != nil || opts.ContextDir != "" {
--		headers["Content-Type"] = "application/tar"
--	} else if opts.Remote == "" {
--		return ErrMissingRepo
--	}
--	if opts.ContextDir != "" {
--		if opts.InputStream != nil {
--			return ErrMultipleContexts
--		}
--		var err error
--		if opts.InputStream, err = createTarStream(opts.ContextDir); err != nil {
--			return err
--		}
--	}
--
--	return c.stream("POST", fmt.Sprintf("/build?%s",
--		queryString(&opts)), true, opts.RawJSONStream, headers, opts.InputStream, opts.OutputStream, nil)
--}
--
--// TagImageOptions present the set of options to tag an image.
--//
--// See http://goo.gl/5g6qFy for more details.
--type TagImageOptions struct {
--	Repo  string
--	Tag   string
--	Force bool
--}
--
--// TagImage adds a tag to the image identified by the given name.
--//
--// See http://goo.gl/5g6qFy for more details.
--func (c *Client) TagImage(name string, opts TagImageOptions) error {
--	if name == "" {
--		return ErrNoSuchImage
--	}
--	_, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
--		queryString(&opts)), nil)
--	if status == http.StatusNotFound {
--		return ErrNoSuchImage
--	}
--
--	return err
--}
--
--func isURL(u string) bool {
--	p, err := url.Parse(u)
--	if err != nil {
--		return false
--	}
--	return p.Scheme == "http" || p.Scheme == "https"
--}
--
--func headersWithAuth(auths ...interface{}) map[string]string {
--	var headers = make(map[string]string)
--
--	for _, auth := range auths {
--		switch auth.(type) {
--		case AuthConfiguration:
--			var buf bytes.Buffer
--			json.NewEncoder(&buf).Encode(auth)
--			headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
--		case AuthConfigurations:
--			var buf bytes.Buffer
--			json.NewEncoder(&buf).Encode(auth)
--			headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
--		}
--	}
--
--	return headers
--}
--
--// APIImageSearch reflect the result of a search on the dockerHub
--//
--// See http://goo.gl/xI5lLZ for more details.
--type APIImageSearch struct {
--	Description string `json:"description,omitempty" yaml:"description,omitempty"`
--	IsOfficial  bool   `json:"is_official,omitempty" yaml:"is_official,omitempty"`
--	IsAutomated bool   `json:"is_automated,omitempty" yaml:"is_automated,omitempty"`
--	Name        string `json:"name,omitempty" yaml:"name,omitempty"`
--	StarCount   int    `json:"star_count,omitempty" yaml:"star_count,omitempty"`
--}
--
--// SearchImages search the docker hub with a specific given term.
--//
--// See http://goo.gl/xI5lLZ for more details.
--func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
--	body, _, err := c.do("GET", "/images/search?term="+term, nil)
--	if err != nil {
--		return nil, err
--	}
--	var searchResult []APIImageSearch
--	err = json.Unmarshal(body, &searchResult)
--	if err != nil {
--		return nil, err
--	}
--	return searchResult, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go
-deleted file mode 100644
-index 11776e8..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go
-+++ /dev/null
-@@ -1,878 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/base64"
--	"encoding/json"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"os"
--	"reflect"
--	"strings"
--	"testing"
--)
--
--func newTestClient(rt *FakeRoundTripper) Client {
--	endpoint := "http://localhost:4243"
--	u, _ := parseEndpoint("http://localhost:4243")
--	client := Client{
--		HTTPClient:             &http.Client{Transport: rt},
--		endpoint:               endpoint,
--		endpointURL:            u,
--		SkipServerVersionCheck: true,
--	}
--	return client
--}
--
--type stdoutMock struct {
--	*bytes.Buffer
--}
--
--func (m stdoutMock) Close() error {
--	return nil
--}
--
--type stdinMock struct {
--	*bytes.Buffer
--}
--
--func (m stdinMock) Close() error {
--	return nil
--}
--
--func TestListImages(t *testing.T) {
--	body := `[
--     {
--             "Repository":"base",
--             "Tag":"ubuntu-12.10",
--             "Id":"b750fe79269d",
--             "Created":1364102658
--     },
--     {
--             "Repository":"base",
--             "Tag":"ubuntu-quantal",
--             "Id":"b750fe79269d",
--             "Created":1364102658
--     },
--     {
--             "RepoTag": [
--             "ubuntu:12.04",
--             "ubuntu:precise",
--             "ubuntu:latest"
--             ],
--             "Id": "8dbd9e392a964c",
--             "Created": 1365714795,
--             "Size": 131506275,
--             "VirtualSize": 131506275
--      },
--      {
--             "RepoTag": [
--             "ubuntu:12.10",
--             "ubuntu:quantal"
--             ],
--             "ParentId": "27cf784147099545",
--             "Id": "b750fe79269d2e",
--             "Created": 1364102658,
--             "Size": 24653,
--             "VirtualSize": 180116135
--      }
--]`
--	var expected []APIImages
--	err := json.Unmarshal([]byte(body), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK})
--	images, err := client.ListImages(ListImagesOptions{})
--	if err != nil {
--		t.Error(err)
--	}
--	if !reflect.DeepEqual(images, expected) {
--		t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images)
--	}
--}
--
--func TestListImagesParameters(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	_, err := client.ListImages(ListImagesOptions{All: false})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "GET" {
--		t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method)
--	}
--	if all := req.URL.Query().Get("all"); all != "0" && all != "" {
--		t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all)
--	}
--	fakeRT.Reset()
--	_, err = client.ListImages(ListImagesOptions{All: true})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req = fakeRT.requests[0]
--	if all := req.URL.Query().Get("all"); all != "1" {
--		t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. Got all=%s", all)
--	}
--	fakeRT.Reset()
--	_, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{
--		"dangling": {"true"},
--	}})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req = fakeRT.requests[0]
--	body := req.URL.Query().Get("filters")
--	var filters map[string][]string
--	err = json.Unmarshal([]byte(body), &filters)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" {
--		t.Errorf("ListImages(dangling=[true]): Wrong filter map. Want dangling=[true], got dangling=%v", filters["dangling"])
--	}
--}
--
--func TestImageHistory(t *testing.T) {
--	body := `[
--	{
--		"Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e",
--		"Tags": [
--			"debian:7.6",
--			"debian:latest",
--			"debian:7",
--			"debian:wheezy"
--		],
--		"Created": 1409856216,
--		"CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]"
--	},
--	{
--		"Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce",
--		"Created": 1409856213,
--		"CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /",
--		"Size": 85178663
--	},
--	{
--		"Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
--		"Tags": [
--			"scratch:latest"
--		],
--		"Created": 1371157430
--	}
--]`
--	var expected []ImageHistory
--	err := json.Unmarshal([]byte(body), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK})
--	history, err := client.ImageHistory("debian:latest")
--	if err != nil {
--		t.Error(err)
--	}
--	if !reflect.DeepEqual(history, expected) {
--		t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history)
--	}
--}
--
--func TestRemoveImage(t *testing.T) {
--	name := "test"
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
--	client := newTestClient(fakeRT)
--	err := client.RemoveImage(name)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expectedMethod := "DELETE"
--	if req.Method != expectedMethod {
--		t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/images/" + name))
--	if req.URL.Path != u.Path {
--		t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path)
--	}
--}
--
--func TestRemoveImageNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound})
--	err := client.RemoveImage("test:")
--	if err != ErrNoSuchImage {
--		t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
--	}
--}
--
--func TestInspectImage(t *testing.T) {
--	body := `{
--     "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
--     "parent":"27cf784147099545",
--     "created":"2013-03-23T22:24:18.818426-07:00",
--     "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
--     "container_config":{"Memory":0}
--}`
--	var expected Image
--	json.Unmarshal([]byte(body), &expected)
--	fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	image, err := client.InspectImage(expected.ID)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(*image, expected) {
--		t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "GET" {
--		t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json"))
--	if req.URL.Path != u.Path {
--		t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path)
--	}
--}
--
--func TestInspectImageNotFound(t *testing.T) {
--	client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound})
--	name := "test"
--	image, err := client.InspectImage(name)
--	if image != nil {
--		t.Errorf("InspectImage(%q): expected <nil> image, got %#v.", name, image)
--	}
--	if err != ErrNoSuchImage {
--		t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err)
--	}
--}
--
--func TestPushImage(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "Pushing 1/100"
--	if buf.String() != expected {
--		t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/images/test/push"))
--	if req.URL.Path != u.Path {
--		t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--	if query := req.URL.Query().Encode(); query != "" {
--		t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query)
--	}
--
--	auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth"))
--	if err != nil {
--		t.Errorf("PushImage: caught error decoding auth. %#v", err.Error())
--	}
--	if strings.TrimSpace(string(auth)) != "{}" {
--		t.Errorf("PushImage: wrong body. Want %q. Got %q.",
--			base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth"))
--	}
--}
--
--func TestPushImageWithRawJSON(t *testing.T) {
--	body := `
--	{"status":"Pushing..."}
--	{"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}}
--	{"status":"Image successfully pushed"}
--	`
--	fakeRT := &FakeRoundTripper{
--		message: body,
--		status:  http.StatusOK,
--		header: map[string]string{
--			"Content-Type": "application/json",
--		},
--	}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--
--	err := client.PushImage(PushImageOptions{
--		Name:          "test",
--		OutputStream:  &buf,
--		RawJSONStream: true,
--	}, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if buf.String() != body {
--		t.Errorf("PushImage: Wrong raw output. Want %q. Got %q.", body, buf.String())
--	}
--}
--
--func TestPushImageWithAuthentication(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	inputAuth := AuthConfiguration{
--		Username: "gopher",
--		Password: "gopher123",
--		Email:    "gopher@tsuru.io",
--	}
--	err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	var gotAuth AuthConfiguration
--
--	auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth"))
--	if err != nil {
--		t.Errorf("PushImage: caught error decoding auth. %#v", err.Error())
--	}
--
--	err = json.Unmarshal(auth, &gotAuth)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(gotAuth, inputAuth) {
--		t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth)
--	}
--}
--
--func TestPushImageCustomRegistry(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var authConfig AuthConfiguration
--	var buf bytes.Buffer
--	opts := PushImageOptions{
--		Name: "test", Registry: "docker.tsuru.io",
--		OutputStream: &buf,
--	}
--	err := client.PushImage(opts, authConfig)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expectedQuery := "registry=docker.tsuru.io"
--	if query := req.URL.Query().Encode(); query != expectedQuery {
--		t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query)
--	}
--}
--
--func TestPushImageNoName(t *testing.T) {
--	client := Client{}
--	err := client.PushImage(PushImageOptions{}, AuthConfiguration{})
--	if err != ErrNoSuchImage {
--		t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
--	}
--}
--
--func TestPullImage(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf},
--		AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	expected := "Pulling 1/100"
--	if buf.String() != expected {
--		t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String())
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/images/create"))
--	if req.URL.Path != u.Path {
--		t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--	expectedQuery := "fromImage=base"
--	if query := req.URL.Query().Encode(); query != expectedQuery {
--		t.Errorf("PullImage: Wrong query string. Want %q. Got %q.", expectedQuery, query)
--	}
--}
--
--func TestPullImageWithRawJSON(t *testing.T) {
--	body := `
--	{"status":"Pulling..."}
--	{"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}}
--	`
--	fakeRT := &FakeRoundTripper{
--		message: body,
--		status:  http.StatusOK,
--		header: map[string]string{
--			"Content-Type": "application/json",
--		},
--	}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	err := client.PullImage(PullImageOptions{
--		Repository:    "base",
--		OutputStream:  &buf,
--		RawJSONStream: true,
--	}, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if buf.String() != body {
--		t.Errorf("PullImage: Wrong raw output. Want %q. Got %q", body, buf.String())
--	}
--}
--
--func TestPullImageWithoutOutputStream(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	opts := PullImageOptions{
--		Repository: "base",
--		Registry:   "docker.tsuru.io",
--	}
--	err := client.PullImage(opts, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestPullImageCustomRegistry(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := PullImageOptions{
--		Repository:   "base",
--		Registry:     "docker.tsuru.io",
--		OutputStream: &buf,
--	}
--	err := client.PullImage(opts, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestPullImageTag(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := PullImageOptions{
--		Repository:   "base",
--		Registry:     "docker.tsuru.io",
--		Tag:          "latest",
--		OutputStream: &buf,
--	}
--	err := client.PullImage(opts, AuthConfiguration{})
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestPullImageNoRepository(t *testing.T) {
--	var opts PullImageOptions
--	client := Client{}
--	err := client.PullImage(opts, AuthConfiguration{})
--	if err != ErrNoSuchImage {
--		t.Errorf("PullImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
--	}
--}
--
--func TestImportImageFromUrl(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := ImportImageOptions{
--		Source:       "http://mycompany.com/file.tar",
--		Repository:   "testimage",
--		Tag:          "tag",
--		OutputStream: &buf,
--	}
--	err := client.ImportImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestImportImageFromInput(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	in := bytes.NewBufferString("tar content")
--	var buf bytes.Buffer
--	opts := ImportImageOptions{
--		Source: "-", Repository: "testimage",
--		InputStream: in, OutputStream: &buf,
--		Tag: "tag",
--	}
--	err := client.ImportImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--	body, err := ioutil.ReadAll(req.Body)
--	if err != nil {
--		t.Errorf("ImportImage: caught error while reading body %#v", err.Error())
--	}
--	e := "tar content"
--	if string(body) != e {
--		t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body))
--	}
--}
--
--func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	in := bytes.NewBufferString("foo")
--	opts := ImportImageOptions{
--		Source: "http://test.com/container.tar", Repository: "testimage",
--		InputStream: in, OutputStream: &buf,
--	}
--	err := client.ImportImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--	body, err := ioutil.ReadAll(req.Body)
--	if err != nil {
--		t.Errorf("ImportImage: caught error while reading body %#v", err.Error())
--	}
--	if string(body) != "" {
--		t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body))
--	}
--}
--
--func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	tarPath := "testing/data/container.tar"
--	opts := ImportImageOptions{
--		Source: tarPath, Repository: "testimage",
--		OutputStream: &buf,
--	}
--	err := client.ImportImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	tar, err := os.Open(tarPath)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	tarContent, err := ioutil.ReadAll(tar)
--	body, err := ioutil.ReadAll(req.Body)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(tarContent, body) {
--		t.Errorf("ImportImage: wrong body. Want %#v content. Got %#v.", tarPath, body)
--	}
--}
--
--func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	tarPath := "testing/data/container.tar"
--	opts := ImportImageOptions{
--		Source: tarPath, Repository: "testimage",
--		OutputStream: &buf,
--	}
--	err := client.ImportImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestBuildImageParameters(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:                "testImage",
--		NoCache:             true,
--		SuppressOutput:      true,
--		RmTmpContainer:      true,
--		ForceRmTmpContainer: true,
--		InputStream:         &buf,
--		OutputStream:        &buf,
--	}
--	err := client.BuildImage(opts)
--	if err != nil && strings.Index(err.Error(), "build image fail") == -1 {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"t": {opts.Name}, "nocache": {"1"}, "q": {"1"}, "rm": {"1"}, "forcerm": {"1"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestBuildImageParametersForRemoteBuild(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:           "testImage",
--		Remote:         "testing/data/container.tar",
--		SuppressOutput: true,
--		OutputStream:   &buf,
--	}
--	err := client.BuildImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestBuildImageMissingRepoAndNilInput(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:           "testImage",
--		SuppressOutput: true,
--		OutputStream:   &buf,
--	}
--	err := client.BuildImage(opts)
--	if err != ErrMissingRepo {
--		t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err)
--	}
--}
--
--func TestBuildImageMissingOutputStream(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	opts := BuildImageOptions{Name: "testImage"}
--	err := client.BuildImage(opts)
--	if err != ErrMissingOutputStream {
--		t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingOutputStream, err)
--	}
--}
--
--func TestBuildImageWithRawJSON(t *testing.T) {
--	body := `
--	{"stream":"Step 0 : FROM ubuntu:latest\n"}
--	{"stream":" ---\u003e 4300eb9d3c8d\n"}
--	{"stream":"Step 1 : MAINTAINER docker <eng at docker.com>\n"}
--	{"stream":" ---\u003e Using cache\n"}
--	{"stream":" ---\u003e 3a3ed758c370\n"}
--	{"stream":"Step 2 : CMD /usr/bin/top\n"}
--	{"stream":" ---\u003e Running in 36b1479cc2e4\n"}
--	{"stream":" ---\u003e 4b6188aebe39\n"}
--	{"stream":"Removing intermediate container 36b1479cc2e4\n"}
--	{"stream":"Successfully built 4b6188aebe39\n"}
--    `
--	fakeRT := &FakeRoundTripper{
--		message: body,
--		status:  http.StatusOK,
--		header: map[string]string{
--			"Content-Type": "application/json",
--		},
--	}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Name:           "testImage",
--		RmTmpContainer: true,
--		InputStream:    &buf,
--		OutputStream:   &buf,
--		RawJSONStream:  true,
--	}
--	err := client.BuildImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if buf.String() != body {
--		t.Errorf("BuildImage: Wrong raw output. Want %q. Got %q.", body, buf.String())
--	}
--}
--
--func TestBuildImageRemoteWithoutName(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	var buf bytes.Buffer
--	opts := BuildImageOptions{
--		Remote:         "testing/data/container.tar",
--		SuppressOutput: true,
--		OutputStream:   &buf,
--	}
--	err := client.BuildImage(opts)
--	if err != nil {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}}
--	got := map[string][]string(req.URL.Query())
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestTagImageParameters(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	opts := TagImageOptions{Repo: "testImage"}
--	err := client.TagImage("base", opts)
--	if err != nil && strings.Index(err.Error(), "tag image fail") == -1 {
--		t.Fatal(err)
--	}
--	req := fakeRT.requests[0]
--	expected := "http://localhost:4243/images/base/tag?repo=testImage"
--	got := req.URL.String()
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestTagImageMissingRepo(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	opts := TagImageOptions{Repo: "testImage"}
--	err := client.TagImage("", opts)
--	if err != ErrNoSuchImage {
--		t.Errorf("TagImage: wrong error returned. Want %#v. Got %#v.",
--			ErrNoSuchImage, err)
--	}
--}
--
--func TestIsUrl(t *testing.T) {
--	url := "http://foo.bar/"
--	result := isURL(url)
--	if !result {
--		t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result)
--	}
--	url = "/foo/bar.tar"
--	result = isURL(url)
--	if result {
--		t.Errorf("isURL: wrong match. Expected %#v to not be a url. Got %#v", url, result)
--	}
--}
--
--func TestLoadImage(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	tar, err := os.Open("testing/data/container.tar")
--	if err != nil {
--		t.Fatal(err)
--	} else {
--		defer tar.Close()
--	}
--	opts := LoadImageOptions{InputStream: tar}
--	err = client.LoadImage(opts)
--	if nil != err {
--		t.Error(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "POST" {
--		t.Errorf("LoadImage: wrong method. Expected %q. Got %q.", "POST", req.Method)
--	}
--	if req.URL.Path != "/images/load" {
--		t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path)
--	}
--}
--
--func TestExportImage(t *testing.T) {
--	var buf bytes.Buffer
--	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
--	client := newTestClient(fakeRT)
--	opts := ExportImageOptions{Name: "testimage", OutputStream: &buf}
--	err := client.ExportImage(opts)
--	if nil != err {
--		t.Error(err)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "GET" {
--		t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method)
--	}
--	expectedPath := "/images/testimage/get"
--	if req.URL.Path != expectedPath {
--		t.Errorf("ExportImage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path)
--	}
--}
--
--func TestSearchImages(t *testing.T) {
--	body := `[
--	{
--		"description":"A container with Cassandra 2.0.3",
--		"is_official":true,
--		"is_automated":true,
--		"name":"poklet/cassandra",
--		"star_count":17
--	},
--	{
--		"description":"A container with Cassandra 2.0.3",
--		"is_official":true,
--		"is_automated":false,
--		"name":"poklet/cassandra",
--		"star_count":17
--	}
--	,
--	{
--		"description":"A container with Cassandra 2.0.3",
--		"is_official":false,
--		"is_automated":true,
--		"name":"poklet/cassandra",
--		"star_count":17
--	}
--]`
--	var expected []APIImageSearch
--	err := json.Unmarshal([]byte(body), &expected)
--	if err != nil {
--		t.Fatal(err)
--	}
--	client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK})
--	result, err := client.SearchImages("cassandra")
--	if err != nil {
--		t.Error(err)
--	}
--	if !reflect.DeepEqual(result, expected) {
--		t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result)
--	}
--}
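
The tests above all share one pattern: stub the HTTP transport, call the client method, then assert on the recorded request. A minimal sketch of such a recording transport, using only the standard library; the package's own FakeRoundTripper and newTestClient helpers live in a file not shown in this hunk, so the names below are illustrative only:

    package fakes

    import (
    	"io/ioutil"
    	"net/http"
    	"strings"
    )

    // recordingRoundTripper is an illustrative stand-in for the FakeRoundTripper
    // used above: it records every request and answers with a canned response.
    type recordingRoundTripper struct {
    	message  string
    	status   int
    	requests []*http.Request
    }

    func (rt *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
    	rt.requests = append(rt.requests, req)
    	return &http.Response{
    		StatusCode: rt.status,
    		Body:       ioutil.NopCloser(strings.NewReader(rt.message)),
    		Header:     make(http.Header),
    	}, nil
    }

A client wired to a transport like this never reaches a real Docker daemon, which is what lets the tests above inspect the first recorded request synchronously.
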
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go
-deleted file mode 100644
-index 2678ab5..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go
-+++ /dev/null
-@@ -1,59 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"strings"
--)
--
--// Version returns version information about the docker server.
--//
--// See http://goo.gl/BOZrF5 for more details.
--func (c *Client) Version() (*Env, error) {
--	body, _, err := c.do("GET", "/version", nil)
--	if err != nil {
--		return nil, err
--	}
--	var env Env
--	if err := env.Decode(bytes.NewReader(body)); err != nil {
--		return nil, err
--	}
--	return &env, nil
--}
--
--// Info returns system-wide information about the Docker server.
--//
--// See http://goo.gl/wmqZsW for more details.
--func (c *Client) Info() (*Env, error) {
--	body, _, err := c.do("GET", "/info", nil)
--	if err != nil {
--		return nil, err
--	}
--	var info Env
--	err = info.Decode(bytes.NewReader(body))
--	if err != nil {
--		return nil, err
--	}
--	return &info, nil
--}
--
--// ParseRepositoryTag gets the name of the repository and returns it split
--// into two parts: the repository and the tag.
--//
--// Some examples:
--//
--//     localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
--//     localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
--func ParseRepositoryTag(repoTag string) (repository string, tag string) {
--	n := strings.LastIndex(repoTag, ":")
--	if n < 0 {
--		return repoTag, ""
--	}
--	if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
--		return repoTag[:n], tag
--	}
--	return repoTag, ""
--}
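
The three helpers in that file are typically consumed as below; a minimal sketch, assuming the client constructor docker.NewClient from the rest of the package (it is not part of the file removed here):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/fsouza/go-dockerclient"
    )

    func main() {
    	// docker.NewClient is assumed from the rest of the package; only Version,
    	// Info and ParseRepositoryTag live in the file removed above.
    	client, err := docker.NewClient("unix:///var/run/docker.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	env, err := client.Version()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("server version:", env.Get("Version"))

    	repo, tag := docker.ParseRepositoryTag("tsuru/python:2.7")
    	fmt.Printf("repository=%s tag=%s\n", repo, tag) // tsuru/python, 2.7
    }
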
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go
-deleted file mode 100644
-index ceaf076..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go
-+++ /dev/null
-@@ -1,159 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"net/http"
--	"net/url"
--	"reflect"
--	"sort"
--	"testing"
--)
--
--type DockerVersion struct {
--	Version   string
--	GitCommit string
--	GoVersion string
--}
--
--func TestVersion(t *testing.T) {
--	body := `{
--     "Version":"0.2.2",
--     "GitCommit":"5a2a5cc+CHANGES",
--     "GoVersion":"go1.0.3"
--}`
--	fakeRT := FakeRoundTripper{message: body, status: http.StatusOK}
--	client := newTestClient(&fakeRT)
--	expected := DockerVersion{
--		Version:   "0.2.2",
--		GitCommit: "5a2a5cc+CHANGES",
--		GoVersion: "go1.0.3",
--	}
--	version, err := client.Version()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if result := version.Get("Version"); result != expected.Version {
--		t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version"))
--	}
--	if result := version.Get("GitCommit"); result != expected.GitCommit {
--		t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit"))
--	}
--	if result := version.Get("GoVersion"); result != expected.GoVersion {
--		t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion"))
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "GET" {
--		t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/version"))
--	if req.URL.Path != u.Path {
--		t.Errorf("Version(): wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--}
--
--func TestVersionError(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError}
--	client := newTestClient(fakeRT)
--	version, err := client.Version()
--	if version != nil {
--		t.Errorf("Version(): expected <nil> value, got %#v.", version)
--	}
--	if err == nil {
--		t.Error("Version(): unexpected <nil> error")
--	}
--}
--
--func TestInfo(t *testing.T) {
--	body := `{
--     "Containers":11,
--     "Images":16,
--     "Debug":0,
--     "NFd":11,
--     "NGoroutines":21,
--     "MemoryLimit":1,
--     "SwapLimit":0
--}`
--	fakeRT := FakeRoundTripper{message: body, status: http.StatusOK}
--	client := newTestClient(&fakeRT)
--	expected := Env{}
--	expected.SetInt("Containers", 11)
--	expected.SetInt("Images", 16)
--	expected.SetBool("Debug", false)
--	expected.SetInt("NFd", 11)
--	expected.SetInt("NGoroutines", 21)
--	expected.SetBool("MemoryLimit", true)
--	expected.SetBool("SwapLimit", false)
--	info, err := client.Info()
--	if err != nil {
--		t.Fatal(err)
--	}
--	infoSlice := []string(*info)
--	expectedSlice := []string(expected)
--	sort.Strings(infoSlice)
--	sort.Strings(expectedSlice)
--	if !reflect.DeepEqual(expectedSlice, infoSlice) {
--		t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info)
--	}
--	req := fakeRT.requests[0]
--	if req.Method != "GET" {
--		t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method)
--	}
--	u, _ := url.Parse(client.getURL("/info"))
--	if req.URL.Path != u.Path {
--		t.Errorf("Info(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
--	}
--}
--
--func TestInfoError(t *testing.T) {
--	fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError}
--	client := newTestClient(fakeRT)
--	version, err := client.Info()
--	if version != nil {
--		t.Errorf("Info(): expected <nil> value, got %#v.", version)
--	}
--	if err == nil {
--		t.Error("Info(): unexpected <nil> error")
--	}
--}
--
--func TestParseRepositoryTag(t *testing.T) {
--	var tests = []struct {
--		input        string
--		expectedRepo string
--		expectedTag  string
--	}{
--		{
--			"localhost.localdomain:5000/samalba/hipache:latest",
--			"localhost.localdomain:5000/samalba/hipache",
--			"latest",
--		},
--		{
--			"localhost.localdomain:5000/samalba/hipache",
--			"localhost.localdomain:5000/samalba/hipache",
--			"",
--		},
--		{
--			"tsuru/python",
--			"tsuru/python",
--			"",
--		},
--		{
--			"tsuru/python:2.7",
--			"tsuru/python",
--			"2.7",
--		},
--	}
--	for _, tt := range tests {
--		repo, tag := ParseRepositoryTag(tt.input)
--		if repo != tt.expectedRepo {
--			t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo)
--		}
--		if tag != tt.expectedTag {
--			t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. Got %q", tt.input, tt.expectedTag, tag)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go
-deleted file mode 100644
-index 16aa003..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go
-+++ /dev/null
-@@ -1,49 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--// Signal represents a signal that can be sent to the container in a
--// KillContainer call.
--type Signal int
--
--// These values represent all signals available on Linux, where containers will
--// be running.
--const (
--	SIGABRT   = Signal(0x6)
--	SIGALRM   = Signal(0xe)
--	SIGBUS    = Signal(0x7)
--	SIGCHLD   = Signal(0x11)
--	SIGCLD    = Signal(0x11)
--	SIGCONT   = Signal(0x12)
--	SIGFPE    = Signal(0x8)
--	SIGHUP    = Signal(0x1)
--	SIGILL    = Signal(0x4)
--	SIGINT    = Signal(0x2)
--	SIGIO     = Signal(0x1d)
--	SIGIOT    = Signal(0x6)
--	SIGKILL   = Signal(0x9)
--	SIGPIPE   = Signal(0xd)
--	SIGPOLL   = Signal(0x1d)
--	SIGPROF   = Signal(0x1b)
--	SIGPWR    = Signal(0x1e)
--	SIGQUIT   = Signal(0x3)
--	SIGSEGV   = Signal(0xb)
--	SIGSTKFLT = Signal(0x10)
--	SIGSTOP   = Signal(0x13)
--	SIGSYS    = Signal(0x1f)
--	SIGTERM   = Signal(0xf)
--	SIGTRAP   = Signal(0x5)
--	SIGTSTP   = Signal(0x14)
--	SIGTTIN   = Signal(0x15)
--	SIGTTOU   = Signal(0x16)
--	SIGUNUSED = Signal(0x1f)
--	SIGURG    = Signal(0x17)
--	SIGUSR1   = Signal(0xa)
--	SIGUSR2   = Signal(0xc)
--	SIGVTALRM = Signal(0x1a)
--	SIGWINCH  = Signal(0x1c)
--	SIGXCPU   = Signal(0x18)
--	SIGXFSZ   = Signal(0x19)
--)
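
These constants map directly onto the Linux signal numbers, so they can be handed to the daemon when killing a container. A minimal sketch, assuming the KillContainer/KillContainerOptions API and the NewClient constructor defined elsewhere in the package (neither is shown in this patch):

    package main

    import (
    	"log"

    	"github.com/fsouza/go-dockerclient"
    )

    func main() {
    	// docker.NewClient and KillContainerOptions are assumed from the rest of
    	// the package; only the Signal type and constants come from the file above.
    	client, err := docker.NewClient("unix:///var/run/docker.sock")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Ask the daemon to deliver SIGTERM to the container.
    	opts := docker.KillContainerOptions{ID: "abc123", Signal: docker.SIGTERM}
    	if err := client.KillContainer(opts); err != nil {
    		log.Fatal(err)
    	}
    }
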
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy.go
-deleted file mode 100644
-index 3782f3d..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--// Copyright 2014 Docker authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the DOCKER-LICENSE file.
--
--package docker
--
--import (
--	"encoding/binary"
--	"errors"
--	"io"
--)
--
--const (
--	stdWriterPrefixLen = 8
--	stdWriterFdIndex   = 0
--	stdWriterSizeIndex = 4
--)
--
--var errInvalidStdHeader = errors.New("Unrecognized input header")
--
--func stdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
--	var (
--		buf       = make([]byte, 32*1024+stdWriterPrefixLen+1)
--		bufLen    = len(buf)
--		nr, nw    int
--		er, ew    error
--		out       io.Writer
--		frameSize int
--	)
--	for {
--		for nr < stdWriterPrefixLen {
--			var nr2 int
--			nr2, er = src.Read(buf[nr:])
--			if er == io.EOF {
--				if nr < stdWriterPrefixLen && nr2 < stdWriterPrefixLen {
--					return written, nil
--				}
--				nr += nr2
--				break
--			} else if er != nil {
--				return 0, er
--			}
--			nr += nr2
--		}
--		switch buf[stdWriterFdIndex] {
--		case 0:
--			fallthrough
--		case 1:
--			out = dstout
--		case 2:
--			out = dsterr
--		default:
--			return 0, errInvalidStdHeader
--		}
--		frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
--		if frameSize+stdWriterPrefixLen > bufLen {
--			buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-len(buf)+1)...)
--			bufLen = len(buf)
--		}
--		for nr < frameSize+stdWriterPrefixLen {
--			var nr2 int
--			nr2, er = src.Read(buf[nr:])
--			if er == io.EOF {
--				if nr == 0 {
--					return written, nil
--				}
--				nr += nr2
--				break
--			} else if er != nil {
--				return 0, er
--			}
--			nr += nr2
--		}
--		bound := frameSize + stdWriterPrefixLen
--		if bound > nr {
--			bound = nr
--		}
--		nw, ew = out.Write(buf[stdWriterPrefixLen:bound])
--		if nw > 0 {
--			written += int64(nw)
--		}
--		if ew != nil {
--			return 0, ew
--		}
--		if nw != frameSize {
--			return written, io.ErrShortWrite
--		}
--		copy(buf, buf[frameSize+stdWriterPrefixLen:])
--		nr -= frameSize + stdWriterPrefixLen
--	}
--}
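
stdCopy demultiplexes Docker's attach/logs stream: every frame carries an 8-byte header whose first byte selects the stream (0 or 1 for stdout, 2 for stderr) and whose last four bytes hold the payload length as a big-endian uint32. A minimal sketch of producing that framing; the writeFrame helper below is hypothetical and simply mirrors what newStdWriter does in the test file that follows:

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    // writeFrame appends one multiplexed frame in the layout stdCopy expects:
    // header[0] is the stream (1 = stdout, 2 = stderr), header[4:8] is the
    // big-endian payload length, followed by the payload itself.
    func writeFrame(buf *bytes.Buffer, stream byte, payload []byte) {
    	header := make([]byte, 8)
    	header[0] = stream
    	binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
    	buf.Write(header)
    	buf.Write(payload)
    }

    func main() {
    	var mux bytes.Buffer
    	writeFrame(&mux, 1, []byte("hello from stdout\n"))
    	writeFrame(&mux, 2, []byte("and this went to stderr\n"))
    	fmt.Printf("% x\n", mux.Bytes()[:8]) // 01 00 00 00 00 00 00 12
    }
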
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy_test.go
-deleted file mode 100644
-index 75b8922..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/stdcopy_test.go
-+++ /dev/null
-@@ -1,255 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the DOCKER-LICENSE file.
--
--package docker
--
--import (
--	"bytes"
--	"encoding/binary"
--	"errors"
--	"io"
--	"strings"
--	"testing"
--	"testing/iotest"
--)
--
--type errorWriter struct {
--}
--
--func (errorWriter) Write([]byte) (int, error) {
--	return 0, errors.New("something went wrong")
--}
--
--func TestStdCopy(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	input.Write([]byte("something happened!"))
--	input.Write([]byte{1, 0, 0, 0, 0, 0, 0, 12})
--	input.Write([]byte("just kidding"))
--	input.Write([]byte{0, 0, 0, 0, 0, 0, 0, 6})
--	input.Write([]byte("\nyeah!"))
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if expected := int64(19 + 12 + 6); n != expected {
--		t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
--	}
--	if got := stderr.String(); got != "something happened!" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got)
--	}
--	if got := stdout.String(); got != "just kidding\nyeah!" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "just kidding\nyeah!", got)
--	}
--}
--
--func TestStdCopyStress(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	value := strings.Repeat("something ", 4096)
--	writer := newStdWriter(&input, Stdout)
--	writer.Write([]byte(value))
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if n != 40960 {
--		t.Errorf("Wrong number of bytes. Want 40960. Got %d.", n)
--	}
--	if got := stderr.String(); got != "" {
--		t.Errorf("stdCopy: wrong stderr. Want empty string. Got %q", got)
--	}
--	if got := stdout.String(); got != value {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q", value, got)
--	}
--}
--
--func TestStdCopyInvalidStdHeader(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{3, 0, 0, 0, 0, 0, 0, 19})
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if n != 0 {
--		t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d", n)
--	}
--	if err != errInvalidStdHeader {
--		t.Errorf("stdCopy: wrong error. Want ErrInvalidStdHeader. Got %#v", err)
--	}
--}
--
--func TestStdCopyBigFrame(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 18})
--	input.Write([]byte("something happened!"))
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if expected := int64(18); n != expected {
--		t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
--	}
--	if got := stderr.String(); got != "something happened" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened", got)
--	}
--	if got := stdout.String(); got != "" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
--	}
--}
--
--func TestStdCopySmallFrame(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 20})
--	input.Write([]byte("something happened!"))
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != io.ErrShortWrite {
--		t.Errorf("stdCopy: wrong error. Want ShortWrite. Got %#v", err)
--	}
--	if expected := int64(19); n != expected {
--		t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
--	}
--	if got := stderr.String(); got != "something happened!" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got)
--	}
--	if got := stdout.String(); got != "" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
--	}
--}
--
--func TestStdCopyEmpty(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if n != 0 {
--		t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
--	}
--}
--
--func TestStdCopyCorruptedHeader(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0})
--	n, err := stdCopy(&stdout, &stderr, &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if n != 0 {
--		t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
--	}
--}
--
--func TestStdCopyTruncateWriter(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	input.Write([]byte("something happened!"))
--	n, err := stdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if expected := int64(19); n != expected {
--		t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
--	}
--	if got := stderr.String(); got != "somethi" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "somethi", got)
--	}
--	if got := stdout.String(); got != "" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
--	}
--}
--
--func TestStdCopyHeaderOnly(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	n, err := stdCopy(&stdout, iotest.TruncateWriter(&stderr, 7), &input)
--	if err != io.ErrShortWrite {
--		t.Errorf("stdCopy: wrong error. Want ShortWrite. Got %#v", err)
--	}
--	if n != 0 {
--		t.Errorf("Wrong number of bytes. Want 0. Got %d.", n)
--	}
--	if got := stderr.String(); got != "" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "", got)
--	}
--	if got := stdout.String(); got != "" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
--	}
--}
--
--func TestStdCopyDataErrReader(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	input.Write([]byte("something happened!"))
--	n, err := stdCopy(&stdout, &stderr, iotest.DataErrReader(&input))
--	if err != nil {
--		t.Fatal(err)
--	}
--	if expected := int64(19); n != expected {
--		t.Errorf("Wrong number of bytes. Want %d. Got %d.", expected, n)
--	}
--	if got := stderr.String(); got != "something happened!" {
--		t.Errorf("stdCopy: wrong stderr. Want %q. Got %q.", "something happened!", got)
--	}
--	if got := stdout.String(); got != "" {
--		t.Errorf("stdCopy: wrong stdout. Want %q. Got %q.", "", got)
--	}
--}
--
--func TestStdCopyTimeoutReader(t *testing.T) {
--	var input, stdout, stderr bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	input.Write([]byte("something happened!"))
--	_, err := stdCopy(&stdout, &stderr, iotest.TimeoutReader(&input))
--	if err != iotest.ErrTimeout {
--		t.Errorf("stdCopy: wrong error. Want ErrTimeout. Got %#v.", err)
--	}
--}
--
--func TestStdCopyWriteError(t *testing.T) {
--	var input bytes.Buffer
--	input.Write([]byte{2, 0, 0, 0, 0, 0, 0, 19})
--	input.Write([]byte("something happened!"))
--	var stdout, stderr errorWriter
--	n, err := stdCopy(stdout, stderr, &input)
--	if err.Error() != "something went wrong" {
--		t.Errorf("stdCopy: wrong error. Want %q. Got %q", "something went wrong", err)
--	}
--	if n != 0 {
--		t.Errorf("stdCopy: wrong number of bytes. Want 0. Got %d.", n)
--	}
--}
--
--type StdType [8]byte
--
--var (
--	Stdin  = StdType{0: 0}
--	Stdout = StdType{0: 1}
--	Stderr = StdType{0: 2}
--)
--
--type StdWriter struct {
--	io.Writer
--	prefix  StdType
--	sizeBuf []byte
--}
--
--func (w *StdWriter) Write(buf []byte) (n int, err error) {
--	if w == nil || w.Writer == nil {
--		return 0, errors.New("Writer not instantiated")
--	}
--	binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
--	buf = append(w.prefix[:], buf...)
--
--	n, err = w.Writer.Write(buf)
--	return n - 8, err
--}
--
--func newStdWriter(w io.Writer, t StdType) *StdWriter {
--	if len(t) != 8 {
--		return nil
--	}
--
--	return &StdWriter{
--		Writer:  w,
--		prefix:  t,
--		sizeBuf: make([]byte, 4),
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
-deleted file mode 100644
-index 7c4a204..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
-+++ /dev/null
-@@ -1,99 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package docker
--
--import (
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"path"
--	"path/filepath"
--	"strings"
--
--	"github.com/docker/docker/pkg/archive"
--	"github.com/docker/docker/pkg/fileutils"
--)
--
--func createTarStream(srcPath string) (io.ReadCloser, error) {
--	excludes, err := parseDockerignore(srcPath)
--	if err != nil {
--		return nil, err
--	}
--
--	if err := validateContextDirectory(srcPath, excludes); err != nil {
--		return nil, err
--	}
--	tarOpts := &archive.TarOptions{
--		Excludes:    excludes,
--		Compression: archive.Uncompressed,
--		NoLchown:    true,
--	}
--	return archive.TarWithOptions(srcPath, tarOpts)
--}
--
--// validateContextDirectory checks if all the contents of the directory
--// can be read and returns an error if some files can't be read.
--// Symlinks which point to non-existing files don't trigger an error
--func validateContextDirectory(srcPath string, excludes []string) error {
--	return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
--		// skip this directory/file if it's not in the path, it won't get added to the context
--		if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
--			return err
--		} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
--			return err
--		} else if skip {
--			if f.IsDir() {
--				return filepath.SkipDir
--			}
--			return nil
--		}
--
--		if err != nil {
--			if os.IsPermission(err) {
--				return fmt.Errorf("can't stat '%s'", filePath)
--			}
--			if os.IsNotExist(err) {
--				return nil
--			}
--			return err
--		}
--
--		// skip checking if symlinks point to non-existing files; such symlinks can be useful
--		// also skip named pipes, because they hang on open
--		if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
--			return nil
--		}
--
--		if !f.IsDir() {
--			currentFile, err := os.Open(filePath)
--			if err != nil && os.IsPermission(err) {
--				return fmt.Errorf("no permission to read from '%s'", filePath)
--			}
--			currentFile.Close()
--		}
--		return nil
--	})
--}
--
--func parseDockerignore(root string) ([]string, error) {
--	var excludes []string
--	ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
--	if err != nil && !os.IsNotExist(err) {
--		return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
--	}
--	for _, pattern := range strings.Split(string(ignore), "\n") {
--		matches, err := filepath.Match(pattern, "Dockerfile")
--		if err != nil {
--			return excludes, fmt.Errorf("bad .dockerignore pattern: '%s', error: %s", pattern, err)
--		}
--		if matches {
--			return excludes, fmt.Errorf("dockerfile was excluded by .dockerignore pattern '%s'", pattern)
--		}
--		excludes = append(excludes, pattern)
--	}
--
--	return excludes, nil
--}
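
parseDockerignore splits .dockerignore on newlines and rejects any pattern that would exclude the Dockerfile itself; everything else becomes an exclude handed to archive.TarWithOptions. A minimal sketch of that per-pattern check, reusing the same filepath.Match call on the fixture contents that appear later in this patch (container.tar, dockerfile.tar, foofile):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    func main() {
    	// Same per-pattern check parseDockerignore performs: a pattern that
    	// matches "Dockerfile" is an error, everything else becomes an exclude.
    	ignore := "container.tar\ndockerfile.tar\nfoofile"
    	for _, pattern := range strings.Split(ignore, "\n") {
    		matches, err := filepath.Match(pattern, "Dockerfile")
    		if err != nil {
    			fmt.Printf("bad pattern %q: %v\n", pattern, err)
    			continue
    		}
    		if matches {
    			fmt.Printf("pattern %q would exclude the Dockerfile\n", pattern)
    			continue
    		}
    		fmt.Printf("exclude: %q\n", pattern)
    	}
    }
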
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
-deleted file mode 100644
-index d13bd0c..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
-+++ /dev/null
-@@ -1,38 +0,0 @@
--#!/bin/bash
--
--readonly GOPATH="${GOPATH%%:*}"
--
--main() {
--  check_fmt
--  check_lint
--}
--
--check_fmt() {
--  eval "set -e"
--  for file in $(git ls-files '*.go') ; do
--    gofmt $file | diff -u $file -
--  done
--  eval "set +e"
--}
--
--check_lint() {
--  _install_linter
--
--  for file in $(git ls-files '*.go') ; do
--    if [[ ! "$(${GOPATH}/bin/golint $file)" =~ ^[[:blank:]]*$ ]] ; then
--      _lint_verbose && exit 1
--    fi
--  done
--}
--
--_lint_verbose() {
--  for file in $(git ls-files '*.go') ; do $GOPATH/bin/golint $file ; done
--}
--
--_install_linter() {
--  if [[ ! -x "${GOPATH}/bin/golint" ]] ; then
--    go get -u github.com/golang/lint/golint
--  fi
--}
--
--main "$@"
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
-deleted file mode 100644
-index 027e8c2..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore
-+++ /dev/null
-@@ -1,3 +0,0 @@
--container.tar
--dockerfile.tar
--foofile
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
-deleted file mode 100644
-index 0948dcf..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
-+++ /dev/null
-@@ -1,15 +0,0 @@
--# this file describes how to build tsuru python image
--# to run it:
--# 1- install docker
--# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile
--
--from	base:ubuntu-quantal
--run	apt-get install wget -y --force-yes
--run	wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate
--run	mkdir /var/lib/tsuru
--run	tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1
--run	cp /var/lib/tsuru/python/deploy /var/lib/tsuru
--run	cp /var/lib/tsuru/base/restart /var/lib/tsuru
--run	cp /var/lib/tsuru/base/start /var/lib/tsuru
--run	/var/lib/tsuru/base/install
--run	/var/lib/tsuru/base/setup
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile
-deleted file mode 100644
-index e69de29..0000000
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem
-deleted file mode 100644
-index 8e38bba..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem
-+++ /dev/null
-@@ -1,18 +0,0 @@
-------BEGIN CERTIFICATE-----
--MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU
--MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy
--MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD
--ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii
--rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z
--f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2
--znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN
--+OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb
--vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/
--BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE
--MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII
--CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo
--W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F
--rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2
--ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF
--i3/zzlkvOnjV
-------END CERTIFICATE-----
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem
-deleted file mode 100644
-index 5e7244b..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem
-+++ /dev/null
-@@ -1,18 +0,0 @@
-------BEGIN CERTIFICATE-----
--MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx
--FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw
--MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA
--A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm
--b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4
--V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7
--BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H
--AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt
--JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB
--/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG
--SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF
--zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM
--U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb
--CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL
--cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL
--8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ==
-------END CERTIFICATE-----
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar
-deleted file mode 100644
-index e4b066e3b6df8cb78ac445a34234f3780d164cf4..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 2048
-zcmeH_Q3``F42FH)DgF~kTC`qZ7s*`9%A^%r$Bu89Fp<6NMew1akmheFe?H>)Y5N#5
-z`(UT)m>?q4G^iwZ#(XmAwH8Ujv`|_rQd)Ig3sQ!(szArs+5bAH%#&Di1HU}iJx_zp
-z+3uU9k~Zgl)J<3?S%)LS_Hgc7e)t4AX&%Rz>>WAcX2Ec>82D}md=O1Y)p%bo=N_rJ
-OD+CIGLZA@%gTMmt=q{T8
-
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar
-deleted file mode 100644
-index 32c9ce64704835cd096b85ac44c35b5087b5ccdd..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 2560
-zcmeHGy>8<$49;3V1%d0TNOs}`$a>xT46-c8LTt+?QB8ACf0XPiQll-<p+kUZFvXvb
-z{76$zR-LqKOs7{rc7zbS?G{!f_q$z^qL_3tiM%LE$cs&}-<R8RFF at p*a#OBA{1~IF
-z#KEI<M2)`Q_$$ZaN?}d2uwARM6CtMNqP&sw3$QgF;sQXey>h0~9$I?_v`_`p)qp;@
-z0OJK)JAmosQD=m*-~y?5ASGvD1{zS;L7n!AYz2z}2Y8%Kb25fgK0fDb5l4UE+{yF$
-zXs`{{TG^hbn!J);Cl1>2UV0=k!T8hL+GbhfZ2u5L51|SJ2KFb&fyiW3|3Qw(jvC+i
-zouk4oz*u9Q((Iyric9uLhPZsmgZ8ANMrS_2p5cn+n!M}dU&=mMrdq8|OlgOvF-oFN
-zh5A!%9Pk(EcxS4q(c~Z~u-BL7!+gIN2&&-GnGy1YRpY|{e@?X?J9}9;KY_$PxYO}H
-o;5QJT#=q||{Y*ZuNn-Gk-)jtGb|Y`+PV+v2`vmS2xaA4_1I+dVl>h($
-
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile
-deleted file mode 100644
-index e69de29..0000000
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem
-deleted file mode 100644
-index a9346bc..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem
-+++ /dev/null
-@@ -1,27 +0,0 @@
-------BEGIN RSA PRIVATE KEY-----
--MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m
--6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA
--XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa
--TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn
--JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH
--VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l
--DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq
--tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY
--kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU
--jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/
--nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv
--CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE
--IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i
--rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd
--++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4
--NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0
--Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM
--0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk
--SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw
--8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi
--nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ
--KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg
--A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH
--qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3
--Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8
-------END RSA PRIVATE KEY-----
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem
-deleted file mode 100644
-index 89cc445..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem
-+++ /dev/null
-@@ -1,18 +0,0 @@
-------BEGIN CERTIFICATE-----
--MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU
--MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy
--MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD
--ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g
--Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0
--AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7
--CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva
--rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV
--t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/
--BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A
--AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj
--LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH
--Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W
--UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF
--2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa
--o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy
-------END CERTIFICATE-----
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
-deleted file mode 100644
-index c897e5d..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem
-+++ /dev/null
-@@ -1,27 +0,0 @@
-------BEGIN RSA PRIVATE KEY-----
--MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz
--PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA
--oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI
--n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs
--sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3
--0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7
--3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x
--x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu
--1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH
--SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL
--W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh
--Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm
--HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6
--7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq
--+MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq
--aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy
--oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn
--QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8
--BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7
--i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe
--JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ
--M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D
--IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos
--N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy
--A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ
-------END RSA PRIVATE KEY-----
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink
-deleted file mode 120000
-index 3ddf86a..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/symlink
-+++ /dev/null
-@@ -1 +0,0 @@
--doesnotexist
-\ No newline at end of file
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
-deleted file mode 100644
-index da24fb2..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
-+++ /dev/null
-@@ -1,744 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package testing provides a fake implementation of the Docker API, useful for
--// testing purpose.
--package testing
--
--import (
--	"archive/tar"
--	"crypto/rand"
--	"encoding/json"
--	"errors"
--	"fmt"
--	mathrand "math/rand"
--	"net"
--	"net/http"
--	"regexp"
--	"strconv"
--	"strings"
--	"sync"
--	"time"
--
--	"github.com/fsouza/go-dockerclient"
--	"github.com/gorilla/mux"
--)
--
--// DockerServer represents a programmable, concurrent (not much), HTTP server
--// implementing a fake version of the Docker remote API.
--//
--// It can be used in standalone mode, listening for connections, or as an
--// arbitrary HTTP handler.
--//
--// For more details on the remote API, check http://goo.gl/G3plxW.
--type DockerServer struct {
--	containers     []*docker.Container
--	execs          []*docker.Exec
--	cMut           sync.RWMutex
--	images         []docker.Image
--	iMut           sync.RWMutex
--	imgIDs         map[string]string
--	listener       net.Listener
--	mux            *mux.Router
--	hook           func(*http.Request)
--	failures       map[string]string
--	customHandlers map[string]http.Handler
--	handlerMutex   sync.RWMutex
--	cChan          chan<- *docker.Container
--}
--
--// NewServer returns a new instance of the fake server, in standalone mode. Use
--// the method URL to get the URL of the server.
--//
--// It receives the bind address (use 127.0.0.1:0 for getting an available port
--// on the host), a channel of containers, and a hook function that will be
--// called on every request.
--//
--// The fake server will send containers in the channel whenever the container
--// changes its state, via the HTTP API (i.e.: create, start and stop). This
--// channel may be nil, which means that the server won't notify on state
--// changes.
--func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) {
--	listener, err := net.Listen("tcp", bind)
--	if err != nil {
--		return nil, err
--	}
--	server := DockerServer{
--		listener:       listener,
--		imgIDs:         make(map[string]string),
--		hook:           hook,
--		failures:       make(map[string]string),
--		customHandlers: make(map[string]http.Handler),
--		cChan:          containerChan,
--	}
--	server.buildMuxer()
--	go http.Serve(listener, &server)
--	return &server, nil
--}
--
--func (s *DockerServer) notify(container *docker.Container) {
--	if s.cChan != nil {
--		s.cChan <- container
--	}
--}
--
--func (s *DockerServer) buildMuxer() {
--	s.mux = mux.NewRouter()
--	s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer))
--	s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers))
--	s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer))
--	s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer))
--	s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer))
--	s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer))
--	s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
--	s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
--	s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer))
--	s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer))
--	s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer))
--	s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer))
--	s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
--	s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer))
--	s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer))
--	s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage))
--	s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage))
--	s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages))
--	s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage))
--	s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage))
--	s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage))
--	s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage))
--	s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents)
--	s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker))
--	s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage))
--	s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage))
--}
--
--// PrepareFailure adds a new expected failure based on a URL regexp; it
--// receives an id for the failure.
--func (s *DockerServer) PrepareFailure(id string, urlRegexp string) {
--	s.failures[id] = urlRegexp
--}
--
--// ResetFailure removes an expected failure identified by the given id.
--func (s *DockerServer) ResetFailure(id string) {
--	delete(s.failures, id)
--}
--
--// CustomHandler registers a custom handler for a specific path.
--//
--// For example:
--//
--//     server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--//         http.Error(w, "Something wrong is not right", http.StatusInternalServerError)
--//     }))
--func (s *DockerServer) CustomHandler(path string, handler http.Handler) {
--	s.handlerMutex.Lock()
--	s.customHandlers[path] = handler
--	s.handlerMutex.Unlock()
--}
--
--// MutateContainer changes the state of a container, returning an error if the
--// given id does not match to any container "running" in the server.
--func (s *DockerServer) MutateContainer(id string, state docker.State) error {
--	for _, container := range s.containers {
--		if container.ID == id {
--			container.State = state
--			return nil
--		}
--	}
--	return errors.New("container not found")
--}
--
--// Stop stops the server.
--func (s *DockerServer) Stop() {
--	if s.listener != nil {
--		s.listener.Close()
--	}
--}
--
--// URL returns the HTTP URL of the server.
--func (s *DockerServer) URL() string {
--	if s.listener == nil {
--		return ""
--	}
--	return "http://" + s.listener.Addr().String() + "/"
--}
--
--// ServeHTTP handles HTTP requests sent to the server.
--func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
--	s.handlerMutex.RLock()
--	defer s.handlerMutex.RUnlock()
--	if handler, ok := s.customHandlers[r.URL.Path]; ok {
--		handler.ServeHTTP(w, r)
--		return
--	}
--	s.mux.ServeHTTP(w, r)
--	if s.hook != nil {
--		s.hook(r)
--	}
--}
--
--// DefaultHandler returns the default http.Handler mux; it allows customHandlers to
--// fall back to the default behavior when desired.
--func (s *DockerServer) DefaultHandler() http.Handler {
--	return s.mux
--}
--
--func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
--	return func(w http.ResponseWriter, r *http.Request) {
--		for errorID, urlRegexp := range s.failures {
--			matched, err := regexp.MatchString(urlRegexp, r.URL.Path)
--			if err != nil {
--				http.Error(w, err.Error(), http.StatusBadRequest)
--				return
--			}
--			if !matched {
--				continue
--			}
--			http.Error(w, errorID, http.StatusBadRequest)
--			return
--		}
--		f(w, r)
--	}
--}
--
--func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) {
--	all := r.URL.Query().Get("all")
--	s.cMut.RLock()
--	result := make([]docker.APIContainers, len(s.containers))
--	for i, container := range s.containers {
--		if all == "1" || container.State.Running {
--			result[i] = docker.APIContainers{
--				ID:      container.ID,
--				Image:   container.Image,
--				Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")),
--				Created: container.Created.Unix(),
--				Status:  container.State.String(),
--				Ports:   container.NetworkSettings.PortMappingAPI(),
--				Names:   []string{fmt.Sprintf("/%s", container.Name)},
--			}
--		}
--	}
--	s.cMut.RUnlock()
--	w.Header().Set("Content-Type", "application/json")
--	w.WriteHeader(http.StatusOK)
--	json.NewEncoder(w).Encode(result)
--}
--
--func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) {
--	s.cMut.RLock()
--	result := make([]docker.APIImages, len(s.images))
--	for i, image := range s.images {
--		result[i] = docker.APIImages{
--			ID:      image.ID,
--			Created: image.Created.Unix(),
--		}
--		for tag, id := range s.imgIDs {
--			if id == image.ID {
--				result[i].RepoTags = append(result[i].RepoTags, tag)
--			}
--		}
--	}
--	s.cMut.RUnlock()
--	w.Header().Set("Content-Type", "application/json")
--	w.WriteHeader(http.StatusOK)
--	json.NewEncoder(w).Encode(result)
--}
--
--func (s *DockerServer) findImage(id string) (string, error) {
--	s.iMut.RLock()
--	defer s.iMut.RUnlock()
--	image, ok := s.imgIDs[id]
--	if ok {
--		return image, nil
--	}
--	image, _, err := s.findImageByID(id)
--	return image, err
--}
--
--func (s *DockerServer) findImageByID(id string) (string, int, error) {
--	s.iMut.RLock()
--	defer s.iMut.RUnlock()
--	for i, image := range s.images {
--		if image.ID == id {
--			return image.ID, i, nil
--		}
--	}
--	return "", -1, errors.New("No such image")
--}
--
--func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) {
--	var config docker.Config
--	defer r.Body.Close()
--	err := json.NewDecoder(r.Body).Decode(&config)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusBadRequest)
--		return
--	}
--	if _, err := s.findImage(config.Image); err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	w.WriteHeader(http.StatusCreated)
--	ports := map[docker.Port][]docker.PortBinding{}
--	for port := range config.ExposedPorts {
--		ports[port] = []docker.PortBinding{{
--			HostIP:   "0.0.0.0",
--			HostPort: strconv.Itoa(mathrand.Int() % 65536),
--		}}
--	}
--
--	// The container may not have a cmd when it is created from a Dockerfile.
--	var path string
--	var args []string
--	if len(config.Cmd) == 1 {
--		path = config.Cmd[0]
--	} else if len(config.Cmd) > 1 {
--		path = config.Cmd[0]
--		args = config.Cmd[1:]
--	}
--
--	container := docker.Container{
--		Name:    r.URL.Query().Get("name"),
--		ID:      s.generateID(),
--		Created: time.Now(),
--		Path:    path,
--		Args:    args,
--		Config:  &config,
--		State: docker.State{
--			Running:   false,
--			Pid:       mathrand.Int() % 50000,
--			ExitCode:  0,
--			StartedAt: time.Now(),
--		},
--		Image: config.Image,
--		NetworkSettings: &docker.NetworkSettings{
--			IPAddress:   fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2),
--			IPPrefixLen: 24,
--			Gateway:     "172.16.42.1",
--			Bridge:      "docker0",
--			Ports:       ports,
--		},
--	}
--	s.cMut.Lock()
--	s.containers = append(s.containers, &container)
--	s.cMut.Unlock()
--	s.notify(&container)
--	var c = struct{ ID string }{ID: container.ID}
--	json.NewEncoder(w).Encode(c)
--}
--
--func (s *DockerServer) generateID() string {
--	var buf [16]byte
--	rand.Read(buf[:])
--	return fmt.Sprintf("%x", buf)
--}
--
--func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	w.Header().Set("Content-Type", "application/json")
--	w.WriteHeader(http.StatusOK)
--	json.NewEncoder(w).Encode(container)
--}
--
--func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	if !container.State.Running {
--		w.WriteHeader(http.StatusInternalServerError)
--		fmt.Fprintf(w, "Container %s is not running", id)
--		return
--	}
--	w.Header().Set("Content-Type", "application/json")
--	w.WriteHeader(http.StatusOK)
--	result := docker.TopResult{
--		Titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
--		Processes: [][]string{
--			{"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")},
--		},
--	}
--	json.NewEncoder(w).Encode(result)
--}
--
--func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	s.cMut.Lock()
--	defer s.cMut.Unlock()
--	if container.State.Running {
--		http.Error(w, "Container already running", http.StatusBadRequest)
--		return
--	}
--	container.State.Running = true
--	s.notify(container)
--}
--
--func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	s.cMut.Lock()
--	defer s.cMut.Unlock()
--	if !container.State.Running {
--		http.Error(w, "Container not running", http.StatusBadRequest)
--		return
--	}
--	w.WriteHeader(http.StatusNoContent)
--	container.State.Running = false
--	s.notify(container)
--}
--
--func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	s.cMut.Lock()
--	defer s.cMut.Unlock()
--	if container.State.Paused {
--		http.Error(w, "Container already paused", http.StatusBadRequest)
--		return
--	}
--	w.WriteHeader(http.StatusNoContent)
--	container.State.Paused = true
--}
--
--func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	s.cMut.Lock()
--	defer s.cMut.Unlock()
--	if !container.State.Paused {
--		http.Error(w, "Container not paused", http.StatusBadRequest)
--		return
--	}
--	w.WriteHeader(http.StatusNoContent)
--	container.State.Paused = false
--}
--
--func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	outStream := newStdWriter(w, stdout)
--	fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
--	if container.State.Running {
--		fmt.Fprintf(outStream, "Container %q is running\n", container.ID)
--	} else {
--		fmt.Fprintf(outStream, "Container %q is not running\n", container.ID)
--	}
--	fmt.Fprintln(outStream, "What happened?")
--	fmt.Fprintln(outStream, "Something happened")
--}
--
--func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	for {
--		time.Sleep(1e6)
--		s.cMut.RLock()
--		if !container.State.Running {
--			s.cMut.RUnlock()
--			break
--		}
--		s.cMut.RUnlock()
--	}
--	result := map[string]int{"StatusCode": container.State.ExitCode}
--	json.NewEncoder(w).Encode(result)
--}
--
--func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	_, index, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	if s.containers[index].State.Running {
--		msg := "Error: API error (406): Impossible to remove a running container, please stop it first"
--		http.Error(w, msg, http.StatusInternalServerError)
--		return
--	}
--	w.WriteHeader(http.StatusNoContent)
--	s.cMut.Lock()
--	defer s.cMut.Unlock()
--	s.containers[index] = s.containers[len(s.containers)-1]
--	s.containers = s.containers[:len(s.containers)-1]
--}
--
--func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) {
--	id := r.URL.Query().Get("container")
--	container, _, err := s.findContainer(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	var config *docker.Config
--	runConfig := r.URL.Query().Get("run")
--	if runConfig != "" {
--		config = new(docker.Config)
--		err = json.Unmarshal([]byte(runConfig), config)
--		if err != nil {
--			http.Error(w, err.Error(), http.StatusBadRequest)
--			return
--		}
--	}
--	w.WriteHeader(http.StatusOK)
--	image := docker.Image{
--		ID:        "img-" + container.ID,
--		Parent:    container.Image,
--		Container: container.ID,
--		Comment:   r.URL.Query().Get("m"),
--		Author:    r.URL.Query().Get("author"),
--		Config:    config,
--	}
--	repository := r.URL.Query().Get("repo")
--	s.iMut.Lock()
--	s.images = append(s.images, image)
--	if repository != "" {
--		s.imgIDs[repository] = image.ID
--	}
--	s.iMut.Unlock()
--	fmt.Fprintf(w, `{"ID":%q}`, image.ID)
--}
--
--func (s *DockerServer) findContainer(id string) (*docker.Container, int, error) {
--	s.cMut.RLock()
--	defer s.cMut.RUnlock()
--	for i, container := range s.containers {
--		if container.ID == id {
--			return container, i, nil
--		}
--	}
--	return nil, -1, errors.New("No such container")
--}
--
--func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) {
--	if ct := r.Header.Get("Content-Type"); ct == "application/tar" {
--		gotDockerFile := false
--		tr := tar.NewReader(r.Body)
--		for {
--			header, err := tr.Next()
--			if err != nil {
--				break
--			}
--			if header.Name == "Dockerfile" {
--				gotDockerFile = true
--			}
--		}
--		if !gotDockerFile {
--			w.WriteHeader(http.StatusBadRequest)
--			w.Write([]byte("miss Dockerfile"))
--			return
--		}
--	}
--	// We do not actually use the Dockerfile to build an image, because this is a fake Docker daemon.
--	image := docker.Image{
--		ID:      s.generateID(),
--		Created: time.Now(),
--	}
--
--	query := r.URL.Query()
--	repository := image.ID
--	if t := query.Get("t"); t != "" {
--		repository = t
--	}
--	s.iMut.Lock()
--	s.images = append(s.images, image)
--	s.imgIDs[repository] = image.ID
--	s.iMut.Unlock()
--	w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID)))
--}
--
--func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) {
--	repository := r.URL.Query().Get("fromImage")
--	image := docker.Image{
--		ID: s.generateID(),
--	}
--	s.iMut.Lock()
--	s.images = append(s.images, image)
--	if repository != "" {
--		s.imgIDs[repository] = image.ID
--	}
--	s.iMut.Unlock()
--}
--
--func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) {
--	name := mux.Vars(r)["name"]
--	s.iMut.RLock()
--	if _, ok := s.imgIDs[name]; !ok {
--		s.iMut.RUnlock()
--		http.Error(w, "No such image", http.StatusNotFound)
--		return
--	}
--	s.iMut.RUnlock()
--	fmt.Fprintln(w, "Pushing...")
--	fmt.Fprintln(w, "Pushed")
--}
--
--func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) {
--	name := mux.Vars(r)["name"]
--	s.iMut.RLock()
--	if _, ok := s.imgIDs[name]; !ok {
--		s.iMut.RUnlock()
--		http.Error(w, "No such image", http.StatusNotFound)
--		return
--	}
--	s.iMut.RUnlock()
--	s.iMut.Lock()
--	defer s.iMut.Unlock()
--	newRepo := r.URL.Query().Get("repo")
--	s.imgIDs[newRepo] = s.imgIDs[name]
--	w.WriteHeader(http.StatusCreated)
--}
--
--func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	s.iMut.RLock()
--	var tag string
--	if img, ok := s.imgIDs[id]; ok {
--		id, tag = img, id
--	}
--	var tags []string
--	for tag, taggedID := range s.imgIDs {
--		if taggedID == id {
--			tags = append(tags, tag)
--		}
--	}
--	s.iMut.RUnlock()
--	_, index, err := s.findImageByID(id)
--	if err != nil {
--		http.Error(w, err.Error(), http.StatusNotFound)
--		return
--	}
--	w.WriteHeader(http.StatusNoContent)
--	s.iMut.Lock()
--	defer s.iMut.Unlock()
--	if len(tags) < 2 {
--		s.images[index] = s.images[len(s.images)-1]
--		s.images = s.images[:len(s.images)-1]
--	}
--	if tag != "" {
--		delete(s.imgIDs, tag)
--	}
--}
--
--func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) {
--	name := mux.Vars(r)["name"]
--	if id, ok := s.imgIDs[name]; ok {
--		s.iMut.Lock()
--		defer s.iMut.Unlock()
--
--		for _, img := range s.images {
--			if img.ID == id {
--				w.Header().Set("Content-Type", "application/json")
--				w.WriteHeader(http.StatusOK)
--				json.NewEncoder(w).Encode(img)
--				return
--			}
--		}
--	}
--	http.Error(w, "not found", http.StatusNotFound)
--}
--
--func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) {
--	w.Header().Set("Content-Type", "application/json")
--	var events [][]byte
--	count := mathrand.Intn(20)
--	for i := 0; i < count; i++ {
--		data, err := json.Marshal(s.generateEvent())
--		if err != nil {
--			w.WriteHeader(http.StatusInternalServerError)
--			return
--		}
--		events = append(events, data)
--	}
--	w.WriteHeader(http.StatusOK)
--	for _, d := range events {
--		fmt.Fprintln(w, d)
--		time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond)
--	}
--}
--
--func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) {
--	w.WriteHeader(http.StatusOK)
--}
--
--func (s *DockerServer) generateEvent() *docker.APIEvents {
--	var eventType string
--	switch mathrand.Intn(4) {
--	case 0:
--		eventType = "create"
--	case 1:
--		eventType = "start"
--	case 2:
--		eventType = "stop"
--	case 3:
--		eventType = "destroy"
--	}
--	return &docker.APIEvents{
--		ID:     s.generateID(),
--		Status: eventType,
--		From:   "mybase:latest",
--		Time:   time.Now().Unix(),
--	}
--}
--
--func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) {
--	w.WriteHeader(http.StatusOK)
--}
--
--func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) {
--	w.Header().Set("Content-Type", "application/tar")
--	w.WriteHeader(http.StatusOK)
--
--}
--
--func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) {
--	w.Header().Set("Content-Type", "application/json")
--	w.WriteHeader(http.StatusOK)
--	exec := docker.Exec{ID: "id-exec-created-by-test"}
--	s.execs = append(s.execs, &exec)
--	json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID})
--
--}
--
--func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request) {
--	id := mux.Vars(r)["id"]
--	for _, exec := range s.execs {
--		if exec.ID == id {
--			w.WriteHeader(http.StatusOK)
--			return
--		}
--	}
--	w.WriteHeader(http.StatusNotFound)
--}
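For readers skimming the removal above: DockerServer is an ordinary net/http handler, so the fake daemon can be driven with any HTTP client. The sketch below is illustrative only and not part of this patch; it exercises the NewServer, URL, PrepareFailure, ResetFailure and Stop entry points shown above over plain HTTP, against the /_ping and /containers/json routes registered in buildMuxer.

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"

    dockertest "github.com/fsouza/go-dockerclient/testing"
)

func main() {
    // Start the fake Docker daemon on a random local port.
    server, err := dockertest.NewServer("127.0.0.1:0", nil, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer server.Stop()

    // Any HTTP client can talk to it; /_ping answers 200 with an empty body.
    resp, err := http.Get(server.URL() + "_ping")
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
    fmt.Println("ping status:", resp.StatusCode)

    // PrepareFailure makes every request whose path matches the regexp fail
    // with 400 and the given id as the message, until ResetFailure is called.
    server.PrepareFailure("simulated-error", "containers/json")
    resp, err = http.Get(server.URL() + "containers/json?all=1")
    if err != nil {
        log.Fatal(err)
    }
    body, _ := ioutil.ReadAll(resp.Body)
    resp.Body.Close()
    fmt.Println(resp.StatusCode, string(body)) // 400 simulated-error
    server.ResetFailure("simulated-error")
}

Note that URL() returns an address with a trailing slash, so request paths are appended without a leading slash.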
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
-deleted file mode 100644
-index d8763cb..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
-+++ /dev/null
-@@ -1,1091 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package testing
--
--import (
--	"encoding/json"
--	"fmt"
--	"math/rand"
--	"net"
--	"net/http"
--	"net/http/httptest"
--	"os"
--	"reflect"
--	"strings"
--	"testing"
--	"time"
--
--	"github.com/fsouza/go-dockerclient"
--)
--
--func TestNewServer(t *testing.T) {
--	server, err := NewServer("127.0.0.1:0", nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer server.listener.Close()
--	conn, err := net.Dial("tcp", server.listener.Addr().String())
--	if err != nil {
--		t.Fatal(err)
--	}
--	conn.Close()
--}
--
--func TestServerStop(t *testing.T) {
--	server, err := NewServer("127.0.0.1:0", nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	server.Stop()
--	_, err = net.Dial("tcp", server.listener.Addr().String())
--	if err == nil {
--		t.Error("Unexpected <nil> error when dialing to stopped server")
--	}
--}
--
--func TestServerStopNoListener(t *testing.T) {
--	server := DockerServer{}
--	server.Stop()
--}
--
--func TestServerURL(t *testing.T) {
--	server, err := NewServer("127.0.0.1:0", nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer server.Stop()
--	url := server.URL()
--	if expected := "http://" + server.listener.Addr().String() + "/"; url != expected {
--		t.Errorf("DockerServer.URL(): Want %q. Got %q.", expected, url)
--	}
--}
--
--func TestServerURLNoListener(t *testing.T) {
--	server := DockerServer{}
--	url := server.URL()
--	if url != "" {
--		t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url)
--	}
--}
--
--func TestHandleWithHook(t *testing.T) {
--	var called bool
--	server, _ := NewServer("127.0.0.1:0", nil, func(*http.Request) { called = true })
--	defer server.Stop()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if !called {
--		t.Error("ServeHTTP did not call the hook function.")
--	}
--}
--
--func TestCustomHandler(t *testing.T) {
--	var called bool
--	server, _ := NewServer("127.0.0.1:0", nil, nil)
--	addContainers(server, 2)
--	server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		called = true
--		fmt.Fprint(w, "Hello world")
--	}))
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if !called {
--		t.Error("Did not call the custom handler")
--	}
--	if got := recorder.Body.String(); got != "Hello world" {
--		t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got)
--	}
--}
--
--func TestListContainers(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 2)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := make([]docker.APIContainers, 2)
--	for i, container := range server.containers {
--		expected[i] = docker.APIContainers{
--			ID:      container.ID,
--			Image:   container.Image,
--			Command: strings.Join(container.Config.Cmd, " "),
--			Created: container.Created.Unix(),
--			Status:  container.State.String(),
--			Ports:   container.NetworkSettings.PortMappingAPI(),
--			Names:   []string{"/" + container.Name},
--		}
--	}
--	var got []docker.APIContainers
--	err := json.NewDecoder(recorder.Body).Decode(&got)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ListContainers. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestListRunningContainers(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 2)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=0", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	var got []docker.APIContainers
--	err := json.NewDecoder(recorder.Body).Decode(&got)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if len(got) == 0 {
--		t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got))
--	}
--}
--
--func TestCreateContainer(t *testing.T) {
--	server := DockerServer{}
--	server.imgIDs = map[string]string{"base": "a1234"}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true,
--"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}`
--	request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusCreated {
--		t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code)
--	}
--	var returned docker.Container
--	err := json.NewDecoder(recorder.Body).Decode(&returned)
--	if err != nil {
--		t.Fatal(err)
--	}
--	stored := server.containers[0]
--	if returned.ID != stored.ID {
--		t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID)
--	}
--	if stored.State.Running {
--		t.Errorf("CreateContainer should not set container to running state.")
--	}
--}
--
--func TestCreateContainerWithNotifyChannel(t *testing.T) {
--	ch := make(chan *docker.Container, 1)
--	server := DockerServer{}
--	server.imgIDs = map[string]string{"base": "a1234"}
--	server.cChan = ch
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true,
--"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}`
--	request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusCreated {
--		t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code)
--	}
--	if notified := <-ch; notified != server.containers[0] {
--		t.Errorf("CreateContainer: did not notify the proper container. Want %q. Got %q.", server.containers[0].ID, notified.ID)
--	}
--}
--
--func TestCreateContainerInvalidBody(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---"))
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestCreateContainerImageNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true,
--"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"],
--"Image":"base", "Volumes":{}, "VolumesFrom":""}`
--	request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestCommitContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 2)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID)
--	if got := recorder.Body.String(); got != expected {
--		t.Errorf("CommitContainer: wrong response body. Want %q. Got %q.", expected, got)
--	}
--}
--
--func TestCommitContainerComplete(t *testing.T) {
--	server := DockerServer{}
--	server.imgIDs = make(map[string]string)
--	addContainers(&server, 2)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers"
--	queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}`
--	request, _ := http.NewRequest("POST", "/commit?"+queryString, nil)
--	server.ServeHTTP(recorder, request)
--	image := server.images[0]
--	if image.Parent != server.containers[0].Image {
--		t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent)
--	}
--	if image.Container != server.containers[0].ID {
--		t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container)
--	}
--	message := "saving"
--	if image.Comment != message {
--		t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment)
--	}
--	author := "developers"
--	if image.Author != author {
--		t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author)
--	}
--	if id := server.imgIDs["tsuru/python"]; id != image.ID {
--		t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id)
--	}
--	portSpecs := []string{"22"}
--	if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) {
--		t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs)
--	}
--	cmd := []string{"cat", "/world"}
--	if !reflect.DeepEqual(image.Config.Cmd, cmd) {
--		t.Errorf("CommitContainer: wrong cmd in config. Want %#v. Got %#v.", cmd, image.Config.Cmd)
--	}
--}
--
--func TestCommitContainerInvalidRun(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestCommitContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/commit?container=abc123", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestInspectContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 2)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID)
--	request, _ := http.NewRequest("GET", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := server.containers[0]
--	var got docker.Container
--	err := json.NewDecoder(recorder.Body).Decode(&got)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(got.Config, expected.Config) {
--		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
--	}
--	if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) {
--		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
--	}
--	got.State.StartedAt = expected.State.StartedAt
--	got.State.FinishedAt = expected.State.FinishedAt
--	got.Config = expected.Config
--	got.Created = expected.Created
--	got.NetworkSettings = expected.NetworkSettings
--	if !reflect.DeepEqual(got, *expected) {
--		t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
--	}
--}
--
--func TestInspectContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/abc123/json", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("InspectContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestTopContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID)
--	request, _ := http.NewRequest("GET", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	var got docker.TopResult
--	err := json.NewDecoder(recorder.Body).Decode(&got)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(got.Titles, []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}) {
--		t.Fatalf("TopContainer: Unexpected titles, got: %#v", got.Titles)
--	}
--	if len(got.Processes) != 1 {
--		t.Fatalf("TopContainer: Unexpected process len, got: %d", len(got.Processes))
--	}
--	if got.Processes[0][len(got.Processes[0])-1] != "ls -la .." {
--		t.Fatalf("TopContainer: Unexpected command name, got: %s", got.Processes[0][len(got.Processes[0])-1])
--	}
--}
--
--func TestTopContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/xyz/top", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestTopContainerStopped(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID)
--	request, _ := http.NewRequest("GET", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusInternalServerError {
--		t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code)
--	}
--}
--
--func TestStartContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	if !server.containers[0].State.Running {
--		t.Error("StartContainer: did not set the container to running state")
--	}
--}
--
--func TestStartContainerWithNotifyChannel(t *testing.T) {
--	ch := make(chan *docker.Container, 1)
--	server := DockerServer{}
--	server.cChan = ch
--	addContainers(&server, 1)
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/start", server.containers[1].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	if notified := <-ch; notified != server.containers[1] {
--		t.Errorf("StartContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID)
--	}
--}
--
--func TestStartContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/start"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestStartContainerAlreadyRunning(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestStopContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if server.containers[0].State.Running {
--		t.Error("StopContainer: did not stop the container")
--	}
--}
--
--func TestKillContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/kill", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("KillContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if server.containers[0].State.Running {
--		t.Error("KillContainer: did not stop the container")
--	}
--}
--
--func TestStopContainerWithNotifyChannel(t *testing.T) {
--	ch := make(chan *docker.Container, 1)
--	server := DockerServer{}
--	server.cChan = ch
--	addContainers(&server, 1)
--	addContainers(&server, 1)
--	server.containers[1].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/stop", server.containers[1].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if notified := <-ch; notified != server.containers[1] {
--		t.Errorf("StopContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID)
--	}
--}
--
--func TestStopContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/stop"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestStopContainerNotRunning(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestPauseContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if !server.containers[0].State.Paused {
--		t.Error("PauseContainer: did not pause the container")
--	}
--}
--
--func TestPauseContainerAlreadyPaused(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Paused = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestPauseContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/pause"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestUnpauseContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Paused = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if server.containers[0].State.Paused {
--		t.Error("UnpauseContainer: did not unpause the container")
--	}
--}
--
--func TestUnpauseContainerNotPaused(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--}
--
--func TestUnpauseContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/unpause"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestWaitContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	go func() {
--		server.cMut.Lock()
--		server.containers[0].State.Running = false
--		server.cMut.Unlock()
--	}()
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := `{"StatusCode":0}` + "\n"
--	if body := recorder.Body.String(); body != expected {
--		t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body)
--	}
--}
--
--func TestWaitContainerStatus(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	server.containers[0].State.ExitCode = 63
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := `{"StatusCode":63}` + "\n"
--	if body := recorder.Body.String(); body != expected {
--		t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body)
--	}
--}
--
--func TestWaitContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/wait"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestAttachContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID)
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	lines := []string{
--		fmt.Sprintf("\x01\x00\x00\x00\x03\x00\x00\x00Container %q is running", server.containers[0].ID),
--		"What happened?",
--		"Something happened",
--	}
--	expected := strings.Join(lines, "\n") + "\n"
--	if body := recorder.Body.String(); body == expected {
--		t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body)
--	}
--}
--
--func TestAttachContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := "/containers/abc123/attach?logs=1"
--	request, _ := http.NewRequest("POST", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("AttachContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestRemoveContainer(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s", server.containers[0].ID)
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if len(server.containers) > 0 {
--		t.Error("RemoveContainer: did not remove the container.")
--	}
--}
--
--func TestRemoveContainerNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/abc123")
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestRemoveContainerRunning(t *testing.T) {
--	server := DockerServer{}
--	addContainers(&server, 1)
--	server.containers[0].State.Running = true
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/containers/%s", server.containers[0].ID)
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusInternalServerError {
--		t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code)
--	}
--	if len(server.containers) < 1 {
--		t.Error("RemoveContainer: should not remove the container.")
--	}
--}
--
--func TestPullImage(t *testing.T) {
--	server := DockerServer{imgIDs: make(map[string]string)}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	if len(server.images) != 1 {
--		t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images))
--	}
--	if _, ok := server.imgIDs["base"]; !ok {
--		t.Error("PullImage: Repository should not be empty.")
--	}
--}
--
--func TestPushImage(t *testing.T) {
--	server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--}
--
--func TestPushImageNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func TestTagImage(t *testing.T) {
--	server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusCreated {
--		t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code)
--	}
--	if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python"] {
--		t.Errorf("TagImage: did not tag the image")
--	}
--}
--
--func TestTagImageNotFound(t *testing.T) {
--	server := DockerServer{}
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/images/tsuru/python/tag", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNotFound {
--		t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
--	}
--}
--
--func addContainers(server *DockerServer, n int) {
--	server.cMut.Lock()
--	defer server.cMut.Unlock()
--	for i := 0; i < n; i++ {
--		date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour)
--		container := docker.Container{
--			Name:    fmt.Sprintf("%x", rand.Int()%10000),
--			ID:      fmt.Sprintf("%x", rand.Int()%10000),
--			Created: date,
--			Path:    "ls",
--			Args:    []string{"-la", ".."},
--			Config: &docker.Config{
--				Hostname:     fmt.Sprintf("docker-%d", i),
--				AttachStdout: true,
--				AttachStderr: true,
--				Env:          []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)},
--				Cmd:          []string{"ls", "-la", ".."},
--				Image:        "base",
--			},
--			State: docker.State{
--				Running:   false,
--				Pid:       400 + i,
--				ExitCode:  0,
--				StartedAt: date,
--			},
--			Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
--			NetworkSettings: &docker.NetworkSettings{
--				IPAddress:   fmt.Sprintf("10.10.10.%d", i+2),
--				IPPrefixLen: 24,
--				Gateway:     "10.10.10.1",
--				Bridge:      "docker0",
--				PortMapping: map[string]docker.PortMapping{
--					"Tcp": {"8888": fmt.Sprintf("%d", 49600+i)},
--				},
--			},
--			ResolvConfPath: "/etc/resolv.conf",
--		}
--		server.containers = append(server.containers, &container)
--	}
--}
--
--func addImages(server *DockerServer, n int, repo bool) {
--	server.iMut.Lock()
--	defer server.iMut.Unlock()
--	if server.imgIDs == nil {
--		server.imgIDs = make(map[string]string)
--	}
--	for i := 0; i < n; i++ {
--		date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour)
--		image := docker.Image{
--			ID:      fmt.Sprintf("%x", rand.Int()%10000),
--			Created: date,
--		}
--		server.images = append(server.images, image)
--		if repo {
--			repo := "docker/python-" + image.ID
--			server.imgIDs[repo] = image.ID
--		}
--	}
--}
--
--func TestListImages(t *testing.T) {
--	server := DockerServer{}
--	addImages(&server, 2, true)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/images/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--	expected := make([]docker.APIImages, 2)
--	for i, image := range server.images {
--		expected[i] = docker.APIImages{
--			ID:       image.ID,
--			Created:  image.Created.Unix(),
--			RepoTags: []string{"docker/python-" + image.ID},
--		}
--	}
--	var got []docker.APIImages
--	err := json.NewDecoder(recorder.Body).Decode(&got)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(got, expected) {
--		t.Errorf("ListImages. Want %#v. Got %#v.", expected, got)
--	}
--}
--
--func TestRemoveImage(t *testing.T) {
--	server := DockerServer{}
--	addImages(&server, 1, false)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/images/%s", server.images[0].ID)
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if len(server.images) > 0 {
--		t.Error("RemoveImage: did not remove the image.")
--	}
--}
--
--func TestRemoveImageByName(t *testing.T) {
--	server := DockerServer{}
--	addImages(&server, 1, true)
--	server.buildMuxer()
--	recorder := httptest.NewRecorder()
--	imgName := "docker/python-" + server.images[0].ID
--	path := "/images/" + imgName
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusNoContent {
--		t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
--	}
--	if len(server.images) > 0 {
--		t.Error("RemoveImage: did not remove the image.")
--	}
--	_, ok := server.imgIDs[imgName]
--	if ok {
--		t.Error("RemoveImage: did not remove image tag name.")
--	}
--}
--
--func TestRemoveImageWithMultipleTags(t *testing.T) {
--	server := DockerServer{}
--	addImages(&server, 1, true)
--	server.buildMuxer()
--	imgID := server.images[0].ID
--	imgName := "docker/python-" + imgID
--	server.imgIDs["docker/python-wat"] = imgID
--	recorder := httptest.NewRecorder()
--	path := fmt.Sprintf("/images/%s", imgName)
--	request, _ := http.NewRequest("DELETE", path, nil)
--	server.ServeHTTP(recorder, request)
--	_, ok := server.imgIDs[imgName]
--	if ok {
--		t.Error("RemoveImage: did not remove image tag name.")
--	}
--	id, ok := server.imgIDs["docker/python-wat"]
--	if !ok {
--		t.Error("RemoveImage: removed the wrong tag name.")
--	}
--	if id != imgID {
--		t.Error("RemoveImage: disassociated the wrong ID from the tag")
--	}
--	if len(server.images) < 1 {
--		t.Fatal("RemoveImage: removed the image, but should keep it")
--	}
--	if server.images[0].ID != imgID {
--		t.Error("RemoveImage: changed the ID of the image!")
--	}
--}
--
--func TestPrepareFailure(t *testing.T) {
--	server := DockerServer{failures: make(map[string]string)}
--	server.buildMuxer()
--	errorID := "my_error"
--	server.PrepareFailure(errorID, "containers/json")
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--	if recorder.Body.String() != errorID+"\n" {
--		t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String())
--	}
--}
--
--func TestRemoveFailure(t *testing.T) {
--	server := DockerServer{failures: make(map[string]string)}
--	server.buildMuxer()
--	errorID := "my_error"
--	server.PrepareFailure(errorID, "containers/json")
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusBadRequest {
--		t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
--	}
--	server.ResetFailure(errorID)
--	recorder = httptest.NewRecorder()
--	request, _ = http.NewRequest("GET", "/containers/json?all=1", nil)
--	server.ServeHTTP(recorder, request)
--	if recorder.Code != http.StatusOK {
--		t.Errorf("RemoveFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
--	}
--}
--
--func TestMutateContainer(t *testing.T) {
--	server := DockerServer{failures: make(map[string]string)}
--	server.buildMuxer()
--	server.containers = append(server.containers, &docker.Container{ID: "id123"})
--	state := docker.State{Running: false, ExitCode: 1}
--	err := server.MutateContainer("id123", state)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(server.containers[0].State, state) {
--		t.Errorf("Wrong state after mutation.\nWant %#v.\nGot %#v.",
--			state, server.containers[0].State)
--	}
--}
--
--func TestMutateContainerNotFound(t *testing.T) {
--	server := DockerServer{failures: make(map[string]string)}
--	server.buildMuxer()
--	state := docker.State{Running: false, ExitCode: 1}
--	err := server.MutateContainer("id123", state)
--	if err == nil {
--		t.Error("Unexpected <nil> error")
--	}
--	if err.Error() != "container not found" {
--		t.Errorf("wrong error message. Want %q. Got %q.", "container not found", err)
--	}
--}
--
--func TestBuildImageWithContentTypeTar(t *testing.T) {
--	server := DockerServer{imgIDs: make(map[string]string)}
--	imageName := "teste"
--	recorder := httptest.NewRecorder()
--	tarFile, err := os.Open("data/dockerfile.tar")
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer tarFile.Close()
--	request, _ := http.NewRequest("POST", "/build?t=teste", tarFile)
--	request.Header.Add("Content-Type", "application/tar")
--	server.buildImage(recorder, request)
--	if recorder.Body.String() == "miss Dockerfile" {
--		t.Errorf("BuildImage: miss Dockerfile")
--		return
--	}
--	if _, ok := server.imgIDs[imageName]; ok == false {
--		t.Errorf("BuildImage: image %s not built", imageName)
--	}
--}
--
--func TestBuildImageWithRemoteDockerfile(t *testing.T) {
--	server := DockerServer{imgIDs: make(map[string]string)}
--	imageName := "teste"
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil)
--	server.buildImage(recorder, request)
--	if _, ok := server.imgIDs[imageName]; ok == false {
--		t.Errorf("BuildImage: image %s not built", imageName)
--	}
--}
--
--func TestPing(t *testing.T) {
--	server := DockerServer{}
--	recorder := httptest.NewRecorder()
--	request, _ := http.NewRequest("GET", "/_ping", nil)
--	server.pingDocker(recorder, request)
--	if recorder.Body.String() != "" {
--		t.Errorf("Ping: Unexpected body: %s", recorder.Body.String())
--	}
--	if recorder.Code != http.StatusOK {
--		t.Errorf("Ping: Expected code %d, got: %d", http.StatusOK, recorder.Code)
--	}
--}
--
--func TestDefaultHandler(t *testing.T) {
--	server, err := NewServer("127.0.0.1:0", nil, nil)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer server.listener.Close()
--	if server.mux != server.DefaultHandler() {
--		t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler())
--	}
--}
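The DefaultHandler accessor removed above exists so that a handler registered via CustomHandler can delegate back to the stock mux. A minimal sketch of that composition, assuming the same testing package API (illustrative, not from the patch):

package main

import (
    "log"
    "net/http"

    dockertest "github.com/fsouza/go-dockerclient/testing"
)

func main() {
    server, err := dockertest.NewServer("127.0.0.1:0", nil, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer server.Stop()

    // Intercept container listing: log the call, then fall back to the
    // default mux so the canned JSON response is still produced.
    fallback := server.DefaultHandler()
    server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        log.Printf("intercepted %s %s", r.Method, r.URL)
        fallback.ServeHTTP(w, r)
    }))

    resp, err := http.Get(server.URL() + "containers/json?all=1")
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
    log.Println("status:", resp.StatusCode) // 200 from the default handler
}

ServeHTTP matches custom handlers on the exact r.URL.Path, so the query string does not affect dispatch.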
-diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/writer.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/writer.go
-deleted file mode 100644
-index 4ef857a..0000000
---- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/writer.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--// Copyright 2014 go-dockerclient authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package testing
--
--import (
--	"encoding/binary"
--	"errors"
--	"io"
--)
--
--type stdType [8]byte
--
--var (
--	stdin  = stdType{0: 0}
--	stdout = stdType{0: 1}
--	stderr = stdType{0: 2}
--)
--
--type stdWriter struct {
--	io.Writer
--	prefix  stdType
--	sizeBuf []byte
--}
--
--func (w *stdWriter) Write(buf []byte) (n int, err error) {
--	if w == nil || w.Writer == nil {
--		return 0, errors.New("Writer not instantiated")
--	}
--	binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
--	buf = append(w.prefix[:], buf...)
--
--	n, err = w.Writer.Write(buf)
--	return n - 8, err
--}
--
--func newStdWriter(w io.Writer, t stdType) *stdWriter {
--	if len(t) != 8 {
--		return nil
--	}
--	return &stdWriter{Writer: w, prefix: t, sizeBuf: make([]byte, 4)}
--}
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/.gitignore b/Godeps/_workspace/src/github.com/ghodss/yaml/.gitignore
-deleted file mode 100644
-index e256a31..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/.gitignore
-+++ /dev/null
-@@ -1,20 +0,0 @@
--# OSX leaves these everywhere on SMB shares
--._*
--
--# Eclipse files
--.classpath
--.project
--.settings/**
--
--# Emacs save files
--*~
--
--# Vim-related files
--[._]*.s[a-w][a-z]
--[._]s[a-w][a-z]
--*.un~
--Session.vim
--.netrwhist
--
--# Go test binaries
--*.test
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/LICENSE b/Godeps/_workspace/src/github.com/ghodss/yaml/LICENSE
-deleted file mode 100644
-index 7805d36..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/LICENSE
-+++ /dev/null
-@@ -1,50 +0,0 @@
--The MIT License (MIT)
--
--Copyright (c) 2014 Sam Ghods
--
--Permission is hereby granted, free of charge, to any person obtaining a copy
--of this software and associated documentation files (the "Software"), to deal
--in the Software without restriction, including without limitation the rights
--to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
--copies of the Software, and to permit persons to whom the Software is
--furnished to do so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in all
--copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
--SOFTWARE.
--
--
--Copyright (c) 2012 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/README.md b/Godeps/_workspace/src/github.com/ghodss/yaml/README.md
-deleted file mode 100644
-index 2d60309..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/README.md
-+++ /dev/null
-@@ -1,114 +0,0 @@
--# YAML marshaling and unmarshaling support for Go
--
--## Introduction
--
--A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. 
--
--In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
--
--## Compatibility
--
--This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
--
--## Caveats
--
--**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
--
--```
--BAD:
--	exampleKey: !!binary gIGC
--
--GOOD:
--	exampleKey: gIGC
--... and decode the base64 data in your code.
--```
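
A minimal, self-contained sketch of the "decode the base64 in your code" approach described above (this example is not part of the upstream README; the `Blob` type, the `exampleKey` field, and the sample document are illustrative):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/ghodss/yaml"
)

// Blob stores raw bytes but serializes as plain base64 text, so the YAML
// document never needs the !!binary tag.
type Blob struct {
	Data []byte
}

func (b Blob) MarshalJSON() ([]byte, error) {
	return json.Marshal(base64.StdEncoding.EncodeToString(b.Data))
}

func (b *Blob) UnmarshalJSON(p []byte) error {
	var s string
	if err := json.Unmarshal(p, &s); err != nil {
		return err
	}
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return err
	}
	b.Data = raw
	return nil
}

func main() {
	var doc struct {
		ExampleKey Blob `json:"exampleKey"`
	}
	// The value is stored as untagged base64, as recommended above.
	if err := yaml.Unmarshal([]byte("exampleKey: gIGC\n"), &doc); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Printf("%x\n", doc.ExampleKey.Data) // 808182
}
```

Because the custom `UnmarshalJSON` does the decoding, the same bytes come back whether the source document was YAML or JSON.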
--
--**Caveat #2:** When using `YAMLToJSON` directly, maps whose keys are themselves maps will result in an error, since JSON has no way to represent a map-valued key. The same error occurs in `Unmarshal`, because map keys cannot be unmarshaled into struct fields anyway.
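
For scalar keys the conversion is more forgiving: non-string keys such as ints and floats are coerced to JSON strings (the package's own tests exercise this case). A minimal sketch, using only `yaml.YAMLToJSON` as documented below:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// An integer key is legal YAML; it becomes the JSON string "1".
	j, err := yaml.YAMLToJSON([]byte("1: a\n"))
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j)) // {"1":"a"}
}
```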
--
--## Installation and usage
--
--To install, run:
--
--```
--$ go get github.com/ghodss/yaml
--```
--
--And import using:
--
--```
--import "github.com/ghodss/yaml"
--```
--
--Usage is very similar to the JSON library:
--
--```go
--import (
--	"fmt"
--
--	"github.com/ghodss/yaml"
--)
--
--type Person struct {
--	Name string `json:"name"`  // Affects YAML field names too.
--	Age int `json:"age"`
--}
--
--func main() {
--	// Marshal a Person struct to YAML.
--	p := Person{"John", 30}
--	y, err := yaml.Marshal(p)
--	if err != nil {
--		fmt.Printf("err: %v\n", err)
--		return
--	}
--	fmt.Println(string(y))
--	/* Output:
--	name: John
--	age: 30
--	*/
--
--	// Unmarshal the YAML back into a Person struct.
--	var p2 Person
--	err = yaml.Unmarshal(y, &p2)
--	if err != nil {
--		fmt.Printf("err: %v\n", err)
--		return
--	}
--	fmt.Println(p2)
--	/* Output:
--	{John 30}
--	*/
--}
--```
--
--`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
--
--```go
--import (
--	"fmt"
--
--	"github.com/ghodss/yaml"
--)
--func main() {
--	j := []byte(`{"name": "John", "age": 30}`)
--	y, err := yaml.JSONToYAML(j)
--	if err != nil {
--		fmt.Printf("err: %v\n", err)
--		return
--	}
--	fmt.Println(string(y))
--	/* Output:
--	name: John
--	age: 30
--	*/
--	j2, err := yaml.YAMLToJSON(y)
--	if err != nil {
--		fmt.Printf("err: %v\n", err)
--		return
--	}
--	fmt.Println(string(j2))
--	/* Output:
--	{"age":30,"name":"John"}
--	*/
--}
--```
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/fields.go b/Godeps/_workspace/src/github.com/ghodss/yaml/fields.go
-deleted file mode 100644
-index 0bd3c2b..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/fields.go
-+++ /dev/null
-@@ -1,497 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--package yaml
--
--import (
--	"bytes"
--	"encoding"
--	"encoding/json"
--	"reflect"
--	"sort"
--	"strings"
--	"sync"
--	"unicode"
--	"unicode/utf8"
--)
--
--// indirect walks down v allocating pointers as needed,
--// until it gets to a non-pointer.
--// if it encounters an Unmarshaler, indirect stops and returns that.
--// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
--func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
--	// If v is a named type and is addressable,
--	// start with its address, so that if the type has pointer methods,
--	// we find them.
--	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
--		v = v.Addr()
--	}
--	for {
--		// Load value from interface, but only if the result will be
--		// usefully addressable.
--		if v.Kind() == reflect.Interface && !v.IsNil() {
--			e := v.Elem()
--			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
--				v = e
--				continue
--			}
--		}
--
--		if v.Kind() != reflect.Ptr {
--			break
--		}
--
--		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
--			break
--		}
--		if v.IsNil() {
--			v.Set(reflect.New(v.Type().Elem()))
--		}
--		if v.Type().NumMethod() > 0 {
--			if u, ok := v.Interface().(json.Unmarshaler); ok {
--				return u, nil, reflect.Value{}
--			}
--			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
--				return nil, u, reflect.Value{}
--			}
--		}
--		v = v.Elem()
--	}
--	return nil, nil, v
--}
--
--// A field represents a single field found in a struct.
--type field struct {
--	name      string
--	nameBytes []byte                 // []byte(name)
--	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
--
--	tag       bool
--	index     []int
--	typ       reflect.Type
--	omitEmpty bool
--	quoted    bool
--}
--
--func fillField(f field) field {
--	f.nameBytes = []byte(f.name)
--	f.equalFold = foldFunc(f.nameBytes)
--	return f
--}
--
--// byName sorts field by name, breaking ties with depth,
--// then breaking ties with "name came from json tag", then
--// breaking ties with index sequence.
--type byName []field
--
--func (x byName) Len() int { return len(x) }
--
--func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
--
--func (x byName) Less(i, j int) bool {
--	if x[i].name != x[j].name {
--		return x[i].name < x[j].name
--	}
--	if len(x[i].index) != len(x[j].index) {
--		return len(x[i].index) < len(x[j].index)
--	}
--	if x[i].tag != x[j].tag {
--		return x[i].tag
--	}
--	return byIndex(x).Less(i, j)
--}
--
--// byIndex sorts field by index sequence.
--type byIndex []field
--
--func (x byIndex) Len() int { return len(x) }
--
--func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
--
--func (x byIndex) Less(i, j int) bool {
--	for k, xik := range x[i].index {
--		if k >= len(x[j].index) {
--			return false
--		}
--		if xik != x[j].index[k] {
--			return xik < x[j].index[k]
--		}
--	}
--	return len(x[i].index) < len(x[j].index)
--}
--
--// typeFields returns a list of fields that JSON should recognize for the given type.
--// The algorithm is breadth-first search over the set of structs to include - the top struct
--// and then any reachable anonymous structs.
--func typeFields(t reflect.Type) []field {
--	// Anonymous fields to explore at the current level and the next.
--	current := []field{}
--	next := []field{{typ: t}}
--
--	// Count of queued names for current level and the next.
--	count := map[reflect.Type]int{}
--	nextCount := map[reflect.Type]int{}
--
--	// Types already visited at an earlier level.
--	visited := map[reflect.Type]bool{}
--
--	// Fields found.
--	var fields []field
--
--	for len(next) > 0 {
--		current, next = next, current[:0]
--		count, nextCount = nextCount, map[reflect.Type]int{}
--
--		for _, f := range current {
--			if visited[f.typ] {
--				continue
--			}
--			visited[f.typ] = true
--
--			// Scan f.typ for fields to include.
--			for i := 0; i < f.typ.NumField(); i++ {
--				sf := f.typ.Field(i)
--				if sf.PkgPath != "" { // unexported
--					continue
--				}
--				tag := sf.Tag.Get("json")
--				if tag == "-" {
--					continue
--				}
--				name, opts := parseTag(tag)
--				if !isValidTag(name) {
--					name = ""
--				}
--				index := make([]int, len(f.index)+1)
--				copy(index, f.index)
--				index[len(f.index)] = i
--
--				ft := sf.Type
--				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
--					// Follow pointer.
--					ft = ft.Elem()
--				}
--
--				// Record found field and index sequence.
--				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
--					tagged := name != ""
--					if name == "" {
--						name = sf.Name
--					}
--					fields = append(fields, fillField(field{
--						name:      name,
--						tag:       tagged,
--						index:     index,
--						typ:       ft,
--						omitEmpty: opts.Contains("omitempty"),
--						quoted:    opts.Contains("string"),
--					}))
--					if count[f.typ] > 1 {
--						// If there were multiple instances, add a second,
--						// so that the annihilation code will see a duplicate.
--						// It only cares about the distinction between 1 or 2,
--						// so don't bother generating any more copies.
--						fields = append(fields, fields[len(fields)-1])
--					}
--					continue
--				}
--
--				// Record new anonymous struct to explore in next round.
--				nextCount[ft]++
--				if nextCount[ft] == 1 {
--					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
--				}
--			}
--		}
--	}
--
--	sort.Sort(byName(fields))
--
--	// Delete all fields that are hidden by the Go rules for embedded fields,
--	// except that fields with JSON tags are promoted.
--
--	// The fields are sorted in primary order of name, secondary order
--	// of field index length. Loop over names; for each name, delete
--	// hidden fields by choosing the one dominant field that survives.
--	out := fields[:0]
--	for advance, i := 0, 0; i < len(fields); i += advance {
--		// One iteration per name.
--		// Find the sequence of fields with the name of this first field.
--		fi := fields[i]
--		name := fi.name
--		for advance = 1; i+advance < len(fields); advance++ {
--			fj := fields[i+advance]
--			if fj.name != name {
--				break
--			}
--		}
--		if advance == 1 { // Only one field with this name
--			out = append(out, fi)
--			continue
--		}
--		dominant, ok := dominantField(fields[i : i+advance])
--		if ok {
--			out = append(out, dominant)
--		}
--	}
--
--	fields = out
--	sort.Sort(byIndex(fields))
--
--	return fields
--}
--
--// dominantField looks through the fields, all of which are known to
--// have the same name, to find the single field that dominates the
--// others using Go's embedding rules, modified by the presence of
--// JSON tags. If there are multiple top-level fields, the boolean
--// will be false: This condition is an error in Go and we skip all
--// the fields.
--func dominantField(fields []field) (field, bool) {
--	// The fields are sorted in increasing index-length order. The winner
--	// must therefore be one with the shortest index length. Drop all
--	// longer entries, which is easy: just truncate the slice.
--	length := len(fields[0].index)
--	tagged := -1 // Index of first tagged field.
--	for i, f := range fields {
--		if len(f.index) > length {
--			fields = fields[:i]
--			break
--		}
--		if f.tag {
--			if tagged >= 0 {
--				// Multiple tagged fields at the same level: conflict.
--				// Return no field.
--				return field{}, false
--			}
--			tagged = i
--		}
--	}
--	if tagged >= 0 {
--		return fields[tagged], true
--	}
--	// All remaining fields have the same length. If there's more than one,
--	// we have a conflict (two fields named "X" at the same level) and we
--	// return no field.
--	if len(fields) > 1 {
--		return field{}, false
--	}
--	return fields[0], true
--}
--
--var fieldCache struct {
--	sync.RWMutex
--	m map[reflect.Type][]field
--}
--
--// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
--func cachedTypeFields(t reflect.Type) []field {
--	fieldCache.RLock()
--	f := fieldCache.m[t]
--	fieldCache.RUnlock()
--	if f != nil {
--		return f
--	}
--
--	// Compute fields without lock.
--	// Might duplicate effort but won't hold other computations back.
--	f = typeFields(t)
--	if f == nil {
--		f = []field{}
--	}
--
--	fieldCache.Lock()
--	if fieldCache.m == nil {
--		fieldCache.m = map[reflect.Type][]field{}
--	}
--	fieldCache.m[t] = f
--	fieldCache.Unlock()
--	return f
--}
--
--func isValidTag(s string) bool {
--	if s == "" {
--		return false
--	}
--	for _, c := range s {
--		switch {
--		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
--			// Backslash and quote chars are reserved, but
--			// otherwise any punctuation chars are allowed
--			// in a tag name.
--		default:
--			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
--				return false
--			}
--		}
--	}
--	return true
--}
--
--const (
--	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
--	kelvin       = '\u212a'
--	smallLongEss = '\u017f'
--)
--
--// foldFunc returns one of four different case folding equivalence
--// functions, from most general (and slow) to fastest:
--//
--// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
--// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
--// 3) asciiEqualFold, no special, but includes non-letters (including _)
--// 4) simpleLetterEqualFold, no specials, no non-letters.
--//
--// The letters S and K are special because they map to 3 runes, not just 2:
--//  * S maps to s and to U+017F 'ſ' Latin small letter long s
--//  * k maps to K and to U+212A 'K' Kelvin sign
--// See http://play.golang.org/p/tTxjOc0OGo
--//
--// The returned function is specialized for matching against s and
--// should only be given s. It's not curried for performance reasons.
--func foldFunc(s []byte) func(s, t []byte) bool {
--	nonLetter := false
--	special := false // special letter
--	for _, b := range s {
--		if b >= utf8.RuneSelf {
--			return bytes.EqualFold
--		}
--		upper := b & caseMask
--		if upper < 'A' || upper > 'Z' {
--			nonLetter = true
--		} else if upper == 'K' || upper == 'S' {
--			// See above for why these letters are special.
--			special = true
--		}
--	}
--	if special {
--		return equalFoldRight
--	}
--	if nonLetter {
--		return asciiEqualFold
--	}
--	return simpleLetterEqualFold
--}
--
--// equalFoldRight is a specialization of bytes.EqualFold when s is
--// known to be all ASCII (including punctuation), but contains an 's',
--// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
--// See comments on foldFunc.
--func equalFoldRight(s, t []byte) bool {
--	for _, sb := range s {
--		if len(t) == 0 {
--			return false
--		}
--		tb := t[0]
--		if tb < utf8.RuneSelf {
--			if sb != tb {
--				sbUpper := sb & caseMask
--				if 'A' <= sbUpper && sbUpper <= 'Z' {
--					if sbUpper != tb&caseMask {
--						return false
--					}
--				} else {
--					return false
--				}
--			}
--			t = t[1:]
--			continue
--		}
--		// sb is ASCII and t is not. t must be either kelvin
--		// sign or long s; sb must be s, S, k, or K.
--		tr, size := utf8.DecodeRune(t)
--		switch sb {
--		case 's', 'S':
--			if tr != smallLongEss {
--				return false
--			}
--		case 'k', 'K':
--			if tr != kelvin {
--				return false
--			}
--		default:
--			return false
--		}
--		t = t[size:]
--
--	}
--	if len(t) > 0 {
--		return false
--	}
--	return true
--}
--
--// asciiEqualFold is a specialization of bytes.EqualFold for use when
--// s is all ASCII (but may contain non-letters) and contains no
--// special-folding letters.
--// See comments on foldFunc.
--func asciiEqualFold(s, t []byte) bool {
--	if len(s) != len(t) {
--		return false
--	}
--	for i, sb := range s {
--		tb := t[i]
--		if sb == tb {
--			continue
--		}
--		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
--			if sb&caseMask != tb&caseMask {
--				return false
--			}
--		} else {
--			return false
--		}
--	}
--	return true
--}
--
--// simpleLetterEqualFold is a specialization of bytes.EqualFold for
--// use when s is all ASCII letters (no underscores, etc) and also
--// doesn't contain 'k', 'K', 's', or 'S'.
--// See comments on foldFunc.
--func simpleLetterEqualFold(s, t []byte) bool {
--	if len(s) != len(t) {
--		return false
--	}
--	for i, b := range s {
--		if b&caseMask != t[i]&caseMask {
--			return false
--		}
--	}
--	return true
--}
--
--// tagOptions is the string following a comma in a struct field's "json"
--// tag, or the empty string. It does not include the leading comma.
--type tagOptions string
--
--// parseTag splits a struct field's json tag into its name and
--// comma-separated options.
--func parseTag(tag string) (string, tagOptions) {
--	if idx := strings.Index(tag, ","); idx != -1 {
--		return tag[:idx], tagOptions(tag[idx+1:])
--	}
--	return tag, tagOptions("")
--}
--
--// Contains reports whether a comma-separated list of options
--// contains a particular substr flag. substr must be surrounded by a
--// string boundary or commas.
--func (o tagOptions) Contains(optionName string) bool {
--	if len(o) == 0 {
--		return false
--	}
--	s := string(o)
--	for s != "" {
--		var next string
--		i := strings.Index(s, ",")
--		if i >= 0 {
--			s, next = s[:i], s[i+1:]
--		}
--		if s == optionName {
--			return true
--		}
--		s = next
--	}
--	return false
--}
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/yaml.go b/Godeps/_workspace/src/github.com/ghodss/yaml/yaml.go
-deleted file mode 100644
-index feab226..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/yaml.go
-+++ /dev/null
-@@ -1,250 +0,0 @@
--package yaml
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"reflect"
--	"strconv"
--
--	"gopkg.in/v2/yaml"
--)
--
--// Marshals the object into JSON then converts JSON to YAML and returns the
--// YAML.
--func Marshal(o interface{}) ([]byte, error) {
--	j, err := json.Marshal(o)
--	if err != nil {
--		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
--	}
--
--	y, err := JSONToYAML(j)
--	if err != nil {
--		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
--	}
--
--	return y, nil
--}
--
--// Converts YAML to JSON then uses JSON to unmarshal into an object.
--func Unmarshal(y []byte, o interface{}) error {
--	vo := reflect.ValueOf(o)
--	j, err := yamlToJSON(y, &vo)
--	if err != nil {
--		return fmt.Errorf("error converting YAML to JSON: %v", err)
--	}
--
--	err = json.Unmarshal(j, o)
--	if err != nil {
--		return fmt.Errorf("error unmarshaling JSON: %v", err)
--	}
--
--	return nil
--}
--
--// Convert JSON to YAML.
--func JSONToYAML(j []byte) ([]byte, error) {
--	// Convert the JSON to an object.
--	var jsonObj interface{}
--	err := json.Unmarshal(j, &jsonObj)
--	if err != nil {
--		return nil, err
--	}
--
--	// Marshal this object into YAML.
--	return yaml.Marshal(jsonObj)
--}
--
--// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
--// this method should be a no-op.
--//
--// Things YAML can do that are not supported by JSON:
--// * In YAML you can have binary and null keys in your maps. These are invalid
--//   in JSON. (int and float keys are converted to strings.)
--// * Binary data in YAML with the !!binary tag is not supported. If you want to
--//   use binary data with this library, encode the data as base64 as usual but do
--//   not use the !!binary tag in your YAML. This will ensure the original base64
--//   encoded data makes it all the way through to the JSON.
--func YAMLToJSON(y []byte) ([]byte, error) {
--	return yamlToJSON(y, nil)
--}
--
--func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
--	// Convert the YAML to an object.
--	var yamlObj interface{}
--	err := yaml.Unmarshal(y, &yamlObj)
--	if err != nil {
--		return nil, err
--	}
--
--	// YAML objects are not completely compatible with JSON objects (e.g. you
--	// can have non-string keys in YAML). So, convert the YAML-compatible object
--	// to a JSON-compatible object, failing with an error if irrecoverable
--	// incompatibilities happen along the way.
--	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
--	if err != nil {
--		return nil, err
--	}
--
--	// Convert this object to JSON and return the data.
--	return json.Marshal(jsonObj)
--}
--
--func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
--	var err error
--
--	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
--	// interface). We pass decodingNull as false because we're not actually
--	// decoding into the value, we're just checking if the ultimate target is a
--	// string.
--	if jsonTarget != nil {
--		ju, tu, pv := indirect(*jsonTarget, false)
--		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
--		// to decode into a string.
--		if ju != nil || tu != nil {
--			jsonTarget = nil
--		} else {
--			jsonTarget = &pv
--		}
--	}
--
--	// If yamlObj is a number, check if jsonTarget is a string - if so, coerce.
--	// Else return normal.
--	// If yamlObj is a map or array, find the field that each key is
--	// unmarshaling to, and when you recurse pass the reflect.Value for that
--	// field back into this function.
--
--	switch typedYAMLObj := yamlObj.(type) {
--	case map[interface{}]interface{}:
--		// JSON does not support arbitrary keys in a map, so we must convert
--		// these keys to strings.
--		//
--		// From my reading of go-yaml v2 (specifically the resolve function),
--		// keys can only have the types string, int, int64, float64, binary
--		// (unsupported), or null (unsupported).
--		strMap := make(map[string]interface{})
--		for k, v := range typedYAMLObj {
--			// Resolve the key to a string first.
--			var keyString string
--			switch typedKey := k.(type) {
--			case string:
--				keyString = typedKey
--			case int:
--				keyString = strconv.Itoa(typedKey)
--			case int64:
--				// go-yaml will only return an int64 as a key if the system
--				// architecture is 32-bit and the key's value is between 32-bit
--				// and 64-bit. Otherwise the key type will simply be int.
--				keyString = strconv.FormatInt(typedKey, 10)
--			case float64:
--				// Stolen from go-yaml to use the same conversion to string as
--				// the go-yaml library uses to convert float to string when
--				// Marshaling.
--				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
--				switch s {
--				case "+Inf":
--					s = ".inf"
--				case "-Inf":
--					s = "-.inf"
--				case "NaN":
--					s = ".nan"
--				}
--				keyString = s
--			default:
--				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
--					reflect.TypeOf(k), k, v)
--			}
--
--			// If jsonTarget is a struct (which it really should be), find the
--			// field it's going to map to. If it's not a struct, just pass nil
--			// - JSON conversion will error for us if it's a real issue.
--			if jsonTarget != nil {
--				t := *jsonTarget
--				if t.Kind() == reflect.Struct {
--					keyBytes := []byte(keyString)
--					// Find the field that the JSON library would use.
--					var f *field
--					fields := cachedTypeFields(t.Type())
--					for i := range fields {
--						ff := &fields[i]
--						if bytes.Equal(ff.nameBytes, keyBytes) {
--							f = ff
--							break
--						}
--						// Do case-insensitive comparison.
--						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
--							f = ff
--						}
--					}
--					if f != nil {
--						// Find the reflect.Value of the most preferential
--						// struct field.
--						jtf := t.Field(f.index[0])
--						strMap[keyString], err = convertToJSONableObject(v, &jtf)
--						if err != nil {
--							return nil, err
--						}
--						continue
--					}
--				}
--			}
--			strMap[keyString], err = convertToJSONableObject(v, nil)
--			if err != nil {
--				return nil, err
--			}
--		}
--		return strMap, nil
--	case []interface{}:
--		// We need to recurse into arrays in case there are any
--		// map[interface{}]interface{}'s inside and to convert any
--		// numbers to strings.
--
--		// If jsonTarget is a slice (which it really should be), find the
--		// thing it's going to map to. If it's not a slice, just pass nil
--		// - JSON conversion will error for us if it's a real issue.
--		var jsonSliceElemValue *reflect.Value
--		if jsonTarget != nil {
--			t := *jsonTarget
--			if t.Kind() == reflect.Slice {
--				// By default slices point to nil, but we need a reflect.Value
--				// pointing to a value of the slice type, so we create one here.
--				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
--				jsonSliceElemValue = &ev
--			}
--		}
--
--		// Make and use a new array.
--		arr := make([]interface{}, len(typedYAMLObj))
--		for i, v := range typedYAMLObj {
--			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
--			if err != nil {
--				return nil, err
--			}
--		}
--		return arr, nil
--	default:
--		// If the target type is a string and the YAML type is a number,
--		// convert the YAML type to a string.
--		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
--			// Based on my reading of go-yaml, it may return int, int64,
--			// float64, or uint64.
--			var s string
--			switch num := typedYAMLObj.(type) {
--			case int:
--				s = strconv.FormatInt(int64(num), 10)
--			case int64:
--				s = strconv.FormatInt(num, 10)
--			case float64:
--				s = strconv.FormatFloat(num, 'g', -1, 32)
--			case uint64:
--				s = strconv.FormatUint(num, 10)
--			}
--			if len(s) > 0 {
--				yamlObj = interface{}(s)
--			}
--		}
--		return yamlObj, nil
--	}
--
--	return nil, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/ghodss/yaml/yaml_test.go b/Godeps/_workspace/src/github.com/ghodss/yaml/yaml_test.go
-deleted file mode 100644
-index c569ebd..0000000
---- a/Godeps/_workspace/src/github.com/ghodss/yaml/yaml_test.go
-+++ /dev/null
-@@ -1,243 +0,0 @@
--package yaml
--
--import (
--	"reflect"
--	"testing"
--)
--
--type MarshalTest struct {
--	A string
--}
--
--func TestMarshal(t *testing.T) {
--	s := MarshalTest{"a"}
--	e := []byte("A: a\n")
--
--	y, err := Marshal(s)
--	if err != nil {
--		t.Errorf("error marshaling YAML: %v", err)
--	}
--
--	if !reflect.DeepEqual(y, e) {
--		t.Errorf("marshal YAML was unsuccessful, expected: %#v, got: %#v",
--			string(e), string(y))
--	}
--}
--
--type UnmarshalString struct {
--	A string
--}
--
--type UnmarshalNestedString struct {
--	A NestedString
--}
--
--type NestedString struct {
--	A string
--}
--
--type UnmarshalSlice struct {
--	A []NestedSlice
--}
--
--type NestedSlice struct {
--	B string
--	C *string
--}
--
--func TestUnmarshal(t *testing.T) {
--	y := []byte("a: 1")
--	s1 := UnmarshalString{}
--	e1 := UnmarshalString{"1"}
--	unmarshal(t, y, &s1, &e1)
--
--	y = []byte("a:\n  a: 1")
--	s2 := UnmarshalNestedString{}
--	e2 := UnmarshalNestedString{NestedString{"1"}}
--	unmarshal(t, y, &s2, &e2)
--
--	y = []byte("a:\n  - b: abc\n    c: def\n  - b: 123\n    c: 456\n")
--	s3 := UnmarshalSlice{}
--	e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}}
--	unmarshal(t, y, &s3, &e3)
--}
--
--func unmarshal(t *testing.T, y []byte, s, e interface{}) {
--	err := Unmarshal(y, s)
--	if err != nil {
--		t.Errorf("error unmarshaling YAML: %v", err)
--	}
--
--	if !reflect.DeepEqual(s, e) {
--		t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v",
--			e, s)
--	}
--}
--
--type Case struct {
--	input  string
--	output string
--	// By default we test that reversing the output == input. But if there is a
--	// difference in the reversed output, you can optionally specify it here.
--	reverse *string
--}
--
--type RunType int
--
--const (
--	RunTypeJSONToYAML RunType = iota
--	RunTypeYAMLToJSON
--)
--
--func TestJSONToYAML(t *testing.T) {
--	cases := []Case{
--		{
--			`{"t":"a"}`,
--			"t: a\n",
--			nil,
--		}, {
--			`{"t":null}`,
--			"t: null\n",
--			nil,
--		},
--	}
--
--	runCases(t, RunTypeJSONToYAML, cases)
--}
--
--func TestYAMLToJSON(t *testing.T) {
--	cases := []Case{
--		{
--			"t: a\n",
--			`{"t":"a"}`,
--			nil,
--		}, {
--			"t: \n",
--			`{"t":null}`,
--			strPtr("t: null\n"),
--		}, {
--			"t: null\n",
--			`{"t":null}`,
--			nil,
--		}, {
--			"1: a\n",
--			`{"1":"a"}`,
--			strPtr("\"1\": a\n"),
--		}, {
--			"1000000000000000000000000000000000000: a\n",
--			`{"1e+36":"a"}`,
--			strPtr("\"1e+36\": a\n"),
--		}, {
--			"1e+36: a\n",
--			`{"1e+36":"a"}`,
--			strPtr("\"1e+36\": a\n"),
--		}, {
--			"\"1e+36\": a\n",
--			`{"1e+36":"a"}`,
--			nil,
--		}, {
--			"\"1.2\": a\n",
--			`{"1.2":"a"}`,
--			nil,
--		}, {
--			"- t: a\n",
--			`[{"t":"a"}]`,
--			nil,
--		}, {
--			"- t: a\n" +
--				"- t:\n" +
--				"    b: 1\n" +
--				"    c: 2\n",
--			`[{"t":"a"},{"t":{"b":1,"c":2}}]`,
--			nil,
--		}, {
--			`[{t: a}, {t: {b: 1, c: 2}}]`,
--			`[{"t":"a"},{"t":{"b":1,"c":2}}]`,
--			strPtr("- t: a\n" +
--				"- t:\n" +
--				"    b: 1\n" +
--				"    c: 2\n"),
--		}, {
--			"- t: \n",
--			`[{"t":null}]`,
--			strPtr("- t: null\n"),
--		}, {
--			"- t: null\n",
--			`[{"t":null}]`,
--			nil,
--		},
--	}
--
--	// Cases that should produce errors.
--	_ = []Case{
--		{
--			"~: a",
--			`{"null":"a"}`,
--			nil,
--		}, {
--			"a: !!binary gIGC\n",
--			"{\"a\":\"\x80\x81\x82\"}",
--			nil,
--		},
--	}
--
--	runCases(t, RunTypeYAMLToJSON, cases)
--}
--
--func runCases(t *testing.T, runType RunType, cases []Case) {
--	var f func([]byte) ([]byte, error)
--	var invF func([]byte) ([]byte, error)
--	var msg string
--	var invMsg string
--	if runType == RunTypeJSONToYAML {
--		f = JSONToYAML
--		invF = YAMLToJSON
--		msg = "JSON to YAML"
--		invMsg = "YAML back to JSON"
--	} else {
--		f = YAMLToJSON
--		invF = JSONToYAML
--		msg = "YAML to JSON"
--		invMsg = "JSON back to YAML"
--	}
--
--	for _, c := range cases {
--		// Convert the string.
--		t.Logf("converting %s\n", c.input)
--		output, err := f([]byte(c.input))
--		if err != nil {
--			t.Errorf("Failed to convert %s, input: `%s`, err: %v", msg, c.input, err)
--		}
--
--		// Check it against the expected output.
--		if string(output) != c.output {
--			t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
--				msg, c.input, c.output, string(output))
--		}
--
--		// Set the string that we will compare the reversed output to.
--		reverse := c.input
--		// If a special reverse string was specified, use that instead.
--		if c.reverse != nil {
--			reverse = *c.reverse
--		}
--
--		// Reverse the output.
--		input, err := invF(output)
--		if err != nil {
--			t.Errorf("Failed to convert %s, input: `%s`, err: %v", invMsg, string(output), err)
--		}
--
--		// Check the reverse is equal to the input (or to *c.reverse).
--		if string(input) != reverse {
--			t.Errorf("Failed to convert %s, input: `%s`, expected `%s`, got `%s`",
--				invMsg, string(output), reverse, string(input))
--		}
--	}
--
--}
--
--// To be able to easily fill in the *Case.reverse string above.
--func strPtr(s string) *string {
--	return &s
--}
-diff --git a/Godeps/_workspace/src/github.com/golang/glog/LICENSE b/Godeps/_workspace/src/github.com/golang/glog/LICENSE
-deleted file mode 100644
-index 37ec93a..0000000
---- a/Godeps/_workspace/src/github.com/golang/glog/LICENSE
-+++ /dev/null
-@@ -1,191 +0,0 @@
--Apache License
--Version 2.0, January 2004
--http://www.apache.org/licenses/
--
--TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
--
--1. Definitions.
--
--"License" shall mean the terms and conditions for use, reproduction, and
--distribution as defined by Sections 1 through 9 of this document.
--
--"Licensor" shall mean the copyright owner or entity authorized by the copyright
--owner that is granting the License.
--
--"Legal Entity" shall mean the union of the acting entity and all other entities
--that control, are controlled by, or are under common control with that entity.
--For the purposes of this definition, "control" means (i) the power, direct or
--indirect, to cause the direction or management of such entity, whether by
--contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
--outstanding shares, or (iii) beneficial ownership of such entity.
--
--"You" (or "Your") shall mean an individual or Legal Entity exercising
--permissions granted by this License.
--
--"Source" form shall mean the preferred form for making modifications, including
--but not limited to software source code, documentation source, and configuration
--files.
--
--"Object" form shall mean any form resulting from mechanical transformation or
--translation of a Source form, including but not limited to compiled object code,
--generated documentation, and conversions to other media types.
--
--"Work" shall mean the work of authorship, whether in Source or Object form, made
--available under the License, as indicated by a copyright notice that is included
--in or attached to the work (an example is provided in the Appendix below).
--
--"Derivative Works" shall mean any work, whether in Source or Object form, that
--is based on (or derived from) the Work and for which the editorial revisions,
--annotations, elaborations, or other modifications represent, as a whole, an
--original work of authorship. For the purposes of this License, Derivative Works
--shall not include works that remain separable from, or merely link (or bind by
--name) to the interfaces of, the Work and Derivative Works thereof.
--
--"Contribution" shall mean any work of authorship, including the original version
--of the Work and any modifications or additions to that Work or Derivative Works
--thereof, that is intentionally submitted to Licensor for inclusion in the Work
--by the copyright owner or by an individual or Legal Entity authorized to submit
--on behalf of the copyright owner. For the purposes of this definition,
--"submitted" means any form of electronic, verbal, or written communication sent
--to the Licensor or its representatives, including but not limited to
--communication on electronic mailing lists, source code control systems, and
--issue tracking systems that are managed by, or on behalf of, the Licensor for
--the purpose of discussing and improving the Work, but excluding communication
--that is conspicuously marked or otherwise designated in writing by the copyright
--owner as "Not a Contribution."
--
--"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
--of whom a Contribution has been received by Licensor and subsequently
--incorporated within the Work.
--
--2. Grant of Copyright License.
--
--Subject to the terms and conditions of this License, each Contributor hereby
--grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
--irrevocable copyright license to reproduce, prepare Derivative Works of,
--publicly display, publicly perform, sublicense, and distribute the Work and such
--Derivative Works in Source or Object form.
--
--3. Grant of Patent License.
--
--Subject to the terms and conditions of this License, each Contributor hereby
--grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
--irrevocable (except as stated in this section) patent license to make, have
--made, use, offer to sell, sell, import, and otherwise transfer the Work, where
--such license applies only to those patent claims licensable by such Contributor
--that are necessarily infringed by their Contribution(s) alone or by combination
--of their Contribution(s) with the Work to which such Contribution(s) was
--submitted. If You institute patent litigation against any entity (including a
--cross-claim or counterclaim in a lawsuit) alleging that the Work or a
--Contribution incorporated within the Work constitutes direct or contributory
--patent infringement, then any patent licenses granted to You under this License
--for that Work shall terminate as of the date such litigation is filed.
--
--4. Redistribution.
--
--You may reproduce and distribute copies of the Work or Derivative Works thereof
--in any medium, with or without modifications, and in Source or Object form,
--provided that You meet the following conditions:
--
--You must give any other recipients of the Work or Derivative Works a copy of
--this License; and
--You must cause any modified files to carry prominent notices stating that You
--changed the files; and
--You must retain, in the Source form of any Derivative Works that You distribute,
--all copyright, patent, trademark, and attribution notices from the Source form
--of the Work, excluding those notices that do not pertain to any part of the
--Derivative Works; and
--If the Work includes a "NOTICE" text file as part of its distribution, then any
--Derivative Works that You distribute must include a readable copy of the
--attribution notices contained within such NOTICE file, excluding those notices
--that do not pertain to any part of the Derivative Works, in at least one of the
--following places: within a NOTICE text file distributed as part of the
--Derivative Works; within the Source form or documentation, if provided along
--with the Derivative Works; or, within a display generated by the Derivative
--Works, if and wherever such third-party notices normally appear. The contents of
--the NOTICE file are for informational purposes only and do not modify the
--License. You may add Your own attribution notices within Derivative Works that
--You distribute, alongside or as an addendum to the NOTICE text from the Work,
--provided that such additional attribution notices cannot be construed as
--modifying the License.
--You may add Your own copyright statement to Your modifications and may provide
--additional or different license terms and conditions for use, reproduction, or
--distribution of Your modifications, or for any such Derivative Works as a whole,
--provided Your use, reproduction, and distribution of the Work otherwise complies
--with the conditions stated in this License.
--
--5. Submission of Contributions.
--
--Unless You explicitly state otherwise, any Contribution intentionally submitted
--for inclusion in the Work by You to the Licensor shall be under the terms and
--conditions of this License, without any additional terms or conditions.
--Notwithstanding the above, nothing herein shall supersede or modify the terms of
--any separate license agreement you may have executed with Licensor regarding
--such Contributions.
--
--6. Trademarks.
--
--This License does not grant permission to use the trade names, trademarks,
--service marks, or product names of the Licensor, except as required for
--reasonable and customary use in describing the origin of the Work and
--reproducing the content of the NOTICE file.
--
--7. Disclaimer of Warranty.
--
--Unless required by applicable law or agreed to in writing, Licensor provides the
--Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
--including, without limitation, any warranties or conditions of TITLE,
--NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
--solely responsible for determining the appropriateness of using or
--redistributing the Work and assume any risks associated with Your exercise of
--permissions under this License.
--
--8. Limitation of Liability.
--
--In no event and under no legal theory, whether in tort (including negligence),
--contract, or otherwise, unless required by applicable law (such as deliberate
--and grossly negligent acts) or agreed to in writing, shall any Contributor be
--liable to You for damages, including any direct, indirect, special, incidental,
--or consequential damages of any character arising as a result of this License or
--out of the use or inability to use the Work (including but not limited to
--damages for loss of goodwill, work stoppage, computer failure or malfunction, or
--any and all other commercial damages or losses), even if such Contributor has
--been advised of the possibility of such damages.
--
--9. Accepting Warranty or Additional Liability.
--
--While redistributing the Work or Derivative Works thereof, You may choose to
--offer, and charge a fee for, acceptance of support, warranty, indemnity, or
--other liability obligations and/or rights consistent with this License. However,
--in accepting such obligations, You may act only on Your own behalf and on Your
--sole responsibility, not on behalf of any other Contributor, and only if You
--agree to indemnify, defend, and hold each Contributor harmless for any liability
--incurred by, or claims asserted against, such Contributor by reason of your
--accepting any such warranty or additional liability.
--
--END OF TERMS AND CONDITIONS
--
--APPENDIX: How to apply the Apache License to your work
--
--To apply the Apache License to your work, attach the following boilerplate
--notice, with the fields enclosed by brackets "[]" replaced with your own
--identifying information. (Don't include the brackets!) The text should be
--enclosed in the appropriate comment syntax for the file format. We also
--recommend that a file or class name and description of purpose be included on
--the same "printed page" as the copyright notice for easier identification within
--third-party archives.
--
--   Copyright [yyyy] [name of copyright owner]
--
--   Licensed under the Apache License, Version 2.0 (the "License");
--   you may not use this file except in compliance with the License.
--   You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
--   Unless required by applicable law or agreed to in writing, software
--   distributed under the License is distributed on an "AS IS" BASIS,
--   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--   See the License for the specific language governing permissions and
--   limitations under the License.
-diff --git a/Godeps/_workspace/src/github.com/golang/glog/README b/Godeps/_workspace/src/github.com/golang/glog/README
-deleted file mode 100644
-index 5f9c114..0000000
---- a/Godeps/_workspace/src/github.com/golang/glog/README
-+++ /dev/null
-@@ -1,44 +0,0 @@
--glog
--====
--
--Leveled execution logs for Go.
--
--This is an efficient pure Go implementation of leveled logs in the
--manner of the open source C++ package
--	http://code.google.com/p/google-glog
--
--By binding methods to booleans it is possible to use the log package
--without paying the expense of evaluating the arguments to the log.
--Through the -vmodule flag, the package also provides fine-grained
--control over logging at the file level.
--
--The comment from glog.go introduces the ideas:
--
--	Package glog implements logging analogous to the Google-internal
--	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
--	Error, Fatal, plus formatting variants such as Infof. It
--	also provides V-style logging controlled by the -v and
--	-vmodule=file=2 flags.
--	
--	Basic examples:
--	
--		glog.Info("Prepare to repel boarders")
--	
--		glog.Fatalf("Initialization failed: %s", err)
--	
--	See the documentation for the V function for an explanation
--	of these examples:
--	
--		if glog.V(2) {
--			glog.Info("Starting transaction...")
--		}
--	
--		glog.V(2).Infoln("Processed", nItems, "elements")
--
--
--The repository contains an open source version of the log package
--used inside Google. The master copy of the source lives inside
--Google, not here. The code in this repo is for export only and is not itself
--under development. Feature requests will be ignored.
--
--Send bug reports to golang-nuts at googlegroups.com.
-diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog.go b/Godeps/_workspace/src/github.com/golang/glog/glog.go
-deleted file mode 100644
-index 3e63fff..0000000
---- a/Godeps/_workspace/src/github.com/golang/glog/glog.go
-+++ /dev/null
-@@ -1,1177 +0,0 @@
--// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
--//
--// Copyright 2013 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
--// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
--// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
--//
--// Basic examples:
--//
--//	glog.Info("Prepare to repel boarders")
--//
--//	glog.Fatalf("Initialization failed: %s", err)
--//
--// See the documentation for the V function for an explanation of these examples:
--//
--//	if glog.V(2) {
--//		glog.Info("Starting transaction...")
--//	}
--//
--//	glog.V(2).Infoln("Processed", nItems, "elements")
--//
--// Log output is buffered and written periodically using Flush. Programs
--// should call Flush before exiting to guarantee all log output is written.
--//
--// By default, all log statements write to files in a temporary directory.
--// This package provides several flags that modify this behavior.
--// As a result, flag.Parse must be called before any logging is done.
--//
--//	-logtostderr=false
--//		Logs are written to standard error instead of to files.
--//	-alsologtostderr=false
--//		Logs are written to standard error as well as to files.
--//	-stderrthreshold=ERROR
--//		Log events at or above this severity are logged to standard
--//		error as well as to files.
--//	-log_dir=""
--//		Log files will be written to this directory instead of the
--//		default temporary directory.
--//
--//	Other flags provide aids to debugging.
--//
--//	-log_backtrace_at=""
--//		When set to a file and line number holding a logging statement,
--//		such as
--//			-log_backtrace_at=gopherflakes.go:234
--//		a stack trace will be written to the Info log whenever execution
--//		hits that statement. (Unlike with -vmodule, the ".go" must be
--//		present.)
--//	-v=0
--//		Enable V-leveled logging at the specified level.
--//	-vmodule=""
--//		The syntax of the argument is a comma-separated list of pattern=N,
--//		where pattern is a literal file name (minus the ".go" suffix) or
--//		"glob" pattern and N is a V level. For instance,
--//			-vmodule=gopher*=3
--//		sets the V level to 3 in all Go files whose names begin "gopher".
--//
--package glog
--
--import (
--	"bufio"
--	"bytes"
--	"errors"
--	"flag"
--	"fmt"
--	"io"
--	stdLog "log"
--	"os"
--	"path/filepath"
--	"runtime"
--	"strconv"
--	"strings"
--	"sync"
--	"sync/atomic"
--	"time"
--)
--
--// severity identifies the sort of log: info, warning etc. It also implements
--// the flag.Value interface. The -stderrthreshold flag is of type severity and
--// should be modified only through the flag.Value interface. The values match
--// the corresponding constants in C++.
--type severity int32 // sync/atomic int32
--
--// These constants identify the log levels in order of increasing severity.
--// A message written to a high-severity log file is also written to each
--// lower-severity log file.
--const (
--	infoLog severity = iota
--	warningLog
--	errorLog
--	fatalLog
--	numSeverity = 4
--)
--
--const severityChar = "IWEF"
--
--var severityName = []string{
--	infoLog:    "INFO",
--	warningLog: "WARNING",
--	errorLog:   "ERROR",
--	fatalLog:   "FATAL",
--}
--
--// get returns the value of the severity.
--func (s *severity) get() severity {
--	return severity(atomic.LoadInt32((*int32)(s)))
--}
--
--// set sets the value of the severity.
--func (s *severity) set(val severity) {
--	atomic.StoreInt32((*int32)(s), int32(val))
--}
--
--// String is part of the flag.Value interface.
--func (s *severity) String() string {
--	return strconv.FormatInt(int64(*s), 10)
--}
--
--// Get is part of the flag.Value interface.
--func (s *severity) Get() interface{} {
--	return *s
--}
--
--// Set is part of the flag.Value interface.
--func (s *severity) Set(value string) error {
--	var threshold severity
--	// Is it a known name?
--	if v, ok := severityByName(value); ok {
--		threshold = v
--	} else {
--		v, err := strconv.Atoi(value)
--		if err != nil {
--			return err
--		}
--		threshold = severity(v)
--	}
--	logging.stderrThreshold.set(threshold)
--	return nil
--}
--
--func severityByName(s string) (severity, bool) {
--	s = strings.ToUpper(s)
--	for i, name := range severityName {
--		if name == s {
--			return severity(i), true
--		}
--	}
--	return 0, false
--}
--
--// OutputStats tracks the number of output lines and bytes written.
--type OutputStats struct {
--	lines int64
--	bytes int64
--}
--
--// Lines returns the number of lines written.
--func (s *OutputStats) Lines() int64 {
--	return atomic.LoadInt64(&s.lines)
--}
--
--// Bytes returns the number of bytes written.
--func (s *OutputStats) Bytes() int64 {
--	return atomic.LoadInt64(&s.bytes)
--}
--
--// Stats tracks the number of lines of output and number of bytes
--// per severity level. Values must be read with atomic.LoadInt64.
--var Stats struct {
--	Info, Warning, Error OutputStats
--}
--
--var severityStats = [numSeverity]*OutputStats{
--	infoLog:    &Stats.Info,
--	warningLog: &Stats.Warning,
--	errorLog:   &Stats.Error,
--}
--
--// Level is exported because it appears in the arguments to V and is
--// the type of the v flag, which can be set programmatically.
--// It's a distinct type because we want to discriminate it from logType.
--// Variables of type level are only changed under logging.mu.
--// The -v flag is read only with atomic ops, so the state of the logging
--// module is consistent.
--
--// Level is treated as a sync/atomic int32.
--
--// Level specifies a level of verbosity for V logs. *Level implements
--// flag.Value; the -v flag is of type Level and should be modified
--// only through the flag.Value interface.
--type Level int32
--
--// get returns the value of the Level.
--func (l *Level) get() Level {
--	return Level(atomic.LoadInt32((*int32)(l)))
--}
--
--// set sets the value of the Level.
--func (l *Level) set(val Level) {
--	atomic.StoreInt32((*int32)(l), int32(val))
--}
--
--// String is part of the flag.Value interface.
--func (l *Level) String() string {
--	return strconv.FormatInt(int64(*l), 10)
--}
--
--// Get is part of the flag.Value interface.
--func (l *Level) Get() interface{} {
--	return *l
--}
--
--// Set is part of the flag.Value interface.
--func (l *Level) Set(value string) error {
--	v, err := strconv.Atoi(value)
--	if err != nil {
--		return err
--	}
--	logging.mu.Lock()
--	defer logging.mu.Unlock()
--	logging.setVState(Level(v), logging.vmodule.filter, false)
--	return nil
--}
--
--// moduleSpec represents the setting of the -vmodule flag.
--type moduleSpec struct {
--	filter []modulePat
--}
--
--// modulePat contains a filter for the -vmodule flag.
--// It holds a verbosity level and a file pattern to match.
--type modulePat struct {
--	pattern string
--	literal bool // The pattern is a literal string
--	level   Level
--}
--
--// match reports whether the file matches the pattern. It uses a string
--// comparison if the pattern contains no metacharacters.
--func (m *modulePat) match(file string) bool {
--	if m.literal {
--		return file == m.pattern
--	}
--	match, _ := filepath.Match(m.pattern, file)
--	return match
--}
--
--func (m *moduleSpec) String() string {
--	// Lock because the type is not atomic. TODO: clean this up.
--	logging.mu.Lock()
--	defer logging.mu.Unlock()
--	var b bytes.Buffer
--	for i, f := range m.filter {
--		if i > 0 {
--			b.WriteRune(',')
--		}
--		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
--	}
--	return b.String()
--}
--
--// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the
--// struct is not exported.
--func (m *moduleSpec) Get() interface{} {
--	return nil
--}
--
--var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
--
--// Syntax: -vmodule=recordio=2,file=1,gfs*=3
--func (m *moduleSpec) Set(value string) error {
--	var filter []modulePat
--	for _, pat := range strings.Split(value, ",") {
--		if len(pat) == 0 {
--			// Empty strings such as from a trailing comma can be ignored.
--			continue
--		}
--		patLev := strings.Split(pat, "=")
--		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
--			return errVmoduleSyntax
--		}
--		pattern := patLev[0]
--		v, err := strconv.Atoi(patLev[1])
--		if err != nil {
--			return errors.New("syntax error: expect comma-separated list of filename=N")
--		}
--		if v < 0 {
--			return errors.New("negative value for vmodule level")
--		}
--		if v == 0 {
--			continue // Ignore. It's harmless but no point in paying the overhead.
--		}
--		// TODO: check syntax of filter?
--		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
--	}
--	logging.mu.Lock()
--	defer logging.mu.Unlock()
--	logging.setVState(logging.verbosity, filter, true)
--	return nil
--}
--
--// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
--// that require filepath.Match to be called to match the pattern.
--func isLiteral(pattern string) bool {
--	return !strings.ContainsAny(pattern, `\*?[]`)
--}
--
--// traceLocation represents the setting of the -log_backtrace_at flag.
--type traceLocation struct {
--	file string
--	line int
--}
--
--// isSet reports whether the trace location has been specified.
--// logging.mu is held.
--func (t *traceLocation) isSet() bool {
--	return t.line > 0
--}
--
--// match reports whether the specified file and line matches the trace location.
--// The argument file name is the full path, not the basename specified in the flag.
--// logging.mu is held.
--func (t *traceLocation) match(file string, line int) bool {
--	if t.line != line {
--		return false
--	}
--	if i := strings.LastIndex(file, "/"); i >= 0 {
--		file = file[i+1:]
--	}
--	return t.file == file
--}
--
--func (t *traceLocation) String() string {
--	// Lock because the type is not atomic. TODO: clean this up.
--	logging.mu.Lock()
--	defer logging.mu.Unlock()
--	return fmt.Sprintf("%s:%d", t.file, t.line)
--}
--
--// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
--// struct is not exported.
--func (t *traceLocation) Get() interface{} {
--	return nil
--}
--
--var errTraceSyntax = errors.New("syntax error: expect file.go:234")
--
--// Syntax: -log_backtrace_at=gopherflakes.go:234
--// Note that unlike vmodule the file extension is included here.
--func (t *traceLocation) Set(value string) error {
--	if value == "" {
--		// Unset, and return early so an empty value does not fall through
--		// to the syntax check below.
--		t.line = 0
--		t.file = ""
--		return nil
--	}
--	fields := strings.Split(value, ":")
--	if len(fields) != 2 {
--		return errTraceSyntax
--	}
--	file, line := fields[0], fields[1]
--	if !strings.Contains(file, ".") {
--		return errTraceSyntax
--	}
--	v, err := strconv.Atoi(line)
--	if err != nil {
--		return errTraceSyntax
--	}
--	if v <= 0 {
--		return errors.New("negative or zero value for level")
--	}
--	logging.mu.Lock()
--	defer logging.mu.Unlock()
--	t.line = v
--	t.file = file
--	return nil
--}
--
--// flushSyncWriter is the interface satisfied by logging destinations.
--type flushSyncWriter interface {
--	Flush() error
--	Sync() error
--	io.Writer
--}
--
--func init() {
--	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
--	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
--	flag.Var(&logging.verbosity, "v", "log level for V logs")
--	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
--	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
--	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
--
--	// Default stderrThreshold is ERROR.
--	logging.stderrThreshold = errorLog
--
--	logging.setVState(0, nil, false)
--	go logging.flushDaemon()
--}
--
--// Flush flushes all pending log I/O.
--func Flush() {
--	logging.lockAndFlushAll()
--}
--
--// loggingT collects all the global state of the logging setup.
--type loggingT struct {
--	// Boolean flags. Not handled atomically because the flag.Value interface
--	// does not let us avoid the =true, and that shorthand is necessary for
--	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
--	toStderr     bool // The -logtostderr flag.
--	alsoToStderr bool // The -alsologtostderr flag.
--
--	// Level flag. Handled atomically.
--	stderrThreshold severity // The -stderrthreshold flag.
--
--	// freeList is a list of byte buffers, maintained under freeListMu.
--	freeList *buffer
--	// freeListMu maintains the free list. It is separate from the main mutex
--	// so buffers can be grabbed and printed to without holding the main lock,
--	// for better parallelization.
--	freeListMu sync.Mutex
--
--	// mu protects the remaining elements of this structure and is
--	// used to synchronize logging.
--	mu sync.Mutex
--	// file holds writer for each of the log types.
--	file [numSeverity]flushSyncWriter
--	// pcs is used in V to avoid an allocation when computing the caller's PC.
--	pcs [1]uintptr
--	// vmap is a cache of the V Level for each V() call site, identified by PC.
--	// It is wiped whenever the vmodule flag changes state.
--	vmap map[uintptr]Level
--	// filterLength stores the length of the vmodule filter chain. If greater
--	// than zero, it means vmodule is enabled. It may be read safely
--	// using sync.LoadInt32, but is only modified under mu.
--	filterLength int32
--	// traceLocation is the state of the -log_backtrace_at flag.
--	traceLocation traceLocation
--	// These flags are modified only under lock, although verbosity may be fetched
--	// safely using atomic.LoadInt32.
--	vmodule   moduleSpec // The state of the -vmodule flag.
--	verbosity Level      // V logging level, the value of the -v flag.
--}
--
--// buffer holds a byte Buffer for reuse. The zero value is ready for use.
--type buffer struct {
--	bytes.Buffer
--	tmp  [64]byte // temporary byte array for creating headers.
--	next *buffer
--}
--
--var logging loggingT
--
--// setVState sets a consistent state for V logging.
--// l.mu is held.
--func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
--	// Turn verbosity off so V will not fire while we are in transition.
--	logging.verbosity.set(0)
--	// Ditto for filter length.
--	atomic.StoreInt32(&logging.filterLength, 0)
--
--	// Set the new filters and wipe the pc->Level map if the filter has changed.
--	if setFilter {
--		logging.vmodule.filter = filter
--		logging.vmap = make(map[uintptr]Level)
--	}
--
--	// Things are consistent now, so enable filtering and verbosity.
--	// They are enabled in order opposite to that in V.
--	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
--	logging.verbosity.set(verbosity)
--}
--
--// getBuffer returns a new, ready-to-use buffer.
--func (l *loggingT) getBuffer() *buffer {
--	l.freeListMu.Lock()
--	b := l.freeList
--	if b != nil {
--		l.freeList = b.next
--	}
--	l.freeListMu.Unlock()
--	if b == nil {
--		b = new(buffer)
--	} else {
--		b.next = nil
--		b.Reset()
--	}
--	return b
--}
--
--// putBuffer returns a buffer to the free list.
--func (l *loggingT) putBuffer(b *buffer) {
--	if b.Len() >= 256 {
--		// Let big buffers die a natural death.
--		return
--	}
--	l.freeListMu.Lock()
--	b.next = l.freeList
--	l.freeList = b
--	l.freeListMu.Unlock()
--}
--
--var timeNow = time.Now // Stubbed out for testing.
--
--/*
--header formats a log header as defined by the C++ implementation.
--It returns a buffer containing the formatted header and the user's file and line number.
--The depth specifies how many stack frames above lives the source line to be identified in the log message.
--
--Log lines have this form:
--	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
--where the fields are defined as follows:
--	L                A single character, representing the log level (eg 'I' for INFO)
--	mm               The month (zero padded; ie May is '05')
--	dd               The day (zero padded)
--	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
--	threadid         The space-padded thread ID as returned by GetTID()
--	file             The file name
--	line             The line number
--	msg              The user-supplied message
--*/
--func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
--	_, file, line, ok := runtime.Caller(3 + depth)
--	if !ok {
--		file = "???"
--		line = 1
--	} else {
--		slash := strings.LastIndex(file, "/")
--		if slash >= 0 {
--			file = file[slash+1:]
--		}
--	}
--	return l.formatHeader(s, file, line), file, line
--}
--
--// formatHeader formats a log header using the provided file name and line number.
--func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
--	now := timeNow()
--	if line < 0 {
--		line = 0 // not a real line number, but acceptable to someDigits
--	}
--	if s > fatalLog {
--		s = infoLog // for safety.
--	}
--	buf := l.getBuffer()
--
--	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
--	// It's worth about 3X. Fprintf is hard.
--	_, month, day := now.Date()
--	hour, minute, second := now.Clock()
--	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
--	buf.tmp[0] = severityChar[s]
--	buf.twoDigits(1, int(month))
--	buf.twoDigits(3, day)
--	buf.tmp[5] = ' '
--	buf.twoDigits(6, hour)
--	buf.tmp[8] = ':'
--	buf.twoDigits(9, minute)
--	buf.tmp[11] = ':'
--	buf.twoDigits(12, second)
--	buf.tmp[14] = '.'
--	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
--	buf.tmp[21] = ' '
--	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
--	buf.tmp[29] = ' '
--	buf.Write(buf.tmp[:30])
--	buf.WriteString(file)
--	buf.tmp[0] = ':'
--	n := buf.someDigits(1, line)
--	buf.tmp[n+1] = ']'
--	buf.tmp[n+2] = ' '
--	buf.Write(buf.tmp[:n+3])
--	return buf
--}
--
--// Some custom tiny helper functions to print the log header efficiently.
--
--const digits = "0123456789"
--
--// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
--func (buf *buffer) twoDigits(i, d int) {
--	buf.tmp[i+1] = digits[d%10]
--	d /= 10
--	buf.tmp[i] = digits[d%10]
--}
--
--// nDigits formats an n-digit integer at buf.tmp[i],
--// padding with pad on the left.
--// It assumes d >= 0.
--func (buf *buffer) nDigits(n, i, d int, pad byte) {
--	j := n - 1
--	for ; j >= 0 && d > 0; j-- {
--		buf.tmp[i+j] = digits[d%10]
--		d /= 10
--	}
--	for ; j >= 0; j-- {
--		buf.tmp[i+j] = pad
--	}
--}
--
--// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
--func (buf *buffer) someDigits(i, d int) int {
--	// Print into the top, then copy down. We know there's space for at least
--	// a 10-digit number.
--	j := len(buf.tmp)
--	for {
--		j--
--		buf.tmp[j] = digits[d%10]
--		d /= 10
--		if d == 0 {
--			break
--		}
--	}
--	return copy(buf.tmp[i:], buf.tmp[j:])
--}
--
--func (l *loggingT) println(s severity, args ...interface{}) {
--	buf, file, line := l.header(s, 0)
--	fmt.Fprintln(buf, args...)
--	l.output(s, buf, file, line, false)
--}
--
--func (l *loggingT) print(s severity, args ...interface{}) {
--	l.printDepth(s, 1, args...)
--}
--
--func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
--	buf, file, line := l.header(s, depth)
--	fmt.Fprint(buf, args...)
--	if buf.Bytes()[buf.Len()-1] != '\n' {
--		buf.WriteByte('\n')
--	}
--	l.output(s, buf, file, line, false)
--}
--
--func (l *loggingT) printf(s severity, format string, args ...interface{}) {
--	buf, file, line := l.header(s, 0)
--	fmt.Fprintf(buf, format, args...)
--	if buf.Bytes()[buf.Len()-1] != '\n' {
--		buf.WriteByte('\n')
--	}
--	l.output(s, buf, file, line, false)
--}
--
--// printWithFileLine behaves like print but uses the provided file and line number.  If
--// alsoToStderr is true, the log message always appears on standard error; it
--// will also appear in the log file unless --logtostderr is set.
--func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
--	buf := l.formatHeader(s, file, line)
--	fmt.Fprint(buf, args...)
--	if buf.Bytes()[buf.Len()-1] != '\n' {
--		buf.WriteByte('\n')
--	}
--	l.output(s, buf, file, line, alsoToStderr)
--}
--
--// output writes the data to the log files and releases the buffer.
--func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
--	l.mu.Lock()
--	if l.traceLocation.isSet() {
--		if l.traceLocation.match(file, line) {
--			buf.Write(stacks(false))
--		}
--	}
--	data := buf.Bytes()
--	if l.toStderr {
--		os.Stderr.Write(data)
--	} else {
--		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
--			os.Stderr.Write(data)
--		}
--		if l.file[s] == nil {
--			if err := l.createFiles(s); err != nil {
--				os.Stderr.Write(data) // Make sure the message appears somewhere.
--				l.exit(err)
--			}
--		}
--		switch s {
--		case fatalLog:
--			l.file[fatalLog].Write(data)
--			fallthrough
--		case errorLog:
--			l.file[errorLog].Write(data)
--			fallthrough
--		case warningLog:
--			l.file[warningLog].Write(data)
--			fallthrough
--		case infoLog:
--			l.file[infoLog].Write(data)
--		}
--	}
--	if s == fatalLog {
--		// If we got here via Exit rather than Fatal, print no stacks.
--		if atomic.LoadUint32(&fatalNoStacks) > 0 {
--			l.mu.Unlock()
--			timeoutFlush(10 * time.Second)
--			os.Exit(1)
--		}
--		// Dump all goroutine stacks before exiting.
--		// First, make sure we see the trace for the current goroutine on standard error.
--		// If -logtostderr has been specified, the loop below will do that anyway
--		// as the first stack in the full dump.
--		if !l.toStderr {
--			os.Stderr.Write(stacks(false))
--		}
--		// Write the stack trace for all goroutines to the files.
--		trace := stacks(true)
--		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
--		for log := fatalLog; log >= infoLog; log-- {
--			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
--				f.Write(trace)
--			}
--		}
--		l.mu.Unlock()
--		timeoutFlush(10 * time.Second)
--		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
--	}
--	l.putBuffer(buf)
--	l.mu.Unlock()
--	if stats := severityStats[s]; stats != nil {
--		atomic.AddInt64(&stats.lines, 1)
--		atomic.AddInt64(&stats.bytes, int64(len(data)))
--	}
--}
--
--// timeoutFlush calls Flush and returns when it completes or after timeout
--// elapses, whichever happens first.  This is needed because the hooks invoked
--// by Flush may deadlock when glog.Fatal is called from a hook that holds
--// a lock.
--func timeoutFlush(timeout time.Duration) {
--	done := make(chan bool, 1)
--	go func() {
--		Flush() // calls logging.lockAndFlushAll()
--		done <- true
--	}()
--	select {
--	case <-done:
--	case <-time.After(timeout):
--		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
--	}
--}
--
--// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
--func stacks(all bool) []byte {
--	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
--	n := 10000
--	if all {
--		n = 100000
--	}
--	var trace []byte
--	for i := 0; i < 5; i++ {
--		trace = make([]byte, n)
--		nbytes := runtime.Stack(trace, all)
--		if nbytes < len(trace) {
--			return trace[:nbytes]
--		}
--		n *= 2
--	}
--	return trace
--}
--
--// logExitFunc provides a simple mechanism to override the default behavior
--// of exiting on error. Used in testing and to guarantee we reach a required exit
--// for fatal logs. Instead, exit could be a function rather than a method but that
--// would make its use clumsier.
--var logExitFunc func(error)
--
--// exit is called if there is trouble creating or writing log files.
--// It flushes the logs and exits the program; there's no point in hanging around.
--// l.mu is held.
--func (l *loggingT) exit(err error) {
--	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
--	// If logExitFunc is set, we do that instead of exiting.
--	if logExitFunc != nil {
--		logExitFunc(err)
--		return
--	}
--	l.flushAll()
--	os.Exit(2)
--}
--
--// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
--// file's Sync method and providing a wrapper for the Write method that provides log
--// file rotation. There are conflicting methods, so the file cannot be embedded.
--// l.mu is held for all its methods.
--type syncBuffer struct {
--	logger *loggingT
--	*bufio.Writer
--	file   *os.File
--	sev    severity
--	nbytes uint64 // The number of bytes written to this file
--}
--
--func (sb *syncBuffer) Sync() error {
--	return sb.file.Sync()
--}
--
--func (sb *syncBuffer) Write(p []byte) (n int, err error) {
--	if sb.nbytes+uint64(len(p)) >= MaxSize {
--		if err := sb.rotateFile(time.Now()); err != nil {
--			sb.logger.exit(err)
--		}
--	}
--	n, err = sb.Writer.Write(p)
--	sb.nbytes += uint64(n)
--	if err != nil {
--		sb.logger.exit(err)
--	}
--	return
--}
--
--// rotateFile closes the syncBuffer's file and starts a new one.
--func (sb *syncBuffer) rotateFile(now time.Time) error {
--	if sb.file != nil {
--		sb.Flush()
--		sb.file.Close()
--	}
--	var err error
--	sb.file, _, err = create(severityName[sb.sev], now)
--	sb.nbytes = 0
--	if err != nil {
--		return err
--	}
--
--	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
--
--	// Write header.
--	var buf bytes.Buffer
--	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
--	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
--	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
--	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
--	n, err := sb.file.Write(buf.Bytes())
--	sb.nbytes += uint64(n)
--	return err
--}
--
--// bufferSize sizes the buffer associated with each log file. It's large
--// so that log records can accumulate without the logging thread blocking
--// on disk I/O. The flushDaemon will block instead.
--const bufferSize = 256 * 1024
--
--// createFiles creates all the log files for severity from sev down to infoLog.
--// l.mu is held.
--func (l *loggingT) createFiles(sev severity) error {
--	now := time.Now()
--	// Files are created in decreasing severity order, so as soon as we find one
--	// has already been created, we can stop.
--	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
--		sb := &syncBuffer{
--			logger: l,
--			sev:    s,
--		}
--		if err := sb.rotateFile(now); err != nil {
--			return err
--		}
--		l.file[s] = sb
--	}
--	return nil
--}
--
--const flushInterval = 30 * time.Second
--
--// flushDaemon periodically flushes the log file buffers.
--func (l *loggingT) flushDaemon() {
--	for _ = range time.NewTicker(flushInterval).C {
--		l.lockAndFlushAll()
--	}
--}
--
--// lockAndFlushAll is like flushAll but locks l.mu first.
--func (l *loggingT) lockAndFlushAll() {
--	l.mu.Lock()
--	l.flushAll()
--	l.mu.Unlock()
--}
--
--// flushAll flushes all the logs and attempts to "sync" their data to disk.
--// l.mu is held.
--func (l *loggingT) flushAll() {
--	// Flush from fatal down, in case there's trouble flushing.
--	for s := fatalLog; s >= infoLog; s-- {
--		file := l.file[s]
--		if file != nil {
--			file.Flush() // ignore error
--			file.Sync()  // ignore error
--		}
--	}
--}
--
--// CopyStandardLogTo arranges for messages written to the Go "log" package's
--// default logs to also appear in the Google logs for the named and lower
--// severities.  Subsequent changes to the standard log's default output location
--// or format may break this behavior.
--//
--// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
--// recognized, CopyStandardLogTo panics.
--func CopyStandardLogTo(name string) {
--	sev, ok := severityByName(name)
--	if !ok {
--		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
--	}
--	// Set a log format that captures the user's file and line:
--	//   d.go:23: message
--	stdLog.SetFlags(stdLog.Lshortfile)
--	stdLog.SetOutput(logBridge(sev))
--}
--
--// logBridge provides the Write method that enables CopyStandardLogTo to connect
--// Go's standard logs to the logs provided by this package.
--type logBridge severity
--
--// Write parses the standard logging line and passes its components to the
--// logger for severity(lb).
--func (lb logBridge) Write(b []byte) (n int, err error) {
--	var (
--		file = "???"
--		line = 1
--		text string
--	)
--	// Split "d.go:23: message" into "d.go", "23", and "message".
--	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
--		text = fmt.Sprintf("bad log format: %s", b)
--	} else {
--		file = string(parts[0])
--		text = string(parts[2][1:]) // skip leading space
--		line, err = strconv.Atoi(string(parts[1]))
--		if err != nil {
--			text = fmt.Sprintf("bad line number: %s", b)
--			line = 1
--		}
--	}
--	// printWithFileLine with alsoToStderr=true, so standard log messages
--	// always appear on standard error.
--	logging.printWithFileLine(severity(lb), file, line, true, text)
--	return len(b), nil
--}
--
--// setV computes and remembers the V level for a given PC
--// when vmodule is enabled.
--// File pattern matching takes the basename of the file, stripped
--// of its .go suffix, and uses filepath.Match, which is a little more
--// general than the *? matching used in C++.
--// l.mu is held.
--func (l *loggingT) setV(pc uintptr) Level {
--	fn := runtime.FuncForPC(pc)
--	file, _ := fn.FileLine(pc)
--	// The file is something like /a/b/c/d.go. We want just the d.
--	if strings.HasSuffix(file, ".go") {
--		file = file[:len(file)-3]
--	}
--	if slash := strings.LastIndex(file, "/"); slash >= 0 {
--		file = file[slash+1:]
--	}
--	for _, filter := range l.vmodule.filter {
--		if filter.match(file) {
--			l.vmap[pc] = filter.level
--			return filter.level
--		}
--	}
--	l.vmap[pc] = 0
--	return 0
--}
--
--// Verbose is a boolean type that implements Infof (like Printf) etc.
--// See the documentation of V for more information.
--type Verbose bool
--
--// V reports whether verbosity at the call site is at least the requested level.
--// The returned value is a boolean of type Verbose, which implements Info, Infoln
--// and Infof. These methods will write to the Info log if called.
--// Thus, one may write either
--//	if glog.V(2) { glog.Info("log this") }
--// or
--//	glog.V(2).Info("log this")
--// The second form is shorter but the first is cheaper if logging is off because it does
--// not evaluate its arguments.
--//
--// Whether an individual call to V generates a log record depends on the setting of
--// the -v and --vmodule flags; both are off by default. If the level in the call to
--// V is at least the value of -v, or of -vmodule for the source file containing the
--// call, the V call will log.
--func V(level Level) Verbose {
--	// This function tries hard to be cheap unless there's work to do.
--	// The fast path is two atomic loads and compares.
--
--	// Here is a cheap but safe test to see if V logging is enabled globally.
--	if logging.verbosity.get() >= level {
--		return Verbose(true)
--	}
--
--	// It's off globally but vmodule may still be set.
--	// Here is another cheap but safe test to see if vmodule is enabled.
--	if atomic.LoadInt32(&logging.filterLength) > 0 {
--		// Now we need a proper lock to use the logging structure. The pcs field
--		// is shared so we must lock before accessing it. This is fairly expensive,
--		// but if V logging is enabled we're slow anyway.
--		logging.mu.Lock()
--		defer logging.mu.Unlock()
--		if runtime.Callers(2, logging.pcs[:]) == 0 {
--			return Verbose(false)
--		}
--		v, ok := logging.vmap[logging.pcs[0]]
--		if !ok {
--			v = logging.setV(logging.pcs[0])
--		}
--		return Verbose(v >= level)
--	}
--	return Verbose(false)
--}
--
--// Info is equivalent to the global Info function, guarded by the value of v.
--// See the documentation of V for usage.
--func (v Verbose) Info(args ...interface{}) {
--	if v {
--		logging.print(infoLog, args...)
--	}
--}
--
--// Infoln is equivalent to the global Infoln function, guarded by the value of v.
--// See the documentation of V for usage.
--func (v Verbose) Infoln(args ...interface{}) {
--	if v {
--		logging.println(infoLog, args...)
--	}
--}
--
--// Infof is equivalent to the global Infof function, guarded by the value of v.
--// See the documentation of V for usage.
--func (v Verbose) Infof(format string, args ...interface{}) {
--	if v {
--		logging.printf(infoLog, format, args...)
--	}
--}
--
--// Info logs to the INFO log.
--// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
--func Info(args ...interface{}) {
--	logging.print(infoLog, args...)
--}
--
--// InfoDepth acts as Info but uses depth to determine which call frame to log.
--// InfoDepth(0, "msg") is the same as Info("msg").
--func InfoDepth(depth int, args ...interface{}) {
--	logging.printDepth(infoLog, depth, args...)
--}
--
--// Infoln logs to the INFO log.
--// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
--func Infoln(args ...interface{}) {
--	logging.println(infoLog, args...)
--}
--
--// Infof logs to the INFO log.
--// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
--func Infof(format string, args ...interface{}) {
--	logging.printf(infoLog, format, args...)
--}
--
--// Warning logs to the WARNING and INFO logs.
--// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
--func Warning(args ...interface{}) {
--	logging.print(warningLog, args...)
--}
--
--// WarningDepth acts as Warning but uses depth to determine which call frame to log.
--// WarningDepth(0, "msg") is the same as Warning("msg").
--func WarningDepth(depth int, args ...interface{}) {
--	logging.printDepth(warningLog, depth, args...)
--}
--
--// Warningln logs to the WARNING and INFO logs.
--// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
--func Warningln(args ...interface{}) {
--	logging.println(warningLog, args...)
--}
--
--// Warningf logs to the WARNING and INFO logs.
--// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
--func Warningf(format string, args ...interface{}) {
--	logging.printf(warningLog, format, args...)
--}
--
--// Error logs to the ERROR, WARNING, and INFO logs.
--// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
--func Error(args ...interface{}) {
--	logging.print(errorLog, args...)
--}
--
--// ErrorDepth acts as Error but uses depth to determine which call frame to log.
--// ErrorDepth(0, "msg") is the same as Error("msg").
--func ErrorDepth(depth int, args ...interface{}) {
--	logging.printDepth(errorLog, depth, args...)
--}
--
--// Errorln logs to the ERROR, WARNING, and INFO logs.
--// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
--func Errorln(args ...interface{}) {
--	logging.println(errorLog, args...)
--}
--
--// Errorf logs to the ERROR, WARNING, and INFO logs.
--// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
--func Errorf(format string, args ...interface{}) {
--	logging.printf(errorLog, format, args...)
--}
--
--// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
--// including a stack trace of all running goroutines, then calls os.Exit(255).
--// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
--func Fatal(args ...interface{}) {
--	logging.print(fatalLog, args...)
--}
--
--// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
--// FatalDepth(0, "msg") is the same as Fatal("msg").
--func FatalDepth(depth int, args ...interface{}) {
--	logging.printDepth(fatalLog, depth, args...)
--}
--
--// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
--// including a stack trace of all running goroutines, then calls os.Exit(255).
--// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
--func Fatalln(args ...interface{}) {
--	logging.println(fatalLog, args...)
--}
--
--// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
--// including a stack trace of all running goroutines, then calls os.Exit(255).
--// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
--func Fatalf(format string, args ...interface{}) {
--	logging.printf(fatalLog, format, args...)
--}
--
--// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
--// It allows Exit and relatives to use the Fatal logs.
--var fatalNoStacks uint32
--
--// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
--// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
--func Exit(args ...interface{}) {
--	atomic.StoreUint32(&fatalNoStacks, 1)
--	logging.print(fatalLog, args...)
--}
--
--// ExitDepth acts as Exit but uses depth to determine which call frame to log.
--// ExitDepth(0, "msg") is the same as Exit("msg").
--func ExitDepth(depth int, args ...interface{}) {
--	atomic.StoreUint32(&fatalNoStacks, 1)
--	logging.printDepth(fatalLog, depth, args...)
--}
--
--// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
--func Exitln(args ...interface{}) {
--	atomic.StoreUint32(&fatalNoStacks, 1)
--	logging.println(fatalLog, args...)
--}
--
--// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
--// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
--func Exitf(format string, args ...interface{}) {
--	atomic.StoreUint32(&fatalNoStacks, 1)
--	logging.printf(fatalLog, format, args...)
--}
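The glog.go file removed above registers its command-line flags in init() and exposes the V/severity helpers. For reference, a minimal sketch of how a program typically consumes them, assuming the vendored import path github.com/golang/glog; the binary name and flag values in the comment are purely illustrative:

```go
package main

import (
	"flag"
	stdlog "log"

	"github.com/golang/glog"
)

func main() {
	// The glog flags (-logtostderr, -v, -vmodule, -stderrthreshold,
	// -log_backtrace_at) are registered in the package's init(), so a plain
	// flag.Parse() is enough to honor e.g.:
	//   ./kubelet -logtostderr=true -v=2 -vmodule=gce*=3
	flag.Parse()
	defer glog.Flush() // flush buffered log lines on exit

	glog.Infof("started with %d args", len(flag.Args()))
	glog.Warning("this also lands in the INFO log")
	glog.Errorf("and this in ERROR, WARNING and INFO: %v", "example")

	// V-style logging: cheap when disabled, only evaluated when -v or
	// -vmodule enables the requested level.
	if glog.V(2) {
		glog.Info("verbose detail")
	}

	// Route the standard library's logger into the INFO log as well.
	glog.CopyStandardLogTo("INFO")
	stdlog.Print("via the standard log package")
}
```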
-diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go
-deleted file mode 100644
-index 65075d2..0000000
---- a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go
-+++ /dev/null
-@@ -1,124 +0,0 @@
--// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
--//
--// Copyright 2013 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--// File I/O for logs.
--
--package glog
--
--import (
--	"errors"
--	"flag"
--	"fmt"
--	"os"
--	"os/user"
--	"path/filepath"
--	"strings"
--	"sync"
--	"time"
--)
--
--// MaxSize is the maximum size of a log file in bytes.
--var MaxSize uint64 = 1024 * 1024 * 1800
--
--// logDirs lists the candidate directories for new log files.
--var logDirs []string
--
--// If non-empty, overrides the choice of directory in which to write logs.
--// See createLogDirs for the full list of possible destinations.
--var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
--
--func createLogDirs() {
--	if *logDir != "" {
--		logDirs = append(logDirs, *logDir)
--	}
--	logDirs = append(logDirs, os.TempDir())
--}
--
--var (
--	pid      = os.Getpid()
--	program  = filepath.Base(os.Args[0])
--	host     = "unknownhost"
--	userName = "unknownuser"
--)
--
--func init() {
--	h, err := os.Hostname()
--	if err == nil {
--		host = shortHostname(h)
--	}
--
--	current, err := user.Current()
--	if err == nil {
--		userName = current.Username
--	}
--
--	// Sanitize userName since it may contain filepath separators on Windows.
--	userName = strings.Replace(userName, `\`, "_", -1)
--}
--
--// shortHostname returns its argument, truncating at the first period.
--// For instance, given "www.google.com" it returns "www".
--func shortHostname(hostname string) string {
--	if i := strings.Index(hostname, "."); i >= 0 {
--		return hostname[:i]
--	}
--	return hostname
--}
--
--// logName returns a new log file name containing tag, with start time t, and
--// the name for the symlink for tag.
--func logName(tag string, t time.Time) (name, link string) {
--	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
--		program,
--		host,
--		userName,
--		tag,
--		t.Year(),
--		t.Month(),
--		t.Day(),
--		t.Hour(),
--		t.Minute(),
--		t.Second(),
--		pid)
--	return name, program + "." + tag
--}
--
--var onceLogDirs sync.Once
--
--// create creates a new log file and returns the file and its filename, which
--// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
--// successfully, create also attempts to update the symlink for that tag, ignoring
--// errors.
--func create(tag string, t time.Time) (f *os.File, filename string, err error) {
--	onceLogDirs.Do(createLogDirs)
--	if len(logDirs) == 0 {
--		return nil, "", errors.New("log: no log dirs")
--	}
--	name, link := logName(tag, t)
--	var lastErr error
--	for _, dir := range logDirs {
--		fname := filepath.Join(dir, name)
--		f, err := os.Create(fname)
--		if err == nil {
--			symlink := filepath.Join(dir, link)
--			os.Remove(symlink)        // ignore err
--			os.Symlink(name, symlink) // ignore err
--			return f, fname, nil
--		}
--		lastErr = err
--	}
--	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
--}
-diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_test.go b/Godeps/_workspace/src/github.com/golang/glog/glog_test.go
-deleted file mode 100644
-index 0fb376e..0000000
---- a/Godeps/_workspace/src/github.com/golang/glog/glog_test.go
-+++ /dev/null
-@@ -1,415 +0,0 @@
--// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
--//
--// Copyright 2013 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package glog
--
--import (
--	"bytes"
--	"fmt"
--	stdLog "log"
--	"path/filepath"
--	"runtime"
--	"strconv"
--	"strings"
--	"testing"
--	"time"
--)
--
--// Test that shortHostname works as advertised.
--func TestShortHostname(t *testing.T) {
--	for hostname, expect := range map[string]string{
--		"":                "",
--		"host":            "host",
--		"host.google.com": "host",
--	} {
--		if got := shortHostname(hostname); expect != got {
--			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
--		}
--	}
--}
--
--// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
--type flushBuffer struct {
--	bytes.Buffer
--}
--
--func (f *flushBuffer) Flush() error {
--	return nil
--}
--
--func (f *flushBuffer) Sync() error {
--	return nil
--}
--
--// swap sets the log writers and returns the old array.
--func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
--	l.mu.Lock()
--	defer l.mu.Unlock()
--	old = l.file
--	for i, w := range writers {
--		logging.file[i] = w
--	}
--	return
--}
--
--// newBuffers sets the log writers to all new byte buffers and returns the old array.
--func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
--	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
--}
--
--// contents returns the specified log value as a string.
--func contents(s severity) string {
--	return logging.file[s].(*flushBuffer).String()
--}
--
--// contains reports whether the string is contained in the log.
--func contains(s severity, str string, t *testing.T) bool {
--	return strings.Contains(contents(s), str)
--}
--
--// setFlags configures the logging flags how the test expects them.
--func setFlags() {
--	logging.toStderr = false
--}
--
--// Test that Info works as advertised.
--func TestInfo(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	Info("test")
--	if !contains(infoLog, "I", t) {
--		t.Errorf("Info has wrong character: %q", contents(infoLog))
--	}
--	if !contains(infoLog, "test", t) {
--		t.Error("Info failed")
--	}
--}
--
--func TestInfoDepth(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--
--	f := func() { InfoDepth(1, "depth-test1") }
--
--	// The next three lines must stay together
--	_, _, wantLine, _ := runtime.Caller(0)
--	InfoDepth(0, "depth-test0")
--	f()
--
--	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
--	if len(msgs) != 2 {
--		t.Fatalf("Got %d lines, expected 2", len(msgs))
--	}
--
--	for i, m := range msgs {
--		if !strings.HasPrefix(m, "I") {
--			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
--		}
--		w := fmt.Sprintf("depth-test%d", i)
--		if !strings.Contains(m, w) {
--			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
--		}
--
--		// pull out the line number (between : and ])
--		msg := m[strings.LastIndex(m, ":")+1:]
--		x := strings.Index(msg, "]")
--		if x < 0 {
--			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
--			continue
--		}
--		line, err := strconv.Atoi(msg[:x])
--		if err != nil {
--			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
--			continue
--		}
--		wantLine++
--		if wantLine != line {
--			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
--		}
--	}
--}
--
--func init() {
--	CopyStandardLogTo("INFO")
--}
--
--// Test that CopyStandardLogTo panics on bad input.
--func TestCopyStandardLogToPanic(t *testing.T) {
--	defer func() {
--		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
--			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
--		}
--	}()
--	CopyStandardLogTo("LOG")
--}
--
--// Test that using the standard log package logs to INFO.
--func TestStandardLog(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	stdLog.Print("test")
--	if !contains(infoLog, "I", t) {
--		t.Errorf("Info has wrong character: %q", contents(infoLog))
--	}
--	if !contains(infoLog, "test", t) {
--		t.Error("Info failed")
--	}
--}
--
--// Test that the header has the correct format.
--func TestHeader(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
--	timeNow = func() time.Time {
--		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
--	}
--	pid = 1234
--	Info("test")
--	var line int
--	format := "I0102 15:04:05.067890    1234 glog_test.go:%d] test\n"
--	n, err := fmt.Sscanf(contents(infoLog), format, &line)
--	if n != 1 || err != nil {
--		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
--	}
--	// Scanf treats multiple spaces as equivalent to a single space,
--	// so check for correct space-padding also.
--	want := fmt.Sprintf(format, line)
--	if contents(infoLog) != want {
--		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
--	}
--}
--
--// Test that an Error log goes to Warning and Info.
--// Even in the Info log, the source character will be E, so the data should
--// all be identical.
--func TestError(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	Error("test")
--	if !contains(errorLog, "E", t) {
--		t.Errorf("Error has wrong character: %q", contents(errorLog))
--	}
--	if !contains(errorLog, "test", t) {
--		t.Error("Error failed")
--	}
--	str := contents(errorLog)
--	if !contains(warningLog, str, t) {
--		t.Error("Warning failed")
--	}
--	if !contains(infoLog, str, t) {
--		t.Error("Info failed")
--	}
--}
--
--// Test that a Warning log goes to Info.
--// Even in the Info log, the source character will be W, so the data should
--// all be identical.
--func TestWarning(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	Warning("test")
--	if !contains(warningLog, "W", t) {
--		t.Errorf("Warning has wrong character: %q", contents(warningLog))
--	}
--	if !contains(warningLog, "test", t) {
--		t.Error("Warning failed")
--	}
--	str := contents(warningLog)
--	if !contains(infoLog, str, t) {
--		t.Error("Info failed")
--	}
--}
--
--// Test that a V log goes to Info.
--func TestV(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	logging.verbosity.Set("2")
--	defer logging.verbosity.Set("0")
--	V(2).Info("test")
--	if !contains(infoLog, "I", t) {
--		t.Errorf("Info has wrong character: %q", contents(infoLog))
--	}
--	if !contains(infoLog, "test", t) {
--		t.Error("Info failed")
--	}
--}
--
--// Test that a vmodule enables a log in this file.
--func TestVmoduleOn(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	logging.vmodule.Set("glog_test=2")
--	defer logging.vmodule.Set("")
--	if !V(1) {
--		t.Error("V not enabled for 1")
--	}
--	if !V(2) {
--		t.Error("V not enabled for 2")
--	}
--	if V(3) {
--		t.Error("V enabled for 3")
--	}
--	V(2).Info("test")
--	if !contains(infoLog, "I", t) {
--		t.Errorf("Info has wrong character: %q", contents(infoLog))
--	}
--	if !contains(infoLog, "test", t) {
--		t.Error("Info failed")
--	}
--}
--
--// Test that a vmodule of another file does not enable a log in this file.
--func TestVmoduleOff(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	logging.vmodule.Set("notthisfile=2")
--	defer logging.vmodule.Set("")
--	for i := 1; i <= 3; i++ {
--		if V(Level(i)) {
--			t.Errorf("V enabled for %d", i)
--		}
--	}
--	V(2).Info("test")
--	if contents(infoLog) != "" {
--		t.Error("V logged incorrectly")
--	}
--}
--
--// vGlobs are patterns that match/don't match this file at V=2.
--var vGlobs = map[string]bool{
--	// Easy to test the numeric match here.
--	"glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
--	"glog_test=2": true,
--	"glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed.
--	// These all use 2 and check the patterns. All are true.
--	"*=2":           true,
--	"?l*=2":         true,
--	"????_*=2":      true,
--	"??[mno]?_*t=2": true,
--	// These all use 2 and check the patterns. All are false.
--	"*x=2":         false,
--	"m*=2":         false,
--	"??_*=2":       false,
--	"?[abc]?_*t=2": false,
--}
--
--// Test that vmodule globbing works as advertised.
--func testVmoduleGlob(pat string, match bool, t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	defer logging.vmodule.Set("")
--	logging.vmodule.Set(pat)
--	if V(2) != Verbose(match) {
--		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
--	}
--}
--
--// Test that a vmodule globbing works as advertised.
--func TestVmoduleGlob(t *testing.T) {
--	for glob, match := range vGlobs {
--		testVmoduleGlob(glob, match, t)
--	}
--}
--
--func TestRollover(t *testing.T) {
--	setFlags()
--	var err error
--	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
--	logExitFunc = func(e error) {
--		err = e
--	}
--	defer func(previous uint64) { MaxSize = previous }(MaxSize)
--	MaxSize = 512
--
--	Info("x") // Be sure we have a file.
--	info, ok := logging.file[infoLog].(*syncBuffer)
--	if !ok {
--		t.Fatal("info wasn't created")
--	}
--	if err != nil {
--		t.Fatalf("info has initial error: %v", err)
--	}
--	fname0 := info.file.Name()
--	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
--	if err != nil {
--		t.Fatalf("info has error after big write: %v", err)
--	}
--
--	// Make sure the next log file gets a file name with a different
--	// time stamp.
--	//
--	// TODO: determine whether we need to support subsecond log
--	// rotation.  C++ does not appear to handle this case (nor does it
--	// handle Daylight Savings Time properly).
--	time.Sleep(1 * time.Second)
--
--	Info("x") // create a new file
--	if err != nil {
--		t.Fatalf("error after rotation: %v", err)
--	}
--	fname1 := info.file.Name()
--	if fname0 == fname1 {
--		t.Errorf("info.f.Name did not change: %v", fname0)
--	}
--	if info.nbytes >= MaxSize {
--		t.Errorf("file size was not reset: %d", info.nbytes)
--	}
--}
--
--func TestLogBacktraceAt(t *testing.T) {
--	setFlags()
--	defer logging.swap(logging.newBuffers())
--	// The peculiar style of this code simplifies line counting and maintenance of the
--	// tracing block below.
--	var infoLine string
--	setTraceLocation := func(file string, line int, ok bool, delta int) {
--		if !ok {
--			t.Fatal("could not get file:line")
--		}
--		_, file = filepath.Split(file)
--		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
--		err := logging.traceLocation.Set(infoLine)
--		if err != nil {
--			t.Fatal("error setting log_backtrace_at: ", err)
--		}
--	}
--	{
--		// Start of tracing block. These lines know about each other's relative position.
--		_, file, line, ok := runtime.Caller(0)
--		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
--		Info("we want a stack trace here")
--	}
--	numAppearances := strings.Count(contents(infoLog), infoLine)
--	if numAppearances < 2 {
--		// Need 2 appearances, one in the log header and one in the trace:
--		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
--		//   ...
--		//   github.com/glog/glog_test.go:280 (0x41ba91)
--		//   ...
--		// We could be more precise but that would require knowing the details
--		// of the traceback format, which may not be dependable.
--		t.Fatal("got no trace back; log is ", contents(infoLog))
--	}
--}
--
--func BenchmarkHeader(b *testing.B) {
--	for i := 0; i < b.N; i++ {
--		buf, _, _ := logging.header(infoLog, 0)
--		logging.putBuffer(buf)
--	}
--}
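The tests above adjust verbosity through unexported hooks; outside the package the same settings can be changed at runtime via the standard flag package, since glog registers its flags with flag.Var. A small sketch under that assumption (the flag values are illustrative):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	defer glog.Flush()

	// Equivalent to -v=2 and -vmodule=gce*=3 on the command line.
	if err := flag.Set("v", "2"); err != nil {
		glog.Errorf("setting -v: %v", err)
	}
	if err := flag.Set("vmodule", "gce*=3"); err != nil {
		glog.Errorf("setting -vmodule: %v", err)
	}

	glog.V(2).Infof("now visible at verbosity %d", 2)
}
```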
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/client/README.md b/Godeps/_workspace/src/github.com/google/cadvisor/client/README.md
-deleted file mode 100644
-index fededef..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/client/README.md
-+++ /dev/null
-@@ -1,54 +0,0 @@
--# Example REST API Client
--
--This is an implementation of a cAdvisor REST API client in Go.  You can use it like this:
--
--```go
--client, err := client.NewClient("http://192.168.59.103:8080/")
--```
--
--Obviously, replace the URL with the path to your actual cAdvisor REST endpoint.
--
--
--### MachineInfo
--
--```go
--client.MachineInfo()
--```
--
--This method returns a cadvisor/info.MachineInfo struct with all the fields filled in.  Here is an example return value:
--
--```
--(*info.MachineInfo)(0xc208022b10)({
-- NumCores: (int) 4,
-- MemoryCapacity: (int64) 2106028032,
-- Filesystems: ([]info.FsInfo) (len=1 cap=4) {
--  (info.FsInfo) {
--   Device: (string) (len=9) "/dev/sda1",
--   Capacity: (uint64) 19507089408
--  }
-- }
--})
--```
--
--You can see the full specification of the [MachineInfo struct in the source](../info/container.go)
--
--### ContainerInfo
--
--Given a container name and a ContainerInfoRequest, will return all information about the specified container.  The ContainerInfoRequest struct just has one field, NumStats, which is the number of stat entries that you want returned.
--
--```go
--request := info.ContainerInfoRequest{10}
--sInfo, err := client.ContainerInfo("/docker/d9d3eb10179e6f93a...", &request)
--```
--Returns a [ContainerInfo struct](../info/container.go)
--
--### SubcontainersInfo
--
--Given a container name and a ContainerInfoRequest, will recursively return all info about the container and all subcontainers contained within the container.  The ContainerInfoRequest struct just has one field, NumStats, which is the number of stat entries that you want returned.
--
--```go
--request := info.ContainerInfoRequest{10}
--sInfo, err := client.SubcontainersInfo("/docker", &request)
--```
--
--Returns a [ContainerInfo struct](../info/container.go) with the Subcontainers field populated.
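Pulling the README snippets above together, a self-contained client session might look like the sketch below; the endpoint URL and container path are placeholders, and only methods shown in the removed client are used:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cadvisor/client"
	"github.com/google/cadvisor/info"
)

func main() {
	// Placeholder endpoint; point it at a real cAdvisor instance.
	c, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatal(err)
	}

	machine, err := c.MachineInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cores=%d memory=%d bytes\n", machine.NumCores, machine.MemoryCapacity)

	// Ask for the 10 most recent stats samples.
	request := info.ContainerInfoRequest{NumStats: 10}

	// "/docker/<id>" is a hypothetical container path.
	cinfo, err := c.ContainerInfo("/docker/<id>", &request)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cinfo)

	subs, err := c.SubcontainersInfo("/docker", &request)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d containers under /docker\n", len(subs))
}
```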
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/client/client.go b/Godeps/_workspace/src/github.com/google/cadvisor/client/client.go
-deleted file mode 100644
-index 5c016e0..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/client/client.go
-+++ /dev/null
-@@ -1,161 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--// TODO(cAdvisor): Package comment.
--package client
--
--import (
--	"bytes"
--	"encoding/json"
--	"fmt"
--	"io/ioutil"
--	"net/http"
--	"path"
--	"strings"
--
--	"github.com/google/cadvisor/info"
--)
--
--// Client represents the base URL for a cAdvisor client.
--type Client struct {
--	baseUrl string
--}
--
--// NewClient returns a new client with the specified base URL.
--func NewClient(url string) (*Client, error) {
--	if !strings.HasSuffix(url, "/") {
--		url += "/"
--	}
--
--	return &Client{
--		baseUrl: fmt.Sprintf("%sapi/v1.2/", url),
--	}, nil
--}
--
--// MachineInfo returns the JSON machine information for this client.
--// A non-nil error result indicates a problem with obtaining
--// the JSON machine information data.
--func (self *Client) MachineInfo() (minfo *info.MachineInfo, err error) {
--	u := self.machineInfoUrl()
--	ret := new(info.MachineInfo)
--	if err = self.httpGetJsonData(ret, nil, u, "machine info"); err != nil {
--		return
--	}
--	minfo = ret
--	return
--}
--
--// ContainerInfo returns the JSON container information for the specified
--// container and request.
--func (self *Client) ContainerInfo(name string, query *info.ContainerInfoRequest) (cinfo *info.ContainerInfo, err error) {
--	u := self.containerInfoUrl(name)
--	ret := new(info.ContainerInfo)
--	if err = self.httpGetJsonData(ret, query, u, fmt.Sprintf("container info for %q", name)); err != nil {
--		return
--	}
--	cinfo = ret
--	return
--}
--
--// Returns the information about all subcontainers (recursive) of the specified container (including itself).
--func (self *Client) SubcontainersInfo(name string, query *info.ContainerInfoRequest) ([]info.ContainerInfo, error) {
--	var response []info.ContainerInfo
--	url := self.subcontainersInfoUrl(name)
--	err := self.httpGetJsonData(&response, query, url, fmt.Sprintf("subcontainers container info for %q", name))
--	if err != nil {
--		return []info.ContainerInfo{}, err
--
--	}
--	return response, nil
--}
--
--// Returns the JSON container information for the specified
--// Docker container and request.
--func (self *Client) DockerContainer(name string, query *info.ContainerInfoRequest) (cinfo info.ContainerInfo, err error) {
--	u := self.dockerInfoUrl(name)
--	ret := make(map[string]info.ContainerInfo)
--	if err = self.httpGetJsonData(&ret, query, u, fmt.Sprintf("Docker container info for %q", name)); err != nil {
--		return
--	}
--	if len(ret) != 1 {
--		err = fmt.Errorf("expected to only receive 1 Docker container: %+v", ret)
--		return
--	}
--	for _, cont := range ret {
--		cinfo = cont
--	}
--	return
--}
--
--// Returns the JSON container information for all Docker containers.
--func (self *Client) AllDockerContainers(query *info.ContainerInfoRequest) (cinfo []info.ContainerInfo, err error) {
--	u := self.dockerInfoUrl("/")
--	ret := make(map[string]info.ContainerInfo)
--	if err = self.httpGetJsonData(&ret, query, u, "all Docker containers info"); err != nil {
--		return
--	}
--	cinfo = make([]info.ContainerInfo, 0, len(ret))
--	for _, cont := range ret {
--		cinfo = append(cinfo, cont)
--	}
--	return
--}
--
--func (self *Client) machineInfoUrl() string {
--	return self.baseUrl + path.Join("machine")
--}
--
--func (self *Client) containerInfoUrl(name string) string {
--	return self.baseUrl + path.Join("containers", name)
--}
--
--func (self *Client) subcontainersInfoUrl(name string) string {
--	return self.baseUrl + path.Join("subcontainers", name)
--}
--
--func (self *Client) dockerInfoUrl(name string) string {
--	return self.baseUrl + path.Join("docker", name)
--}
--
--func (self *Client) httpGetJsonData(data, postData interface{}, url, infoName string) error {
--	var resp *http.Response
--	var err error
--
--	if postData != nil {
--		data, err := json.Marshal(postData)
--		if err != nil {
--			return fmt.Errorf("unable to marshal data: %v", err)
--		}
--		resp, err = http.Post(url, "application/json", bytes.NewBuffer(data))
--	} else {
--		resp, err = http.Get(url)
--	}
--	if err != nil {
--		return fmt.Errorf("unable to get %q: %v", infoName, err)
--	}
--	if resp == nil {
--		return fmt.Errorf("received empty response from %q", infoName)
--	}
--	defer resp.Body.Close()
--	body, err := ioutil.ReadAll(resp.Body)
--	if err != nil {
--		err = fmt.Errorf("unable to read all %q: %v", infoName, err)
--		return err
--	}
--	if err = json.Unmarshal(body, data); err != nil {
--		err = fmt.Errorf("unable to unmarshal %q (%v): %v", infoName, string(body), err)
--		return err
--	}
--	return nil
--}
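The removed client.go also exposes Docker-specific helpers (DockerContainer, AllDockerContainers) that the README does not cover; a brief sketch under the same assumptions as above, with a hypothetical container name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cadvisor/client"
	"github.com/google/cadvisor/info"
)

func main() {
	c, err := client.NewClient("http://localhost:8080/")
	if err != nil {
		log.Fatal(err)
	}
	request := info.ContainerInfoRequest{NumStats: 3}

	// All Docker containers known to this cAdvisor instance.
	all, err := c.AllDockerContainers(&request)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d Docker containers\n", len(all))

	// A single Docker container, addressed by name or ID ("redis" is hypothetical).
	one, err := c.DockerContainer("redis", &request)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", one)
}
```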
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/client/client_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/client/client_test.go
-deleted file mode 100644
-index 67bf3c2..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/client/client_test.go
-+++ /dev/null
-@@ -1,154 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package client
--
--import (
--	"encoding/json"
--	"fmt"
--	"net/http"
--	"net/http/httptest"
--	"path"
--	"reflect"
--	"testing"
--	"time"
--
--	"github.com/google/cadvisor/info"
--	itest "github.com/google/cadvisor/info/test"
--	"github.com/kr/pretty"
--)
--
--func testGetJsonData(
--	expected interface{},
--	f func() (interface{}, error),
--) error {
--	reply, err := f()
--	if err != nil {
--		return fmt.Errorf("unable to retrieve data: %v", err)
--	}
--	if !reflect.DeepEqual(reply, expected) {
--		return pretty.Errorf("retrieved wrong data: %# v != %# v", reply, expected)
--	}
--	return nil
--}
--
--func cadvisorTestClient(path string, expectedPostObj, expectedPostObjEmpty, replyObj interface{}, t *testing.T) (*Client, *httptest.Server, error) {
--	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		if r.URL.Path == path {
--			if expectedPostObj != nil {
--				decoder := json.NewDecoder(r.Body)
--				if err := decoder.Decode(expectedPostObjEmpty); err != nil {
--					t.Errorf("Received invalid object: %v", err)
--				}
--				if !reflect.DeepEqual(expectedPostObj, expectedPostObjEmpty) {
--					t.Errorf("Received unexpected object: %+v", expectedPostObjEmpty)
--				}
--			}
--			encoder := json.NewEncoder(w)
--			encoder.Encode(replyObj)
--		} else if r.URL.Path == "/api/v1.2/machine" {
--			fmt.Fprint(w, `{"num_cores":8,"memory_capacity":31625871360}`)
--		} else {
--			w.WriteHeader(http.StatusNotFound)
--			fmt.Fprintf(w, "Page not found.")
--		}
--	}))
--	client, err := NewClient(ts.URL)
--	if err != nil {
--		ts.Close()
--		return nil, nil, err
--	}
--	return client, ts, err
--}
--
--// TestGetMachineInfo performs one test to check if MachineInfo()
--// in a cAdvisor client returns the correct result.
--func TestGetMachineinfo(t *testing.T) {
--	minfo := &info.MachineInfo{
--		NumCores:       8,
--		MemoryCapacity: 31625871360,
--	}
--	client, server, err := cadvisorTestClient("/api/v1.2/machine", nil, nil, minfo, t)
--	if err != nil {
--		t.Fatalf("unable to get a client %v", err)
--	}
--	defer server.Close()
--	returned, err := client.MachineInfo()
--	if err != nil {
--		t.Fatal(err)
--	}
--	if !reflect.DeepEqual(returned, minfo) {
--		t.Fatalf("received unexpected machine info")
--	}
--}
--
--// TestGetContainerInfo generates a random container information object
--// and then checks that ContainerInfo returns the expected result.
--func TestGetContainerInfo(t *testing.T) {
--	query := &info.ContainerInfoRequest{
--		NumStats: 3,
--	}
--	containerName := "/some/container"
--	cinfo := itest.GenerateRandomContainerInfo(containerName, 4, query, 1*time.Second)
--	client, server, err := cadvisorTestClient(fmt.Sprintf("/api/v1.2/containers%v", containerName), query, &info.ContainerInfoRequest{}, cinfo, t)
--	if err != nil {
--		t.Fatalf("unable to get a client %v", err)
--	}
--	defer server.Close()
--	returned, err := client.ContainerInfo(containerName, query)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !returned.Eq(cinfo) {
--		t.Error("received unexpected ContainerInfo")
--	}
--}
--
--func TestGetSubcontainersInfo(t *testing.T) {
--	query := &info.ContainerInfoRequest{
--		NumStats: 3,
--	}
--	containerName := "/some/container"
--	cinfo := itest.GenerateRandomContainerInfo(containerName, 4, query, 1*time.Second)
--	cinfo1 := itest.GenerateRandomContainerInfo(path.Join(containerName, "sub1"), 4, query, 1*time.Second)
--	cinfo2 := itest.GenerateRandomContainerInfo(path.Join(containerName, "sub2"), 4, query, 1*time.Second)
--	response := []info.ContainerInfo{
--		*cinfo,
--		*cinfo1,
--		*cinfo2,
--	}
--	client, server, err := cadvisorTestClient(fmt.Sprintf("/api/v1.2/subcontainers%v", containerName), query, &info.ContainerInfoRequest{}, response, t)
--	if err != nil {
--		t.Fatalf("unable to get a client %v", err)
--	}
--	defer server.Close()
--	returned, err := client.SubcontainersInfo(containerName, query)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if len(returned) != 3 {
--		t.Errorf("unexpected number of results: got %d, expected 3", len(returned))
--	}
--	if !returned[0].Eq(cinfo) {
--		t.Error("received unexpected ContainerInfo")
--	}
--	if !returned[1].Eq(cinfo1) {
--		t.Error("received unexpected ContainerInfo")
--	}
--	if !returned[2].Eq(cinfo2) {
--		t.Error("received unexpected ContainerInfo")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/container.go
-deleted file mode 100644
-index 5a4fbbd..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/info/container.go
-+++ /dev/null
-@@ -1,379 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package info
--
--import (
--	"reflect"
--	"time"
--)
--
--type CpuSpec struct {
--	Limit    uint64 `json:"limit"`
--	MaxLimit uint64 `json:"max_limit"`
--	Mask     string `json:"mask,omitempty"`
--}
--
--type MemorySpec struct {
--	// The amount of memory requested. Default is unlimited (-1).
--	// Units: bytes.
--	Limit uint64 `json:"limit,omitempty"`
--
--	// The amount of guaranteed memory.  Default is 0.
--	// Units: bytes.
--	Reservation uint64 `json:"reservation,omitempty"`
--
--	// The amount of swap space requested. Default is unlimited (-1).
--	// Units: bytes.
--	SwapLimit uint64 `json:"swap_limit,omitempty"`
--}
--
--type ContainerSpec struct {
--	HasCpu bool    `json:"has_cpu"`
--	Cpu    CpuSpec `json:"cpu,omitempty"`
--
--	HasMemory bool       `json:"has_memory"`
--	Memory    MemorySpec `json:"memory,omitempty"`
--
--	HasNetwork bool `json:"has_network"`
--
--	HasFilesystem bool `json:"has_filesystem"`
--}
--
--// Container reference contains enough information to uniquely identify a container
--type ContainerReference struct {
--	// The absolute name of the container. This is unique on the machine.
--	Name string `json:"name"`
--
--	// Other names by which the container is known within a certain namespace.
--	// This is unique within that namespace.
--	Aliases []string `json:"aliases,omitempty"`
--
--	// Namespace under which the aliases of a container are unique.
--	// An example of a namespace is "docker" for Docker containers.
--	Namespace string `json:"namespace,omitempty"`
--}
--
--// ContainerInfoQuery is used when users check a container info from the REST api.
--// It specifies how much data users want to get about a container
--type ContainerInfoRequest struct {
--	// Max number of stats to return.
--	NumStats int `json:"num_stats,omitempty"`
--}
--
--type ContainerInfo struct {
--	ContainerReference
--
--	// The direct subcontainers of the current container.
--	Subcontainers []ContainerReference `json:"subcontainers,omitempty"`
--
--	// The isolation used in the container.
--	Spec ContainerSpec `json:"spec,omitempty"`
--
--	// Historical statistics gathered from the container.
--	Stats []*ContainerStats `json:"stats,omitempty"`
--}
--
--// ContainerInfo may be (un)marshaled by json or other en/decoder. In that
--// case, the Timestamp field in each stats/sample may not be precisely
--// en/decoded.  This will lead to small but acceptable differences between a
--// ContainerInfo and its encode-then-decode version.  Eq() is used to compare
--// two ContainerInfo accepting small difference (<10ms) of Time fields.
--func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
--
--	// If both self and b are nil, then Eq() returns true
--	if self == nil {
--		return b == nil
--	}
--	if b == nil {
--		return self == nil
--	}
--
--	// For fields other than time.Time, we will compare them precisely.
--	// This would require that any slice should have same order.
--	if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) {
--		return false
--	}
--	if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) {
--		return false
--	}
--	if !reflect.DeepEqual(self.Spec, b.Spec) {
--		return false
--	}
--
--	for i, expectedStats := range b.Stats {
--		selfStats := self.Stats[i]
--		if !expectedStats.Eq(selfStats) {
--			return false
--		}
--	}
--
--	return true
--}
--
--func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
--	n := len(self.Stats) + 1
--	for i, s := range self.Stats {
--		if s.Timestamp.After(ref) {
--			n = i
--			break
--		}
--	}
--	if n > len(self.Stats) {
--		return nil
--	}
--	return self.Stats[n:]
--}
--
--func (self *ContainerInfo) StatsStartTime() time.Time {
--	var ret time.Time
--	for _, s := range self.Stats {
--		if s.Timestamp.Before(ret) || ret.IsZero() {
--			ret = s.Timestamp
--		}
--	}
--	return ret
--}
--
--func (self *ContainerInfo) StatsEndTime() time.Time {
--	var ret time.Time
--	for i := len(self.Stats) - 1; i >= 0; i-- {
--		s := self.Stats[i]
--		if s.Timestamp.After(ret) {
--			ret = s.Timestamp
--		}
--	}
--	return ret
--}
--
--// All CPU usage metrics are cumulative from the creation of the container
--type CpuStats struct {
--	Usage struct {
--		// Total CPU usage.
--		// Units: nanoseconds
--		Total uint64 `json:"total"`
--
--		// Per CPU/core usage of the container.
--		// Unit: nanoseconds.
--		PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
--
--		// Time spent in user space.
--		// Unit: nanoseconds
--		User uint64 `json:"user"`
--
--		// Time spent in kernel space.
--		// Unit: nanoseconds
--		System uint64 `json:"system"`
--	} `json:"usage"`
--	Load int32 `json:"load"`
--}
--
--type PerDiskStats struct {
--	Major uint64            `json:"major"`
--	Minor uint64            `json:"minor"`
--	Stats map[string]uint64 `json:"stats"`
--}
--
--type DiskIoStats struct {
--	IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"`
--	IoServiced     []PerDiskStats `json:"io_serviced,omitempty"`
--	IoQueued       []PerDiskStats `json:"io_queued,omitempty"`
--	Sectors        []PerDiskStats `json:"sectors,omitempty"`
--	IoServiceTime  []PerDiskStats `json:"io_service_time,omitempty"`
--	IoWaitTime     []PerDiskStats `json:"io_wait_time,omitempty"`
--	IoMerged       []PerDiskStats `json:"io_merged,omitempty"`
--	IoTime         []PerDiskStats `json:"io_time,omitempty"`
--}
--
--type MemoryStats struct {
--	// Current memory usage, this includes all memory regardless of when it was
--	// accessed.
--	// Units: Bytes.
--	Usage uint64 `json:"usage"`
--
--	// The amount of working set memory, this includes recently accessed memory,
--	// dirty memory, and kernel memory. Working set is <= "usage".
--	// Units: Bytes.
--	WorkingSet uint64 `json:"working_set"`
--
--	ContainerData    MemoryStatsMemoryData `json:"container_data,omitempty"`
--	HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
--}
--
--type MemoryStatsMemoryData struct {
--	Pgfault    uint64 `json:"pgfault"`
--	Pgmajfault uint64 `json:"pgmajfault"`
--}
--
--type NetworkStats struct {
--	// Cumulative count of bytes received.
--	RxBytes uint64 `json:"rx_bytes"`
--	// Cumulative count of packets received.
--	RxPackets uint64 `json:"rx_packets"`
--	// Cumulative count of receive errors encountered.
--	RxErrors uint64 `json:"rx_errors"`
--	// Cumulative count of packets dropped while receiving.
--	RxDropped uint64 `json:"rx_dropped"`
--	// Cumulative count of bytes transmitted.
--	TxBytes uint64 `json:"tx_bytes"`
--	// Cumulative count of packets transmitted.
--	TxPackets uint64 `json:"tx_packets"`
--	// Cumulative count of transmit errors encountered.
--	TxErrors uint64 `json:"tx_errors"`
--	// Cumulative count of packets dropped while transmitting.
--	TxDropped uint64 `json:"tx_dropped"`
--}
--
--type FsStats struct {
--	// The block device name associated with the filesystem.
--	Device string `json:"device,omitempty"`
--
--	// Number of bytes that can be consumed by the container on this filesystem.
--	Limit uint64 `json:"capacity"`
--
--	// Number of bytes that is consumed by the container on this filesystem.
--	Usage uint64 `json:"usage"`
--
--	// Number of reads completed
--	// This is the total number of reads completed successfully.
--	ReadsCompleted uint64 `json:"reads_completed"`
--
--	// Number of reads merged
--	// Reads and writes which are adjacent to each other may be merged for
--	// efficiency.  Thus two 4K reads may become one 8K read before it is
--	// ultimately handed to the disk, and so it will be counted (and queued)
--	// as only one I/O.  This field lets you know how often this was done.
--	ReadsMerged uint64 `json:"reads_merged"`
--
--	// Number of sectors read
--	// This is the total number of sectors read successfully.
--	SectorsRead uint64 `json:"sectors_read"`
--
--	// Number of milliseconds spent reading
--	// This is the total number of milliseconds spent by all reads (as
--	// measured from __make_request() to end_that_request_last()).
--	ReadTime uint64 `json:"read_time"`
--
--	// Number of writes completed
--	// This is the total number of writes completed successfully.
--	WritesCompleted uint64 `json:"writes_completed"`
--
--	// Number of writes merged
--	// See the description of reads merged.
--	WritesMerged uint64 `json:"writes_merged"`
--
--	// Number of sectors written
--	// This is the total number of sectors written successfully.
--	SectorsWritten uint64 `json:"sectors_written"`
--
--	// Number of milliseconds spent writing
--	// This is the total number of milliseconds spent by all writes (as
--	// measured from __make_request() to end_that_request_last()).
--	WriteTime uint64 `json:"write_time"`
--
--	// Number of I/Os currently in progress
--	// The only field that should go to zero. Incremented as requests are
--	// given to appropriate struct request_queue and decremented as they finish.
--	IoInProgress uint64 `json:"io_in_progress"`
--
--	// Number of milliseconds spent doing I/Os
--	// This field increases so long as field 9 is nonzero.
--	IoTime uint64 `json:"io_time"`
--
--	// weighted number of milliseconds spent doing I/Os
--	// This field is incremented at each I/O start, I/O completion, I/O
--	// merge, or read of these stats by the number of I/Os in progress
--	// (field 9) times the number of milliseconds spent doing I/O since the
--	// last update of this field.  This can provide an easy measure of both
--	// I/O completion time and the backlog that may be accumulating.
--	WeightedIoTime uint64 `json:"weighted_io_time"`
--}
--
--type ContainerStats struct {
--	// The time of this stat point.
--	Timestamp time.Time    `json:"timestamp"`
--	Cpu       CpuStats     `json:"cpu,omitempty"`
--	DiskIo    DiskIoStats  `json:"diskio,omitempty"`
--	Memory    MemoryStats  `json:"memory,omitempty"`
--	Network   NetworkStats `json:"network,omitempty"`
--
--	// Filesystem statistics
--	Filesystem []FsStats `json:"filesystem,omitempty"`
--}
--
--func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {
--	// t1 should not be later than t2
--	if t1.After(t2) {
--		t1, t2 = t2, t1
--	}
--	diff := t2.Sub(t1)
--	if diff <= tolerance {
--		return true
--	}
--	return false
--}
--
--func durationEq(a, b time.Duration, tolerance time.Duration) bool {
--	if a > b {
--		a, b = b, a
--	}
--	diff := a - b
--	if diff <= tolerance {
--		return true
--	}
--	return false
--}
--
--const (
--	// 10ms, i.e. 0.01s
--	timePrecision time.Duration = 10 * time.Millisecond
--)
--
--// This function is useful because we do not require precise time
--// representation.
--func (a *ContainerStats) Eq(b *ContainerStats) bool {
--	if !timeEq(a.Timestamp, b.Timestamp, timePrecision) {
--		return false
--	}
--	return a.StatsEq(b)
--}
--
--// Checks equality of the stats values.
--func (a *ContainerStats) StatsEq(b *ContainerStats) bool {
--	// TODO(vmarmol): Consider using this through reflection.
--	if !reflect.DeepEqual(a.Cpu, b.Cpu) {
--		return false
--	}
--	if !reflect.DeepEqual(a.Memory, b.Memory) {
--		return false
--	}
--	if !reflect.DeepEqual(a.DiskIo, b.DiskIo) {
--		return false
--	}
--	if !reflect.DeepEqual(a.Network, b.Network) {
--		return false
--	}
--	if !reflect.DeepEqual(a.Filesystem, b.Filesystem) {
--		return false
--	}
--	return true
--}
--
--// Saturate CPU usage to 0.
--func calculateCpuUsage(prev, cur uint64) uint64 {
--	if prev > cur {
--		return 0
--	}
--	return cur - prev
--}
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/container_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/container_test.go
-deleted file mode 100644
-index 2ff38e6..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/info/container_test.go
-+++ /dev/null
-@@ -1,79 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package info
--
--import (
--	"testing"
--	"time"
--)
--
--func TestStatsStartTime(t *testing.T) {
--	N := 10
--	stats := make([]*ContainerStats, 0, N)
--	ct := time.Now()
--	for i := 0; i < N; i++ {
--		s := &ContainerStats{
--			Timestamp: ct.Add(time.Duration(i) * time.Second),
--		}
--		stats = append(stats, s)
--	}
--	cinfo := &ContainerInfo{
--		ContainerReference: ContainerReference{
--			Name: "/some/container",
--		},
--		Stats: stats,
--	}
--	ref := ct.Add(time.Duration(N-1) * time.Second)
--	end := cinfo.StatsEndTime()
--
--	if !ref.Equal(end) {
--		t.Errorf("end time is %v; should be %v", end, ref)
--	}
--}
--
--func TestStatsEndTime(t *testing.T) {
--	N := 10
--	stats := make([]*ContainerStats, 0, N)
--	ct := time.Now()
--	for i := 0; i < N; i++ {
--		s := &ContainerStats{
--			Timestamp: ct.Add(time.Duration(i) * time.Second),
--		}
--		stats = append(stats, s)
--	}
--	cinfo := &ContainerInfo{
--		ContainerReference: ContainerReference{
--			Name: "/some/container",
--		},
--		Stats: stats,
--	}
--	ref := ct
--	start := cinfo.StatsStartTime()
--
--	if !ref.Equal(start) {
--		t.Errorf("start time is %v; should be %v", start, ref)
--	}
--}
--
--func createStats(cpuUsage, memUsage uint64, timestamp time.Time) *ContainerStats {
--	stats := &ContainerStats{}
--	stats.Cpu.Usage.PerCpu = []uint64{cpuUsage}
--	stats.Cpu.Usage.Total = cpuUsage
--	stats.Cpu.Usage.System = 0
--	stats.Cpu.Usage.User = cpuUsage
--	stats.Memory.Usage = memUsage
--	stats.Timestamp = timestamp
--	return stats
--}
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/machine.go
-deleted file mode 100644
-index 0e73a8e..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/info/machine.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package info
--
--type FsInfo struct {
--	// Block device associated with the filesystem.
--	Device string `json:"device"`
--
--	// Total number of bytes available on the filesystem.
--	Capacity uint64 `json:"capacity"`
--}
--
--type MachineInfo struct {
--	// The number of cores in this machine.
--	NumCores int `json:"num_cores"`
--
--	// The amount of memory (in bytes) in this machine
--	MemoryCapacity int64 `json:"memory_capacity"`
--
--	// Filesystems on this machine.
--	Filesystems []FsInfo `json:"filesystems"`
--}
--
--type VersionInfo struct {
--	// Kernel version.
--	KernelVersion string `json:"kernel_version"`
--
--	// OS image being used for cadvisor container, or host image if running on host directly.
--	ContainerOsVersion string `json:"container_os_version"`
--
--	// Docker version.
--	DockerVersion string `json:"docker_version"`
--
--	// cAdvisor version.
--	CadvisorVersion string `json:"cadvisor_version"`
--}
--
--type MachineInfoFactory interface {
--	GetMachineInfo() (*MachineInfo, error)
--	GetVersionInfo() (*VersionInfo, error)
--}
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/test/datagen.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/test/datagen.go
-deleted file mode 100644
-index 519e28c..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/info/test/datagen.go
-+++ /dev/null
-@@ -1,76 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package test
--
--import (
--	"fmt"
--	"math/rand"
--	"time"
--
--	"github.com/google/cadvisor/info"
--)
--
--func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info.ContainerStats {
--	ret := make([]*info.ContainerStats, numStats)
--	perCoreUsages := make([]uint64, numCores)
--	currentTime := time.Now()
--	for i := range perCoreUsages {
--		perCoreUsages[i] = uint64(rand.Int63n(1000))
--	}
--	for i := 0; i < numStats; i++ {
--		stats := new(info.ContainerStats)
--		stats.Timestamp = currentTime
--		currentTime = currentTime.Add(duration)
--
--		percore := make([]uint64, numCores)
--		for i := range perCoreUsages {
--			perCoreUsages[i] += uint64(rand.Int63n(1000))
--			percore[i] = perCoreUsages[i]
--			stats.Cpu.Usage.Total += percore[i]
--		}
--		stats.Cpu.Usage.PerCpu = percore
--		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
--		stats.Cpu.Usage.System = 0
--		stats.Memory.Usage = uint64(rand.Int63n(4096))
--		ret[i] = stats
--	}
--	return ret
--}
--
--func GenerateRandomContainerSpec(numCores int) info.ContainerSpec {
--	ret := info.ContainerSpec{
--		Cpu:    info.CpuSpec{},
--		Memory: info.MemorySpec{},
--	}
--	ret.Cpu.Limit = uint64(1000 + rand.Int63n(2000))
--	ret.Cpu.MaxLimit = uint64(1000 + rand.Int63n(2000))
--	ret.Cpu.Mask = fmt.Sprintf("0-%d", numCores-1)
--	ret.Memory.Limit = uint64(4096 + rand.Int63n(4096))
--	return ret
--}
--
--func GenerateRandomContainerInfo(containerName string, numCores int, query *info.ContainerInfoRequest, duration time.Duration) *info.ContainerInfo {
--	stats := GenerateRandomStats(query.NumStats, numCores, duration)
--	spec := GenerateRandomContainerSpec(numCores)
--
--	ret := &info.ContainerInfo{
--		ContainerReference: info.ContainerReference{
--			Name: containerName,
--		},
--		Spec:  spec,
--		Stats: stats,
--	}
--	return ret
--}
-diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/version.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/version.go
-deleted file mode 100644
-index 771382a..0000000
---- a/Godeps/_workspace/src/github.com/google/cadvisor/info/version.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--// Copyright 2014 Google Inc. All Rights Reserved.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--//
--//     http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--package info
--
--// Version of cAdvisor.
--const VERSION = "0.6.2"
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/.travis.yml b/Godeps/_workspace/src/github.com/google/gofuzz/.travis.yml
-deleted file mode 100644
-index 9384a54..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/.travis.yml
-+++ /dev/null
-@@ -1,12 +0,0 @@
--language: go
--
--go:
--  - 1.3
--  - 1.2
--  - tip
--
--install: 
--  - go get code.google.com/p/go.tools/cmd/cover
--
--script:
--  - go test -cover
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/google/gofuzz/CONTRIBUTING.md
-deleted file mode 100644
-index 51cf5cd..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/CONTRIBUTING.md
-+++ /dev/null
-@@ -1,67 +0,0 @@
--# How to contribute #
--
--We'd love to accept your patches and contributions to this project.  There are
--a just a few small guidelines you need to follow.
--
--
--## Contributor License Agreement ##
--
--Contributions to any Google project must be accompanied by a Contributor
--License Agreement.  This is not a copyright **assignment**, it simply gives
--Google permission to use and redistribute your contributions as part of the
--project.
--
--  * If you are an individual writing original source code and you're sure you
--    own the intellectual property, then you'll need to sign an [individual
--    CLA][].
--
--  * If you work for a company that wants to allow you to contribute your work,
--    then you'll need to sign a [corporate CLA][].
--
--You generally only need to submit a CLA once, so if you've already submitted
--one (even if it was for a different project), you probably don't need to do it
--again.
--
--[individual CLA]: https://developers.google.com/open-source/cla/individual
--[corporate CLA]: https://developers.google.com/open-source/cla/corporate
--
--
--## Submitting a patch ##
--
--  1. It's generally best to start by opening a new issue describing the bug or
--     feature you're intending to fix.  Even if you think it's relatively minor,
--     it's helpful to know what people are working on.  Mention in the initial
--     issue that you are planning to work on that bug or feature so that it can
--     be assigned to you.
--
--  1. Follow the normal process of [forking][] the project, and setup a new
--     branch to work in.  It's important that each group of changes be done in
--     separate branches in order to ensure that a pull request only includes the
--     commits related to that bug or feature.
--
--  1. Go makes it very simple to ensure properly formatted code, so always run
--     `go fmt` on your code before committing it.  You should also run
--     [golint][] over your code.  As noted in the [golint readme][], it's not
--     strictly necessary that your code be completely "lint-free", but this will
--     help you find common style issues.
--
--  1. Any significant changes should almost always be accompanied by tests.  The
--     project already has good test coverage, so look at some of the existing
--     tests if you're unsure how to go about it.  [gocov][] and [gocov-html][]
--     are invaluable tools for seeing which parts of your code aren't being
--     exercised by your tests.
--
--  1. Do your best to have [well-formed commit messages][] for each change.
--     This provides consistency throughout the project, and ensures that commit
--     messages are able to be formatted properly by various git tools.
--
--  1. Finally, push the commits to your fork and submit a [pull request][].
--
--[forking]: https://help.github.com/articles/fork-a-repo
--[golint]: https://github.com/golang/lint
--[golint readme]: https://github.com/golang/lint/blob/master/README
--[gocov]: https://github.com/axw/gocov
--[gocov-html]: https://github.com/matm/gocov-html
--[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
--[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
--[pull request]: https://help.github.com/articles/creating-a-pull-request
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/LICENSE b/Godeps/_workspace/src/github.com/google/gofuzz/LICENSE
-deleted file mode 100644
-index d645695..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/LICENSE
-+++ /dev/null
-@@ -1,202 +0,0 @@
--
--                                 Apache License
--                           Version 2.0, January 2004
--                        http://www.apache.org/licenses/
--
--   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
--
--   1. Definitions.
--
--      "License" shall mean the terms and conditions for use, reproduction,
--      and distribution as defined by Sections 1 through 9 of this document.
--
--      "Licensor" shall mean the copyright owner or entity authorized by
--      the copyright owner that is granting the License.
--
--      "Legal Entity" shall mean the union of the acting entity and all
--      other entities that control, are controlled by, or are under common
--      control with that entity. For the purposes of this definition,
--      "control" means (i) the power, direct or indirect, to cause the
--      direction or management of such entity, whether by contract or
--      otherwise, or (ii) ownership of fifty percent (50%) or more of the
--      outstanding shares, or (iii) beneficial ownership of such entity.
--
--      "You" (or "Your") shall mean an individual or Legal Entity
--      exercising permissions granted by this License.
--
--      "Source" form shall mean the preferred form for making modifications,
--      including but not limited to software source code, documentation
--      source, and configuration files.
--
--      "Object" form shall mean any form resulting from mechanical
--      transformation or translation of a Source form, including but
--      not limited to compiled object code, generated documentation,
--      and conversions to other media types.
--
--      "Work" shall mean the work of authorship, whether in Source or
--      Object form, made available under the License, as indicated by a
--      copyright notice that is included in or attached to the work
--      (an example is provided in the Appendix below).
--
--      "Derivative Works" shall mean any work, whether in Source or Object
--      form, that is based on (or derived from) the Work and for which the
--      editorial revisions, annotations, elaborations, or other modifications
--      represent, as a whole, an original work of authorship. For the purposes
--      of this License, Derivative Works shall not include works that remain
--      separable from, or merely link (or bind by name) to the interfaces of,
--      the Work and Derivative Works thereof.
--
--      "Contribution" shall mean any work of authorship, including
--      the original version of the Work and any modifications or additions
--      to that Work or Derivative Works thereof, that is intentionally
--      submitted to Licensor for inclusion in the Work by the copyright owner
--      or by an individual or Legal Entity authorized to submit on behalf of
--      the copyright owner. For the purposes of this definition, "submitted"
--      means any form of electronic, verbal, or written communication sent
--      to the Licensor or its representatives, including but not limited to
--      communication on electronic mailing lists, source code control systems,
--      and issue tracking systems that are managed by, or on behalf of, the
--      Licensor for the purpose of discussing and improving the Work, but
--      excluding communication that is conspicuously marked or otherwise
--      designated in writing by the copyright owner as "Not a Contribution."
--
--      "Contributor" shall mean Licensor and any individual or Legal Entity
--      on behalf of whom a Contribution has been received by Licensor and
--      subsequently incorporated within the Work.
--
--   2. Grant of Copyright License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      copyright license to reproduce, prepare Derivative Works of,
--      publicly display, publicly perform, sublicense, and distribute the
--      Work and such Derivative Works in Source or Object form.
--
--   3. Grant of Patent License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      (except as stated in this section) patent license to make, have made,
--      use, offer to sell, sell, import, and otherwise transfer the Work,
--      where such license applies only to those patent claims licensable
--      by such Contributor that are necessarily infringed by their
--      Contribution(s) alone or by combination of their Contribution(s)
--      with the Work to which such Contribution(s) was submitted. If You
--      institute patent litigation against any entity (including a
--      cross-claim or counterclaim in a lawsuit) alleging that the Work
--      or a Contribution incorporated within the Work constitutes direct
--      or contributory patent infringement, then any patent licenses
--      granted to You under this License for that Work shall terminate
--      as of the date such litigation is filed.
--
--   4. Redistribution. You may reproduce and distribute copies of the
--      Work or Derivative Works thereof in any medium, with or without
--      modifications, and in Source or Object form, provided that You
--      meet the following conditions:
--
--      (a) You must give any other recipients of the Work or
--          Derivative Works a copy of this License; and
--
--      (b) You must cause any modified files to carry prominent notices
--          stating that You changed the files; and
--
--      (c) You must retain, in the Source form of any Derivative Works
--          that You distribute, all copyright, patent, trademark, and
--          attribution notices from the Source form of the Work,
--          excluding those notices that do not pertain to any part of
--          the Derivative Works; and
--
--      (d) If the Work includes a "NOTICE" text file as part of its
--          distribution, then any Derivative Works that You distribute must
--          include a readable copy of the attribution notices contained
--          within such NOTICE file, excluding those notices that do not
--          pertain to any part of the Derivative Works, in at least one
--          of the following places: within a NOTICE text file distributed
--          as part of the Derivative Works; within the Source form or
--          documentation, if provided along with the Derivative Works; or,
--          within a display generated by the Derivative Works, if and
--          wherever such third-party notices normally appear. The contents
--          of the NOTICE file are for informational purposes only and
--          do not modify the License. You may add Your own attribution
--          notices within Derivative Works that You distribute, alongside
--          or as an addendum to the NOTICE text from the Work, provided
--          that such additional attribution notices cannot be construed
--          as modifying the License.
--
--      You may add Your own copyright statement to Your modifications and
--      may provide additional or different license terms and conditions
--      for use, reproduction, or distribution of Your modifications, or
--      for any such Derivative Works as a whole, provided Your use,
--      reproduction, and distribution of the Work otherwise complies with
--      the conditions stated in this License.
--
--   5. Submission of Contributions. Unless You explicitly state otherwise,
--      any Contribution intentionally submitted for inclusion in the Work
--      by You to the Licensor shall be under the terms and conditions of
--      this License, without any additional terms or conditions.
--      Notwithstanding the above, nothing herein shall supersede or modify
--      the terms of any separate license agreement you may have executed
--      with Licensor regarding such Contributions.
--
--   6. Trademarks. This License does not grant permission to use the trade
--      names, trademarks, service marks, or product names of the Licensor,
--      except as required for reasonable and customary use in describing the
--      origin of the Work and reproducing the content of the NOTICE file.
--
--   7. Disclaimer of Warranty. Unless required by applicable law or
--      agreed to in writing, Licensor provides the Work (and each
--      Contributor provides its Contributions) on an "AS IS" BASIS,
--      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
--      implied, including, without limitation, any warranties or conditions
--      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
--      PARTICULAR PURPOSE. You are solely responsible for determining the
--      appropriateness of using or redistributing the Work and assume any
--      risks associated with Your exercise of permissions under this License.
--
--   8. Limitation of Liability. In no event and under no legal theory,
--      whether in tort (including negligence), contract, or otherwise,
--      unless required by applicable law (such as deliberate and grossly
--      negligent acts) or agreed to in writing, shall any Contributor be
--      liable to You for damages, including any direct, indirect, special,
--      incidental, or consequential damages of any character arising as a
--      result of this License or out of the use or inability to use the
--      Work (including but not limited to damages for loss of goodwill,
--      work stoppage, computer failure or malfunction, or any and all
--      other commercial damages or losses), even if such Contributor
--      has been advised of the possibility of such damages.
--
--   9. Accepting Warranty or Additional Liability. While redistributing
--      the Work or Derivative Works thereof, You may choose to offer,
--      and charge a fee for, acceptance of support, warranty, indemnity,
--      or other liability obligations and/or rights consistent with this
--      License. However, in accepting such obligations, You may act only
--      on Your own behalf and on Your sole responsibility, not on behalf
--      of any other Contributor, and only if You agree to indemnify,
--      defend, and hold each Contributor harmless for any liability
--      incurred by, or claims asserted against, such Contributor by reason
--      of your accepting any such warranty or additional liability.
--
--   END OF TERMS AND CONDITIONS
--
--   APPENDIX: How to apply the Apache License to your work.
--
--      To apply the Apache License to your work, attach the following
--      boilerplate notice, with the fields enclosed by brackets "[]"
--      replaced with your own identifying information. (Don't include
--      the brackets!)  The text should be enclosed in the appropriate
--      comment syntax for the file format. We also recommend that a
--      file or class name and description of purpose be included on the
--      same "printed page" as the copyright notice for easier
--      identification within third-party archives.
--
--   Copyright [yyyy] [name of copyright owner]
--
--   Licensed under the Apache License, Version 2.0 (the "License");
--   you may not use this file except in compliance with the License.
--   You may obtain a copy of the License at
--
--       http://www.apache.org/licenses/LICENSE-2.0
--
--   Unless required by applicable law or agreed to in writing, software
--   distributed under the License is distributed on an "AS IS" BASIS,
--   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--   See the License for the specific language governing permissions and
--   limitations under the License.
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/README.md b/Godeps/_workspace/src/github.com/google/gofuzz/README.md
-deleted file mode 100644
-index 68fcf2c..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/README.md
-+++ /dev/null
-@@ -1,71 +0,0 @@
--gofuzz
--======
--
--gofuzz is a library for populating go objects with random values.
--
--[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
--[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
--
--This is useful for testing:
--
--* Do your project's objects really serialize/unserialize correctly in all cases?
--* Is there an incorrectly formatted object that will cause your project to panic?
--
--Import with ```import "github.com/google/gofuzz"```
--
--You can use it on single variables:
--```
--f := fuzz.New()
--var myInt int
--f.Fuzz(&myInt) // myInt gets a random value.
--```
--
--You can use it on maps:
--```
--f := fuzz.New().NilChance(0).NumElements(1, 1)
--var myMap map[ComplexKeyType]string
--f.Fuzz(&myMap) // myMap will have exactly one element.
--```
--
--Customize the chance of getting a nil pointer:
--```
--f := fuzz.New().NilChance(.5)
--var fancyStruct struct {
--  A, B, C, D *string
--}
--f.Fuzz(&fancyStruct) // About half the pointers should be set.
--```
--
--You can even customize the randomization completely if needed:
--```
--type MyEnum string
--const (
--        A MyEnum = "A"
--        B MyEnum = "B"
--)
--type MyInfo struct {
--        Type MyEnum
--        AInfo *string
--        BInfo *string
--}
--
--f := fuzz.New().NilChance(0).Funcs(
--        func(e *MyInfo, c fuzz.Continue) {
--                switch c.Intn(2) {
--                case 0:
--                        e.Type = A
--                        c.Fuzz(&e.AInfo)
--                case 1:
--                        e.Type = B
--                        c.Fuzz(&e.BInfo)
--                }
--        },
--)
--
--var myObject MyInfo
--f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
--```
--
--See more examples in ```example_test.go```.
--
--Happy testing!
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/doc.go b/Godeps/_workspace/src/github.com/google/gofuzz/doc.go
-deleted file mode 100644
-index 9f9956d..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/doc.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--/*
--Copyright 2014 Google Inc. All rights reserved.
--
--Licensed under the Apache License, Version 2.0 (the "License");
--you may not use this file except in compliance with the License.
--You may obtain a copy of the License at
--
--    http://www.apache.org/licenses/LICENSE-2.0
--
--Unless required by applicable law or agreed to in writing, software
--distributed under the License is distributed on an "AS IS" BASIS,
--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--See the License for the specific language governing permissions and
--limitations under the License.
--*/
--
--// Package fuzz is a library for populating go objects with random values.
--package fuzz
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/example_test.go b/Godeps/_workspace/src/github.com/google/gofuzz/example_test.go
-deleted file mode 100644
-index 792707a..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/example_test.go
-+++ /dev/null
-@@ -1,225 +0,0 @@
--/*
--Copyright 2014 Google Inc. All rights reserved.
--
--Licensed under the Apache License, Version 2.0 (the "License");
--you may not use this file except in compliance with the License.
--You may obtain a copy of the License at
--
--    http://www.apache.org/licenses/LICENSE-2.0
--
--Unless required by applicable law or agreed to in writing, software
--distributed under the License is distributed on an "AS IS" BASIS,
--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--See the License for the specific language governing permissions and
--limitations under the License.
--*/
--
--package fuzz_test
--
--import (
--	"encoding/json"
--	"fmt"
--	"math/rand"
--
--	"github.com/google/gofuzz"
--)
--
--func ExampleSimple() {
--	type MyType struct {
--		A string
--		B string
--		C int
--		D struct {
--			E float64
--		}
--	}
--
--	f := fuzz.New()
--	object := MyType{}
--
--	uniqueObjects := map[MyType]int{}
--
--	for i := 0; i < 1000; i++ {
--		f.Fuzz(&object)
--		uniqueObjects[object]++
--	}
--	fmt.Printf("Got %v unique objects.\n", len(uniqueObjects))
--	// Output:
--	// Got 1000 unique objects.
--}
--
--func ExampleCustom() {
--	type MyType struct {
--		A int
--		B string
--	}
--
--	counter := 0
--	f := fuzz.New().Funcs(
--		func(i *int, c fuzz.Continue) {
--			*i = counter
--			counter++
--		},
--	)
--	object := MyType{}
--
--	uniqueObjects := map[MyType]int{}
--
--	for i := 0; i < 100; i++ {
--		f.Fuzz(&object)
--		if object.A != i {
--			fmt.Printf("Unexpected value: %#v\n", object)
--		}
--		uniqueObjects[object]++
--	}
--	fmt.Printf("Got %v unique objects.\n", len(uniqueObjects))
--	// Output:
--	// Got 100 unique objects.
--}
--
--func ExampleComplex() {
--	type OtherType struct {
--		A string
--		B string
--	}
--	type MyType struct {
--		Pointer             *OtherType
--		Map                 map[string]OtherType
--		PointerMap          *map[string]OtherType
--		Slice               []OtherType
--		SlicePointer        []*OtherType
--		PointerSlicePointer *[]*OtherType
--	}
--
--	f := fuzz.New().RandSource(rand.NewSource(0)).NilChance(0).NumElements(1, 1).Funcs(
--		func(o *OtherType, c fuzz.Continue) {
--			o.A = "Foo"
--			o.B = "Bar"
--		},
--		func(op **OtherType, c fuzz.Continue) {
--			*op = &OtherType{"A", "B"}
--		},
--		func(m map[string]OtherType, c fuzz.Continue) {
--			m["Works Because"] = OtherType{
--				"Fuzzer",
--				"Preallocated",
--			}
--		},
--	)
--	object := MyType{}
--	f.Fuzz(&object)
--	bytes, err := json.MarshalIndent(&object, "", "    ")
--	if err != nil {
--		fmt.Printf("error: %v\n", err)
--	}
--	fmt.Printf("%s\n", string(bytes))
--	// Output:
--	// {
--	//     "Pointer": {
--	//         "A": "A",
--	//         "B": "B"
--	//     },
--	//     "Map": {
--	//         "Works Because": {
--	//             "A": "Fuzzer",
--	//             "B": "Preallocated"
--	//         }
--	//     },
--	//     "PointerMap": {
--	//         "Works Because": {
--	//             "A": "Fuzzer",
--	//             "B": "Preallocated"
--	//         }
--	//     },
--	//     "Slice": [
--	//         {
--	//             "A": "Foo",
--	//             "B": "Bar"
--	//         }
--	//     ],
--	//     "SlicePointer": [
--	//         {
--	//             "A": "A",
--	//             "B": "B"
--	//         }
--	//     ],
--	//     "PointerSlicePointer": [
--	//         {
--	//             "A": "A",
--	//             "B": "B"
--	//         }
--	//     ]
--	// }
--}
--
--func ExampleMap() {
--	f := fuzz.New().NilChance(0).NumElements(1, 1)
--	var myMap map[struct{ A, B, C int }]string
--	f.Fuzz(&myMap)
--	fmt.Printf("myMap has %v element(s).\n", len(myMap))
--	// Output:
--	// myMap has 1 element(s).
--}
--
--func ExampleSingle() {
--	f := fuzz.New()
--	var i int
--	f.Fuzz(&i)
--
--	// Technically, we'd expect this to fail one out of 2 billion attempts...
--	fmt.Printf("(i == 0) == %v", i == 0)
--	// Output:
--	// (i == 0) == false
--}
--
--func ExampleEnum() {
--	type MyEnum string
--	const (
--		A MyEnum = "A"
--		B MyEnum = "B"
--	)
--	type MyInfo struct {
--		Type  MyEnum
--		AInfo *string
--		BInfo *string
--	}
--
--	f := fuzz.New().NilChance(0).Funcs(
--		func(e *MyInfo, c fuzz.Continue) {
--			// Note c's embedded Rand allows for direct use.
--			// We could also use c.RandBool() here.
--			switch c.Intn(2) {
--			case 0:
--				e.Type = A
--				c.Fuzz(&e.AInfo)
--			case 1:
--				e.Type = B
--				c.Fuzz(&e.BInfo)
--			}
--		},
--	)
--
--	for i := 0; i < 100; i++ {
--		var myObject MyInfo
--		f.Fuzz(&myObject)
--		switch myObject.Type {
--		case A:
--			if myObject.AInfo == nil {
--				fmt.Println("AInfo should have been set!")
--			}
--			if myObject.BInfo != nil {
--				fmt.Println("BInfo should NOT have been set!")
--			}
--		case B:
--			if myObject.BInfo == nil {
--				fmt.Println("BInfo should have been set!")
--			}
--			if myObject.AInfo != nil {
--				fmt.Println("AInfo should NOT have been set!")
--			}
--		default:
--			fmt.Println("Invalid enum value!")
--		}
--	}
--	// Output:
--}
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/fuzz.go b/Godeps/_workspace/src/github.com/google/gofuzz/fuzz.go
-deleted file mode 100644
-index 31c2838..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/fuzz.go
-+++ /dev/null
-@@ -1,366 +0,0 @@
--/*
--Copyright 2014 Google Inc. All rights reserved.
--
--Licensed under the Apache License, Version 2.0 (the "License");
--you may not use this file except in compliance with the License.
--You may obtain a copy of the License at
--
--    http://www.apache.org/licenses/LICENSE-2.0
--
--Unless required by applicable law or agreed to in writing, software
--distributed under the License is distributed on an "AS IS" BASIS,
--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--See the License for the specific language governing permissions and
--limitations under the License.
--*/
--
--package fuzz
--
--import (
--	"fmt"
--	"math/rand"
--	"reflect"
--	"time"
--)
--
--// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
--type fuzzFuncMap map[reflect.Type]reflect.Value
--
--// Fuzzer knows how to fill any object with random fields.
--type Fuzzer struct {
--	fuzzFuncs   fuzzFuncMap
--	r           *rand.Rand
--	nilChance   float64
--	minElements int
--	maxElements int
--}
--
--// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
--// RandSource, NilChance, or NumElements in any order.
--func New() *Fuzzer {
--	f := &Fuzzer{
--		fuzzFuncs:   fuzzFuncMap{},
--		r:           rand.New(rand.NewSource(time.Now().UnixNano())),
--		nilChance:   .2,
--		minElements: 1,
--		maxElements: 10,
--	}
--	return f
--}
--
--// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
--//
--// Each entry in fuzzFuncs must be a function taking two parameters.
--// The first parameter must be a pointer or map. It is the variable that
--// function will fill with random data. The second parameter must be a
--// fuzz.Continue, which will provide a source of randomness and a way
--// to automatically continue fuzzing smaller pieces of the first parameter.
--//
--// These functions are called sensibly, e.g., if you wanted custom string
--// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
--// called and passed the address of strings. Maps and pointers will always
--// be made/new'd for you, ignoring the NilChange option. For slices, it
--// doesn't make much sense to  pre-create them--Fuzzer doesn't know how
--// long you want your slice--so take a pointer to a slice, and make it
--// yourself. (If you don't want your map/pointer type pre-made, take a
--// pointer to it, and make it yourself.) See the examples for a range of
--// custom functions.
--func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
--	for i := range fuzzFuncs {
--		v := reflect.ValueOf(fuzzFuncs[i])
--		if v.Kind() != reflect.Func {
--			panic("Need only funcs!")
--		}
--		t := v.Type()
--		if t.NumIn() != 2 || t.NumOut() != 0 {
--			panic("Need 2 in and 0 out params!")
--		}
--		argT := t.In(0)
--		switch argT.Kind() {
--		case reflect.Ptr, reflect.Map:
--		default:
--			panic("fuzzFunc must take pointer or map type")
--		}
--		if t.In(1) != reflect.TypeOf(Continue{}) {
--			panic("fuzzFunc's second parameter must be type fuzz.Continue")
--		}
--		f.fuzzFuncs[argT] = v
--	}
--	return f
--}
--
--// RandSource causes f to get values from the given source of randomness.
--// Use if you want deterministic fuzzing.
--func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
--	f.r = rand.New(s)
--	return f
--}
--
--// NilChance sets the probability of creating a nil pointer, map, or slice to
--// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
--func (f *Fuzzer) NilChance(p float64) *Fuzzer {
--	if p < 0 || p > 1 {
--		panic("p should be between 0 and 1, inclusive.")
--	}
--	f.nilChance = p
--	return f
--}
--
--// NumElements sets the minimum and maximum number of elements that will be
--// added to a non-nil map or slice.
--func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
--	if atLeast > atMost {
--		panic("atLeast must be <= atMost")
--	}
--	if atLeast < 0 {
--		panic("atLeast must be >= 0")
--	}
--	f.minElements = atLeast
--	f.maxElements = atMost
--	return f
--}
--
--func (f *Fuzzer) genElementCount() int {
--	if f.minElements == f.maxElements {
--		return f.minElements
--	}
--	return f.minElements + f.r.Intn(f.maxElements-f.minElements)
--}
--
--func (f *Fuzzer) genShouldFill() bool {
--	return f.r.Float64() > f.nilChance
--}
--
--// Fuzz recursively fills all of obj's fields with something random.
--// Not safe for cyclic or tree-like structs!
--// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
--// Intended for tests, so will panic on bad input or unimplemented fields.
--func (f *Fuzzer) Fuzz(obj interface{}) {
--	v := reflect.ValueOf(obj)
--	if v.Kind() != reflect.Ptr {
--		panic("needed ptr!")
--	}
--	v = v.Elem()
--	f.doFuzz(v)
--}
--
--func (f *Fuzzer) doFuzz(v reflect.Value) {
--	if !v.CanSet() {
--		return
--	}
--	// Check for both pointer and non-pointer custom functions.
--	if v.CanAddr() && f.tryCustom(v.Addr()) {
--		return
--	}
--	if f.tryCustom(v) {
--		return
--	}
--	if fn, ok := fillFuncMap[v.Kind()]; ok {
--		fn(v, f.r)
--		return
--	}
--	switch v.Kind() {
--	case reflect.Map:
--		if f.genShouldFill() {
--			v.Set(reflect.MakeMap(v.Type()))
--			n := f.genElementCount()
--			for i := 0; i < n; i++ {
--				key := reflect.New(v.Type().Key()).Elem()
--				f.doFuzz(key)
--				val := reflect.New(v.Type().Elem()).Elem()
--				f.doFuzz(val)
--				v.SetMapIndex(key, val)
--			}
--			return
--		}
--		v.Set(reflect.Zero(v.Type()))
--	case reflect.Ptr:
--		if f.genShouldFill() {
--			v.Set(reflect.New(v.Type().Elem()))
--			f.doFuzz(v.Elem())
--			return
--		}
--		v.Set(reflect.Zero(v.Type()))
--	case reflect.Slice:
--		if f.genShouldFill() {
--			n := f.genElementCount()
--			v.Set(reflect.MakeSlice(v.Type(), n, n))
--			for i := 0; i < n; i++ {
--				f.doFuzz(v.Index(i))
--			}
--			return
--		}
--		v.Set(reflect.Zero(v.Type()))
--	case reflect.Struct:
--		for i := 0; i < v.NumField(); i++ {
--			f.doFuzz(v.Field(i))
--		}
--	case reflect.Array:
--		fallthrough
--	case reflect.Chan:
--		fallthrough
--	case reflect.Func:
--		fallthrough
--	case reflect.Interface:
--		fallthrough
--	default:
--		panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
--	}
--}
--
--// tryCustom searches for custom handlers, and returns true iff it finds a match
--// and successfully randomizes v.
--func (f *Fuzzer) tryCustom(v reflect.Value) bool {
--	doCustom, ok := f.fuzzFuncs[v.Type()]
--	if !ok {
--		return false
--	}
--
--	switch v.Kind() {
--	case reflect.Ptr:
--		if v.IsNil() {
--			if !v.CanSet() {
--				return false
--			}
--			v.Set(reflect.New(v.Type().Elem()))
--		}
--	case reflect.Map:
--		if v.IsNil() {
--			if !v.CanSet() {
--				return false
--			}
--			v.Set(reflect.MakeMap(v.Type()))
--		}
--	default:
--		return false
--	}
--
--	doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
--		f:    f,
--		Rand: f.r,
--	})})
--	return true
--}
--
--// Continue can be passed to custom fuzzing functions to allow them to use
--// the correct source of randomness and to continue fuzzing their members.
--type Continue struct {
--	f *Fuzzer
--
--	// For convenience, Continue implements rand.Rand via embedding.
--	// Use this for generating any randomness if you want your fuzzing
--	// to be repeatable for a given seed.
--	*rand.Rand
--}
--
--// Fuzz continues fuzzing obj. obj must be a pointer.
--func (c Continue) Fuzz(obj interface{}) {
--	v := reflect.ValueOf(obj)
--	if v.Kind() != reflect.Ptr {
--		panic("needed ptr!")
--	}
--	v = v.Elem()
--	c.f.doFuzz(v)
--}
--
--// RandString makes a random string up to 20 characters long. The returned string
--// may include a variety of (valid) UTF-8 encodings.
--func (c Continue) RandString() string {
--	return randString(c.Rand)
--}
--
--// RandUint64 makes random 64 bit numbers.
--// Weirdly, rand doesn't have a function that gives you 64 random bits.
--func (c Continue) RandUint64() uint64 {
--	return randUint64(c.Rand)
--}
--
--// RandBool returns true or false randomly.
--func (c Continue) RandBool() bool {
--	return randBool(c.Rand)
--}
--
--func fuzzInt(v reflect.Value, r *rand.Rand) {
--	v.SetInt(int64(randUint64(r)))
--}
--
--func fuzzUint(v reflect.Value, r *rand.Rand) {
--	v.SetUint(randUint64(r))
--}
--
--var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
--	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
--		v.SetBool(randBool(r))
--	},
--	reflect.Int:     fuzzInt,
--	reflect.Int8:    fuzzInt,
--	reflect.Int16:   fuzzInt,
--	reflect.Int32:   fuzzInt,
--	reflect.Int64:   fuzzInt,
--	reflect.Uint:    fuzzUint,
--	reflect.Uint8:   fuzzUint,
--	reflect.Uint16:  fuzzUint,
--	reflect.Uint32:  fuzzUint,
--	reflect.Uint64:  fuzzUint,
--	reflect.Uintptr: fuzzUint,
--	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
--		v.SetFloat(float64(r.Float32()))
--	},
--	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
--		v.SetFloat(r.Float64())
--	},
--	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
--		panic("unimplemented")
--	},
--	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
--		panic("unimplemented")
--	},
--	reflect.String: func(v reflect.Value, r *rand.Rand) {
--		v.SetString(randString(r))
--	},
--	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
--		panic("unimplemented")
--	},
--}
--
--// randBool returns true or false randomly.
--func randBool(r *rand.Rand) bool {
--	if r.Int()&1 == 1 {
--		return true
--	}
--	return false
--}
--
--type charRange struct {
--	first, last rune
--}
--
--// choose returns a random unicode character from the given range, using the
--// given randomness source.
--func (r *charRange) choose(rand *rand.Rand) rune {
--	count := int64(r.last - r.first)
--	return r.first + rune(rand.Int63n(count))
--}
--
--var unicodeRanges = []charRange{
--	{' ', '~'},           // ASCII characters
--	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
--	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
--}
--
--// randString makes a random string up to 20 characters long. The returned string
--// may include a variety of (valid) UTF-8 encodings.
--func randString(r *rand.Rand) string {
--	n := r.Intn(20)
--	runes := make([]rune, n)
--	for i := range runes {
--		runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
--	}
--	return string(runes)
--}
--
--// randUint64 makes random 64 bit numbers.
--// Weirdly, rand doesn't have a function that gives you 64 random bits.
--func randUint64(r *rand.Rand) uint64 {
--	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
--}
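
To make it easier to see what is being dropped here: a minimal, self-contained sketch of how the gofuzz API deleted above is normally driven (New, NilChance, Funcs, Fuzz). This is illustrative only and not part of the patch; the Config type and its fields are invented for the example, and only the gofuzz calls come from the code above.

    package main

    import (
        "fmt"

        fuzz "github.com/google/gofuzz"
    )

    // Config is a made-up target type; gofuzz fills exported fields only.
    type Config struct {
        Name    string
        Retries int
        Labels  map[string]string
    }

    func main() {
        f := fuzz.New().NilChance(0.2).Funcs(
            // Custom handler for strings; Continue exposes the shared rand source.
            func(s *string, c fuzz.Continue) {
                *s = "cfg-" + c.RandString()
            },
        )
        var cfg Config
        f.Fuzz(&cfg) // Fuzz requires a pointer, as documented above
        fmt.Printf("%+v\n", cfg)
    }
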
-diff --git a/Godeps/_workspace/src/github.com/google/gofuzz/fuzz_test.go b/Godeps/_workspace/src/github.com/google/gofuzz/fuzz_test.go
-deleted file mode 100644
-index 4f0d4db..0000000
---- a/Godeps/_workspace/src/github.com/google/gofuzz/fuzz_test.go
-+++ /dev/null
-@@ -1,258 +0,0 @@
--/*
--Copyright 2014 Google Inc. All rights reserved.
--
--Licensed under the Apache License, Version 2.0 (the "License");
--you may not use this file except in compliance with the License.
--You may obtain a copy of the License at
--
--    http://www.apache.org/licenses/LICENSE-2.0
--
--Unless required by applicable law or agreed to in writing, software
--distributed under the License is distributed on an "AS IS" BASIS,
--WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--See the License for the specific language governing permissions and
--limitations under the License.
--*/
--
--package fuzz
--
--import (
--	"reflect"
--	"testing"
--)
--
--func TestFuzz_basic(t *testing.T) {
--	obj := &struct {
--		I    int
--		I8   int8
--		I16  int16
--		I32  int32
--		I64  int64
--		U    uint
--		U8   uint8
--		U16  uint16
--		U32  uint32
--		U64  uint64
--		Uptr uintptr
--		S    string
--		B    bool
--	}{}
--
--	failed := map[string]int{}
--	for i := 0; i < 10; i++ {
--		New().Fuzz(obj)
--
--		if n, v := "i", obj.I; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "i8", obj.I8; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "i16", obj.I16; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "i32", obj.I32; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "i64", obj.I64; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "u", obj.U; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "u8", obj.U8; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "u16", obj.U16; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "u32", obj.U32; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "u64", obj.U64; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "uptr", obj.Uptr; v == 0 {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "s", obj.S; v == "" {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "b", obj.B; v == false {
--			failed[n] = failed[n] + 1
--		}
--	}
--	checkFailed(t, failed)
--}
--
--func checkFailed(t *testing.T, failed map[string]int) {
--	for k, v := range failed {
--		if v > 8 {
--			t.Errorf("%v seems to not be getting set, was zero value %v times", k, v)
--		}
--	}
--}
--
--func TestFuzz_structptr(t *testing.T) {
--	obj := &struct {
--		A *struct {
--			S string
--		}
--	}{}
--
--	f := New().NilChance(.5)
--	failed := map[string]int{}
--	for i := 0; i < 10; i++ {
--		f.Fuzz(obj)
--
--		if n, v := "a not nil", obj.A; v == nil {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "a nil", obj.A; v != nil {
--			failed[n] = failed[n] + 1
--		}
--		if n, v := "as", obj.A; v == nil || v.S == "" {
--			failed[n] = failed[n] + 1
--		}
--	}
--	checkFailed(t, failed)
--}
--
--// tryFuzz tries fuzzing up to 20 times. Fail if check() never passes, report the highest
--// stage it ever got to.
--func tryFuzz(t *testing.T, f *Fuzzer, obj interface{}, check func() (stage int, passed bool)) {
--	maxStage := 0
--	for i := 0; i < 20; i++ {
--		f.Fuzz(obj)
--		stage, passed := check()
--		if stage > maxStage {
--			maxStage = stage
--		}
--		if passed {
--			return
--		}
--	}
--	t.Errorf("Only ever got to stage %v", maxStage)
--}
--
--func TestFuzz_structmap(t *testing.T) {
--	obj := &struct {
--		A map[struct {
--			S string
--		}]struct {
--			S2 string
--		}
--		B map[string]string
--	}{}
--
--	tryFuzz(t, New(), obj, func() (int, bool) {
--		if obj.A == nil {
--			return 1, false
--		}
--		if len(obj.A) == 0 {
--			return 2, false
--		}
--		for k, v := range obj.A {
--			if k.S == "" {
--				return 3, false
--			}
--			if v.S2 == "" {
--				return 4, false
--			}
--		}
--
--		if obj.B == nil {
--			return 5, false
--		}
--		if len(obj.B) == 0 {
--			return 6, false
--		}
--		for k, v := range obj.B {
--			if k == "" {
--				return 7, false
--			}
--			if v == "" {
--				return 8, false
--			}
--		}
--		return 9, true
--	})
--}
--
--func TestFuzz_structslice(t *testing.T) {
--	obj := &struct {
--		A []struct {
--			S string
--		}
--		B []string
--	}{}
--
--	tryFuzz(t, New(), obj, func() (int, bool) {
--		if obj.A == nil {
--			return 1, false
--		}
--		if len(obj.A) == 0 {
--			return 2, false
--		}
--		for _, v := range obj.A {
--			if v.S == "" {
--				return 3, false
--			}
--		}
--
--		if obj.B == nil {
--			return 4, false
--		}
--		if len(obj.B) == 0 {
--			return 5, false
--		}
--		for _, v := range obj.B {
--			if v == "" {
--				return 6, false
--			}
--		}
--		return 7, true
--	})
--}
--
--func TestFuzz_custom(t *testing.T) {
--	obj := &struct {
--		A string
--		B *string
--		C map[string]string
--		D *map[string]string
--	}{}
--
--	testPhrase := "gotcalled"
--	testMap := map[string]string{"C": "D"}
--	f := New().Funcs(
--		func(s *string, c Continue) {
--			*s = testPhrase
--		},
--		func(m map[string]string, c Continue) {
--			m["C"] = "D"
--		},
--	)
--
--	tryFuzz(t, f, obj, func() (int, bool) {
--		if obj.A != testPhrase {
--			return 1, false
--		}
--		if obj.B == nil {
--			return 2, false
--		}
--		if *obj.B != testPhrase {
--			return 3, false
--		}
--		if e, a := testMap, obj.C; !reflect.DeepEqual(e, a) {
--			return 4, false
--		}
--		if obj.D == nil {
--			return 5, false
--		}
--		if e, a := testMap, *obj.D; !reflect.DeepEqual(e, a) {
--			return 6, false
--		}
--		return 7, true
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/.travis.yml b/Godeps/_workspace/src/github.com/imdario/mergo/.travis.yml
-deleted file mode 100644
-index 9d91c63..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/.travis.yml
-+++ /dev/null
-@@ -1,2 +0,0 @@
--language: go
--install: go get -t
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/LICENSE b/Godeps/_workspace/src/github.com/imdario/mergo/LICENSE
-deleted file mode 100644
-index 6866802..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/LICENSE
-+++ /dev/null
-@@ -1,28 +0,0 @@
--Copyright (c) 2013 Dario Castañé. All rights reserved.
--Copyright (c) 2012 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/README.md b/Godeps/_workspace/src/github.com/imdario/mergo/README.md
-deleted file mode 100644
-index cdcea0f..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/README.md
-+++ /dev/null
-@@ -1,68 +0,0 @@
--# Mergo
--
--A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
--
--Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
--
--![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
--
--## Status
--
--It is ready for production use. It works fine, although it could use more testing. Here are some projects in the wild using Mergo:
--
--- [EagerIO/Stout](https://github.com/EagerIO/Stout)
--- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
--- [russross/canvasassignments](https://github.com/russross/canvasassignments)
--- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
--- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
--- [divshot/gitling](https://github.com/divshot/gitling)
--- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
--
--[![Build Status][1]][2]
--[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo)
--
--[1]: https://travis-ci.org/imdario/mergo.png
--[2]: https://travis-ci.org/imdario/mergo
--
--## Installation
--
--    go get github.com/imdario/mergo
--
--    // use in your .go code
--    import (
--        "github.com/imdario/mergo"
--    )
--
--## Usage
--
--You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
--
--    if err := mergo.Merge(&dst, src); err != nil {
--        // ...
--    }
--
--Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
--
--    if err := mergo.Map(&dst, srcMap); err != nil {
--        // ...
--    }
--
--Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will just be assigned as values.
--
--More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
--
--Note: if tests are failing due to a missing package, please execute:
--
--    go get gopkg.in/yaml.v1
--
--## Contact me
--
--If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
--
--## About
--
--Written by [Dario Castañé](http://dario.im).
--
--## License
--
--[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/doc.go b/Godeps/_workspace/src/github.com/imdario/mergo/doc.go
-deleted file mode 100644
-index 6e9aa7b..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/doc.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--// Copyright 2013 Dario Castañé. All rights reserved.
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--/*
--Package mergo merges same-type structs and maps by setting default values in zero-value fields.
--
--Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
--
--Usage
--
--From my own work-in-progress project:
--
--	type networkConfig struct {
--		Protocol string
--		Address string
--		ServerType string `json: "server_type"`
--		Port uint16
--	}
--
--	type FssnConfig struct {
--		Network networkConfig
--	}
--
--	var fssnDefault = FssnConfig {
--		networkConfig {
--			"tcp",
--			"127.0.0.1",
--			"http",
--			31560,
--		},
--	}
--
--	// Inside a function [...]
--
--	if err := mergo.Merge(&config, fssnDefault); err != nil {
--		log.Fatal(err)
--	}
--
--	// More code [...]
--
--*/
--package mergo
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/map.go b/Godeps/_workspace/src/github.com/imdario/mergo/map.go
-deleted file mode 100644
-index 44361e8..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/map.go
-+++ /dev/null
-@@ -1,146 +0,0 @@
--// Copyright 2014 Dario Castañé. All rights reserved.
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Based on src/pkg/reflect/deepequal.go from official
--// golang's stdlib.
--
--package mergo
--
--import (
--	"fmt"
--	"reflect"
--	"unicode"
--	"unicode/utf8"
--)
--
--func changeInitialCase(s string, mapper func(rune) rune) string {
--	if s == "" {
--		return s
--	}
--	r, n := utf8.DecodeRuneInString(s)
--	return string(mapper(r)) + s[n:]
--}
--
--func isExported(field reflect.StructField) bool {
--	r, _ := utf8.DecodeRuneInString(field.Name)
--	return r >= 'A' && r <= 'Z'
--}
--
--// Recursively traverses both values, assigning src's field values to dst.
--// The map argument tracks comparisons that have already been seen, which allows
--// short circuiting on recursive types.
--func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
--	if dst.CanAddr() {
--		addr := dst.UnsafeAddr()
--		h := 17 * addr
--		seen := visited[h]
--		typ := dst.Type()
--		for p := seen; p != nil; p = p.next {
--			if p.ptr == addr && p.typ == typ {
--				return nil
--			}
--		}
--		// Remember, remember...
--		visited[h] = &visit{addr, typ, seen}
--	}
--	zeroValue := reflect.Value{}
--	switch dst.Kind() {
--	case reflect.Map:
--		dstMap := dst.Interface().(map[string]interface{})
--		for i, n := 0, src.NumField(); i < n; i++ {
--			srcType := src.Type()
--			field := srcType.Field(i)
--			if !isExported(field) {
--				continue
--			}
--			fieldName := field.Name
--			fieldName = changeInitialCase(fieldName, unicode.ToLower)
--			if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) {
--				dstMap[fieldName] = src.Field(i).Interface()
--			}
--		}
--	case reflect.Struct:
--		srcMap := src.Interface().(map[string]interface{})
--		for key := range srcMap {
--			srcValue := srcMap[key]
--			fieldName := changeInitialCase(key, unicode.ToUpper)
--			dstElement := dst.FieldByName(fieldName)
--			if dstElement == zeroValue {
--				// We discard it because the field doesn't exist.
--				continue
--			}
--			srcElement := reflect.ValueOf(srcValue)
--			dstKind := dstElement.Kind()
--			srcKind := srcElement.Kind()
--			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
--				srcElement = srcElement.Elem()
--				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
--			} else if dstKind == reflect.Ptr {
--				// Can this work? I guess it can't.
--				if srcKind != reflect.Ptr && srcElement.CanAddr() {
--					srcPtr := srcElement.Addr()
--					srcElement = reflect.ValueOf(srcPtr)
--					srcKind = reflect.Ptr
--				}
--			}
--			if !srcElement.IsValid() {
--				continue
--			}
--			if srcKind == dstKind {
--				if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
--					return
--				}
--			} else {
--				if srcKind == reflect.Map {
--					if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil {
--						return
--					}
--				} else {
--					return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
--				}
--			}
--		}
--	}
--	return
--}
--
--// Map sets fields' values in dst from src.
--// src can be a map with string keys or a struct. dst must be the opposite:
--// if src is a map, dst must be a valid pointer to struct. If src is a struct,
--// dst must be map[string]interface{}.
--// It won't merge unexported (private) fields, but it will recursively
--// merge any exported ones.
--// If dst is a map, keys will be src fields' names in lower camel case.
--// A key in src that doesn't match a field in dst will be skipped. This
--// doesn't apply if dst is a map.
--// This is a separate method from Merge because it is cleaner and it keeps sane
--// semantics: merging equal types, mapping different (restricted) types.
--func Map(dst, src interface{}) error {
--	var (
--		vDst, vSrc reflect.Value
--		err        error
--	)
--	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
--		return err
--	}
--	// To be friction-less, we redirect equal-type arguments
--	// to deepMerge. Only because arguments can be anything.
--	if vSrc.Kind() == vDst.Kind() {
--		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
--	}
--	switch vSrc.Kind() {
--	case reflect.Struct:
--		if vDst.Kind() != reflect.Map {
--			return ErrExpectedMapAsDestination
--		}
--	case reflect.Map:
--		if vDst.Kind() != reflect.Struct {
--			return ErrExpectedStructAsDestination
--		}
--	default:
--		return ErrNotSupported
--	}
--	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0)
--}
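
Since Map's doc comment above is fairly dense, here is an illustrative sketch (not part of the patch) of the map-to-struct direction it describes. The Server type and the literal values are invented; only mergo.Map itself comes from the file above.

    package main

    import (
        "fmt"

        "github.com/imdario/mergo"
    )

    // Server is a made-up destination type with exported fields.
    type Server struct {
        Host string
        Port int
    }

    func main() {
        var dst Server // zero-valued fields get filled
        src := map[string]interface{}{
            "host": "example.org", // keys are capitalized to match exported fields
            "port": 8080,
        }
        if err := mergo.Map(&dst, src); err != nil {
            fmt.Println("map failed:", err)
            return
        }
        fmt.Printf("%+v\n", dst)
    }
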
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/merge.go b/Godeps/_workspace/src/github.com/imdario/mergo/merge.go
-deleted file mode 100644
-index 5d328b1..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/merge.go
-+++ /dev/null
-@@ -1,99 +0,0 @@
--// Copyright 2013 Dario Castañé. All rights reserved.
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Based on src/pkg/reflect/deepequal.go from official
--// golang's stdlib.
--
--package mergo
--
--import (
--	"reflect"
--)
--
--// Recursively traverses both values, assigning src's field values to dst.
--// The map argument tracks comparisons that have already been seen, which allows
--// short circuiting on recursive types.
--func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
--	if !src.IsValid() {
--		return
--	}
--	if dst.CanAddr() {
--		addr := dst.UnsafeAddr()
--		h := 17 * addr
--		seen := visited[h]
--		typ := dst.Type()
--		for p := seen; p != nil; p = p.next {
--			if p.ptr == addr && p.typ == typ {
--				return nil
--			}
--		}
--		// Remember, remember...
--		visited[h] = &visit{addr, typ, seen}
--	}
--	switch dst.Kind() {
--	case reflect.Struct:
--		for i, n := 0, dst.NumField(); i < n; i++ {
--			if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil {
--				return
--			}
--		}
--	case reflect.Map:
--		for _, key := range src.MapKeys() {
--			srcElement := src.MapIndex(key)
--			if !srcElement.IsValid() {
--				continue
--			}
--			dstElement := dst.MapIndex(key)
--			switch reflect.TypeOf(srcElement.Interface()).Kind() {
--			case reflect.Struct:
--				fallthrough
--			case reflect.Map:
--				if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
--					return
--				}
--			}
--			if !dstElement.IsValid() {
--				dst.SetMapIndex(key, srcElement)
--			}
--		}
--	case reflect.Ptr:
--		fallthrough
--	case reflect.Interface:
--		if src.IsNil() {
--			break
--		} else if dst.IsNil() {
--			if dst.CanSet() && isEmptyValue(dst) {
--				dst.Set(src)
--			}
--		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil {
--			return
--		}
--	default:
--		if dst.CanSet() && !isEmptyValue(src) {
--			dst.Set(src)
--		}
--	}
--	return
--}
--
--// Merge sets fields' values in dst from src if they have a zero
--// value of their type.
--// dst and src must be valid same-type structs and dst must be
--// a pointer to struct.
--// It won't merge unexported (private) fields, but it will recursively
--// merge any exported ones.
--func Merge(dst, src interface{}) error {
--	var (
--		vDst, vSrc reflect.Value
--		err        error
--	)
--	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
--		return err
--	}
--	if vDst.Type() != vSrc.Type() {
--		return ErrDifferentArgumentsTypes
--	}
--	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
--}
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/mergo.go b/Godeps/_workspace/src/github.com/imdario/mergo/mergo.go
-deleted file mode 100644
-index f8a0991..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/mergo.go
-+++ /dev/null
-@@ -1,90 +0,0 @@
--// Copyright 2013 Dario Castañé. All rights reserved.
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Based on src/pkg/reflect/deepequal.go from official
--// golang's stdlib.
--
--package mergo
--
--import (
--	"errors"
--	"reflect"
--)
--
--// Errors reported by Mergo when it finds invalid arguments.
--var (
--	ErrNilArguments                = errors.New("src and dst must not be nil")
--	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
--	ErrNotSupported                = errors.New("only structs and maps are supported")
--	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
--	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
--)
--
--// During deepMerge, we must keep track of checks that are
--// in progress.  The comparison algorithm assumes that all
--// checks in progress are true when it reencounters them.
--// Visited entries are stored in a map indexed by 17 * a1 + a2.
--type visit struct {
--	ptr  uintptr
--	typ  reflect.Type
--	next *visit
--}
--
--// From src/pkg/encoding/json.
--func isEmptyValue(v reflect.Value) bool {
--	switch v.Kind() {
--	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
--		return v.Len() == 0
--	case reflect.Bool:
--		return !v.Bool()
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		return v.Int() == 0
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		return v.Uint() == 0
--	case reflect.Float32, reflect.Float64:
--		return v.Float() == 0
--	case reflect.Interface, reflect.Ptr:
--		return v.IsNil()
--	}
--	return false
--}
--
--func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
--	if dst == nil || src == nil {
--		err = ErrNilArguments
--		return
--	}
--	vDst = reflect.ValueOf(dst).Elem()
--	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
--		err = ErrNotSupported
--		return
--	}
--	vSrc = reflect.ValueOf(src)
--	// We check if vSrc is a pointer to dereference it.
--	if vSrc.Kind() == reflect.Ptr {
--		vSrc = vSrc.Elem()
--	}
--	return
--}
--
--// Recursively traverses both values, assigning src's field values to dst.
--// The map argument tracks comparisons that have already been seen, which allows
--// short circuiting on recursive types.
--func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
--	if dst.CanAddr() {
--		addr := dst.UnsafeAddr()
--		h := 17 * addr
--		seen := visited[h]
--		typ := dst.Type()
--		for p := seen; p != nil; p = p.next {
--			if p.ptr == addr && p.typ == typ {
--				return nil
--			}
--		}
--		// Remember, remember...
--		visited[h] = &visit{addr, typ, seen}
--	}
--	return // TODO refactor
--}
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/mergo_test.go b/Godeps/_workspace/src/github.com/imdario/mergo/mergo_test.go
-deleted file mode 100644
-index 072bddb..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/mergo_test.go
-+++ /dev/null
-@@ -1,288 +0,0 @@
--// Copyright 2013 Dario Castañé. All rights reserved.
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package mergo
--
--import (
--	"gopkg.in/yaml.v1"
--	"io/ioutil"
--	"reflect"
--	"testing"
--)
--
--type simpleTest struct {
--	Value int
--}
--
--type complexTest struct {
--	St simpleTest
--	sz int
--	Id string
--}
--
--type moreComplextText struct {
--	Ct complexTest
--	St simpleTest
--	Nt simpleTest
--}
--
--type pointerTest struct {
--	C *simpleTest
--}
--
--type sliceTest struct {
--	S []int
--}
--
--func TestNil(t *testing.T) {
--	if err := Merge(nil, nil); err != ErrNilArguments {
--		t.Fail()
--	}
--}
--
--func TestDifferentTypes(t *testing.T) {
--	a := simpleTest{42}
--	b := 42
--	if err := Merge(&a, b); err != ErrDifferentArgumentsTypes {
--		t.Fail()
--	}
--}
--
--func TestSimpleStruct(t *testing.T) {
--	a := simpleTest{}
--	b := simpleTest{42}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if a.Value != 42 {
--		t.Fatalf("b not merged in a properly: a.Value(%d) != b.Value(%d)", a.Value, b.Value)
--	}
--	if !reflect.DeepEqual(a, b) {
--		t.FailNow()
--	}
--}
--
--func TestComplexStruct(t *testing.T) {
--	a := complexTest{}
--	a.Id = "athing"
--	b := complexTest{simpleTest{42}, 1, "bthing"}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if a.St.Value != 42 {
--		t.Fatalf("b not merged in a properly: a.St.Value(%d) != b.St.Value(%d)", a.St.Value, b.St.Value)
--	}
--	if a.sz == 1 {
--		t.Fatalf("a's private field sz not preserved from merge: a.sz(%d) == b.sz(%d)", a.sz, b.sz)
--	}
--	if a.Id != b.Id {
--		t.Fatalf("a's field Id not merged properly: a.Id(%s) != b.Id(%s)", a.Id, b.Id)
--	}
--}
--
--func TestPointerStruct(t *testing.T) {
--	s1 := simpleTest{}
--	s2 := simpleTest{19}
--	a := pointerTest{&s1}
--	b := pointerTest{&s2}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if a.C.Value != b.C.Value {
--		//t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value)
--	}
--}
--
--func TestPointerStructNil(t *testing.T) {
--	a := pointerTest{nil}
--	b := pointerTest{&simpleTest{19}}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if a.C.Value != b.C.Value {
--		t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value)
--	}
--}
--
--func TestSliceStruct(t *testing.T) {
--	a := sliceTest{}
--	b := sliceTest{[]int{1, 2, 3}}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if len(b.S) != 3 {
--		t.FailNow()
--	}
--	if len(a.S) != len(b.S) {
--		t.Fatalf("b not merged in a properly %d != %d", len(a.S), len(b.S))
--	}
--
--	a = sliceTest{[]int{1}}
--	b = sliceTest{[]int{1, 2, 3}}
--	if err := Merge(&a, b); err != nil {
--		t.FailNow()
--	}
--	if len(b.S) != 3 {
--		t.FailNow()
--	}
--	if len(a.S) != len(b.S) {
--		t.Fatalf("b not merged in a properly %d != %d", len(a.S), len(b.S))
--	}
--}
--
--func TestMaps(t *testing.T) {
--	m := map[string]simpleTest{
--		"a": simpleTest{},
--		"b": simpleTest{42},
--	}
--	n := map[string]simpleTest{
--		"a": simpleTest{16},
--		"b": simpleTest{},
--		"c": simpleTest{12},
--	}
--	if err := Merge(&m, n); err != nil {
--		t.Fatalf(err.Error())
--	}
--	if len(m) != 3 {
--		t.Fatalf(`n not merged in m properly, m must have 3 elements instead of %d`, len(m))
--	}
--	if m["a"].Value != 0 {
--		t.Fatalf(`n merged in m because I solved non-addressable map values TODO: m["a"].Value(%d) != n["a"].Value(%d)`, m["a"].Value, n["a"].Value)
--	}
--	if m["b"].Value != 42 {
--		t.Fatalf(`n wrongly merged in m: m["b"].Value(%d) != n["b"].Value(%d)`, m["b"].Value, n["b"].Value)
--	}
--	if m["c"].Value != 12 {
--		t.Fatalf(`n not merged in m: m["c"].Value(%d) != n["c"].Value(%d)`, m["c"].Value, n["c"].Value)
--	}
--}
--
--func TestYAMLMaps(t *testing.T) {
--	thing := loadYAML("testdata/thing.yml")
--	license := loadYAML("testdata/license.yml")
--	ft := thing["fields"].(map[interface{}]interface{})
--	fl := license["fields"].(map[interface{}]interface{})
--	expectedLength := len(ft) + len(fl)
--	if err := Merge(&license, thing); err != nil {
--		t.Fatal(err.Error())
--	}
--	currentLength := len(license["fields"].(map[interface{}]interface{}))
--	if currentLength != expectedLength {
--		t.Fatalf(`thing not merged in license properly, license must have %d elements instead of %d`, expectedLength, currentLength)
--	}
--	fields := license["fields"].(map[interface{}]interface{})
--	if _, ok := fields["id"]; !ok {
--		t.Fatalf(`thing not merged in license properly, license must have a new id field from thing`)
--	}
--}
--
--func TestTwoPointerValues(t *testing.T) {
--	a := &simpleTest{}
--	b := &simpleTest{42}
--	if err := Merge(a, b); err != nil {
--		t.Fatalf(`Boom. You crossed the streams: %s`, err)
--	}
--}
--
--func TestMap(t *testing.T) {
--	a := complexTest{}
--	a.Id = "athing"
--	c := moreComplextText{a, simpleTest{}, simpleTest{}}
--	b := map[string]interface{}{
--		"ct": map[string]interface{}{
--			"st": map[string]interface{}{
--				"value": 42,
--			},
--			"sz": 1,
--			"id": "bthing",
--		},
--		"st": &simpleTest{144}, // Mapping a reference
--		"zt": simpleTest{299},  // Mapping a missing field (zt doesn't exist)
--		"nt": simpleTest{3},
--	}
--	if err := Map(&c, b); err != nil {
--		t.FailNow()
--	}
--	m := b["ct"].(map[string]interface{})
--	n := m["st"].(map[string]interface{})
--	o := b["st"].(*simpleTest)
--	p := b["nt"].(simpleTest)
--	if c.Ct.St.Value != 42 {
--		t.Fatalf("b not merged in a properly: c.Ct.St.Value(%d) != b.Ct.St.Value(%d)", c.Ct.St.Value, n["value"])
--	}
--	if c.St.Value != 144 {
--		t.Fatalf("b not merged in a properly: c.St.Value(%d) != b.St.Value(%d)", c.St.Value, o.Value)
--	}
--	if c.Nt.Value != 3 {
--		t.Fatalf("b not merged in a properly: c.Nt.Value(%d) != b.Nt.Value(%d)", c.St.Value, p.Value)
--	}
--	if c.Ct.sz == 1 {
--		t.Fatalf("a's private field sz not preserved from merge: c.Ct.sz(%d) == b.Ct.sz(%d)", c.Ct.sz, m["sz"])
--	}
--	if c.Ct.Id != m["id"] {
--		t.Fatalf("a's field Id not merged properly: c.Ct.Id(%s) != b.Ct.Id(%s)", c.Ct.Id, m["id"])
--	}
--}
--
--func TestSimpleMap(t *testing.T) {
--	a := simpleTest{}
--	b := map[string]interface{}{
--		"value": 42,
--	}
--	if err := Map(&a, b); err != nil {
--		t.FailNow()
--	}
--	if a.Value != 42 {
--		t.Fatalf("b not merged in a properly: a.Value(%d) != b.Value(%v)", a.Value, b["value"])
--	}
--}
--
--type pointerMapTest struct {
--	A      int
--	hidden int
--	B      *simpleTest
--}
--
--func TestBackAndForth(t *testing.T) {
--	pt := pointerMapTest{42, 1, &simpleTest{66}}
--	m := make(map[string]interface{})
--	if err := Map(&m, pt); err != nil {
--		t.FailNow()
--	}
--	var (
--		v  interface{}
--		ok bool
--	)
--	if v, ok = m["a"]; v.(int) != pt.A || !ok {
--		t.Fatalf("pt not merged properly: m[`a`](%d) != pt.A(%d)", v, pt.A)
--	}
--	if v, ok = m["b"]; !ok {
--		t.Fatalf("pt not merged properly: B is missing in m")
--	}
--	var st *simpleTest
--	if st = v.(*simpleTest); st.Value != 66 {
--		t.Fatalf("something went wrong while mapping pt on m, B wasn't copied")
--	}
--	bpt := pointerMapTest{}
--	if err := Map(&bpt, m); err != nil {
--		t.Fatal(err)
--	}
--	if bpt.A != pt.A {
--		t.Fatalf("pt not merged properly: bpt.A(%d) != pt.A(%d)", bpt.A, pt.A)
--	}
--	if bpt.hidden == pt.hidden {
--		t.Fatalf("pt unexpectedly merged: bpt.hidden(%d) == pt.hidden(%d)", bpt.hidden, pt.hidden)
--	}
--	if bpt.B.Value != pt.B.Value {
--		t.Fatalf("pt not merged properly: bpt.B.Value(%d) != pt.B.Value(%d)", bpt.B.Value, pt.B.Value)
--	}
--}
--
--func loadYAML(path string) (m map[string]interface{}) {
--	m = make(map[string]interface{})
--	raw, _ := ioutil.ReadFile(path)
--	_ = yaml.Unmarshal(raw, &m)
--	return
--}
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/testdata/license.yml b/Godeps/_workspace/src/github.com/imdario/mergo/testdata/license.yml
-deleted file mode 100644
-index 62fdb61..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/testdata/license.yml
-+++ /dev/null
-@@ -1,3 +0,0 @@
--import: ../../../../fossene/db/schema/thing.yml
--fields:
--    site: string
-diff --git a/Godeps/_workspace/src/github.com/imdario/mergo/testdata/thing.yml b/Godeps/_workspace/src/github.com/imdario/mergo/testdata/thing.yml
-deleted file mode 100644
-index c28eab0..0000000
---- a/Godeps/_workspace/src/github.com/imdario/mergo/testdata/thing.yml
-+++ /dev/null
-@@ -1,5 +0,0 @@
--fields:
--    id: int
--    name: string
--    parent: ref "datu:thing"
--    status: enum(draft, public, private)
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/.gitignore b/Godeps/_workspace/src/github.com/miekg/dns/.gitignore
-deleted file mode 100644
-index 776cd95..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/.gitignore
-+++ /dev/null
-@@ -1,4 +0,0 @@
--*.6
--tags
--test.out
--a.out
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml b/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml
-deleted file mode 100644
-index 4485679..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/.travis.yml
-+++ /dev/null
-@@ -1,21 +0,0 @@
--language: go
--go:
--  - 1.2
--  - 1.3
--env:
--  # "gvm update" resets the GOOS and GOARCH environment variables; work around it by setting
--  # BUILD_GOOS and BUILD_GOARCH and overriding GOARCH and GOOS in the build script
--  global:
--    - BUILD_GOARCH=amd64
--  matrix:
--    - BUILD_GOOS=linux
--    - BUILD_GOOS=darwin
--    - BUILD_GOOS=windows
--script:
--  - gvm cross $BUILD_GOOS $BUILD_GOARCH
--  - GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go build
--
--  # only test on linux
--  # also specify -short; the crypto tests fail in weird ways *sometimes*
--  # See issue #151
--  - if [ $BUILD_GOOS == "linux" ]; then GOARCH=$BUILD_GOARCH GOOS=$BUILD_GOOS go test -short -bench=.; fi
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS b/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS
-deleted file mode 100644
-index 1965683..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/AUTHORS
-+++ /dev/null
-@@ -1 +0,0 @@
--Miek Gieben <miek at miek.nl>
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS b/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS
-deleted file mode 100644
-index f77e8a8..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/CONTRIBUTORS
-+++ /dev/null
-@@ -1,9 +0,0 @@
--Alex A. Skinner
--Andrew Tunnell-Jones
--Ask Bjørn Hansen
--Dave Cheney
--Dusty Wilson
--Marek Majkowski
--Peter van Dijk
--Omri Bahumi
--Alex Sergeyev
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT b/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT
-deleted file mode 100644
-index 35702b1..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/COPYRIGHT
-+++ /dev/null
-@@ -1,9 +0,0 @@
--Copyright 2009 The Go Authors. All rights reserved. Use of this source code
--is governed by a BSD-style license that can be found in the LICENSE file.
--Extensions of the original work are copyright (c) 2011 Miek Gieben
--
--Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
--governed by a BSD-style license that can be found in the LICENSE file.
--
--Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
--governed by a BSD-style license that can be found in the LICENSE file.
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/LICENSE b/Godeps/_workspace/src/github.com/miekg/dns/LICENSE
-deleted file mode 100644
-index 5763fa7..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/LICENSE
-+++ /dev/null
-@@ -1,32 +0,0 @@
--Extensions of the original work are copyright (c) 2011 Miek Gieben
--
--As this is fork of the official Go code the same license applies:
--
--Copyright (c) 2009 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/README.md b/Godeps/_workspace/src/github.com/miekg/dns/README.md
-deleted file mode 100644
-index 3cb850a..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/README.md
-+++ /dev/null
-@@ -1,140 +0,0 @@
--[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
--
--# Alternative (more granular) approach to a DNS library
--
--> Less is more.
--
--Complete and usable DNS library. All widely used Resource Records are
--supported, including the DNSSEC types. It follows a lean and mean philosophy.
--If there is stuff you should know as a DNS programmer there isn't a convenience
--function for it. Server side and client side programming is supported, i.e. you
--can build servers and resolvers with it.
--
--If you like this, you may also be interested in:
--
--* https://github.com/miekg/unbound -- Go wrapper for the Unbound resolver.
--
--# Goals
--
--* KISS;
--* Fast;
--* Small API; if it's easy to code in Go, don't make a function for it.
--
--# Users
--
--A not-so-up-to-date-list-that-may-be-actually-current:
--
--* https://github.com/abh/geodns
--* http://www.statdns.com/
--* http://www.dnsinspect.com/
--* https://github.com/chuangbo/jianbing-dictionary-dns
--* http://www.dns-lg.com/
--* https://github.com/fcambus/rrda
--* https://github.com/kenshinx/godns
--* https://github.com/skynetservices/skydns
--* https://github.com/DevelopersPL/godnsagent
--* https://github.com/duedil-ltd/discodns
--
--Send pull request if you want to be listed here.
--
--# Features
--
--* UDP/TCP queries, IPv4 and IPv6;
--* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
--* Fast:
--    * Reply speed around ~ 80K qps (faster hardware results in more qps);
--    * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
--* Server side programming (mimicking the net/http package);
--* Client side programming;
--* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
--* EDNS0, NSID;
--* AXFR/IXFR;
--* TSIG, SIG(0);
--* DNS name compression;
--* Depends only on the standard library.
--
--Have fun!
--
--Miek Gieben  -  2010-2012  -  <miek at miek.nl>
--
--# Building
--
--Building is done with the `go` tool. If you have setup your GOPATH
--correctly, the following should work:
--
--    go get github.com/miekg/dns
--    go build github.com/miekg/dns
--
--## Examples
--
--A short "how to use the API" is at the beginning of dns.go (this will also show
--when you call `godoc github.com/miekg/dns`).
--
--Example programs can be found in the `github.com/miekg/exdns` repository.
--
--## Supported RFCs
--
--*all of them*
--
--* 103{4,5} - DNS standard
--* 1348 - NSAP record
--* 1982 - Serial Arithmetic
--* 1876 - LOC record
--* 1995 - IXFR
--* 1996 - DNS notify
--* 2136 - DNS Update (dynamic updates)
--* 2181 - RRset definition - there is no RRset type though, just []RR
--* 2537 - RSAMD5 DNS keys
--* 2065 - DNSSEC (updated in later RFCs)
--* 2671 - EDNS record
--* 2782 - SRV record
--* 2845 - TSIG record
--* 2915 - NAPTR record
--* 2929 - DNS IANA Considerations
--* 3110 - RSASHA1 DNS keys
--* 3225 - DO bit (DNSSEC OK)
--* 340{1,2,3} - NAPTR record
--* 3445 - Limiting the scope of (DNS)KEY
--* 3597 - Unknown RRs
--* 403{3,4,5} - DNSSEC + validation functions
--* 4255 - SSHFP record
--* 4343 - Case insensitivity
--* 4408 - SPF record
--* 4509 - SHA256 Hash in DS
--* 4592 - Wildcards in the DNS
--* 4635 - HMAC SHA TSIG
--* 4701 - DHCID
--* 4892 - id.server
--* 5001 - NSID
--* 5155 - NSEC3 record
--* 5205 - HIP record
--* 5702 - SHA2 in the DNS
--* 5936 - AXFR
--* 5966 - TCP implementation recommendations
--* 6605 - ECDSA
--* 6725 - IANA Registry Update
--* 6742 - ILNP DNS
--* 6891 - EDNS0 update
--* 6895 - DNS IANA considerations
--* 6975 - Algorithm Understanding in DNSSEC
--* 7043 - EUI48/EUI64 records
--* 7314 - DNS (EDNS) EXPIRE Option
--* xxxx - URI record (draft)
--* xxxx - EDNS0 DNS Update Lease (draft)
--
--## Loosely based upon
--
--* `ldns`
--* `NSD`
--* `Net::DNS`
--* `GRONG`
--
--## TODO
--
--* privatekey.Precompute() when signing?
--* Last remaining RRs: APL, ATMA, A6 and NXT;
--* Missing in parsing: ISDN, UNSPEC, ATMA;
--* CAA parsing is broken;
--* NSEC(3) cover/match/closest enclose;
--* Replies with TC bit are not parsed to the end;
--* Create IsMsg to validate a message before fully parsing it.
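
The README's "server side programming (mimicking the net/http package)" claim is easier to picture with a small sketch. This is illustrative only (the zone name and port are arbitrary) and assumes the package-level HandleFunc/ListenAndServe helpers shipped in this vendored copy, with a nil handler falling back to the default mux as in net/http.

    package main

    import (
        "log"

        "github.com/miekg/dns"
    )

    // handle answers every query for the registered zone with an empty NOERROR reply.
    func handle(w dns.ResponseWriter, req *dns.Msg) {
        m := new(dns.Msg)
        m.SetReply(req) // copies the question and sets the response bit
        w.WriteMsg(m)
    }

    func main() {
        dns.HandleFunc("example.org.", handle)
        // Serve UDP on an unprivileged port; nil handler means the default mux.
        if err := dns.ListenAndServe(":8053", "udp", nil); err != nil {
            log.Fatalf("failed to start DNS server: %v", err)
        }
    }
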
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client.go b/Godeps/_workspace/src/github.com/miekg/dns/client.go
-deleted file mode 100644
-index ee8e223..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/client.go
-+++ /dev/null
-@@ -1,319 +0,0 @@
--package dns
--
--// A client implementation.
--
--import (
--	"bytes"
--	"io"
--	"net"
--	"time"
--)
--
--const dnsTimeout time.Duration = 2 * 1e9
--const tcpIdleTimeout time.Duration = 8 * time.Second
--
--// A Conn represents a connection to a DNS server.
--type Conn struct {
--	net.Conn                         // a net.Conn holding the connection
--	UDPSize        uint16            // minimum receive buffer for UDP messages
--	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
--	rtt            time.Duration
--	t              time.Time
--	tsigRequestMAC string
--}
--
--// A Client defines parameters for a DNS client.
--type Client struct {
--	Net            string            // if "tcp" a TCP query will be initiated, otherwise a UDP one (default is "" for UDP)
--	UDPSize        uint16            // minimum receive buffer for UDP messages
--	DialTimeout    time.Duration     // net.DialTimeout (ns), defaults to 2 * 1e9
--	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9
--	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9
--	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
--	SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
--	group          singleflight
--}
--
--// Exchange performs a synchronous UDP query. It sends the message m to the address
--// contained in a and waits for a reply. Exchange does not retry a failed query, nor
--// will it fall back to TCP in case of truncation.
--// If you need to send a DNS message on an already existing connection, you can use the
--// following:
--//
--//	co := &dns.Conn{Conn: c} // c is your net.Conn
--//	co.WriteMsg(m)
--//	in, err  := co.ReadMsg()
--//	co.Close()
--//
--func Exchange(m *Msg, a string) (r *Msg, err error) {
--	var co *Conn
--	co, err = DialTimeout("udp", a, dnsTimeout)
--	if err != nil {
--		return nil, err
--	}
--
--	defer co.Close()
--	co.SetReadDeadline(time.Now().Add(dnsTimeout))
--	co.SetWriteDeadline(time.Now().Add(dnsTimeout))
--	if err = co.WriteMsg(m); err != nil {
--		return nil, err
--	}
--	r, err = co.ReadMsg()
--	return r, err
--}
--
--// ExchangeConn performs a synchronous query. It sends the message m via the connection
--// c and waits for a reply. The connection c is not closed by ExchangeConn.
--// This function is going away, but can easily be mimicked:
--//
--//	co := &dns.Conn{Conn: c} // c is your net.Conn
--//	co.WriteMsg(m)
--//	in, _  := co.ReadMsg()
--//	co.Close()
--//
--func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
--	println("dns: this function is deprecated")
--	co := new(Conn)
--	co.Conn = c
--	if err = co.WriteMsg(m); err != nil {
--		return nil, err
--	}
--	r, err = co.ReadMsg()
--	return r, err
--}
--
--// Exchange performs a synchronous query. It sends the message m to the address
--// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
--//
--//	c := new(dns.Client)
--//	in, rtt, err := c.Exchange(message, "127.0.0.1:53")
--//
--// Exchange does not retry a failed query, nor will it fall back to TCP in
--// case of truncation.
--func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
--	if !c.SingleInflight {
--		return c.exchange(m, a)
--	}
--	// This adds a bunch of garbage, TODO(miek).
--	t := "nop"
--	if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
--		t = t1
--	}
--	cl := "nop"
--	if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
--		cl = cl1
--	}
--	r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
--		return c.exchange(m, a)
--	})
--	if err != nil {
--		return r, rtt, err
--	}
--	if shared {
--		return r.Copy(), rtt, nil
--	}
--	return r, rtt, nil
--}
--
--func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
--	timeout := dnsTimeout
--	var co *Conn
--	if c.DialTimeout != 0 {
--		timeout = c.DialTimeout
--	}
--	if c.Net == "" {
--		co, err = DialTimeout("udp", a, timeout)
--	} else {
--		co, err = DialTimeout(c.Net, a, timeout)
--	}
--	if err != nil {
--		return nil, 0, err
--	}
--	timeout = dnsTimeout
--	if c.ReadTimeout != 0 {
--		timeout = c.ReadTimeout
--	}
--	co.SetReadDeadline(time.Now().Add(timeout))
--	timeout = dnsTimeout
--	if c.WriteTimeout != 0 {
--		timeout = c.WriteTimeout
--	}
--	co.SetWriteDeadline(time.Now().Add(timeout))
--	defer co.Close()
--	opt := m.IsEdns0()
--	// If EDNS0 is used use that for size.
--	if opt != nil && opt.UDPSize() >= MinMsgSize {
--		co.UDPSize = opt.UDPSize()
--	}
--	// Otherwise use the client's configured UDP size.
--	if opt == nil && c.UDPSize >= MinMsgSize {
--		co.UDPSize = c.UDPSize
--	}
--	co.TsigSecret = c.TsigSecret
--	if err = co.WriteMsg(m); err != nil {
--		return nil, 0, err
--	}
--	r, err = co.ReadMsg()
--	return r, co.rtt, err
--}
--
--// ReadMsg reads a message from the connection co.
--// If the received message contains a TSIG record the transaction
--// signature is verified.
--func (co *Conn) ReadMsg() (*Msg, error) {
--	var p []byte
--	m := new(Msg)
--	if _, ok := co.Conn.(*net.TCPConn); ok {
--		p = make([]byte, MaxMsgSize)
--	} else {
--		if co.UDPSize >= 512 {
--			p = make([]byte, co.UDPSize)
--		} else {
--			p = make([]byte, MinMsgSize)
--		}
--	}
--	n, err := co.Read(p)
--	if err != nil && n == 0 {
--		return nil, err
--	}
--	p = p[:n]
--	if err := m.Unpack(p); err != nil {
--		return nil, err
--	}
--	co.rtt = time.Since(co.t)
--	if t := m.IsTsig(); t != nil {
--		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
--			return m, ErrSecret
--		}
--		// Need to work on the original message p, as that was used to calculate the tsig.
--		err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
--	}
--	return m, err
--}
--
--// Read implements the net.Conn read method.
--func (co *Conn) Read(p []byte) (n int, err error) {
--	if co.Conn == nil {
--		return 0, ErrConnEmpty
--	}
--	if len(p) < 2 {
--		return 0, io.ErrShortBuffer
--	}
--	if t, ok := co.Conn.(*net.TCPConn); ok {
--		n, err = t.Read(p[0:2])
--		if err != nil || n != 2 {
--			return n, err
--		}
--		l, _ := unpackUint16(p[0:2], 0)
--		if l == 0 {
--			return 0, ErrShortRead
--		}
--		if int(l) > len(p) {
--			return int(l), io.ErrShortBuffer
--		}
--		n, err = t.Read(p[:l])
--		if err != nil {
--			return n, err
--		}
--		i := n
--		for i < int(l) {
--			j, err := t.Read(p[i:int(l)])
--			if err != nil {
--				return i, err
--			}
--			i += j
--		}
--		n = i
--		return n, err
--	}
--	// UDP connection
--	n, err = co.Conn.Read(p)
--	if err != nil {
--		return n, err
--	}
--	return n, err
--}
--
--// WriteMsg sends a message through the connection co.
--// If the message m contains a TSIG record the transaction
--// signature is calculated.
--func (co *Conn) WriteMsg(m *Msg) (err error) {
--	var out []byte
--	if t := m.IsTsig(); t != nil {
--		mac := ""
--		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
--			return ErrSecret
--		}
--		out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
--		// Set for the next read, although only used in zone transfers
--		co.tsigRequestMAC = mac
--	} else {
--		out, err = m.Pack()
--	}
--	if err != nil {
--		return err
--	}
--	co.t = time.Now()
--	if _, err = co.Write(out); err != nil {
--		return err
--	}
--	return nil
--}
--
--// Write implements the net.Conn Write method.
--func (co *Conn) Write(p []byte) (n int, err error) {
--	if t, ok := co.Conn.(*net.TCPConn); ok {
--		lp := len(p)
--		if lp < 2 {
--			return 0, io.ErrShortBuffer
--		}
--		if lp > MaxMsgSize {
--			return 0, &Error{err: "message too large"}
--		}
--		l := make([]byte, 2, lp+2)
--		l[0], l[1] = packUint16(uint16(lp))
--		p = append(l, p...)
--		n, err := io.Copy(t, bytes.NewReader(p))
--		return int(n), err
--	}
--	n, err = co.Conn.(*net.UDPConn).Write(p)
--	return n, err
--}
--
--// Dial connects to the address on the named network.
--func Dial(network, address string) (conn *Conn, err error) {
--	conn = new(Conn)
--	conn.Conn, err = net.Dial(network, address)
--	if err != nil {
--		return nil, err
--	}
--	return conn, nil
--}
--
--// DialTimeout acts like Dial but takes a timeout.
--func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
--	conn = new(Conn)
--	conn.Conn, err = net.DialTimeout(network, address, timeout)
--	if err != nil {
--		return nil, err
--	}
--	return conn, nil
--}
--
--// Close implements the net.Conn Close method.
--func (co *Conn) Close() error { return co.Conn.Close() }
--
--// LocalAddr implements the net.Conn LocalAddr method.
--func (co *Conn) LocalAddr() net.Addr { return co.Conn.LocalAddr() }
--
--// RemoteAddr implements the net.Conn RemoteAddr method.
--func (co *Conn) RemoteAddr() net.Addr { return co.Conn.RemoteAddr() }
--
--// SetDeadline implements the net.Conn SetDeadline method.
--func (co *Conn) SetDeadline(t time.Time) error { return co.Conn.SetDeadline(t) }
--
--// SetReadDeadline implements the net.Conn SetReadDeadline method.
--func (co *Conn) SetReadDeadline(t time.Time) error { return co.Conn.SetReadDeadline(t) }
--
--// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
--func (co *Conn) SetWriteDeadline(t time.Time) error { return co.Conn.SetWriteDeadline(t) }
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go b/Godeps/_workspace/src/github.com/miekg/dns/client_test.go
-deleted file mode 100644
-index 2113f3b..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/client_test.go
-+++ /dev/null
-@@ -1,195 +0,0 @@
--package dns
--
--import (
--	"testing"
--	"time"
--)
--
--func TestClientSync(t *testing.T) {
--	HandleFunc("miek.nl.", HelloServer)
--	defer HandleRemove("miek.nl.")
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	m := new(Msg)
--	m.SetQuestion("miek.nl.", TypeSOA)
--
--	c := new(Client)
--	r, _, e := c.Exchange(m, addrstr)
--	if e != nil {
--		t.Logf("failed to exchange: %s", e.Error())
--		t.Fail()
--	}
--	if r != nil && r.Rcode != RcodeSuccess {
--		t.Log("failed to get a valid answer")
--		t.Fail()
--		t.Logf("%v\n", r)
--	}
--	// And now with plain Exchange().
--	r, e = Exchange(m, addrstr)
--	if e != nil {
--		t.Logf("failed to exchange: %s", e.Error())
--		t.Fail()
--	}
--	if r != nil && r.Rcode != RcodeSuccess {
--		t.Log("failed to get a valid answer")
--		t.Fail()
--		t.Logf("%v\n", r)
--	}
--}
--
--func TestClientEDNS0(t *testing.T) {
--	HandleFunc("miek.nl.", HelloServer)
--	defer HandleRemove("miek.nl.")
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	m := new(Msg)
--	m.SetQuestion("miek.nl.", TypeDNSKEY)
--
--	m.SetEdns0(2048, true)
--
--	c := new(Client)
--	r, _, e := c.Exchange(m, addrstr)
--	if e != nil {
--		t.Logf("failed to exchange: %s", e.Error())
--		t.Fail()
--	}
--
--	if r != nil && r.Rcode != RcodeSuccess {
--		t.Log("failed to get a valid answer")
--		t.Fail()
--		t.Logf("%v\n", r)
--	}
--}
--
--func TestSingleSingleInflight(t *testing.T) {
--	HandleFunc("miek.nl.", HelloServer)
--	defer HandleRemove("miek.nl.")
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	m := new(Msg)
--	m.SetQuestion("miek.nl.", TypeDNSKEY)
--
--	c := new(Client)
--	c.SingleInflight = true
--	nr := 10
--	ch := make(chan time.Duration)
--	for i := 0; i < nr; i++ {
--		go func() {
--			_, rtt, _ := c.Exchange(m, addrstr)
--			ch <- rtt
--		}()
--	}
--	i := 0
--	var first time.Duration
--	// With inflight *all* rtts are identical, and by doing actual lookups
--	// the chance that this is a coincidence is small.
--Loop:
--	for {
--		select {
--		case rtt := <-ch:
--			if i == 0 {
--				first = rtt
--			} else {
--				if first != rtt {
--					t.Log("all rtts should be equal")
--					t.Fail()
--				}
--			}
--			i++
--			if i == 10 {
--				break Loop
--			}
--		}
--	}
--}
--
--/*
--func TestClientTsigAXFR(t *testing.T) {
--	m := new(Msg)
--	m.SetAxfr("example.nl.")
--	m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
--
--	tr := new(Transfer)
--	tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
--
--	if a, err := tr.In(m, "176.58.119.54:53"); err != nil {
--		t.Log("failed to setup axfr: " + err.Error())
--		t.Fatal()
--	} else {
--		for ex := range a {
--			if ex.Error != nil {
--				t.Logf("error %s\n", ex.Error.Error())
--				t.Fail()
--				break
--			}
--			for _, rr := range ex.RR {
--				t.Logf("%s\n", rr.String())
--			}
--		}
--	}
--}
--
--func TestClientAXFRMultipleEnvelopes(t *testing.T) {
--	m := new(Msg)
--	m.SetAxfr("nlnetlabs.nl.")
--
--	tr := new(Transfer)
--	if a, err := tr.In(m, "213.154.224.1:53"); err != nil {
--		t.Log("Failed to setup axfr" + err.Error())
--		t.Fail()
--		return
--	} else {
--		for ex := range a {
--			if ex.Error != nil {
--				t.Logf("Error %s\n", ex.Error.Error())
--				t.Fail()
--				break
--			}
--		}
--	}
--}
--*/
--
--// ExampleUpdateLeaseTSIG shows how to update a lease signed with TSIG.
--func ExampleUpdateLeaseTSIG(t *testing.T) {
--	m := new(Msg)
--	m.SetUpdate("t.local.ip6.io.")
--	rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1")
--	rrs := make([]RR, 1)
--	rrs[0] = rr
--	m.Insert(rrs)
--
--	lease_rr := new(OPT)
--	lease_rr.Hdr.Name = "."
--	lease_rr.Hdr.Rrtype = TypeOPT
--	e := new(EDNS0_UL)
--	e.Code = EDNS0UL
--	e.Lease = 120
--	lease_rr.Option = append(lease_rr.Option, e)
--	m.Extra = append(m.Extra, lease_rr)
--
--	c := new(Client)
--	m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix())
--	c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="}
--
--	_, _, err := c.Exchange(m, "127.0.0.1:53")
--	if err != nil {
--		t.Log(err.Error())
--		t.Fail()
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go b/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go
-deleted file mode 100644
-index 87cf896..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/clientconfig.go
-+++ /dev/null
-@@ -1,94 +0,0 @@
--package dns
--
--import (
--	"bufio"
--	"os"
--	"strconv"
--	"strings"
--)
--
--// ClientConfig wraps the contents of /etc/resolv.conf.
--type ClientConfig struct {
--	Servers  []string // servers to use
--	Search   []string // suffixes to append to local name
--	Port     string   // what port to use
--	Ndots    int      // number of dots in name to trigger absolute lookup
--	Timeout  int      // seconds before giving up on packet
--	Attempts int      // lost packets before giving up on server, not used in the package dns
--}
--
--// ClientConfigFromFile parses a resolv.conf(5) like file and returns
--// a *ClientConfig.
--func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
--	file, err := os.Open(resolvconf)
--	if err != nil {
--		return nil, err
--	}
--	defer file.Close()
--	c := new(ClientConfig)
--	b := bufio.NewReader(file)
--	c.Servers = make([]string, 0)
--	c.Search = make([]string, 0)
--	c.Port = "53"
--	c.Ndots = 1
--	c.Timeout = 5
--	c.Attempts = 2
--	for line, ok := b.ReadString('\n'); ok == nil; line, ok = b.ReadString('\n') {
--		f := strings.Fields(line)
--		if len(f) < 1 {
--			continue
--		}
--		switch f[0] {
--		case "nameserver": // add one name server
--			if len(f) > 1 {
--				// One more check: make sure server name is
--				// just an IP address.  Otherwise we need DNS
--				// to look it up.
--				name := f[1]
--				c.Servers = append(c.Servers, name)
--			}
--
--		case "domain": // set search path to just this domain
--			if len(f) > 1 {
--				c.Search = make([]string, 1)
--				c.Search[0] = f[1]
--			} else {
--				c.Search = make([]string, 0)
--			}
--
--		case "search": // set search path to given servers
--			c.Search = make([]string, len(f)-1)
--			for i := 0; i < len(c.Search); i++ {
--				c.Search[i] = f[i+1]
--			}
--
--		case "options": // magic options
--			for i := 1; i < len(f); i++ {
--				s := f[i]
--				switch {
--				case len(s) >= 6 && s[:6] == "ndots:":
--					n, _ := strconv.Atoi(s[6:])
--					if n < 1 {
--						n = 1
--					}
--					c.Ndots = n
--				case len(s) >= 8 && s[:8] == "timeout:":
--					n, _ := strconv.Atoi(s[8:])
--					if n < 1 {
--						n = 1
--					}
--					c.Timeout = n
--				case len(s) >= 9 && s[:9] == "attempts:":
--					n, _ := strconv.Atoi(s[9:])
--					if n < 1 {
--						n = 1
--					}
--					c.Attempts = n
--				case s == "rotate":
--					/* not imp */
--				}
--			}
--		}
--	}
--	return c, nil
--}
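
For illustration only (not part of this patch): a minimal sketch of how ClientConfigFromFile is typically combined with the client API, assuming the upstream github.com/miekg/dns import path and a readable /etc/resolv.conf. It sends one question to the first configured nameserver on the configured port.

	package main

	import (
		"fmt"
		"log"
		"net"

		"github.com/miekg/dns"
	)

	func main() {
		// Parse the resolver configuration; Port defaults to "53".
		conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
		if err != nil {
			log.Fatalf("cannot read resolv.conf: %s", err)
		}
		if len(conf.Servers) == 0 {
			log.Fatal("no nameservers configured")
		}

		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeSOA)

		c := new(dns.Client)
		in, rtt, err := c.Exchange(m, net.JoinHostPort(conf.Servers[0], conf.Port))
		if err != nil {
			log.Fatalf("exchange failed: %s", err)
		}
		fmt.Println(rtt, in.Rcode)
	}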
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go b/Godeps/_workspace/src/github.com/miekg/dns/defaults.go
-deleted file mode 100644
-index 0c8fa9c..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/defaults.go
-+++ /dev/null
-@@ -1,242 +0,0 @@
--package dns
--
--import (
--	"errors"
--	"net"
--	"strconv"
--)
--
--const hexDigit = "0123456789abcdef"
--
--// Everything is assumed in ClassINET.
--
--// SetReply creates a reply message from a request message.
--func (dns *Msg) SetReply(request *Msg) *Msg {
--	dns.Id = request.Id
--	dns.RecursionDesired = request.RecursionDesired // Copy rd bit
--	dns.Response = true
--	dns.Opcode = OpcodeQuery
--	dns.Rcode = RcodeSuccess
--	if len(request.Question) > 0 {
--		dns.Question = make([]Question, 1)
--		dns.Question[0] = request.Question[0]
--	}
--	return dns
--}
--
--// SetQuestion creates a question message.
--func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
--	dns.Id = Id()
--	dns.RecursionDesired = true
--	dns.Question = make([]Question, 1)
--	dns.Question[0] = Question{z, t, ClassINET}
--	return dns
--}
--
--// SetNotify creates a notify message.
--func (dns *Msg) SetNotify(z string) *Msg {
--	dns.Opcode = OpcodeNotify
--	dns.Authoritative = true
--	dns.Id = Id()
--	dns.Question = make([]Question, 1)
--	dns.Question[0] = Question{z, TypeSOA, ClassINET}
--	return dns
--}
--
--// SetRcode creates an error message suitable for the request.
--func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
--	dns.SetReply(request)
--	dns.Rcode = rcode
--	return dns
--}
--
--// SetRcodeFormatError creates a message with FormError set.
--func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
--	dns.Rcode = RcodeFormatError
--	dns.Opcode = OpcodeQuery
--	dns.Response = true
--	dns.Authoritative = false
--	dns.Id = request.Id
--	return dns
--}
--
--// SetUpdate makes the message a dynamic update message. It
--// sets the ZONE section to: z, TypeSOA, ClassINET.
--func (dns *Msg) SetUpdate(z string) *Msg {
--	dns.Id = Id()
--	dns.Response = false
--	dns.Opcode = OpcodeUpdate
--	dns.Compress = false // BIND9 cannot handle compression
--	dns.Question = make([]Question, 1)
--	dns.Question[0] = Question{z, TypeSOA, ClassINET}
--	return dns
--}
--
--// SetIxfr creates message for requesting an IXFR.
--func (dns *Msg) SetIxfr(z string, serial uint32) *Msg {
--	dns.Id = Id()
--	dns.Question = make([]Question, 1)
--	dns.Ns = make([]RR, 1)
--	s := new(SOA)
--	s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
--	s.Serial = serial
--	dns.Question[0] = Question{z, TypeIXFR, ClassINET}
--	dns.Ns[0] = s
--	return dns
--}
--
--// SetAxfr creates message for requesting an AXFR.
--func (dns *Msg) SetAxfr(z string) *Msg {
--	dns.Id = Id()
--	dns.Question = make([]Question, 1)
--	dns.Question[0] = Question{z, TypeAXFR, ClassINET}
--	return dns
--}
--
--// SetTsig appends a TSIG RR to the message.
--// This is only a skeleton TSIG RR that is added as the last RR in the
--// additional section. The TSIG is calculated when the message is being sent.
--func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
--	t := new(TSIG)
--	t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
--	t.Algorithm = algo
--	t.Fudge = 300
--	t.TimeSigned = uint64(timesigned)
--	t.OrigId = dns.Id
--	dns.Extra = append(dns.Extra, t)
--	return dns
--}
--
--// SetEdns0 appends an EDNS0 OPT RR to the message.
--// TSIG should always be the last RR in a message.
--func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
--	e := new(OPT)
--	e.Hdr.Name = "."
--	e.Hdr.Rrtype = TypeOPT
--	e.SetUDPSize(udpsize)
--	if do {
--		e.SetDo()
--	}
--	dns.Extra = append(dns.Extra, e)
--	return dns
--}
--
--// IsTsig checks if the message has a TSIG record as the last record
--// in the additional section. It returns the TSIG record found or nil.
--func (dns *Msg) IsTsig() *TSIG {
--	if len(dns.Extra) > 0 {
--		if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
--			return dns.Extra[len(dns.Extra)-1].(*TSIG)
--		}
--	}
--	return nil
--}
--
--// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
--// record in the additional section will do. It returns the OPT record
--// found or nil.
--func (dns *Msg) IsEdns0() *OPT {
--	for _, r := range dns.Extra {
--		if r.Header().Rrtype == TypeOPT {
--			return r.(*OPT)
--		}
--	}
--	return nil
--}
--
--// IsDomainName checks if s is a valid domain name; it returns
--// the number of labels and true when the domain name is valid.
--// Note that a non-fully-qualified domain name is considered valid; in this case
--// the last label is counted in the number of labels.
--// When false is returned, the number of labels is not defined.
--func IsDomainName(s string) (labels int, ok bool) {
--	_, labels, err := packDomainName(s, nil, 0, nil, false)
--	return labels, err == nil
--}
--
--// IsSubDomain checks if child is indeed a child of the parent. Both child and
--// parent are *not* downcased before doing the comparison.
--func IsSubDomain(parent, child string) bool {
--	// Entire child is contained in parent
--	return CompareDomainName(parent, child) == CountLabel(parent)
--}
--
--// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
--// The checking is performed on the binary payload.
--func IsMsg(buf []byte) error {
--	// Header
--	if len(buf) < 12 {
--		return errors.New("dns: bad message header")
--	}
--	// Header: Opcode
--	// TODO(miek): more checks here, e.g. check all header bits.
--	return nil
--}
--
--// IsFqdn checks if a domain name is fully qualified.
--func IsFqdn(s string) bool {
--	l := len(s)
--	if l == 0 {
--		return false
--	}
--	return s[l-1] == '.'
--}
--
--// Fqdn returns the fully qualified domain name from s.
--// If s is already fully qualified, it behaves as the identity function.
--func Fqdn(s string) string {
--	if IsFqdn(s) {
--		return s
--	}
--	return s + "."
--}
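
A short usage sketch for the name helpers above (illustrative fragment, assuming the package is imported as dns and fmt is available):

	name := "www.miek.nl"
	if !dns.IsFqdn(name) {
		name = dns.Fqdn(name) // "www.miek.nl."
	}
	if labels, ok := dns.IsDomainName(name); ok {
		fmt.Println(labels) // 3
	}
	fmt.Println(dns.IsSubDomain("miek.nl.", "www.miek.nl.")) // true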
--
--// Copied from the official Go code.
--
--// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
--// address suitable for reverse DNS (PTR) record lookups or an error if it fails
--// to parse the IP address.
--func ReverseAddr(addr string) (arpa string, err error) {
--	ip := net.ParseIP(addr)
--	if ip == nil {
--		return "", &Error{err: "unrecognized address: " + addr}
--	}
--	if ip.To4() != nil {
--		return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
--			strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
--	}
--	// Must be IPv6
--	buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
--	// Add it, in reverse, to the buffer
--	for i := len(ip) - 1; i >= 0; i-- {
--		v := ip[i]
--		buf = append(buf, hexDigit[v&0xF])
--		buf = append(buf, '.')
--		buf = append(buf, hexDigit[v>>4])
--		buf = append(buf, '.')
--	}
--	// Append "ip6.arpa." and return (buf already has the final .)
--	buf = append(buf, "ip6.arpa."...)
--	return string(buf), nil
--}
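
For example (a sketch, assuming the package is imported as dns), ReverseAddr pairs naturally with a PTR question:

	arpa, err := dns.ReverseAddr("192.0.2.10")
	if err != nil {
		log.Fatal(err)
	}
	// arpa is "10.2.0.192.in-addr.arpa."; IPv6 addresses yield a name under ip6.arpa.
	m := new(dns.Msg)
	m.SetQuestion(arpa, dns.TypePTR)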
--
--// String returns the string representation for the type t.
--func (t Type) String() string {
--	if t1, ok := TypeToString[uint16(t)]; ok {
--		return t1
--	}
--	return "TYPE" + strconv.Itoa(int(t))
--}
--
--// String returns the string representation for the class c.
--func (c Class) String() string {
--	if c1, ok := ClassToString[uint16(c)]; ok {
--		return c1
--	}
--	return "CLASS" + strconv.Itoa(int(c))
--}
--
--// String returns the string representation for the name n.
--func (n Name) String() string {
--	return sprintName(string(n))
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns.go b/Godeps/_workspace/src/github.com/miekg/dns/dns.go
-deleted file mode 100644
-index 7540c0d..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/dns.go
-+++ /dev/null
-@@ -1,193 +0,0 @@
--// Package dns implements a full featured interface to the Domain Name System.
--// Server- and client-side programming is supported.
--// The package allows complete control over what is sent out to the DNS. The package
--// API follows the less-is-more principle, by presenting a small, clean interface.
--//
--// The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
--// TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
--// Note that domain names MUST be fully qualified before sending them; unqualified
--// names in a message will result in a packing failure.
--//
--// Resource records are native types. They are not stored in wire format.
--// Basic usage pattern for creating a new resource record:
--//
--//      r := new(dns.MX)
--//      r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
--//      r.Preference = 10
--//      r.Mx = "mx.miek.nl."
--//
--// Or directly from a string:
--//
--//      mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
--//
--// Or when the default TTL (3600) and class (IN) suit you:
--//
--//      mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
--//
--// Or even:
--//
--//      mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
--//
--// In the DNS, messages are exchanged; these messages contain resource
--// records (sets). Basic use pattern for creating a message:
--//
--//      m := new(dns.Msg)
--//      m.SetQuestion("miek.nl.", dns.TypeMX)
--//
--// Or when not certain if the domain name is fully qualified:
--//
--//	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
--//
--// The message m is now a message with the question section set to ask
--// the MX records for the miek.nl. zone.
--//
--// The following is slightly more verbose, but more flexible:
--//
--//      m1 := new(dns.Msg)
--//      m1.Id = dns.Id()
--//      m1.RecursionDesired = true
--//      m1.Question = make([]dns.Question, 1)
--//      m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
--//
--// After creating a message it can be sent.
--// Basic use pattern for synchronous querying the DNS at a
--// server configured on 127.0.0.1 and port 53:
--//
--//      c := new(dns.Client)
--//      in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
--//
--// Suppressing multiple outstanding queries (with the same question, type and class) is as easy as setting:
--//
--//	c.SingleInflight = true
--//
--// If these "advanced" features are not needed, a simple UDP query can be sent,
--// with:
--//
--//	in, err := dns.Exchange(m1, "127.0.0.1:53")
--//
--// When this function returns you will get a DNS message. A DNS message consists
--// of four sections.
--// The question section: in.Question, the answer section: in.Answer,
--// the authority section: in.Ns and the additional section: in.Extra.
--//
--// Each of these sections (except the Question section) contains a []RR. Basic
--// use pattern for accessing the rdata of a TXT RR as the first RR in
--// the Answer section:
--//
--//	if t, ok := in.Answer[0].(*dns.TXT); ok {
--//		// do something with t.Txt
--//	}
--//
--// Domain Name and TXT Character String Representations
--//
--// Both domain names and TXT character strings are converted to presentation
--// form both when unpacked and when converted to strings.
--//
--// For TXT character strings, tabs, carriage returns and line feeds will be
--// converted to \t, \r and \n respectively. Back slashes and quotations marks
--// will be escaped. Bytes below 32 and above 127 will be converted to \DDD
--// form.
--//
--// For domain names, in addition to the above rules brackets, periods,
--// spaces, semicolons and the at symbol are escaped.
--package dns
--
--import (
--	"strconv"
--)
--
--const (
--	year68         = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
--	DefaultMsgSize = 4096    // Standard default for larger than 512 bytes.
--	MinMsgSize     = 512     // Minimal size of a DNS packet.
--	MaxMsgSize     = 65536   // Largest possible DNS packet.
--	defaultTtl     = 3600    // Default TTL.
--)
--
--// Error represents a DNS error
--type Error struct{ err string }
--
--func (e *Error) Error() string {
--	if e == nil {
--		return "dns: <nil>"
--	}
--	return "dns: " + e.err
--}
--
--// An RR represents a resource record.
--type RR interface {
--	// Header returns the header of a resource record. The header contains
--	// everything up to the rdata.
--	Header() *RR_Header
--	// String returns the text representation of the resource record.
--	String() string
--	// copy returns a copy of the RR
--	copy() RR
--	// len returns the length (in octets) of the uncompressed RR in wire format.
--	len() int
--}
--
--// DNS resource records.
--// There are many types of RRs,
--// but they all share the same header.
--type RR_Header struct {
--	Name     string `dns:"cdomain-name"`
--	Rrtype   uint16
--	Class    uint16
--	Ttl      uint32
--	Rdlength uint16 // length of data after header
--}
--
--func (h *RR_Header) Header() *RR_Header { return h }
--
--// Just to implement the RR interface
--func (h *RR_Header) copy() RR { return nil }
--
--func (h *RR_Header) copyHeader() *RR_Header {
--	r := new(RR_Header)
--	r.Name = h.Name
--	r.Rrtype = h.Rrtype
--	r.Class = h.Class
--	r.Ttl = h.Ttl
--	r.Rdlength = h.Rdlength
--	return r
--}
--
--func (h *RR_Header) String() string {
--	var s string
--
--	if h.Rrtype == TypeOPT {
--		s = ";"
--		// and maybe other things
--	}
--
--	s += sprintName(h.Name) + "\t"
--	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
--	s += Class(h.Class).String() + "\t"
--	s += Type(h.Rrtype).String() + "\t"
--	return s
--}
--
--func (h *RR_Header) len() int {
--	l := len(h.Name) + 1
--	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
--	return l
--}
--
--// ToRFC3597 converts a known RR to the unknown RR representation
--// from RFC 3597.
--func (rr *RFC3597) ToRFC3597(r RR) error {
--	buf := make([]byte, r.len()*2)
--	off, err := PackStruct(r, buf, 0)
--	if err != nil {
--		return err
--	}
--	buf = buf[:off]
--	rawSetRdlength(buf, 0, off)
--	_, err = UnpackStruct(rr, buf, 0)
--	if err != nil {
--		return err
--	}
--	return nil
--}
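
The query pattern described in the package documentation above, collected into one runnable sketch (illustrative only; the resolver address 127.0.0.1:53 is just an example):

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)

		c := new(dns.Client)
		in, _, err := c.Exchange(m, "127.0.0.1:53")
		if err != nil {
			log.Fatal(err)
		}
		for _, rr := range in.Answer {
			if mx, ok := rr.(*dns.MX); ok {
				fmt.Println(mx.Preference, mx.Mx)
			}
		}
	}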
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go
-deleted file mode 100644
-index 16c86f4..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/dns_test.go
-+++ /dev/null
-@@ -1,511 +0,0 @@
--package dns
--
--import (
--	"encoding/hex"
--	"net"
--	"testing"
--)
--
--func TestPackUnpack(t *testing.T) {
--	out := new(Msg)
--	out.Answer = make([]RR, 1)
--	key := new(DNSKEY)
--	key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1}
--	key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
--	key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"
--
--	out.Answer[0] = key
--	msg, err := out.Pack()
--	if err != nil {
--		t.Log("failed to pack msg with DNSKEY")
--		t.Fail()
--	}
--	in := new(Msg)
--	if in.Unpack(msg) != nil {
--		t.Log("failed to unpack msg with DNSKEY")
--		t.Fail()
--	}
--
--	sig := new(RRSIG)
--	sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2,
--		OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.",
--		Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"}
--	sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600}
--
--	out.Answer[0] = sig
--	msg, err = out.Pack()
--	if err != nil {
--		t.Log("failed to pack msg with RRSIG")
--		t.Fail()
--	}
--
--	if in.Unpack(msg) != nil {
--		t.Log("failed to unpack msg with RRSIG")
--		t.Fail()
--	}
--}
--
--func TestPackUnpack2(t *testing.T) {
--	m := new(Msg)
--	m.Extra = make([]RR, 1)
--	m.Answer = make([]RR, 1)
--	dom := "miek.nl."
--	rr := new(A)
--	rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0}
--	rr.A = net.IPv4(127, 0, 0, 1)
--
--	x := new(TXT)
--	x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
--	x.Txt = []string{"heelalaollo"}
--
--	m.Extra[0] = x
--	m.Answer[0] = rr
--	_, err := m.Pack()
--	if err != nil {
--		t.Log("Packing failed: " + err.Error())
--		t.Fail()
--		return
--	}
--}
--
--func TestPackUnpack3(t *testing.T) {
--	m := new(Msg)
--	m.Extra = make([]RR, 2)
--	m.Answer = make([]RR, 1)
--	dom := "miek.nl."
--	rr := new(A)
--	rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0}
--	rr.A = net.IPv4(127, 0, 0, 1)
--
--	x1 := new(TXT)
--	x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
--	x1.Txt = []string{}
--
--	x2 := new(TXT)
--	x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
--	x2.Txt = []string{"heelalaollo"}
--
--	m.Extra[0] = x1
--	m.Extra[1] = x2
--	m.Answer[0] = rr
--	b, err := m.Pack()
--	if err != nil {
--		t.Log("packing failed: " + err.Error())
--		t.Fail()
--		return
--	}
--
--	var unpackMsg Msg
--	err = unpackMsg.Unpack(b)
--	if err != nil {
--		t.Log("unpacking failed")
--		t.Fail()
--		return
--	}
--}
--
--func TestBailiwick(t *testing.T) {
--	yes := map[string]string{
--		"miek.nl": "ns.miek.nl",
--		".":       "miek.nl",
--	}
--	for parent, child := range yes {
--		if !IsSubDomain(parent, child) {
--			t.Logf("%s should be child of %s\n", child, parent)
--			t.Logf("comparelabels %d", CompareDomainName(parent, child))
--			t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
--			t.Fail()
--		}
--	}
--	no := map[string]string{
--		"www.miek.nl":  "ns.miek.nl",
--		"m\\.iek.nl":   "ns.miek.nl",
--		"w\\.iek.nl":   "w.iek.nl",
--		"p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name
--		"miek.nl":      ".",
--	}
--	for parent, child := range no {
--		if IsSubDomain(parent, child) {
--			t.Logf("%s should not be child of %s\n", child, parent)
--			t.Logf("comparelabels %d", CompareDomainName(parent, child))
--			t.Logf("lenlabels %d %d", CountLabel(parent), CountLabel(child))
--			t.Fail()
--		}
--	}
--}
--
--func TestPack(t *testing.T) {
--	rr := []string{"US.    86400	IN	NSEC	0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"}
--	m := new(Msg)
--	var err error
--	m.Answer = make([]RR, 1)
--	for _, r := range rr {
--		m.Answer[0], err = NewRR(r)
--		if err != nil {
--			t.Logf("failed to create RR: %s\n", err.Error())
--			t.Fail()
--			continue
--		}
--		if _, err := m.Pack(); err != nil {
--			t.Logf("packing failed: %s\n", err.Error())
--			t.Fail()
--		}
--	}
--	x := new(Msg)
--	ns, _ := NewRR("pool.ntp.org.   390 IN  NS  a.ntpns.org")
--	ns.(*NS).Ns = "a.ntpns.org"
--	x.Ns = append(m.Ns, ns)
--	x.Ns = append(m.Ns, ns)
--	x.Ns = append(m.Ns, ns)
--	// This crashes due to the fact that a.ntpns.org isn't an FQDN
--	// How to recover() from a remove panic()?
--	if _, err := x.Pack(); err == nil {
--		t.Log("packing should fail")
--		t.Fail()
--	}
--	x.Answer = make([]RR, 1)
--	x.Answer[0], err = NewRR(rr[0])
--	if _, err := x.Pack(); err == nil {
--		t.Log("packing should fail")
--		t.Fail()
--	}
--	x.Question = make([]Question, 1)
--	x.Question[0] = Question{";sd#edddds鍛↙赏‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET}
--	if _, err := x.Pack(); err == nil {
--		t.Log("packing should fail")
--		t.Fail()
--	}
--}
--
--func TestPackNAPTR(t *testing.T) {
--	for _, n := range []string{
--		`apple.com. IN NAPTR   100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`,
--		`apple.com. IN NAPTR   90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`,
--		`apple.com. IN NAPTR   50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`,
--	} {
--		rr, _ := NewRR(n)
--		msg := make([]byte, rr.len())
--		if off, err := PackRR(rr, msg, 0, nil, false); err != nil {
--			t.Logf("packing failed: %s", err.Error())
--			t.Logf("length %d, need more than %d\n", rr.len(), off)
--			t.Fail()
--		} else {
--			t.Logf("buf size needed: %d\n", off)
--		}
--	}
--}
--
--func TestCompressLength(t *testing.T) {
--	m := new(Msg)
--	m.SetQuestion("miek.nl", TypeMX)
--	ul := m.Len()
--	m.Compress = true
--	if ul != m.Len() {
--		t.Fatalf("should be equal")
--	}
--}
--
--// Does the predicted length match final packed length?
--func TestMsgCompressLength(t *testing.T) {
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		msg.Compress = true
--		return msg
--	}
--
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1")
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	tests := []*Msg{
--		makeMsg(name1, []RR{rrA}, nil, nil),
--		makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)}
--
--	for _, msg := range tests {
--		predicted := msg.Len()
--		buf, err := msg.Pack()
--		if err != nil {
--			t.Error(err)
--			t.Fail()
--		}
--		if predicted < len(buf) {
--			t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d\n",
--				msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
--			t.Fail()
--		}
--	}
--}
--
--func TestMsgLength(t *testing.T) {
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		return msg
--	}
--
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1")
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	tests := []*Msg{
--		makeMsg(name1, []RR{rrA}, nil, nil),
--		makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)}
--
--	for _, msg := range tests {
--		predicted := msg.Len()
--		buf, err := msg.Pack()
--		if err != nil {
--			t.Error(err)
--			t.Fail()
--		}
--		if predicted < len(buf) {
--			t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d\n",
--				msg.Question[0].Name, predicted, len(buf))
--			t.Fail()
--		}
--	}
--}
--
--func TestMsgLength2(t *testing.T) {
--	// Serialized replies
--	var testMessages = []string{
--		// google.com. IN A?
--		"064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000",
--		// amazon.com. IN A? (reply has no EDNS0 record)
--		// TODO(miek): this one is off-by-one, need to find out why
--		//"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001",
--		// yahoo.com. IN A?
--		"fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000",
--		// microsoft.com. IN A?
--		"f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000",
--		// google.com. IN MX?
--		"724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000",
--		// reddit.com. IN A?
--		"12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000",
--	}
--
--	for i, hexData := range testMessages {
--		// we won't fail the decoding of the hex
--		input, _ := hex.DecodeString(hexData)
--		m := new(Msg)
--		m.Unpack(input)
--		//println(m.String())
--		m.Compress = true
--		lenComp := m.Len()
--		b, _ := m.Pack()
--		pacComp := len(b)
--		m.Compress = false
--		lenUnComp := m.Len()
--		b, _ = m.Pack()
--		pacUnComp := len(b)
--		if pacComp+1 != lenComp {
--			t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i)
--		}
--		if pacUnComp+1 != lenUnComp {
--			t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i)
--		}
--	}
--}
--
--func TestMsgLengthCompressionMalformed(t *testing.T) {
--	// SOA with empty hostmaster, which is illegal
--	soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345},
--		Ns:      ".",
--		Mbox:    "",
--		Serial:  0,
--		Refresh: 28800,
--		Retry:   7200,
--		Expire:  604800,
--		Minttl:  60}
--	m := new(Msg)
--	m.Compress = true
--	m.Ns = []RR{soa}
--	m.Len() // Should not crash.
--}
--
--func BenchmarkMsgLength(b *testing.B) {
--	b.StopTimer()
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		msg.Compress = true
--		return msg
--	}
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		msg.Len()
--	}
--}
--
--func BenchmarkMsgLengthPack(b *testing.B) {
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		msg.Compress = true
--		return msg
--	}
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_, _ = msg.Pack()
--	}
--}
--
--func BenchmarkMsgPackBuffer(b *testing.B) {
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		msg.Compress = true
--		return msg
--	}
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
--	buf := make([]byte, 512)
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_, _ = msg.PackBuffer(buf)
--	}
--}
--
--func BenchmarkMsgUnpack(b *testing.B) {
--	makeMsg := func(question string, ans, ns, e []RR) *Msg {
--		msg := new(Msg)
--		msg.SetQuestion(Fqdn(question), TypeANY)
--		msg.Answer = append(msg.Answer, ans...)
--		msg.Ns = append(msg.Ns, ns...)
--		msg.Extra = append(msg.Extra, e...)
--		msg.Compress = true
--		return msg
--	}
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1)
--	msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)
--	msg_buf, _ := msg.Pack()
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_ = msg.Unpack(msg_buf)
--	}
--}
--
--func BenchmarkPackDomainName(b *testing.B) {
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	buf := make([]byte, len(name1)+1)
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_, _ = PackDomainName(name1, buf, 0, nil, false)
--	}
--}
--
--func BenchmarkUnpackDomainName(b *testing.B) {
--	name1 := "12345678901234567890123456789012345.12345678.123."
--	buf := make([]byte, len(name1)+1)
--	_, _ = PackDomainName(name1, buf, 0, nil, false)
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_, _, _ = UnpackDomainName(buf, 0)
--	}
--}
--
--func BenchmarkUnpackDomainNameUnprintable(b *testing.B) {
--	name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123."
--	buf := make([]byte, len(name1)+1)
--	_, _ = PackDomainName(name1, buf, 0, nil, false)
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		_, _, _ = UnpackDomainName(buf, 0)
--	}
--}
--
--func TestToRFC3597(t *testing.T) {
--	a, _ := NewRR("miek.nl. IN A 10.0.1.1")
--	x := new(RFC3597)
--	x.ToRFC3597(a)
--	if x.String() != `miek.nl.	3600	CLASS1	TYPE1	\# 4 0a000101` {
--		t.Fail()
--	}
--}
--
--func TestNoRdataPack(t *testing.T) {
--	data := make([]byte, 1024)
--	for typ, fn := range typeToRR {
--		if typ == TypeCAA {
--			continue // TODO(miek): known omission
--		}
--		r := fn()
--		*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
--		_, e := PackRR(r, data, 0, nil, false)
--		if e != nil {
--			t.Logf("failed to pack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
--			t.Fail()
--		}
--	}
--}
--
--// TODO(miek): fix dns buffer too small errors this throws
--func TestNoRdataUnpack(t *testing.T) {
--	data := make([]byte, 1024)
--	for typ, fn := range typeToRR {
--		if typ == TypeSOA || typ == TypeTSIG || typ == TypeWKS {
--			// SOA, TSIG will not be seen (like this) in dyn. updates?
--			// WKS is a bug, but it is a deprecated record anyway.
--			continue
--		}
--		r := fn()
--		*r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 3600}
--		off, e := PackRR(r, data, 0, nil, false)
--		if e != nil {
--			// Should always work; TestNoRdataPack should have caught this
--			continue
--		}
--		rr, _, e := UnpackRR(data[:off], 0)
--		if e != nil {
--			t.Logf("failed to unpack RR with zero rdata: %s: %s\n", TypeToString[typ], e.Error())
--			t.Fail()
--		}
--		t.Logf("%s\n", rr)
--	}
--}
--
--func TestRdataOverflow(t *testing.T) {
--	rr := new(RFC3597)
--	rr.Hdr.Name = "."
--	rr.Hdr.Class = ClassINET
--	rr.Hdr.Rrtype = 65280
--	rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF))
--	buf := make([]byte, 0xFFFF*2)
--	if _, err := PackRR(rr, buf, 0, nil, false); err != nil {
--		t.Fatalf("maximum size rrdata pack failed: %v", err)
--	}
--	rr.Rdata += "00"
--	if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata {
--		t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err)
--	}
--}
--
--func TestCopy(t *testing.T) {
--	rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL
--	rr1 := Copy(rr)
--	if rr.String() != rr1.String() {
--		t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String())
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go
-deleted file mode 100644
-index d1c2ae6..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec.go
-+++ /dev/null
-@@ -1,756 +0,0 @@
--// DNSSEC
--//
--// DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
--// uses public key cryptography to sign resource records. The
--// public keys are stored in DNSKEY records and the signatures in RRSIG records.
--//
--// Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
--// to a request.
--//
--//      m := new(dns.Msg)
--//      m.SetEdns0(4096, true)
--//
--// Signature generation, signature verification and key generation are all supported.
--package dns
--
--import (
--	"bytes"
--	"crypto"
--	"crypto/dsa"
--	"crypto/ecdsa"
--	"crypto/elliptic"
--	"crypto/md5"
--	"crypto/rand"
--	"crypto/rsa"
--	"crypto/sha1"
--	"crypto/sha256"
--	"crypto/sha512"
--	"encoding/hex"
--	"hash"
--	"io"
--	"math/big"
--	"sort"
--	"strings"
--	"time"
--)
--
--// DNSSEC encryption algorithm codes.
--const (
--	_ uint8 = iota
--	RSAMD5
--	DH
--	DSA
--	_ // Skip 4, RFC 6725, section 2.1
--	RSASHA1
--	DSANSEC3SHA1
--	RSASHA1NSEC3SHA1
--	RSASHA256
--	_ // Skip 9, RFC 6725, section 2.1
--	RSASHA512
--	_ // Skip 11, RFC 6725, section 2.1
--	ECCGOST
--	ECDSAP256SHA256
--	ECDSAP384SHA384
--	INDIRECT   uint8 = 252
--	PRIVATEDNS uint8 = 253 // Private (experimental keys)
--	PRIVATEOID uint8 = 254
--)
--
--// DNSSEC hashing algorithm codes.
--const (
--	_      uint8 = iota
--	SHA1         // RFC 4034
--	SHA256       // RFC 4509
--	GOST94       // RFC 5933
--	SHA384       // Experimental
--	SHA512       // Experimental
--)
--
--// DNSKEY flag values.
--const (
--	SEP    = 1
--	REVOKE = 1 << 7
--	ZONE   = 1 << 8
--)
--
--// The RRSIG needs to be converted to wire format with some of
--// the rdata (the signature) missing. Use this struct to ease
--// the conversion (and re-use the pack/unpack functions).
--type rrsigWireFmt struct {
--	TypeCovered uint16
--	Algorithm   uint8
--	Labels      uint8
--	OrigTtl     uint32
--	Expiration  uint32
--	Inception   uint32
--	KeyTag      uint16
--	SignerName  string `dns:"domain-name"`
--	/* No Signature */
--}
--
--// Used for converting DNSKEY's rdata to wirefmt.
--type dnskeyWireFmt struct {
--	Flags     uint16
--	Protocol  uint8
--	Algorithm uint8
--	PublicKey string `dns:"base64"`
--	/* Nothing is left out */
--}
--
--func divRoundUp(a, b int) int {
--	return (a + b - 1) / b
--}
--
--// KeyTag calculates the keytag (or key-id) of the DNSKEY.
--func (k *DNSKEY) KeyTag() uint16 {
--	if k == nil {
--		return 0
--	}
--	var keytag int
--	switch k.Algorithm {
--	case RSAMD5:
--		// Look at the bottom two bytes of the modulus, which is the last
--		// item in the pubkey. We could do this faster by looking directly
--		// at the base64 values. But I'm lazy.
--		modulus, _ := fromBase64([]byte(k.PublicKey))
--		if len(modulus) > 1 {
--			x, _ := unpackUint16(modulus, len(modulus)-2)
--			keytag = int(x)
--		}
--	default:
--		keywire := new(dnskeyWireFmt)
--		keywire.Flags = k.Flags
--		keywire.Protocol = k.Protocol
--		keywire.Algorithm = k.Algorithm
--		keywire.PublicKey = k.PublicKey
--		wire := make([]byte, DefaultMsgSize)
--		n, err := PackStruct(keywire, wire, 0)
--		if err != nil {
--			return 0
--		}
--		wire = wire[:n]
--		for i, v := range wire {
--			if i&1 != 0 {
--				keytag += int(v) // must be larger than uint32
--			} else {
--				keytag += int(v) << 8
--			}
--		}
--		keytag += (keytag >> 16) & 0xFFFF
--		keytag &= 0xFFFF
--	}
--	return uint16(keytag)
--}
--
--// ToDS converts a DNSKEY record to a DS record.
--func (k *DNSKEY) ToDS(h uint8) *DS {
--	if k == nil {
--		return nil
--	}
--	ds := new(DS)
--	ds.Hdr.Name = k.Hdr.Name
--	ds.Hdr.Class = k.Hdr.Class
--	ds.Hdr.Rrtype = TypeDS
--	ds.Hdr.Ttl = k.Hdr.Ttl
--	ds.Algorithm = k.Algorithm
--	ds.DigestType = h
--	ds.KeyTag = k.KeyTag()
--
--	keywire := new(dnskeyWireFmt)
--	keywire.Flags = k.Flags
--	keywire.Protocol = k.Protocol
--	keywire.Algorithm = k.Algorithm
--	keywire.PublicKey = k.PublicKey
--	wire := make([]byte, DefaultMsgSize)
--	n, err := PackStruct(keywire, wire, 0)
--	if err != nil {
--		return nil
--	}
--	wire = wire[:n]
--
--	owner := make([]byte, 255)
--	off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
--	if err1 != nil {
--		return nil
--	}
--	owner = owner[:off]
--	// RFC4034:
--	// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
--	// "|" denotes concatenation
--	// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
--
--	// digest buffer
--	digest := append(owner, wire...) // another copy
--
--	switch h {
--	case SHA1:
--		s := sha1.New()
--		io.WriteString(s, string(digest))
--		ds.Digest = hex.EncodeToString(s.Sum(nil))
--	case SHA256:
--		s := sha256.New()
--		io.WriteString(s, string(digest))
--		ds.Digest = hex.EncodeToString(s.Sum(nil))
--	case SHA384:
--		s := sha512.New384()
--		io.WriteString(s, string(digest))
--		ds.Digest = hex.EncodeToString(s.Sum(nil))
--	case GOST94:
--		/* I have no clue */
--	default:
--		return nil
--	}
--	return ds
--}
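
A sketch of KeyTag and ToDS in use (illustrative fragment; the header and flag values follow the tests later in this patch, and the package is assumed imported as dns):

	k := new(dns.DNSKEY)
	k.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	k.Flags = 256
	k.Protocol = 3
	k.Algorithm = dns.RSASHA256
	if _, err := k.Generate(1024); err != nil {
		log.Fatal(err)
	}
	fmt.Println(k.KeyTag()) // 16-bit key tag computed over the DNSKEY RDATA
	if ds := k.ToDS(dns.SHA256); ds != nil {
		fmt.Println(ds.String()) // DS record carrying a SHA-256 digest of the key
	}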
--
--// Sign signs an RRSet. The signature needs to be filled in with
--// the values: Inception, Expiration, KeyTag, SignerName and Algorithm.
--// The rest is copied from the RRset. Sign returns nil when the signing
--// succeeded, and an error otherwise.
--// There is no check if RRSet is a proper (RFC 2181) RRSet.
--// If OrigTTL is non zero, it is used as-is, otherwise the TTL of the RRset
--// is used as the OrigTTL.
--func (rr *RRSIG) Sign(k PrivateKey, rrset []RR) error {
--	if k == nil {
--		return ErrPrivKey
--	}
--	// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
--	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
--		return ErrKey
--	}
--
--	rr.Hdr.Rrtype = TypeRRSIG
--	rr.Hdr.Name = rrset[0].Header().Name
--	rr.Hdr.Class = rrset[0].Header().Class
--	if rr.OrigTtl == 0 { // If set don't override
--		rr.OrigTtl = rrset[0].Header().Ttl
--	}
--	rr.TypeCovered = rrset[0].Header().Rrtype
--	rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
--
--	if strings.HasPrefix(rrset[0].Header().Name, "*") {
--		rr.Labels-- // wildcard, remove from label count
--	}
--
--	sigwire := new(rrsigWireFmt)
--	sigwire.TypeCovered = rr.TypeCovered
--	sigwire.Algorithm = rr.Algorithm
--	sigwire.Labels = rr.Labels
--	sigwire.OrigTtl = rr.OrigTtl
--	sigwire.Expiration = rr.Expiration
--	sigwire.Inception = rr.Inception
--	sigwire.KeyTag = rr.KeyTag
--	// For signing, lowercase this name
--	sigwire.SignerName = strings.ToLower(rr.SignerName)
--
--	// Create the desired binary blob
--	signdata := make([]byte, DefaultMsgSize)
--	n, err := PackStruct(sigwire, signdata, 0)
--	if err != nil {
--		return err
--	}
--	signdata = signdata[:n]
--	wire, err := rawSignatureData(rrset, rr)
--	if err != nil {
--		return err
--	}
--	signdata = append(signdata, wire...)
--
--	var sighash []byte
--	var h hash.Hash
--	var ch crypto.Hash // Only need for RSA
--	var intlen int
--	switch rr.Algorithm {
--	case DSA, DSANSEC3SHA1:
--		// Implicit in the ParameterSizes
--	case RSASHA1, RSASHA1NSEC3SHA1:
--		h = sha1.New()
--		ch = crypto.SHA1
--	case RSASHA256, ECDSAP256SHA256:
--		h = sha256.New()
--		ch = crypto.SHA256
--		intlen = 32
--	case ECDSAP384SHA384:
--		h = sha512.New384()
--		intlen = 48
--	case RSASHA512:
--		h = sha512.New()
--		ch = crypto.SHA512
--	case RSAMD5:
--		fallthrough // Deprecated in RFC 6725
--	default:
--		return ErrAlg
--	}
--	io.WriteString(h, string(signdata))
--	sighash = h.Sum(nil)
--
--	switch p := k.(type) {
--	case *dsa.PrivateKey:
--		r1, s1, err := dsa.Sign(rand.Reader, p, sighash)
--		if err != nil {
--			return err
--		}
--		signature := []byte{0x4D} // T value, here the ASCII M for Miek (not used in DNSSEC)
--		signature = append(signature, intToBytes(r1, 20)...)
--		signature = append(signature, intToBytes(s1, 20)...)
--		rr.Signature = toBase64(signature)
--	case *rsa.PrivateKey:
--		// We can use nil as rand.Reader here (says AGL)
--		signature, err := rsa.SignPKCS1v15(nil, p, ch, sighash)
--		if err != nil {
--			return err
--		}
--		rr.Signature = toBase64(signature)
--	case *ecdsa.PrivateKey:
--		r1, s1, err := ecdsa.Sign(rand.Reader, p, sighash)
--		if err != nil {
--			return err
--		}
--		signature := intToBytes(r1, intlen)
--		signature = append(signature, intToBytes(s1, intlen)...)
--		rr.Signature = toBase64(signature)
--	default:
--		// Not given the correct key
--		return ErrKeyAlg
--	}
--	return nil
--}
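
Putting Sign (and Verify below) together in one sketch, filling in the RRSIG fields the documentation above requires before signing (illustrative fragment; imports of log and time, plus the dns package name, are assumed):

	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 14400}
	key.Flags = 256
	key.Protocol = 3
	key.Algorithm = dns.RSASHA256
	priv, err := key.Generate(1024)
	if err != nil {
		log.Fatal(err)
	}

	// The RRset to sign: a single SOA, as in the tests later in this patch.
	soa, _ := dns.NewRR("miek.nl. 14400 IN SOA open.nlnetlabs.nl. miekg.atoom.net. 1293945905 14400 3600 604800 86400")

	sig := new(dns.RRSIG)
	sig.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 14400}
	sig.Algorithm = key.Algorithm // must be set, per the Sign documentation
	sig.KeyTag = key.KeyTag()     // must be set
	sig.SignerName = key.Hdr.Name // must be set
	sig.Inception = uint32(time.Now().Unix() - 300)
	sig.Expiration = uint32(time.Now().Unix() + 14*24*3600)

	if err := sig.Sign(priv, []dns.RR{soa}); err != nil {
		log.Fatal(err)
	}
	if err := sig.Verify(key, []dns.RR{soa}); err != nil {
		log.Fatal(err)
	}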
--
--// Verify validates an RRSet with the signature and key. This is only the
--// cryptographic test, the signature validity period must be checked separately.
--// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
--func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
--	// First the easy checks
--	if len(rrset) == 0 {
--		return ErrRRset
--	}
--	if rr.KeyTag != k.KeyTag() {
--		return ErrKey
--	}
--	if rr.Hdr.Class != k.Hdr.Class {
--		return ErrKey
--	}
--	if rr.Algorithm != k.Algorithm {
--		return ErrKey
--	}
--	if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
--		return ErrKey
--	}
--	if k.Protocol != 3 {
--		return ErrKey
--	}
--	for _, r := range rrset {
--		if r.Header().Class != rr.Hdr.Class {
--			return ErrRRset
--		}
--		if r.Header().Rrtype != rr.TypeCovered {
--			return ErrRRset
--		}
--	}
--	// RFC 4035 5.3.2.  Reconstructing the Signed Data
--	// Copy the sig, except the rrsig data
--	sigwire := new(rrsigWireFmt)
--	sigwire.TypeCovered = rr.TypeCovered
--	sigwire.Algorithm = rr.Algorithm
--	sigwire.Labels = rr.Labels
--	sigwire.OrigTtl = rr.OrigTtl
--	sigwire.Expiration = rr.Expiration
--	sigwire.Inception = rr.Inception
--	sigwire.KeyTag = rr.KeyTag
--	sigwire.SignerName = strings.ToLower(rr.SignerName)
--	// Create the desired binary blob
--	signeddata := make([]byte, DefaultMsgSize)
--	n, err := PackStruct(sigwire, signeddata, 0)
--	if err != nil {
--		return err
--	}
--	signeddata = signeddata[:n]
--	wire, err := rawSignatureData(rrset, rr)
--	if err != nil {
--		return err
--	}
--	signeddata = append(signeddata, wire...)
--
--	sigbuf := rr.sigBuf()           // Get the binary signature data
--	if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
--		// TODO(mg)
--		// remove the domain name and assume it's ours
--	}
--
--	switch rr.Algorithm {
--	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
--		// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
--		pubkey := k.publicKeyRSA() // Get the key
--		if pubkey == nil {
--			return ErrKey
--		}
--		// Setup the hash as defined for this alg.
--		var h hash.Hash
--		var ch crypto.Hash
--		switch rr.Algorithm {
--		case RSAMD5:
--			h = md5.New()
--			ch = crypto.MD5
--		case RSASHA1, RSASHA1NSEC3SHA1:
--			h = sha1.New()
--			ch = crypto.SHA1
--		case RSASHA256:
--			h = sha256.New()
--			ch = crypto.SHA256
--		case RSASHA512:
--			h = sha512.New()
--			ch = crypto.SHA512
--		}
--		io.WriteString(h, string(signeddata))
--		sighash := h.Sum(nil)
--		return rsa.VerifyPKCS1v15(pubkey, ch, sighash, sigbuf)
--	case ECDSAP256SHA256, ECDSAP384SHA384:
--		pubkey := k.publicKeyCurve()
--		if pubkey == nil {
--			return ErrKey
--		}
--		var h hash.Hash
--		switch rr.Algorithm {
--		case ECDSAP256SHA256:
--			h = sha256.New()
--		case ECDSAP384SHA384:
--			h = sha512.New384()
--		}
--		io.WriteString(h, string(signeddata))
--		sighash := h.Sum(nil)
--		// Split sigbuf into the r and s coordinates
--		r := big.NewInt(0)
--		r.SetBytes(sigbuf[:len(sigbuf)/2])
--		s := big.NewInt(0)
--		s.SetBytes(sigbuf[len(sigbuf)/2:])
--		if ecdsa.Verify(pubkey, sighash, r, s) {
--			return nil
--		}
--		return ErrSig
--	}
--	// Unknown alg
--	return ErrAlg
--}
--
--// ValidityPeriod uses RFC1982 serial arithmetic to calculate
--// if a signature period is valid. If t is the zero time, the
--// current time is taken, otherwise t is.
--func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
--	var utc int64
--	if t.IsZero() {
--		utc = time.Now().UTC().Unix()
--	} else {
--		utc = t.UTC().Unix()
--	}
--	modi := (int64(rr.Inception) - utc) / year68
--	mode := (int64(rr.Expiration) - utc) / year68
--	ti := int64(rr.Inception) + (modi * year68)
--	te := int64(rr.Expiration) + (mode * year68)
--	return ti <= utc && utc <= te
--}
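
For example (continuing the signing sketch after Sign above), time validity is checked separately from cryptographic verification:

	// The zero time means "check against the current time".
	if !sig.ValidityPeriod(time.Time{}) {
		log.Println("RRSIG is outside its inception/expiration window")
	}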
--
--// Return the signature's sigdata (decoded from its base64 encoding) as a byte slice.
--func (s *RRSIG) sigBuf() []byte {
--	sigbuf, err := fromBase64([]byte(s.Signature))
--	if err != nil {
--		return nil
--	}
--	return sigbuf
--}
--
--// setPublicKeyInPrivate sets the public key in the private key.
--func (k *DNSKEY) setPublicKeyInPrivate(p PrivateKey) bool {
--	switch t := p.(type) {
--	case *dsa.PrivateKey:
--		x := k.publicKeyDSA()
--		if x == nil {
--			return false
--		}
--		t.PublicKey = *x
--	case *rsa.PrivateKey:
--		x := k.publicKeyRSA()
--		if x == nil {
--			return false
--		}
--		t.PublicKey = *x
--	case *ecdsa.PrivateKey:
--		x := k.publicKeyCurve()
--		if x == nil {
--			return false
--		}
--		t.PublicKey = *x
--	}
--	return true
--}
--
--// publicKeyRSA returns the RSA public key from a DNSKEY record.
--func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
--	keybuf, err := fromBase64([]byte(k.PublicKey))
--	if err != nil {
--		return nil
--	}
--
--	// RFC 2537/3110, section 2. RSA Public KEY Resource Records
--	// Length is in the 0th byte, unless it's zero; then it
--	// is in bytes 1 and 2 and is a 16-bit number.
--	explen := uint16(keybuf[0])
--	keyoff := 1
--	if explen == 0 {
--		explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
--		keyoff = 3
--	}
--	pubkey := new(rsa.PublicKey)
--
--	pubkey.N = big.NewInt(0)
--	shift := uint64((explen - 1) * 8)
--	expo := uint64(0)
--	for i := int(explen - 1); i > 0; i-- {
--		expo += uint64(keybuf[keyoff+i]) << shift
--		shift -= 8
--	}
--	// Remainder
--	expo += uint64(keybuf[keyoff])
--	if expo > 2<<31 {
--		// Larger expo than supported.
--		// println("dns: F5 primes (or larger) are not supported")
--		return nil
--	}
--	pubkey.E = int(expo)
--
--	pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
--	return pubkey
--}
--
--// publicKeyCurve returns the Curve public key from the DNSKEY record.
--func (k *DNSKEY) publicKeyCurve() *ecdsa.PublicKey {
--	keybuf, err := fromBase64([]byte(k.PublicKey))
--	if err != nil {
--		return nil
--	}
--	pubkey := new(ecdsa.PublicKey)
--	switch k.Algorithm {
--	case ECDSAP256SHA256:
--		pubkey.Curve = elliptic.P256()
--		if len(keybuf) != 64 {
--			// wrongly encoded key
--			return nil
--		}
--	case ECDSAP384SHA384:
--		pubkey.Curve = elliptic.P384()
--		if len(keybuf) != 96 {
--			// Wrongly encoded key
--			return nil
--		}
--	}
--	pubkey.X = big.NewInt(0)
--	pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
--	pubkey.Y = big.NewInt(0)
--	pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
--	return pubkey
--}
--
--func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
--	keybuf, err := fromBase64([]byte(k.PublicKey))
--	if err != nil {
--		return nil
--	}
--	if len(keybuf) < 22 {
--		return nil
--	}
--	t, keybuf := int(keybuf[0]), keybuf[1:]
--	size := 64 + t*8
--	q, keybuf := keybuf[:20], keybuf[20:]
--	if len(keybuf) != 3*size {
--		return nil
--	}
--	p, keybuf := keybuf[:size], keybuf[size:]
--	g, y := keybuf[:size], keybuf[size:]
--	pubkey := new(dsa.PublicKey)
--	pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
--	pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
--	pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
--	pubkey.Y = big.NewInt(0).SetBytes(y)
--	return pubkey
--}
--
--// Set the public key (the values E and N)
--func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
--	if _E == 0 || _N == nil {
--		return false
--	}
--	buf := exponentToBuf(_E)
--	buf = append(buf, _N.Bytes()...)
--	k.PublicKey = toBase64(buf)
--	return true
--}
--
--// Set the public key for Elliptic Curves
--func (k *DNSKEY) setPublicKeyCurve(_X, _Y *big.Int) bool {
--	if _X == nil || _Y == nil {
--		return false
--	}
--	var intlen int
--	switch k.Algorithm {
--	case ECDSAP256SHA256:
--		intlen = 32
--	case ECDSAP384SHA384:
--		intlen = 48
--	}
--	k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
--	return true
--}
--
--// Set the public key for DSA
--func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
--	if _Q == nil || _P == nil || _G == nil || _Y == nil {
--		return false
--	}
--	buf := dsaToBuf(_Q, _P, _G, _Y)
--	k.PublicKey = toBase64(buf)
--	return true
--}
--
--// Set the public key (the values E and N) for RSA
--// RFC 3110: Section 2. RSA Public KEY Resource Records
--func exponentToBuf(_E int) []byte {
--	var buf []byte
--	i := big.NewInt(int64(_E))
--	if len(i.Bytes()) < 256 {
--		buf = make([]byte, 1)
--		buf[0] = uint8(len(i.Bytes()))
--	} else {
--		buf = make([]byte, 3)
--		buf[0] = 0
--		buf[1] = uint8(len(i.Bytes()) >> 8)
--		buf[2] = uint8(len(i.Bytes()))
--	}
--	buf = append(buf, i.Bytes()...)
--	return buf
--}
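
A worked example of the RFC 3110 length encoding that exponentToBuf implements (illustrative; the helper itself is unexported):

	// E = 65537 has the big-endian bytes 0x01 0x00 0x01 (3 bytes, fewer than 256),
	// so the buffer begins with a single length octet:
	//     0x03 0x01 0x00 0x01
	// and the modulus bytes are appended after it. If the exponent needs 256 or
	// more bytes, the first octet is 0 and the next two octets hold the 16-bit length.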
--
--// Set the public key for X and Y for Curve. The two
--// values are just concatenated.
--func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
--	buf := intToBytes(_X, intlen)
--	buf = append(buf, intToBytes(_Y, intlen)...)
--	return buf
--}
--
--// Pack the DSA values Q, P, G and Y for the public key. The T
--// parameter is prefixed and the values are just concatenated.
--func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
--	t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
--	buf := []byte{byte(t)}
--	buf = append(buf, intToBytes(_Q, 20)...)
--	buf = append(buf, intToBytes(_P, 64+t*8)...)
--	buf = append(buf, intToBytes(_G, 64+t*8)...)
--	buf = append(buf, intToBytes(_Y, 64+t*8)...)
--	return buf
--}
--
--type wireSlice [][]byte
--
--func (p wireSlice) Len() int      { return len(p) }
--func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
--func (p wireSlice) Less(i, j int) bool {
--	_, ioff, _ := UnpackDomainName(p[i], 0)
--	_, joff, _ := UnpackDomainName(p[j], 0)
--	return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
--}
--
--// Return the raw signature data.
--func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
--	wires := make(wireSlice, len(rrset))
--	for i, r := range rrset {
--		r1 := r.copy()
--		r1.Header().Ttl = s.OrigTtl
--		labels := SplitDomainName(r1.Header().Name)
--		// 6.2. Canonical RR Form. (4) - wildcards
--		if len(labels) > int(s.Labels) {
--			// Wildcard
--			r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
--		}
--		// RFC 4034: 6.2.  Canonical RR Form. (2) - domain name to lowercase
--		r1.Header().Name = strings.ToLower(r1.Header().Name)
--		// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
--		//   NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
--		//   HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
--		//   SRV, DNAME, A6
--		switch x := r1.(type) {
--		case *NS:
--			x.Ns = strings.ToLower(x.Ns)
--		case *CNAME:
--			x.Target = strings.ToLower(x.Target)
--		case *SOA:
--			x.Ns = strings.ToLower(x.Ns)
--			x.Mbox = strings.ToLower(x.Mbox)
--		case *MB:
--			x.Mb = strings.ToLower(x.Mb)
--		case *MG:
--			x.Mg = strings.ToLower(x.Mg)
--		case *MR:
--			x.Mr = strings.ToLower(x.Mr)
--		case *PTR:
--			x.Ptr = strings.ToLower(x.Ptr)
--		case *MINFO:
--			x.Rmail = strings.ToLower(x.Rmail)
--			x.Email = strings.ToLower(x.Email)
--		case *MX:
--			x.Mx = strings.ToLower(x.Mx)
--		case *NAPTR:
--			x.Replacement = strings.ToLower(x.Replacement)
--		case *KX:
--			x.Exchanger = strings.ToLower(x.Exchanger)
--		case *SRV:
--			x.Target = strings.ToLower(x.Target)
--		case *DNAME:
--			x.Target = strings.ToLower(x.Target)
--		}
--		// 6.2. Canonical RR Form. (5) - origTTL
--		wire := make([]byte, r1.len()+1) // +1 to be safe(r)
--		off, err1 := PackRR(r1, wire, 0, nil, false)
--		if err1 != nil {
--			return nil, err1
--		}
--		wire = wire[:off]
--		wires[i] = wire
--	}
--	sort.Sort(wires)
--	for _, wire := range wires {
--		buf = append(buf, wire...)
--	}
--	return buf, nil
--}
--
--// Map for algorithm names.
--var AlgorithmToString = map[uint8]string{
--	RSAMD5:           "RSAMD5",
--	DH:               "DH",
--	DSA:              "DSA",
--	RSASHA1:          "RSASHA1",
--	DSANSEC3SHA1:     "DSA-NSEC3-SHA1",
--	RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
--	RSASHA256:        "RSASHA256",
--	RSASHA512:        "RSASHA512",
--	ECCGOST:          "ECC-GOST",
--	ECDSAP256SHA256:  "ECDSAP256SHA256",
--	ECDSAP384SHA384:  "ECDSAP384SHA384",
--	INDIRECT:         "INDIRECT",
--	PRIVATEDNS:       "PRIVATEDNS",
--	PRIVATEOID:       "PRIVATEOID",
--}
--
--// Map of algorithm strings.
--var StringToAlgorithm = reverseInt8(AlgorithmToString)
--
--// Map for hash names.
--var HashToString = map[uint8]string{
--	SHA1:   "SHA1",
--	SHA256: "SHA256",
--	GOST94: "GOST94",
--	SHA384: "SHA384",
--	SHA512: "SHA512",
--}
--
--// Map of hash strings.
--var StringToHash = reverseInt8(HashToString)
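
As an aside on the removed helpers above: exponentToBuf implements the RFC 3110 length prefix, where an exponent shorter than 256 octets gets a single length octet and anything longer gets a zero octet followed by a two-octet length. A minimal standalone sketch of that rule for the common exponent 65537 (the value the removed TestSignRSA checks for), illustrative only:

	// Illustrative sketch of the RFC 3110 exponent encoding for E = 65537.
	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		e := big.NewInt(65537).Bytes() // 0x01 0x00 0x01
		var buf []byte
		if len(e) < 256 {
			buf = []byte{uint8(len(e))} // a single length octet: 0x03
		} else {
			buf = []byte{0, uint8(len(e) >> 8), uint8(len(e))} // zero octet plus two length octets
		}
		buf = append(buf, e...)
		fmt.Printf("% x\n", buf) // prints: 03 01 00 01
	}
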
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go
-deleted file mode 100644
-index f6263d5..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/dnssec_test.go
-+++ /dev/null
-@@ -1,672 +0,0 @@
--package dns
--
--import (
--	"crypto/rsa"
--	"reflect"
--	"strings"
--	"testing"
--	"time"
--)
--
--func getKey() *DNSKEY {
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
--	return key
--}
--
--func getSoa() *SOA {
--	soa := new(SOA)
--	soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
--	soa.Ns = "open.nlnetlabs.nl."
--	soa.Mbox = "miekg.atoom.net."
--	soa.Serial = 1293945905
--	soa.Refresh = 14400
--	soa.Retry = 3600
--	soa.Expire = 604800
--	soa.Minttl = 86400
--	return soa
--}
--
--func TestGenerateEC(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	key := new(DNSKEY)
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = ECDSAP256SHA256
--	privkey, _ := key.Generate(256)
--	t.Logf("%s\n", key.String())
--	t.Logf("%s\n", key.PrivateKeyString(privkey))
--}
--
--func TestGenerateDSA(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	key := new(DNSKEY)
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = DSA
--	privkey, _ := key.Generate(1024)
--	t.Logf("%s\n", key.String())
--	t.Logf("%s\n", key.PrivateKeyString(privkey))
--}
--
--func TestGenerateRSA(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	key := new(DNSKEY)
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	privkey, _ := key.Generate(1024)
--	t.Logf("%s\n", key.String())
--	t.Logf("%s\n", key.PrivateKeyString(privkey))
--}
--
--func TestSecure(t *testing.T) {
--	soa := getSoa()
--
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.TypeCovered = TypeSOA
--	sig.Algorithm = RSASHA256
--	sig.Labels = 2
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.OrigTtl = 14400
--	sig.KeyTag = 12051
--	sig.SignerName = "miek.nl."
--	sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M="
--
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
--
--	// It should validate. The validity period is checked separately, so this will keep on working
--	if sig.Verify(key, []RR{soa}) != nil {
--		t.Log("failure to validate")
--		t.Fail()
--	}
--}
--
--func TestSignature(t *testing.T) {
--	sig := new(RRSIG)
--	sig.Hdr.Name = "miek.nl."
--	sig.Hdr.Class = ClassINET
--	sig.Hdr.Ttl = 3600
--	sig.TypeCovered = TypeDNSKEY
--	sig.Algorithm = RSASHA1
--	sig.Labels = 2
--	sig.OrigTtl = 4000
--	sig.Expiration = 1000 //Thu Jan  1 02:06:40 CET 1970
--	sig.Inception = 800   //Thu Jan  1 01:13:20 CET 1970
--	sig.KeyTag = 34641
--	sig.SignerName = "miek.nl."
--	sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"
--
--	// Should not be valid
--	if sig.ValidityPeriod(time.Now()) {
--		t.Log("should not be valid")
--		t.Fail()
--	}
--
--	sig.Inception = 315565800   //Tue Jan  1 10:10:00 CET 1980
--	sig.Expiration = 4102477800 //Fri Jan  1 10:10:00 CET 2100
--	if !sig.ValidityPeriod(time.Now()) {
--		t.Log("should be valid")
--		t.Fail()
--	}
--}
--
--func TestSignVerify(t *testing.T) {
--	// The record we want to sign
--	soa := new(SOA)
--	soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
--	soa.Ns = "open.nlnetlabs.nl."
--	soa.Mbox = "miekg.atoom.net."
--	soa.Serial = 1293945905
--	soa.Refresh = 14400
--	soa.Retry = 3600
--	soa.Expire = 604800
--	soa.Minttl = 86400
--
--	soa1 := new(SOA)
--	soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0}
--	soa1.Ns = "open.nlnetlabs.nl."
--	soa1.Mbox = "miekg.atoom.net."
--	soa1.Serial = 1293945905
--	soa1.Refresh = 14400
--	soa1.Retry = 3600
--	soa1.Expire = 604800
--	soa1.Minttl = 86400
--
--	srv := new(SRV)
--	srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0}
--	srv.Port = 1000
--	srv.Weight = 800
--	srv.Target = "web1.miek.nl."
--
--	// With this key
--	key := new(DNSKEY)
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	privkey, _ := key.Generate(512)
--
--	// Fill in the values of the Sig, before signing
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.TypeCovered = soa.Hdr.Rrtype
--	sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3
--	sig.OrigTtl = soa.Hdr.Ttl
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.KeyTag = key.KeyTag()   // Get the keytag from the key
--	sig.SignerName = key.Hdr.Name
--	sig.Algorithm = RSASHA256
--
--	for _, r := range []RR{soa, soa1, srv} {
--		if sig.Sign(privkey, []RR{r}) != nil {
--			t.Log("failure to sign the record")
--			t.Fail()
--			continue
--		}
--		if sig.Verify(key, []RR{r}) != nil {
--			t.Log("failure to validate")
--			t.Fail()
--			continue
--		}
--		t.Logf("validated: %s\n", r.Header().Name)
--	}
--}
--
--func Test65534(t *testing.T) {
--	t6 := new(RFC3597)
--	t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0}
--	t6.Rdata = "505D870001"
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	privkey, _ := key.Generate(1024)
--
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.TypeCovered = t6.Hdr.Rrtype
--	sig.Labels = uint8(CountLabel(t6.Hdr.Name))
--	sig.OrigTtl = t6.Hdr.Ttl
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.KeyTag = key.KeyTag()
--	sig.SignerName = key.Hdr.Name
--	sig.Algorithm = RSASHA256
--	if err := sig.Sign(privkey, []RR{t6}); err != nil {
--		t.Log(err)
--		t.Log("failure to sign the TYPE65534 record")
--		t.Fail()
--	}
--	if err := sig.Verify(key, []RR{t6}); err != nil {
--		t.Log(err)
--		t.Log("failure to validate")
--		t.Fail()
--	} else {
--		t.Logf("validated: %s\n", t6.Header().Name)
--	}
--}
--
--func TestDnskey(t *testing.T) {
--	//	f, _ := os.Open("t/Kmiek.nl.+010+05240.key")
--	pubkey, _ := ReadRR(strings.NewReader(`
--miek.nl.	IN	DNSKEY	256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b}
--`), "Kmiek.nl.+010+05240.key")
--	privkey, _ := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(`
--Private-key-format: v1.2
--Algorithm: 10 (RSASHA512)
--Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs=
--PublicExponent: AQAB
--PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE=
--Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ==
--Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw==
--Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ==
--Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw==
--Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ==
--`), "Kmiek.nl.+010+05240.private")
--	if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" {
--		t.Log("pubkey is not what we've read")
--		t.Fail()
--	}
--	// Coefficient looks fishy...
--	t.Logf("%s", pubkey.(*DNSKEY).PrivateKeyString(privkey))
--}
--
--func TestTag(t *testing.T) {
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 3600
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
--
--	tag := key.KeyTag()
--	if tag != 12051 {
--		t.Logf("wrong key tag: %d for key %v\n", tag, key)
--		t.Fail()
--	}
--}
--
--func TestKeyRSA(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 3600
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	priv, _ := key.Generate(2048)
--
--	soa := new(SOA)
--	soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
--	soa.Ns = "open.nlnetlabs.nl."
--	soa.Mbox = "miekg.atoom.net."
--	soa.Serial = 1293945905
--	soa.Refresh = 14400
--	soa.Retry = 3600
--	soa.Expire = 604800
--	soa.Minttl = 86400
--
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.TypeCovered = TypeSOA
--	sig.Algorithm = RSASHA256
--	sig.Labels = 2
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.OrigTtl = soa.Hdr.Ttl
--	sig.KeyTag = key.KeyTag()
--	sig.SignerName = key.Hdr.Name
--
--	if err := sig.Sign(priv, []RR{soa}); err != nil {
--		t.Logf("failed to sign")
--		t.Fail()
--		return
--	}
--	if err := sig.Verify(key, []RR{soa}); err != nil {
--		t.Logf("failed to verify")
--		t.Fail()
--	}
--}
--
--func TestKeyToDS(t *testing.T) {
--	key := new(DNSKEY)
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 3600
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = RSASHA256
--	key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz"
--
--	ds := key.ToDS(SHA1)
--	if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" {
--		t.Logf("wrong DS digest for SHA1\n%v\n", ds)
--		t.Fail()
--	}
--}
--
--func TestSignRSA(t *testing.T) {
--	pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ"
--
--	priv := `Private-key-format: v1.3
--Algorithm: 5 (RSASHA1)
--Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k=
--PublicExponent: AQAB
--PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk=
--Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw==
--Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw==
--Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ==
--Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw==
--Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw==
--Created: 20110302104537
--Publish: 20110302104537
--Activate: 20110302104537`
--
--	xk, _ := NewRR(pub)
--	k := xk.(*DNSKEY)
--	p, err := k.NewPrivateKey(priv)
--	if err != nil {
--		t.Logf("%v\n", err)
--		t.Fail()
--	}
--	switch priv := p.(type) {
--	case *rsa.PrivateKey:
--		if 65537 != priv.PublicKey.E {
--			t.Log("exponenent should be 65537")
--			t.Fail()
--		}
--	default:
--		t.Logf("we should have read an RSA key: %v", priv)
--		t.Fail()
--	}
--	if k.KeyTag() != 37350 {
--		t.Logf("%d %v\n", k.KeyTag(), k)
--		t.Log("keytag should be 37350")
--		t.Fail()
--	}
--
--	soa := new(SOA)
--	soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0}
--	soa.Ns = "open.nlnetlabs.nl."
--	soa.Mbox = "miekg.atoom.net."
--	soa.Serial = 1293945905
--	soa.Refresh = 14400
--	soa.Retry = 3600
--	soa.Expire = 604800
--	soa.Minttl = 86400
--
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.KeyTag = k.KeyTag()
--	sig.SignerName = k.Hdr.Name
--	sig.Algorithm = k.Algorithm
--
--	sig.Sign(p, []RR{soa})
--	if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" {
--		t.Log("signature is not correct")
--		t.Logf("%v\n", sig)
--		t.Fail()
--	}
--}
--
--func TestSignVerifyECDSA(t *testing.T) {
--	pub := `example.net. 3600 IN DNSKEY 257 3 14 (
--	xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1
--	w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8
--	/uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )`
--	priv := `Private-key-format: v1.2
--Algorithm: 14 (ECDSAP384SHA384)
--PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
--
--	eckey, err := NewRR(pub)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	// TODO: Create a separate test for this
--	ds := eckey.(*DNSKEY).ToDS(SHA384)
--	if ds.KeyTag != 10771 {
--		t.Fatal("wrong keytag on DS")
--	}
--	if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" {
--		t.Fatal("wrong DS Digest")
--	}
--	a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1")
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.Expiration, _ = StringToTime("20100909102025")
--	sig.Inception, _ = StringToTime("20100812102025")
--	sig.KeyTag = eckey.(*DNSKEY).KeyTag()
--	sig.SignerName = eckey.(*DNSKEY).Hdr.Name
--	sig.Algorithm = eckey.(*DNSKEY).Algorithm
--
--	if sig.Sign(privkey, []RR{a}) != nil {
--		t.Fatal("failure to sign the record")
--	}
--
--	if e := sig.Verify(eckey.(*DNSKEY), []RR{a}); e != nil {
--		t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
--			eckey.(*DNSKEY).String(),
--			a.String(),
--			sig.String(),
--			eckey.(*DNSKEY).PrivateKeyString(privkey),
--		)
--
--		t.Fatalf("failure to validate: %s", e.Error())
--	}
--}
--
--func TestSignVerifyECDSA2(t *testing.T) {
--	srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.")
--	if err != nil {
--		t.Fatalf(err.Error())
--	}
--	srv := srv1.(*SRV)
--
--	// With this key
--	key := new(DNSKEY)
--	key.Hdr.Rrtype = TypeDNSKEY
--	key.Hdr.Name = "miek.nl."
--	key.Hdr.Class = ClassINET
--	key.Hdr.Ttl = 14400
--	key.Flags = 256
--	key.Protocol = 3
--	key.Algorithm = ECDSAP256SHA256
--	privkey, err := key.Generate(256)
--	if err != nil {
--		t.Fatal("failure to generate key")
--	}
--
--	// Fill in the values of the Sig, before signing
--	sig := new(RRSIG)
--	sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0}
--	sig.TypeCovered = srv.Hdr.Rrtype
--	sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3
--	sig.OrigTtl = srv.Hdr.Ttl
--	sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05"
--	sig.Inception = 1293942305  // date -u '+%s' -d"2011-01-02 04:25:05"
--	sig.KeyTag = key.KeyTag()   // Get the keytag from the key
--	sig.SignerName = key.Hdr.Name
--	sig.Algorithm = ECDSAP256SHA256
--
--	if sig.Sign(privkey, []RR{srv}) != nil {
--		t.Fatal("failure to sign the record")
--	}
--
--	err = sig.Verify(key, []RR{srv})
--	if err != nil {
--		t.Logf("\n%s\n%s\n%s\n\n%s\n\n",
--			key.String(),
--			srv.String(),
--			sig.String(),
--			key.PrivateKeyString(privkey),
--		)
--
--		t.Fatal("Failure to validate:", err)
--	}
--}
--
--// Here the test vectors from the relevant RFCs are checked.
--// rfc6605 6.1
--func TestRFC6605P256(t *testing.T) {
--	exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 (
--                 GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb
--                 krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )`
--	exPriv := `Private-key-format: v1.2
--Algorithm: 13 (ECDSAP256SHA256)
--PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=`
--	rrDNSKEY, err := NewRR(exDNSKEY)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--
--	exDS := `example.net. 3600 IN DS 55648 13 2 (
--             b4c8c1fe2e7477127b27115656ad6256f424625bf5c1
--             e2770ce6d6e37df61d17 )`
--	rrDS, err := NewRR(exDS)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256)
--	if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
--		t.Errorf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
--	}
--
--	exA := `www.example.net. 3600 IN A 192.0.2.1`
--	exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 (
--                20100909100439 20100812100439 55648 example.net.
--                qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA
--                yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )`
--	rrA, err := NewRR(exA)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	rrRRSIG, err := NewRR(exRRSIG)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
--		t.Errorf("Failure to validate the spec RRSIG: %v", err)
--	}
--
--	ourRRSIG := &RRSIG{
--		Hdr: RR_Header{
--			Ttl: rrA.Header().Ttl,
--		},
--		KeyTag:     rrDNSKEY.(*DNSKEY).KeyTag(),
--		SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name,
--		Algorithm:  rrDNSKEY.(*DNSKEY).Algorithm,
--	}
--	ourRRSIG.Expiration, _ = StringToTime("20100909100439")
--	ourRRSIG.Inception, _ = StringToTime("20100812100439")
--	err = ourRRSIG.Sign(priv, []RR{rrA})
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--
--	if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
--		t.Errorf("Failure to validate our RRSIG: %v", err)
--	}
--
--	// Signatures are randomized
--	rrRRSIG.(*RRSIG).Signature = ""
--	ourRRSIG.Signature = ""
--	if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
--		t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
--	}
--}
--
--// rfc6605 6.2
--func TestRFC6605P384(t *testing.T) {
--	exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 14 (
--                 xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1
--                 w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8
--                 /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )`
--	exPriv := `Private-key-format: v1.2
--Algorithm: 14 (ECDSAP384SHA384)
--PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR`
--	rrDNSKEY, err := NewRR(exDNSKEY)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--
--	exDS := `example.net. 3600 IN DS 10771 14 4 (
--           72d7b62976ce06438e9c0bf319013cf801f09ecc84b8
--           d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94
--           6df983d6 )`
--	rrDS, err := NewRR(exDS)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384)
--	if !reflect.DeepEqual(ourDS, rrDS.(*DS)) {
--		t.Fatalf("DS record differs:\n%v\n%v\n", ourDS, rrDS.(*DS))
--	}
--
--	exA := `www.example.net. 3600 IN A 192.0.2.1`
--	exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 (
--           20100909102025 20100812102025 10771 example.net.
--           /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP
--           95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz
--           WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )`
--	rrA, err := NewRR(exA)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	rrRRSIG, err := NewRR(exRRSIG)
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--	if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
--		t.Errorf("Failure to validate the spec RRSIG: %v", err)
--	}
--
--	ourRRSIG := &RRSIG{
--		Hdr: RR_Header{
--			Ttl: rrA.Header().Ttl,
--		},
--		KeyTag:     rrDNSKEY.(*DNSKEY).KeyTag(),
--		SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name,
--		Algorithm:  rrDNSKEY.(*DNSKEY).Algorithm,
--	}
--	ourRRSIG.Expiration, _ = StringToTime("20100909102025")
--	ourRRSIG.Inception, _ = StringToTime("20100812102025")
--	err = ourRRSIG.Sign(priv, []RR{rrA})
--	if err != nil {
--		t.Fatal(err.Error())
--	}
--
--	if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil {
--		t.Errorf("Failure to validate our RRSIG: %v", err)
--	}
--
--	// Signatures are randomized
--	rrRRSIG.(*RRSIG).Signature = ""
--	ourRRSIG.Signature = ""
--	if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) {
--		t.Fatalf("RRSIG record differs:\n%v\n%v\n", ourRRSIG, rrRRSIG.(*RRSIG))
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go b/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go
-deleted file mode 100644
-index 09986a5..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/dyn_test.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--package dns
--
--// Find better solution
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/edns.go b/Godeps/_workspace/src/github.com/miekg/dns/edns.go
-deleted file mode 100644
-index 8b676e6..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/edns.go
-+++ /dev/null
-@@ -1,501 +0,0 @@
--// EDNS0
--//
--// EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
--// by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
--// abused.
--// Basic use pattern for creating an (empty) OPT RR:
--//
--//	o := new(dns.OPT)
--//	o.Hdr.Name = "." // MUST be the root zone, per definition.
--//	o.Hdr.Rrtype = dns.TypeOPT
--//
--// The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
--// interfaces. Currently only a few have been standardized: EDNS0_NSID
--// (RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
--// that these options may be combined in an OPT RR.
--// Basic use pattern for a server to check if (and which) options are set:
--//
--//	// o is a dns.OPT
--//	for _, s := range o.Option {
--//		switch e := s.(type) {
--//		case *dns.EDNS0_NSID:
--//			// do stuff with e.Nsid
--//		case *dns.EDNS0_SUBNET:
--//			// access e.Family, e.Address, etc.
--//		}
--//	}
--package dns
--
--import (
--	"encoding/hex"
--	"errors"
--	"net"
--	"strconv"
--)
--
--// EDNS0 Option codes.
--const (
--	EDNS0LLQ         = 0x1     // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
--	EDNS0UL          = 0x2     // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
--	EDNS0NSID        = 0x3     // nsid (RFC5001)
--	EDNS0DAU         = 0x5     // DNSSEC Algorithm Understood
--	EDNS0DHU         = 0x6     // DS Hash Understood
--	EDNS0N3U         = 0x7     // NSEC3 Hash Understood
--	EDNS0SUBNET      = 0x8     // client-subnet (draft-vandergaast-edns-client-subnet-02)
--	EDNS0EXPIRE      = 0x9     // EDNS0 expire
--	EDNS0SUBNETDRAFT = 0x50fa  // Don't use! Use EDNS0SUBNET
--	_DO              = 1 << 15 // dnssec ok
--)
--
--type OPT struct {
--	Hdr    RR_Header
--	Option []EDNS0 `dns:"opt"`
--}
--
--func (rr *OPT) Header() *RR_Header {
--	return &rr.Hdr
--}
--
--func (rr *OPT) String() string {
--	s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
--	if rr.Do() {
--		s += "flags: do; "
--	} else {
--		s += "flags: ; "
--	}
--	s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
--
--	for _, o := range rr.Option {
--		switch o.(type) {
--		case *EDNS0_NSID:
--			s += "\n; NSID: " + o.String()
--			h, e := o.pack()
--			var r string
--			if e == nil {
--				for _, c := range h {
--					r += "(" + string(c) + ")"
--				}
--				s += "  " + r
--			}
--		case *EDNS0_SUBNET:
--			s += "\n; SUBNET: " + o.String()
--			if o.(*EDNS0_SUBNET).DraftOption {
--				s += " (draft)"
--			}
--		case *EDNS0_UL:
--			s += "\n; UPDATE LEASE: " + o.String()
--		case *EDNS0_LLQ:
--			s += "\n; LONG LIVED QUERIES: " + o.String()
--		case *EDNS0_DAU:
--			s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
--		case *EDNS0_DHU:
--			s += "\n; DS HASH UNDERSTOOD: " + o.String()
--		case *EDNS0_N3U:
--			s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
--		}
--	}
--	return s
--}
--
--func (rr *OPT) len() int {
--	l := rr.Hdr.len()
--	for i := 0; i < len(rr.Option); i++ {
--		lo, _ := rr.Option[i].pack()
--		l += 2 + len(lo)
--	}
--	return l
--}
--
--func (rr *OPT) copy() RR {
--	return &OPT{*rr.Hdr.copyHeader(), rr.Option}
--}
--
--// return the old value -> delete SetVersion?
--
--// Version returns the EDNS version used. Only zero is defined.
--func (rr *OPT) Version() uint8 {
--	return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
--}
--
--// SetVersion sets the version of EDNS. This is usually zero.
--func (rr *OPT) SetVersion(v uint8) {
--	rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
--}
--
--// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
--func (rr *OPT) ExtendedRcode() uint8 {
--	return uint8((rr.Hdr.Ttl & 0xFF000000) >> 24)
--}
--
--// SetExtendedRcode sets the EDNS extended RCODE field.
--func (rr *OPT) SetExtendedRcode(v uint8) {
--	rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
--}
--
--// UDPSize returns the UDP buffer size.
--func (rr *OPT) UDPSize() uint16 {
--	return rr.Hdr.Class
--}
--
--// SetUDPSize sets the UDP buffer size.
--func (rr *OPT) SetUDPSize(size uint16) {
--	rr.Hdr.Class = size
--}
--
--// Do returns the value of the DO (DNSSEC OK) bit.
--func (rr *OPT) Do() bool {
--	return rr.Hdr.Ttl&_DO == _DO
--}
--
--// SetDo sets the DO (DNSSEC OK) bit.
--func (rr *OPT) SetDo() {
--	rr.Hdr.Ttl |= _DO
--}
--
--// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to
--// it.
--type EDNS0 interface {
--	// Option returns the option code for the option.
--	Option() uint16
--	// pack returns the bytes of the option data.
--	pack() ([]byte, error)
--	// unpack sets the data as found in the buffer. It also sets
--	// the length of the slice as the length of the option data.
--	unpack([]byte) error
--	// String returns the string representation of the option.
--	String() string
--}
--
--// The nsid EDNS0 option is used to retrieve a nameserver
--// identifier. When sending a request, Nsid must be set to the empty string.
--// The identifier is an opaque string encoded as hex.
--// Basic use pattern for creating an nsid option:
--//
--//	o := new(dns.OPT)
--//	o.Hdr.Name = "."
--//	o.Hdr.Rrtype = dns.TypeOPT
--//	e := new(dns.EDNS0_NSID)
--//	e.Code = dns.EDNS0NSID
--//	e.Nsid = "AA"
--//	o.Option = append(o.Option, e)
--type EDNS0_NSID struct {
--	Code uint16 // Always EDNS0NSID
--	Nsid string // This string needs to be hex encoded
--}
--
--func (e *EDNS0_NSID) pack() ([]byte, error) {
--	h, err := hex.DecodeString(e.Nsid)
--	if err != nil {
--		return nil, err
--	}
--	return h, nil
--}
--
--func (e *EDNS0_NSID) Option() uint16        { return EDNS0NSID }
--func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
--func (e *EDNS0_NSID) String() string        { return string(e.Nsid) }
--
--// The subnet EDNS0 option is used to give the remote nameserver
--// an idea of where the client lives. It can then give back a different
--// answer depending on the location or network topology.
--// Basic use pattern for creating a subnet option:
--//
--//	o := new(dns.OPT)
--//	o.Hdr.Name = "."
--//	o.Hdr.Rrtype = dns.TypeOPT
--//	e := new(dns.EDNS0_SUBNET)
--//	e.Code = dns.EDNS0SUBNET
--//	e.Family = 1	// 1 for IPv4 source address, 2 for IPv6
--//	e.NetMask = 32	// 32 for IPV4, 128 for IPv6
--//	e.SourceScope = 0
--//	e.Address = net.ParseIP("127.0.0.1").To4()	// for IPv4
--//	// e.Address = net.ParseIP("2001:7b8:32a::2")	// for IPV6
--//	o.Option = append(o.Option, e)
--type EDNS0_SUBNET struct {
--	Code          uint16 // Always EDNS0SUBNET
--	Family        uint16 // 1 for IPv4, 2 for IPv6
--	SourceNetmask uint8
--	SourceScope   uint8
--	Address       net.IP
--	DraftOption   bool // Set to true if using the old (0x50fa) option code
--}
--
--func (e *EDNS0_SUBNET) Option() uint16 {
--	if e.DraftOption {
--		return EDNS0SUBNETDRAFT
--	}
--	return EDNS0SUBNET
--}
--
--func (e *EDNS0_SUBNET) pack() ([]byte, error) {
--	b := make([]byte, 4)
--	b[0], b[1] = packUint16(e.Family)
--	b[2] = e.SourceNetmask
--	b[3] = e.SourceScope
--	switch e.Family {
--	case 1:
--		if e.SourceNetmask > net.IPv4len*8 {
--			return nil, errors.New("dns: bad netmask")
--		}
--		ip := make([]byte, net.IPv4len)
--		a := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
--		for i := 0; i < net.IPv4len; i++ {
--			if i+1 > len(e.Address) {
--				break
--			}
--			ip[i] = a[i]
--		}
--		needLength := e.SourceNetmask / 8
--		if e.SourceNetmask%8 > 0 {
--			needLength++
--		}
--		ip = ip[:needLength]
--		b = append(b, ip...)
--	case 2:
--		if e.SourceNetmask > net.IPv6len*8 {
--			return nil, errors.New("dns: bad netmask")
--		}
--		ip := make([]byte, net.IPv6len)
--		a := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
--		for i := 0; i < net.IPv6len; i++ {
--			if i+1 > len(e.Address) {
--				break
--			}
--			ip[i] = a[i]
--		}
--		needLength := e.SourceNetmask / 8
--		if e.SourceNetmask%8 > 0 {
--			needLength++
--		}
--		ip = ip[:needLength]
--		b = append(b, ip...)
--	default:
--		return nil, errors.New("dns: bad address family")
--	}
--	return b, nil
--}
--
--func (e *EDNS0_SUBNET) unpack(b []byte) error {
--	lb := len(b)
--	if lb < 4 {
--		return ErrBuf
--	}
--	e.Family, _ = unpackUint16(b, 0)
--	e.SourceNetmask = b[2]
--	e.SourceScope = b[3]
--	switch e.Family {
--	case 1:
--		addr := make([]byte, 4)
--		for i := 0; i < int(e.SourceNetmask/8); i++ {
--			if i >= len(addr) || 4+i >= len(b) {
--				return ErrBuf
--			}
--			addr[i] = b[4+i]
--		}
--		e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
--	case 2:
--		addr := make([]byte, 16)
--		for i := 0; i < int(e.SourceNetmask/8); i++ {
--			if i >= len(addr) || 4+i >= len(b) {
--				return ErrBuf
--			}
--			addr[i] = b[4+i]
--		}
--		e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
--			addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
--			addr[11], addr[12], addr[13], addr[14], addr[15]}
--	}
--	return nil
--}
--
--func (e *EDNS0_SUBNET) String() (s string) {
--	if e.Address == nil {
--		s = "<nil>"
--	} else if e.Address.To4() != nil {
--		s = e.Address.String()
--	} else {
--		s = "[" + e.Address.String() + "]"
--	}
--	s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
--	return
--}
--
--// The UL (Update Lease) EDNS0 (draft RFC) option is used to tell the server to set
--// an expiration on an update RR. This is helpful for clients that cannot clean
--// up after themselves. This is a draft RFC and more information can be found at
--// http://files.dns-sd.org/draft-sekar-dns-ul.txt
--//
--//	o := new(dns.OPT)
--//	o.Hdr.Name = "."
--//	o.Hdr.Rrtype = dns.TypeOPT
--//	e := new(dns.EDNS0_UL)
--//	e.Code = dns.EDNS0UL
--//	e.Lease = 120 // in seconds
--//	o.Option = append(o.Option, e)
--type EDNS0_UL struct {
--	Code  uint16 // Always EDNS0UL
--	Lease uint32
--}
--
--func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
--func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
--
--// Copied: http://golang.org/src/pkg/net/dnsmsg.go
--func (e *EDNS0_UL) pack() ([]byte, error) {
--	b := make([]byte, 4)
--	b[0] = byte(e.Lease >> 24)
--	b[1] = byte(e.Lease >> 16)
--	b[2] = byte(e.Lease >> 8)
--	b[3] = byte(e.Lease)
--	return b, nil
--}
--
--func (e *EDNS0_UL) unpack(b []byte) error {
--	if len(b) < 4 {
--		return ErrBuf
--	}
--	e.Lease = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
--	return nil
--}
--
--// Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
--// Implemented for completeness, as the EDNS0 type code is assigned.
--type EDNS0_LLQ struct {
--	Code      uint16 // Always EDNS0LLQ
--	Version   uint16
--	Opcode    uint16
--	Error     uint16
--	Id        uint64
--	LeaseLife uint32
--}
--
--func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
--
--func (e *EDNS0_LLQ) pack() ([]byte, error) {
--	b := make([]byte, 18)
--	b[0], b[1] = packUint16(e.Version)
--	b[2], b[3] = packUint16(e.Opcode)
--	b[4], b[5] = packUint16(e.Error)
--	b[6] = byte(e.Id >> 56)
--	b[7] = byte(e.Id >> 48)
--	b[8] = byte(e.Id >> 40)
--	b[9] = byte(e.Id >> 32)
--	b[10] = byte(e.Id >> 24)
--	b[11] = byte(e.Id >> 16)
--	b[12] = byte(e.Id >> 8)
--	b[13] = byte(e.Id)
--	b[14] = byte(e.LeaseLife >> 24)
--	b[15] = byte(e.LeaseLife >> 16)
--	b[16] = byte(e.LeaseLife >> 8)
--	b[17] = byte(e.LeaseLife)
--	return b, nil
--}
--
--func (e *EDNS0_LLQ) unpack(b []byte) error {
--	if len(b) < 18 {
--		return ErrBuf
--	}
--	e.Version, _ = unpackUint16(b, 0)
--	e.Opcode, _ = unpackUint16(b, 2)
--	e.Error, _ = unpackUint16(b, 4)
--	e.Id = uint64(b[6])<<56 | uint64(b[6+1])<<48 | uint64(b[6+2])<<40 |
--		uint64(b[6+3])<<32 | uint64(b[6+4])<<24 | uint64(b[6+5])<<16 | uint64(b[6+6])<<8 | uint64(b[6+7])
--	e.LeaseLife = uint32(b[14])<<24 | uint32(b[14+1])<<16 | uint32(b[14+2])<<8 | uint32(b[14+3])
--	return nil
--}
--
--func (e *EDNS0_LLQ) String() string {
--	s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
--		" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
--		" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
--	return s
--}
--
--type EDNS0_DAU struct {
--	Code    uint16 // Always EDNS0DAU
--	AlgCode []uint8
--}
--
--func (e *EDNS0_DAU) Option() uint16        { return EDNS0DAU }
--func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
--func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
--
--func (e *EDNS0_DAU) String() string {
--	s := ""
--	for i := 0; i < len(e.AlgCode); i++ {
--		if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
--			s += " " + a
--		} else {
--			s += " " + strconv.Itoa(int(e.AlgCode[i]))
--		}
--	}
--	return s
--}
--
--type EDNS0_DHU struct {
--	Code    uint16 // Always EDNS0DHU
--	AlgCode []uint8
--}
--
--func (e *EDNS0_DHU) Option() uint16        { return EDNS0DHU }
--func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
--func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
--
--func (e *EDNS0_DHU) String() string {
--	s := ""
--	for i := 0; i < len(e.AlgCode); i++ {
--		if a, ok := HashToString[e.AlgCode[i]]; ok {
--			s += " " + a
--		} else {
--			s += " " + strconv.Itoa(int(e.AlgCode[i]))
--		}
--	}
--	return s
--}
--
--type EDNS0_N3U struct {
--	Code    uint16 // Always EDNS0N3U
--	AlgCode []uint8
--}
--
--func (e *EDNS0_N3U) Option() uint16        { return EDNS0N3U }
--func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
--func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
--
--func (e *EDNS0_N3U) String() string {
--	// Re-use the hash map
--	s := ""
--	for i := 0; i < len(e.AlgCode); i++ {
--		if a, ok := HashToString[e.AlgCode[i]]; ok {
--			s += " " + a
--		} else {
--			s += " " + strconv.Itoa(int(e.AlgCode[i]))
--		}
--	}
--	return s
--}
--
--type EDNS0_EXPIRE struct {
--	Code   uint16 // Always EDNS0EXPIRE
--	Expire uint32
--}
--
--func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
--func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
--
--func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
--	b := make([]byte, 4)
--	b[0] = byte(e.Expire >> 24)
--	b[1] = byte(e.Expire >> 16)
--	b[2] = byte(e.Expire >> 8)
--	b[3] = byte(e.Expire)
--	return b, nil
--}
--
--func (e *EDNS0_EXPIRE) unpack(b []byte) error {
--	if len(b) < 4 {
--		return ErrBuf
--	}
--	e.Expire = uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
--	return nil
--}
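
The option types above are attached to a query through an OPT record in the additional section. A minimal client-side sketch, assuming the dns.Msg type and its Extra slice from the rest of the vendored package, could look like this (illustrative only):

	package main

	import (
		"fmt"

		"github.com/miekg/dns"
	)

	func main() {
		m := new(dns.Msg)
		m.SetQuestion("miek.nl.", dns.TypeSOA)

		o := new(dns.OPT)
		o.Hdr.Name = "." // MUST be the root zone, per definition
		o.Hdr.Rrtype = dns.TypeOPT
		o.SetUDPSize(4096) // advertise a larger UDP buffer
		o.SetDo()          // set the DNSSEC OK bit

		e := new(dns.EDNS0_NSID)
		e.Code = dns.EDNS0NSID
		e.Nsid = "" // empty when asking the server to identify itself
		o.Option = append(o.Option, e)

		m.Extra = append(m.Extra, o) // the OPT RR goes in the additional section
		fmt.Println(m)
	}

For the common "bigger buffer plus DO bit" case, m.SetEdns0(4096, true) is the one-line shorthand used by the removed ExampleDS further down.
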
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go b/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go
-deleted file mode 100644
-index 8ee82ab..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/edns_test.go
-+++ /dev/null
-@@ -1,48 +0,0 @@
--package dns
--
--import "testing"
--
--func TestOPTTtl(t *testing.T) {
--	e := &OPT{}
--	e.Hdr.Name = "."
--	e.Hdr.Rrtype = TypeOPT
--
--	if e.Do() {
--		t.Fail()
--	}
--
--	e.SetDo()
--	if !e.Do() {
--		t.Fail()
--	}
--
--	oldTtl := e.Hdr.Ttl
--
--	if e.Version() != 0 {
--		t.Fail()
--	}
--
--	e.SetVersion(42)
--	if e.Version() != 42 {
--		t.Fail()
--	}
--
--	e.SetVersion(0)
--	if e.Hdr.Ttl != oldTtl {
--		t.Fail()
--	}
--
--	if e.ExtendedRcode() != 0 {
--		t.Fail()
--	}
--
--	e.SetExtendedRcode(42)
--	if e.ExtendedRcode() != 42 {
--		t.Fail()
--	}
--
--	e.SetExtendedRcode(0)
--	if e.Hdr.Ttl != oldTtl {
--		t.Fail()
--	}
--}
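
The test above exercises the bit layout the OPT accessors hide: the extended RCODE lives in the top 8 bits of the header TTL, the version in the next 8, and the DO flag at bit 15. A standalone sketch of the same packing (illustrative only):

	package main

	import (
		"fmt"

		"github.com/miekg/dns"
	)

	func main() {
		o := new(dns.OPT)
		o.Hdr.Name = "."
		o.Hdr.Rrtype = dns.TypeOPT

		o.SetVersion(1)        // TTL bits 16-23
		o.SetExtendedRcode(42) // TTL bits 24-31
		o.SetDo()              // TTL bit 15 (DNSSEC OK)

		// Prints: ttl=0x2a018000 version=1 rcode=42 do=true
		fmt.Printf("ttl=%#x version=%d rcode=%d do=%v\n",
			o.Hdr.Ttl, o.Version(), o.ExtendedRcode(), o.Do())
	}
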
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go b/Godeps/_workspace/src/github.com/miekg/dns/example_test.go
-deleted file mode 100644
-index 1578a4d..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/example_test.go
-+++ /dev/null
-@@ -1,147 +0,0 @@
--package dns_test
--
--import (
--	"errors"
--	"fmt"
--	"github.com/miekg/dns"
--	"log"
--	"net"
--)
--
--// Retrieve the MX records for miek.nl.
--func ExampleMX() {
--	config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
--	c := new(dns.Client)
--	m := new(dns.Msg)
--	m.SetQuestion("miek.nl.", dns.TypeMX)
--	m.RecursionDesired = true
--	r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
--	if err != nil {
--		return
--	}
--	if r.Rcode != dns.RcodeSuccess {
--		return
--	}
--	for _, a := range r.Answer {
--		if mx, ok := a.(*dns.MX); ok {
--			fmt.Printf("%s\n", mx.String())
--		}
--	}
--}
--
--// Retrieve the DNSKEY records of a zone and convert them
--// to DS records for SHA1, SHA256 and SHA384.
--func ExampleDS(zone string) {
--	config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
--	c := new(dns.Client)
--	m := new(dns.Msg)
--	if zone == "" {
--		zone = "miek.nl"
--	}
--	m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY)
--	m.SetEdns0(4096, true)
--	r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
--	if err != nil {
--		return
--	}
--	if r.Rcode != dns.RcodeSuccess {
--		return
--	}
--	for _, k := range r.Answer {
--		if key, ok := k.(*dns.DNSKEY); ok {
--			for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} {
--				fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags)
--			}
--		}
--	}
--}
--
--const TypeAPAIR = 0x0F99
--
--type APAIR struct {
--	addr [2]net.IP
--}
--
--func NewAPAIR() dns.PrivateRdata { return new(APAIR) }
--
--func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() }
--func (rd *APAIR) Parse(txt []string) error {
--	if len(txt) != 2 {
--		return errors.New("two addresses required for APAIR")
--	}
--	for i, s := range txt {
--		ip := net.ParseIP(s)
--		if ip == nil {
--			return errors.New("invalid IP in APAIR text representation")
--		}
--		rd.addr[i] = ip
--	}
--	return nil
--}
--
--func (rd *APAIR) Pack(buf []byte) (int, error) {
--	b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...)
--	n := copy(buf, b)
--	if n != len(b) {
--		return n, dns.ErrBuf
--	}
--	return n, nil
--}
--
--func (rd *APAIR) Unpack(buf []byte) (int, error) {
--	ln := net.IPv4len * 2
--	if len(buf) != ln {
--		return 0, errors.New("invalid length of APAIR rdata")
--	}
--	cp := make([]byte, ln)
--	copy(cp, buf) // clone bytes to use them in IPs
--
--	rd.addr[0] = net.IP(cp[:4])
--	rd.addr[1] = net.IP(cp[4:])
--
--	return len(buf), nil
--}
--
--func (rd *APAIR) Copy(dest dns.PrivateRdata) error {
--	cp := make([]byte, rd.Len())
--	_, err := rd.Pack(cp)
--	if err != nil {
--		return err
--	}
--
--	d := dest.(*APAIR)
--	d.addr[0] = net.IP(cp[:4])
--	d.addr[1] = net.IP(cp[4:])
--	return nil
--}
--
--func (rd *APAIR) Len() int {
--	return net.IPv4len * 2
--}
--
--func ExamplePrivateHandle() {
--	dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR)
--	defer dns.PrivateHandleRemove(TypeAPAIR)
--
--	rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4    1.2.3.5)")
--	if err != nil {
--		log.Fatal("could not parse APAIR record: ", err)
--	}
--	fmt.Println(rr)
--	// Output: miek.nl.	3600	IN	APAIR	1.2.3.4 1.2.3.5
--
--	m := new(dns.Msg)
--	m.Id = 12345
--	m.SetQuestion("miek.nl.", TypeAPAIR)
--	m.Answer = append(m.Answer, rr)
--
--	fmt.Println(m)
--	// ;; opcode: QUERY, status: NOERROR, id: 12345
--	// ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
--	//
--	// ;; QUESTION SECTION:
--	// ;miek.nl.	IN	 APAIR
--	//
--	// ;; ANSWER SECTION:
--	// miek.nl.	3600	IN	APAIR	1.2.3.4 1.2.3.5
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go
-deleted file mode 100644
-index 8833cd9..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/idn/example_test.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--package idn_test
--
--import (
--	"fmt"
--	"github.com/miekg/dns/idn"
--)
--
--func ExampleToPunycode() {
--	name := "インターネット.テスト"
--	fmt.Printf("%s -> %s", name, idn.ToPunycode(name))
--	// Output: インターネット.テスト -> xn--eckucmux0ukc.xn--zckzah
--}
--
--func ExampleFromPunycode() {
--	name := "xn--mgbaja8a1hpac.xn--mgbachtv"
--	fmt.Printf("%s -> %s", name, idn.FromPunycode(name))
--	// Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go
-deleted file mode 100644
-index faab402..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode.go
-+++ /dev/null
-@@ -1,268 +0,0 @@
--// Package idn implements encoding from and to punycode as specified by RFC 3492.
--package idn
--
--import (
--	"bytes"
--	"github.com/miekg/dns"
--	"strings"
--	"unicode"
--)
--
--// Implementation idea from the RFC itself and from IDNA::Punycode created by
--// Tatsuhiko Miyagawa <miyagawa at bulknews.net> and released under Perl Artistic
--// License in 2002.
--
--const (
--	_MIN  rune = 1
--	_MAX  rune = 26
--	_SKEW rune = 38
--	_BASE rune = 36
--	_BIAS rune = 72
--	_N    rune = 128
--	_DAMP rune = 700
--
--	_DELIMITER = '-'
--	_PREFIX    = "xn--"
--)
--
--// ToPunycode converts unicode domain names to DNS-appropriate punycode names.
--// This function would return an incorrect result for non-canonical
--// unicode strings.
--func ToPunycode(s string) string {
--	tokens := dns.SplitDomainName(s)
--	switch {
--	case s == "":
--		return ""
--	case tokens == nil: // s == .
--		return "."
--	case s[len(s)-1] == '.':
--		tokens = append(tokens, "")
--	}
--
--	for i := range tokens {
--		tokens[i] = string(encode([]byte(tokens[i])))
--	}
--	return strings.Join(tokens, ".")
--}
--
--// FromPunycode returns unicode domain name from provided punycode string.
--func FromPunycode(s string) string {
--	tokens := dns.SplitDomainName(s)
--	switch {
--	case s == "":
--		return ""
--	case tokens == nil: // s == .
--		return "."
--	case s[len(s)-1] == '.':
--		tokens = append(tokens, "")
--	}
--	for i := range tokens {
--		tokens[i] = string(decode([]byte(tokens[i])))
--	}
--	return strings.Join(tokens, ".")
--}
--
--// digitval converts a single byte into the meaningful value used to calculate the decoded unicode character.
--const errdigit = 0xffff
--
--func digitval(code rune) rune {
--	switch {
--	case code >= 'A' && code <= 'Z':
--		return code - 'A'
--	case code >= 'a' && code <= 'z':
--		return code - 'a'
--	case code >= '0' && code <= '9':
--		return code - '0' + 26
--	}
--	return errdigit
--}
--
--// lettercode finds BASE36 byte (a-z0-9) based on calculated number.
--func lettercode(digit rune) rune {
--	switch {
--	case digit >= 0 && digit <= 25:
--		return digit + 'a'
--	case digit >= 26 && digit <= 36:
--		return digit - 26 + '0'
--	}
--	panic("dns: not reached")
--}
--
--// adapt calculates next bias to be used for next iteration delta.
--func adapt(delta rune, numpoints int, firsttime bool) rune {
--	if firsttime {
--		delta /= _DAMP
--	} else {
--		delta /= 2
--	}
--
--	var k rune
--	for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE {
--		delta /= _BASE - _MIN
--	}
--
--	return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW)
--}
--
--// next finds minimal rune (one with lowest codepoint value) that should be equal or above boundary.
--func next(b []rune, boundary rune) rune {
--	if len(b) == 0 {
--		panic("dns: invalid set of runes to determine next one")
--	}
--	m := b[0]
--	for _, x := range b[1:] {
--		if x >= boundary && (m < boundary || x < m) {
--			m = x
--		}
--	}
--	return m
--}
--
--// preprune converts a unicode rune to lower case. At this time it does not
--// support everything described in the RFCs.
--func preprune(r rune) rune {
--	if unicode.IsUpper(r) {
--		r = unicode.ToLower(r)
--	}
--	return r
--}
--
--// tfunc is a function that helps calculate each character weight
--func tfunc(k, bias rune) rune {
--	switch {
--	case k <= bias:
--		return _MIN
--	case k >= bias+_MAX:
--		return _MAX
--	}
--	return k - bias
--}
--
--// encode transforms Unicode input bytes (that represent DNS label) into punycode bytestream
--func encode(input []byte) []byte {
--	n, bias := _N, _BIAS
--
--	b := bytes.Runes(input)
--	for i := range b {
--		b[i] = preprune(b[i])
--	}
--
--	basic := make([]byte, 0, len(b))
--	for _, ltr := range b {
--		if ltr <= 0x7f {
--			basic = append(basic, byte(ltr))
--		}
--	}
--	basiclen := len(basic)
--	fulllen := len(b)
--	if basiclen == fulllen {
--		return basic
--	}
--
--	var out bytes.Buffer
--
--	out.WriteString(_PREFIX)
--	if basiclen > 0 {
--		out.Write(basic)
--		out.WriteByte(_DELIMITER)
--	}
--
--	var (
--		ltr, nextltr rune
--		delta, q     rune // delta calculation (see rfc)
--		t, k, cp     rune // weight and codepoint calculation
--	)
--
--	s := &bytes.Buffer{}
--	for h := basiclen; h < fulllen; n, delta = n+1, delta+1 {
--		nextltr = next(b, n)
--		s.Truncate(0)
--		s.WriteRune(nextltr)
--		delta, n = delta+(nextltr-n)*rune(h+1), nextltr
--
--		for _, ltr = range b {
--			if ltr < n {
--				delta++
--			}
--			if ltr == n {
--				q = delta
--				for k = _BASE; ; k += _BASE {
--					t = tfunc(k, bias)
--					if q < t {
--						break
--					}
--					cp = t + ((q - t) % (_BASE - t))
--					out.WriteRune(lettercode(cp))
--					q = (q - t) / (_BASE - t)
--				}
--
--				out.WriteRune(lettercode(q))
--
--				bias = adapt(delta, h+1, h == basiclen)
--				h, delta = h+1, 0
--			}
--		}
--	}
--	return out.Bytes()
--}
--
--// decode transforms punycode input bytes (that represent DNS label) into Unicode bytestream
--func decode(b []byte) []byte {
--	src := b // b would move and we need to keep it
--
--	n, bias := _N, _BIAS
--	if !bytes.HasPrefix(b, []byte(_PREFIX)) {
--		return b
--	}
--	out := make([]rune, 0, len(b))
--	b = b[len(_PREFIX):]
--	for pos, x := range b {
--		if x == _DELIMITER {
--			out = append(out, bytes.Runes(b[:pos])...)
--			b = b[pos+1:] // trim source string
--			break
--		}
--	}
--	if len(b) == 0 {
--		return src
--	}
--	var (
--		i, oldi, w rune
--		ch         byte
--		t, digit   rune
--		ln         int
--	)
--
--	for i = 0; len(b) > 0; i++ {
--		oldi, w = i, 1
--		for k := _BASE; len(b) > 0; k += _BASE {
--			ch, b = b[0], b[1:]
--			digit = digitval(rune(ch))
--			if digit == errdigit {
--				return src
--			}
--			i += digit * w
--
--			t = tfunc(k, bias)
--			if digit < t {
--				break
--			}
--
--			w *= _BASE - t
--		}
--		ln = len(out) + 1
--		bias = adapt(i-oldi, ln, oldi == 0)
--		n += i / rune(ln)
--		i = i % rune(ln)
--		// insert
--		out = append(out, 0)
--		copy(out[i+1:], out[i:])
--		out[i] = n
--	}
--
--	var ret bytes.Buffer
--	for _, r := range out {
--		ret.WriteRune(r)
--	}
--	return ret.Bytes()
--}
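
A quick round trip through the two exported functions, using the vendored import path that appears elsewhere in this patch; note that encode lowercases its input, so the round trip is only exact for names that are already lower case (illustrative only):

	package main

	import (
		"fmt"

		"github.com/miekg/dns/idn"
	)

	func main() {
		name := "テスト.example."
		p := idn.ToPunycode(name) // "xn--zckzah.example."
		fmt.Println(p, idn.FromPunycode(p))
	}
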
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go b/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go
-deleted file mode 100644
-index 3202450..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/idn/punycode_test.go
-+++ /dev/null
-@@ -1,94 +0,0 @@
--package idn
--
--import (
--	"strings"
--	"testing"
--)
--
--var testcases = [][2]string{
--	{"", ""},
--	{"a", "a"},
--	{"A-B", "a-b"},
--	{"AbC", "abc"},
--	{"я", "xn--41a"},
--	{"zя", "xn--z-0ub"},
--	{"ЯZ", "xn--z-zub"},
--	{"إختبار", "xn--kgbechtv"},
--	{"آزمایشی", "xn--hgbk6aj7f53bba"},
--	{"测试", "xn--0zwm56d"},
--	{"測試", "xn--g6w251d"},
--	{"Испытание", "xn--80akhbyknj4f"},
--	{"परीक्षा", "xn--11b5bs3a9aj6g"},
--	{"δοκιμή", "xn--jxalpdlp"},
--	{"테스트", "xn--9t4b11yi5a"},
--	{"טעסט", "xn--deba0ad"},
--	{"テスト", "xn--zckzah"},
--	{"பரிட்சை", "xn--hlcj6aya9esc7a"},
--}
--
--func TestEncodeDecodePunycode(t *testing.T) {
--	for _, tst := range testcases {
--		enc := encode([]byte(tst[0]))
--		if string(enc) != tst[1] {
--			t.Errorf("%s encodeded as %s but should be %s", tst[0], enc, tst[1])
--		}
--		dec := decode([]byte(tst[1]))
--		if string(dec) != strings.ToLower(tst[0]) {
--			t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0]))
--		}
--	}
--}
--
--func TestToFromPunycode(t *testing.T) {
--	for _, tst := range testcases {
--		// assert unicode.com == punycode.com
--		full := ToPunycode(tst[0] + ".com")
--		if full != tst[1]+".com" {
--			t.Errorf("invalid result from string conversion to punycode, %s and should be %s.com", full, tst[1])
--		}
--		// assert punycode.punycode == unicode.unicode
--		decoded := FromPunycode(tst[1] + "." + tst[1])
--		if decoded != strings.ToLower(tst[0]+"."+tst[0]) {
--			t.Errorf("invalid result from string conversion to punycode, %s and should be %s.%s", decoded, tst[0], tst[0])
--		}
--	}
--}
--
--func TestEncodeDecodeFinalPeriod(t *testing.T) {
--	for _, tst := range testcases {
--		// assert unicode.com. == punycode.com.
--		full := ToPunycode(tst[0] + ".")
--		if full != tst[1]+"." {
--			t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v and should be %#v", full, tst[1]+".")
--		}
--		// assert punycode.com. == unicode.com.
--		decoded := FromPunycode(tst[1] + ".")
--		if decoded != strings.ToLower(tst[0]+".") {
--			t.Errorf("invalid result from string conversion to punycode when period added, %#v and should be %#v", decoded, tst[0]+".")
--		}
--		full = ToPunycode(tst[0])
--		if full != tst[1] {
--			t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v and should be %#v", full, tst[1]+".")
--		}
--		// assert punycode.com. == unicode.com.
--		decoded = FromPunycode(tst[1])
--		if decoded != strings.ToLower(tst[0]) {
--			t.Errorf("invalid result from string conversion to punycode when no period added, %#v and should be %#v", decoded, tst[0]+".")
--		}
--	}
--}
--
--var invalid = []string{
--	"xn--*",
--	"xn--",
--	"xn---",
--}
--
--func TestInvalidPunycode(t *testing.T) {
--	for _, d := range invalid {
--		s := FromPunycode(d)
--		if s != d {
--			t.Errorf("Changed invalid name %s to %#v", d, s)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/keygen.go b/Godeps/_workspace/src/github.com/miekg/dns/keygen.go
-deleted file mode 100644
-index dfe328e..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/keygen.go
-+++ /dev/null
-@@ -1,157 +0,0 @@
--package dns
--
--import (
--	"crypto/dsa"
--	"crypto/ecdsa"
--	"crypto/elliptic"
--	"crypto/rand"
--	"crypto/rsa"
--	"math/big"
--	"strconv"
--)
--
--const _FORMAT = "Private-key-format: v1.3\n"
--
--// Empty interface that is used as a wrapper around all possible
--// private key implementations from the crypto package.
--type PrivateKey interface{}
--
--// Generate generates a DNSKEY of the given bit size.
--// The public part is put inside the DNSKEY record.
--// The Algorithm in the key must be set as this will define
--// what kind of DNSKEY will be generated.
--// The ECDSA algorithms imply a fixed keysize, in that case
--// bits should be set to the size of the algorithm.
--func (r *DNSKEY) Generate(bits int) (PrivateKey, error) {
--	switch r.Algorithm {
--	case DSA, DSANSEC3SHA1:
--		if bits != 1024 {
--			return nil, ErrKeySize
--		}
--	case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
--		if bits < 512 || bits > 4096 {
--			return nil, ErrKeySize
--		}
--	case RSASHA512:
--		if bits < 1024 || bits > 4096 {
--			return nil, ErrKeySize
--		}
--	case ECDSAP256SHA256:
--		if bits != 256 {
--			return nil, ErrKeySize
--		}
--	case ECDSAP384SHA384:
--		if bits != 384 {
--			return nil, ErrKeySize
--		}
--	}
--
--	switch r.Algorithm {
--	case DSA, DSANSEC3SHA1:
--		params := new(dsa.Parameters)
--		if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
--			return nil, err
--		}
--		priv := new(dsa.PrivateKey)
--		priv.PublicKey.Parameters = *params
--		err := dsa.GenerateKey(priv, rand.Reader)
--		if err != nil {
--			return nil, err
--		}
--		r.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
--		return priv, nil
--	case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
--		priv, err := rsa.GenerateKey(rand.Reader, bits)
--		if err != nil {
--			return nil, err
--		}
--		r.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
--		return priv, nil
--	case ECDSAP256SHA256, ECDSAP384SHA384:
--		var c elliptic.Curve
--		switch r.Algorithm {
--		case ECDSAP256SHA256:
--			c = elliptic.P256()
--		case ECDSAP384SHA384:
--			c = elliptic.P384()
--		}
--		priv, err := ecdsa.GenerateKey(c, rand.Reader)
--		if err != nil {
--			return nil, err
--		}
--		r.setPublicKeyCurve(priv.PublicKey.X, priv.PublicKey.Y)
--		return priv, nil
--	default:
--		return nil, ErrAlg
--	}
--	return nil, nil // Dummy return
--}
--
--// PrivateKeyString converts a PrivateKey to a string. This
--// string has the same format as the private-key-file of BIND9 (Private-key-format: v1.3).
--// It needs some info from the key (hashing, keytag), so it's a method of the DNSKEY.
--func (r *DNSKEY) PrivateKeyString(p PrivateKey) (s string) {
--	switch t := p.(type) {
--	case *rsa.PrivateKey:
--		algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")"
--		modulus := toBase64(t.PublicKey.N.Bytes())
--		e := big.NewInt(int64(t.PublicKey.E))
--		publicExponent := toBase64(e.Bytes())
--		privateExponent := toBase64(t.D.Bytes())
--		prime1 := toBase64(t.Primes[0].Bytes())
--		prime2 := toBase64(t.Primes[1].Bytes())
--		// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
--		// and from: http://code.google.com/p/go/issues/detail?id=987
--		one := big.NewInt(1)
--		minusone := big.NewInt(-1)
--		p_1 := big.NewInt(0).Sub(t.Primes[0], one)
--		q_1 := big.NewInt(0).Sub(t.Primes[1], one)
--		exp1 := big.NewInt(0).Mod(t.D, p_1)
--		exp2 := big.NewInt(0).Mod(t.D, q_1)
--		coeff := big.NewInt(0).Exp(t.Primes[1], minusone, t.Primes[0])
--
--		exponent1 := toBase64(exp1.Bytes())
--		exponent2 := toBase64(exp2.Bytes())
--		coefficient := toBase64(coeff.Bytes())
--
--		s = _FORMAT +
--			"Algorithm: " + algorithm + "\n" +
--			"Modulus: " + modulus + "\n" +
--			"PublicExponent: " + publicExponent + "\n" +
--			"PrivateExponent: " + privateExponent + "\n" +
--			"Prime1: " + prime1 + "\n" +
--			"Prime2: " + prime2 + "\n" +
--			"Exponent1: " + exponent1 + "\n" +
--			"Exponent2: " + exponent2 + "\n" +
--			"Coefficient: " + coefficient + "\n"
--	case *ecdsa.PrivateKey:
--		algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")"
--		var intlen int
--		switch r.Algorithm {
--		case ECDSAP256SHA256:
--			intlen = 32
--		case ECDSAP384SHA384:
--			intlen = 48
--		}
--		private := toBase64(intToBytes(t.D, intlen))
--		s = _FORMAT +
--			"Algorithm: " + algorithm + "\n" +
--			"PrivateKey: " + private + "\n"
--	case *dsa.PrivateKey:
--		algorithm := strconv.Itoa(int(r.Algorithm)) + " (" + AlgorithmToString[r.Algorithm] + ")"
--		T := divRoundUp(divRoundUp(t.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
--		prime := toBase64(intToBytes(t.PublicKey.Parameters.P, 64+T*8))
--		subprime := toBase64(intToBytes(t.PublicKey.Parameters.Q, 20))
--		base := toBase64(intToBytes(t.PublicKey.Parameters.G, 64+T*8))
--		priv := toBase64(intToBytes(t.X, 20))
--		pub := toBase64(intToBytes(t.PublicKey.Y, 64+T*8))
--		s = _FORMAT +
--			"Algorithm: " + algorithm + "\n" +
--			"Prime(p): " + prime + "\n" +
--			"Subprime(q): " + subprime + "\n" +
--			"Base(g): " + base + "\n" +
--			"Private_value(x): " + priv + "\n" +
--			"Public_value(y): " + pub + "\n"
--	}
--	return
--}
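
The keygen.go hunk above documents DNSKEY.Generate and DNSKEY.PrivateKeyString. As a rough usage sketch, assuming the plain github.com/miekg/dns import path and the DNSKEY/RR_Header fields defined elsewhere in that package:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256, // zone-signing key
		Protocol:  3,   // always 3 for DNSSEC (RFC 4034)
		Algorithm: dns.RSASHA256,
	}
	// Generate fills in key.PublicKey and returns the private half,
	// an *rsa.PrivateKey for the RSA algorithms.
	priv, err := key.Generate(2048)
	if err != nil {
		panic(err)
	}
	fmt.Println(key.String())               // the DNSKEY RR, public key included
	fmt.Println(key.PrivateKeyString(priv)) // BIND9 "Private-key-format: v1.3" text
}

For RSASHA256 the bit size must fall in the 512-4096 range checked at the top of Generate; the ECDSA algorithms only accept their fixed sizes (256 or 384).
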
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/kscan.go b/Godeps/_workspace/src/github.com/miekg/dns/kscan.go
-deleted file mode 100644
-index c48ca2d..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/kscan.go
-+++ /dev/null
-@@ -1,244 +0,0 @@
--package dns
--
--import (
--	"crypto/dsa"
--	"crypto/ecdsa"
--	"crypto/rsa"
--	"io"
--	"math/big"
--	"strings"
--)
--
--func (k *DNSKEY) NewPrivateKey(s string) (PrivateKey, error) {
--	if s[len(s)-1] != '\n' { // We need a closing newline
--		return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
--	}
--	return k.ReadPrivateKey(strings.NewReader(s), "")
--}
--
--// ReadPrivateKey reads a private key from the io.Reader q. The string file is
--// only used in error reporting.
--// The public key must be
--// known, because some cryptographic algorithms embed the public key inside the private key.
--func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) {
--	m, e := parseKey(q, file)
--	if m == nil {
--		return nil, e
--	}
--	if _, ok := m["private-key-format"]; !ok {
--		return nil, ErrPrivKey
--	}
--	if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
--		return nil, ErrPrivKey
--	}
--	// TODO(mg): check if the pubkey matches the private key
--	switch m["algorithm"] {
--	case "3 (DSA)":
--		p, e := readPrivateKeyDSA(m)
--		if e != nil {
--			return nil, e
--		}
--		if !k.setPublicKeyInPrivate(p) {
--			return nil, ErrKey
--		}
--		return p, e
--	case "1 (RSAMD5)":
--		fallthrough
--	case "5 (RSASHA1)":
--		fallthrough
--	case "7 (RSASHA1NSEC3SHA1)":
--		fallthrough
--	case "8 (RSASHA256)":
--		fallthrough
--	case "10 (RSASHA512)":
--		p, e := readPrivateKeyRSA(m)
--		if e != nil {
--			return nil, e
--		}
--		if !k.setPublicKeyInPrivate(p) {
--			return nil, ErrKey
--		}
--		return p, e
--	case "12 (ECC-GOST)":
--		p, e := readPrivateKeyGOST(m)
--		if e != nil {
--			return nil, e
--		}
--		// setPublicKeyInPrivate(p)
--		return p, e
--	case "13 (ECDSAP256SHA256)":
--		fallthrough
--	case "14 (ECDSAP384SHA384)":
--		p, e := readPrivateKeyECDSA(m)
--		if e != nil {
--			return nil, e
--		}
--		if !k.setPublicKeyInPrivate(p) {
--			return nil, ErrKey
--		}
--		return p, e
--	}
--	return nil, ErrPrivKey
--}
--
--// Read a private key (file) string and create a public key. Return the private key.
--func readPrivateKeyRSA(m map[string]string) (PrivateKey, error) {
--	p := new(rsa.PrivateKey)
--	p.Primes = []*big.Int{nil, nil}
--	for k, v := range m {
--		switch k {
--		case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
--			v1, err := fromBase64([]byte(v))
--			if err != nil {
--				return nil, err
--			}
--			switch k {
--			case "modulus":
--				p.PublicKey.N = big.NewInt(0)
--				p.PublicKey.N.SetBytes(v1)
--			case "publicexponent":
--				i := big.NewInt(0)
--				i.SetBytes(v1)
--				p.PublicKey.E = int(i.Int64()) // int64 should be large enough
--			case "privateexponent":
--				p.D = big.NewInt(0)
--				p.D.SetBytes(v1)
--			case "prime1":
--				p.Primes[0] = big.NewInt(0)
--				p.Primes[0].SetBytes(v1)
--			case "prime2":
--				p.Primes[1] = big.NewInt(0)
--				p.Primes[1].SetBytes(v1)
--			}
--		case "exponent1", "exponent2", "coefficient":
--			// not used in Go (yet)
--		case "created", "publish", "activate":
--			// not used in Go (yet)
--		}
--	}
--	return p, nil
--}
--
--func readPrivateKeyDSA(m map[string]string) (PrivateKey, error) {
--	p := new(dsa.PrivateKey)
--	p.X = big.NewInt(0)
--	for k, v := range m {
--		switch k {
--		case "private_value(x)":
--			v1, err := fromBase64([]byte(v))
--			if err != nil {
--				return nil, err
--			}
--			p.X.SetBytes(v1)
--		case "created", "publish", "activate":
--			/* not used in Go (yet) */
--		}
--	}
--	return p, nil
--}
--
--func readPrivateKeyECDSA(m map[string]string) (PrivateKey, error) {
--	p := new(ecdsa.PrivateKey)
--	p.D = big.NewInt(0)
--	// TODO: validate that the required flags are present
--	for k, v := range m {
--		switch k {
--		case "privatekey":
--			v1, err := fromBase64([]byte(v))
--			if err != nil {
--				return nil, err
--			}
--			p.D.SetBytes(v1)
--		case "created", "publish", "activate":
--			/* not used in Go (yet) */
--		}
--	}
--	return p, nil
--}
--
--func readPrivateKeyGOST(m map[string]string) (PrivateKey, error) {
--	// TODO(miek)
--	return nil, nil
--}
--
--// parseKey reads a private key from r. It returns a map[string]string,
--// with the key-value pairs, or an error when the file is not correct.
--func parseKey(r io.Reader, file string) (map[string]string, error) {
--	s := scanInit(r)
--	m := make(map[string]string)
--	c := make(chan lex)
--	k := ""
--	// Start the lexer
--	go klexer(s, c)
--	for l := range c {
--		// It should alternate
--		switch l.value {
--		case _KEY:
--			k = l.token
--		case _VALUE:
--			if k == "" {
--				return nil, &ParseError{file, "no private key seen", l}
--			}
--			//println("Setting", strings.ToLower(k), "to", l.token, "b")
--			m[strings.ToLower(k)] = l.token
--			k = ""
--		}
--	}
--	return m, nil
--}
--
--// klexer scans the sourcefile and returns tokens on the channel c.
--func klexer(s *scan, c chan lex) {
--	var l lex
--	str := "" // Hold the current read text
--	commt := false
--	key := true
--	x, err := s.tokenText()
--	defer close(c)
--	for err == nil {
--		l.column = s.position.Column
--		l.line = s.position.Line
--		switch x {
--		case ':':
--			if commt {
--				break
--			}
--			l.token = str
--			if key {
--				l.value = _KEY
--				c <- l
--				// Next token is a space, eat it
--				s.tokenText()
--				key = false
--				str = ""
--			} else {
--				l.value = _VALUE
--			}
--		case ';':
--			commt = true
--		case '\n':
--			if commt {
--				// Reset a comment
--				commt = false
--			}
--			l.value = _VALUE
--			l.token = str
--			c <- l
--			str = ""
--			commt = false
--			key = true
--		default:
--			if commt {
--				break
--			}
--			str += string(x)
--		}
--		x, err = s.tokenText()
--	}
--	if len(str) > 0 {
--		// Send remainder
--		l.token = str
--		l.value = _VALUE
--		c <- l
--	}
--}
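
kscan.go above is the matching parser: NewPrivateKey and ReadPrivateKey turn the BIND9-style text produced by PrivateKeyString back into a crypto private key. A minimal sketch under the same assumptions (the behaviour of setPublicKeyInPrivate is defined outside this hunk):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256,
		Protocol:  3,
		Algorithm: dns.RSASHA256,
	}
	priv, err := key.Generate(1024)
	if err != nil {
		panic(err)
	}
	// NewPrivateKey appends the required trailing newline if it is missing
	// and delegates to ReadPrivateKey; the DNSKEY's public key must already
	// be set (Generate did that above).
	restored, err := key.NewPrivateKey(key.PrivateKeyString(priv))
	if err != nil {
		panic(err)
	}
	fmt.Printf("restored: %T\n", restored) // *rsa.PrivateKey
}
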
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/labels.go b/Godeps/_workspace/src/github.com/miekg/dns/labels.go
-deleted file mode 100644
-index 758e578..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/labels.go
-+++ /dev/null
-@@ -1,162 +0,0 @@
--package dns
--
--// Holds a bunch of helper functions for dealing with labels.
--
--// SplitDomainName splits a name string into its labels.
--// www.miek.nl. returns []string{"www", "miek", "nl"}
--// The root label (.) returns nil. Note that using
--// strings.Split(s) will work in most cases, but does not handle
--// escaped dots (\.) for instance.
--func SplitDomainName(s string) (labels []string) {
--	if len(s) == 0 {
--		return nil
--	}
--	fqdnEnd := 0 // offset of the final '.' or the length of the name
--	idx := Split(s)
--	begin := 0
--	if s[len(s)-1] == '.' {
--		fqdnEnd = len(s) - 1
--	} else {
--		fqdnEnd = len(s)
--	}
--
--	switch len(idx) {
--	case 0:
--		return nil
--	case 1:
--		// no-op
--	default:
--		end := 0
--		for i := 1; i < len(idx); i++ {
--			end = idx[i]
--			labels = append(labels, s[begin:end-1])
--			begin = end
--		}
--	}
--
--	labels = append(labels, s[begin:fqdnEnd])
--	return labels
--}
--
--// CompareDomainName compares the names s1 and s2 and
--// returns how many labels they have in common starting from the *right*.
--// The comparison stops at the first inequality. The names are not downcased
--// before the comparison.
--//
--// www.miek.nl. and miek.nl. have two labels in common: miek and nl
--// www.miek.nl. and www.bla.nl. have one label in common: nl
--func CompareDomainName(s1, s2 string) (n int) {
--	s1 = Fqdn(s1)
--	s2 = Fqdn(s2)
--	l1 := Split(s1)
--	l2 := Split(s2)
--
--	// the first check: root label
--	if l1 == nil || l2 == nil {
--		return
--	}
--
--	j1 := len(l1) - 1 // end
--	i1 := len(l1) - 2 // start
--	j2 := len(l2) - 1
--	i2 := len(l2) - 2
--	// the second check can be done here: last/only label
--	// before we fall through into the for-loop below
--	if s1[l1[j1]:] == s2[l2[j2]:] {
--		n++
--	} else {
--		return
--	}
--	for {
--		if i1 < 0 || i2 < 0 {
--			break
--		}
--		if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
--			n++
--		} else {
--			break
--		}
--		j1--
--		i1--
--		j2--
--		i2--
--	}
--	return
--}
--
--// CountLabel counts the number of labels in the string s.
--func CountLabel(s string) (labels int) {
--	if s == "." {
--		return
--	}
--	off := 0
--	end := false
--	for {
--		off, end = NextLabel(s, off)
--		labels++
--		if end {
--			return
--		}
--	}
--	panic("dns: not reached")
--}
--
--// Split splits a name s into its label indexes.
--// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
--// The root name (.) returns nil. Also see dns.SplitDomainName.
--func Split(s string) []int {
--	if s == "." {
--		return nil
--	}
--	idx := make([]int, 1, 3)
--	off := 0
--	end := false
--
--	for {
--		off, end = NextLabel(s, off)
--		if end {
--			return idx
--		}
--		idx = append(idx, off)
--	}
--	panic("dns: not reached")
--}
--
--// NextLabel returns the index of the start of the next label in the
--// string s starting at offset.
--// The bool end is true when the end of the string has been reached.
--func NextLabel(s string, offset int) (i int, end bool) {
--	quote := false
--	for i = offset; i < len(s)-1; i++ {
--		switch s[i] {
--		case '\\':
--			quote = !quote
--		default:
--			quote = false
--		case '.':
--			if quote {
--				quote = !quote
--				continue
--			}
--			return i + 1, false
--		}
--	}
--	return i + 1, true
--}
--
--// PrevLabel returns the index of the label when starting from the right and
--// jumping n labels to the left.
--// The bool start is true when the start of the string has been overshot.
--func PrevLabel(s string, n int) (i int, start bool) {
--	if n == 0 {
--		return len(s), false
--	}
--	lab := Split(s)
--	if lab == nil {
--		return 0, true
--	}
--	if n > len(lab) {
--		return 0, true
--	}
--	return lab[len(lab)-n], false
--}
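
labels.go above carries the label helpers used throughout the package. A short sketch of their behaviour, assuming the same vendored import path; the expected values follow directly from the doc comments above and the tests below:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.miek.nl."))               // [www miek nl]
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl.")) // 2 (miek and nl in common)
	fmt.Println(dns.CountLabel("www.miek.nl."))                    // 3
	fmt.Println(dns.Split("www.miek.nl."))                         // [0 4 9]
}
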
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go b/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go
-deleted file mode 100644
-index 1d8da15..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/labels_test.go
-+++ /dev/null
-@@ -1,214 +0,0 @@
--package dns
--
--import (
--	"testing"
--)
--
--func TestCompareDomainName(t *testing.T) {
--	s1 := "www.miek.nl."
--	s2 := "miek.nl."
--	s3 := "www.bla.nl."
--	s4 := "nl.www.bla."
--	s5 := "nl"
--	s6 := "miek.nl"
--
--	if CompareDomainName(s1, s2) != 2 {
--		t.Logf("%s with %s should be %d", s1, s2, 2)
--		t.Fail()
--	}
--	if CompareDomainName(s1, s3) != 1 {
--		t.Logf("%s with %s should be %d", s1, s3, 1)
--		t.Fail()
--	}
--	if CompareDomainName(s3, s4) != 0 {
--		t.Logf("%s with %s should be %d", s3, s4, 0)
--		t.Fail()
--	}
--	// Non qualified tests
--	if CompareDomainName(s1, s5) != 1 {
--		t.Logf("%s with %s should be %d", s1, s5, 1)
--		t.Fail()
--	}
--	if CompareDomainName(s1, s6) != 2 {
--		t.Logf("%s with %s should be %d", s1, s6, 2)
--		t.Fail()
--	}
--
--	if CompareDomainName(s1, ".") != 0 {
--		t.Logf("%s with %s should be %d", s1, ".", 0)
--		t.Fail()
--	}
--	if CompareDomainName(".", ".") != 0 {
--		t.Logf("%s with %s should be %d", ".", ".", 0)
--		t.Fail()
--	}
--}
--
--func TestSplit(t *testing.T) {
--	splitter := map[string]int{
--		"www.miek.nl.":   3,
--		"www.miek.nl":    3,
--		"www..miek.nl":   4,
--		`www\.miek.nl.`:  2,
--		`www\\.miek.nl.`: 3,
--		".":              0,
--		"nl.":            1,
--		"nl":             1,
--		"com.":           1,
--		".com.":          2,
--	}
--	for s, i := range splitter {
--		if x := len(Split(s)); x != i {
--			t.Logf("labels should be %d, got %d: %s %v\n", i, x, s, Split(s))
--			t.Fail()
--		} else {
--			t.Logf("%s %v\n", s, Split(s))
--		}
--	}
--}
--
--func TestSplit2(t *testing.T) {
--	splitter := map[string][]int{
--		"www.miek.nl.": []int{0, 4, 9},
--		"www.miek.nl":  []int{0, 4, 9},
--		"nl":           []int{0},
--	}
--	for s, i := range splitter {
--		x := Split(s)
--		switch len(i) {
--		case 1:
--			if x[0] != i[0] {
--				t.Logf("labels should be %v, got %v: %s\n", i, x, s)
--				t.Fail()
--			}
--		default:
--			if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
--				t.Logf("labels should be %v, got %v: %s\n", i, x, s)
--				t.Fail()
--			}
--		}
--	}
--}
--
--func TestPrevLabel(t *testing.T) {
--	type prev struct {
--		string
--		int
--	}
--	prever := map[prev]int{
--		prev{"www.miek.nl.", 0}: 12,
--		prev{"www.miek.nl.", 1}: 9,
--		prev{"www.miek.nl.", 2}: 4,
--
--		prev{"www.miek.nl", 0}: 11,
--		prev{"www.miek.nl", 1}: 9,
--		prev{"www.miek.nl", 2}: 4,
--
--		prev{"www.miek.nl.", 5}: 0,
--		prev{"www.miek.nl", 5}:  0,
--
--		prev{"www.miek.nl.", 3}: 0,
--		prev{"www.miek.nl", 3}:  0,
--	}
--	for s, i := range prever {
--		x, ok := PrevLabel(s.string, s.int)
--		if i != x {
--			t.Logf("label should be %d, got %d, %t: preving %d, %s\n", i, x, ok, s.int, s.string)
--			t.Fail()
--		}
--	}
--}
--
--func TestCountLabel(t *testing.T) {
--	splitter := map[string]int{
--		"www.miek.nl.": 3,
--		"www.miek.nl":  3,
--		"nl":           1,
--		".":            0,
--	}
--	for s, i := range splitter {
--		x := CountLabel(s)
--		if x != i {
--			t.Logf("CountLabel should have %d, got %d\n", i, x)
--			t.Fail()
--		}
--	}
--}
--
--func TestSplitDomainName(t *testing.T) {
--	labels := map[string][]string{
--		"miek.nl":       []string{"miek", "nl"},
--		".":             nil,
--		"www.miek.nl.":  []string{"www", "miek", "nl"},
--		"www.miek.nl":   []string{"www", "miek", "nl"},
--		"www..miek.nl":  []string{"www", "", "miek", "nl"},
--		`www\.miek.nl`:  []string{`www\.miek`, "nl"},
--		`www\\.miek.nl`: []string{`www\\`, "miek", "nl"},
--	}
--domainLoop:
--	for domain, splits := range labels {
--		parts := SplitDomainName(domain)
--		if len(parts) != len(splits) {
--			t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
--			t.Fail()
--			continue domainLoop
--		}
--		for i := range parts {
--			if parts[i] != splits[i] {
--				t.Logf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
--				t.Fail()
--				continue domainLoop
--			}
--		}
--	}
--}
--
--func TestIsDomainName(t *testing.T) {
--	type ret struct {
--		ok  bool
--		lab int
--	}
--	names := map[string]*ret{
--		"..":               &ret{false, 1},
--		"@.":               &ret{true, 1},
--		"www.example.com":  &ret{true, 3},
--		"www.e%ample.com":  &ret{true, 3},
--		"www.example.com.": &ret{true, 3},
--		"mi\\k.nl.":        &ret{true, 2},
--		"mi\\k.nl":         &ret{true, 2},
--	}
--	for d, ok := range names {
--		l, k := IsDomainName(d)
--		if ok.ok != k || ok.lab != l {
--			t.Logf(" got %v %d for %s ", k, l, d)
--			t.Logf("have %v %d for %s ", ok.ok, ok.lab, d)
--			t.Fail()
--		}
--	}
--}
--
--func BenchmarkSplitLabels(b *testing.B) {
--	for i := 0; i < b.N; i++ {
--		Split("www.example.com")
--	}
--}
--
--func BenchmarkLenLabels(b *testing.B) {
--	for i := 0; i < b.N; i++ {
--		CountLabel("www.example.com")
--	}
--}
--
--func BenchmarkCompareLabels(b *testing.B) {
--	for i := 0; i < b.N; i++ {
--		CompareDomainName("www.example.com", "aa.example.com")
--	}
--}
--
--func BenchmarkIsSubDomain(b *testing.B) {
--	for i := 0; i < b.N; i++ {
--		IsSubDomain("www.example.com", "aa.example.com")
--		IsSubDomain("example.com", "aa.example.com")
--		IsSubDomain("miek.nl", "aa.example.com")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/msg.go b/Godeps/_workspace/src/github.com/miekg/dns/msg.go
-deleted file mode 100644
-index ecba463..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/msg.go
-+++ /dev/null
-@@ -1,1899 +0,0 @@
--// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
--// and to - Pack() - wire format.
--// All the packers and unpackers take a (msg []byte, off int)
--// and return (off1 int, ok bool).  If they return ok==false, they
--// also return off1==len(msg), so that the next unpacker will
--// also fail.  This lets us avoid checks of ok until the end of a
--// packing sequence.
--
--package dns
--
--import (
--	"encoding/base32"
--	"encoding/base64"
--	"encoding/hex"
--	"math/big"
--	"math/rand"
--	"net"
--	"reflect"
--	"strconv"
--	"time"
--)
--
--const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
--
--var (
--	ErrAlg           error = &Error{err: "bad algorithm"}
--	ErrAuth          error = &Error{err: "bad authentication"}
--	ErrBuf           error = &Error{err: "buffer size too small"}
--	ErrConnEmpty     error = &Error{err: "conn has no connection"}
--	ErrConn          error = &Error{err: "conn holds both UDP and TCP connection"}
--	ErrExtendedRcode error = &Error{err: "bad extended rcode"}
--	ErrFqdn          error = &Error{err: "domain must be fully qualified"}
--	ErrId            error = &Error{err: "id mismatch"}
--	ErrKeyAlg        error = &Error{err: "bad key algorithm"}
--	ErrKey           error = &Error{err: "bad key"}
--	ErrKeySize       error = &Error{err: "bad key size"}
--	ErrNoSig         error = &Error{err: "no signature found"}
--	ErrPrivKey       error = &Error{err: "bad private key"}
--	ErrRcode         error = &Error{err: "bad rcode"}
--	ErrRdata         error = &Error{err: "bad rdata"}
--	ErrRRset         error = &Error{err: "bad rrset"}
--	ErrSecret        error = &Error{err: "no secrets defined"}
--	ErrServ          error = &Error{err: "no servers could be reached"}
--	ErrShortRead     error = &Error{err: "short read"}
--	ErrSig           error = &Error{err: "bad signature"}
--	ErrSigGen        error = &Error{err: "bad signature generation"}
--	ErrSoa           error = &Error{err: "no SOA"}
--	ErrTime          error = &Error{err: "bad time"}
--)
--
--// Id, by default, returns a 16-bit random number to be used as a
--// message id. The randomness provided should be good enough. This being a
--// variable, the function can be reassigned to a custom function.
--// For instance, to make it return a static value:
--//
--//	dns.Id = func() uint16 { return 3 }
--var Id func() uint16 = id
--
--// A manually-unpacked version of (id, bits).
--// This is in its own struct for easy printing.
--type MsgHdr struct {
--	Id                 uint16
--	Response           bool
--	Opcode             int
--	Authoritative      bool
--	Truncated          bool
--	RecursionDesired   bool
--	RecursionAvailable bool
--	Zero               bool
--	AuthenticatedData  bool
--	CheckingDisabled   bool
--	Rcode              int
--}
--
--// The layout of a DNS message.
--type Msg struct {
--	MsgHdr
--	Compress bool       `json:"-"` // If true, the message will be compressed when converted to wire format. This is not part of the official DNS packet format.
--	Question []Question // Holds the RR(s) of the question section.
--	Answer   []RR       // Holds the RR(s) of the answer section.
--	Ns       []RR       // Holds the RR(s) of the authority section.
--	Extra    []RR       // Holds the RR(s) of the additional section.
--}
--
--// Map of strings for each RR wire type.
--var TypeToString = map[uint16]string{
--	TypeA:          "A",
--	TypeAAAA:       "AAAA",
--	TypeAFSDB:      "AFSDB",
--	TypeANY:        "ANY", // Meta RR
--	TypeATMA:       "ATMA",
--	TypeAXFR:       "AXFR", // Meta RR
--	TypeCAA:        "CAA",
--	TypeCDNSKEY:    "CDNSKEY",
--	TypeCDS:        "CDS",
--	TypeCERT:       "CERT",
--	TypeCNAME:      "CNAME",
--	TypeDHCID:      "DHCID",
--	TypeDLV:        "DLV",
--	TypeDNAME:      "DNAME",
--	TypeDNSKEY:     "DNSKEY",
--	TypeDS:         "DS",
--	TypeEID:        "EID",
--	TypeEUI48:      "EUI48",
--	TypeEUI64:      "EUI64",
--	TypeGID:        "GID",
--	TypeGPOS:       "GPOS",
--	TypeHINFO:      "HINFO",
--	TypeHIP:        "HIP",
--	TypeIPSECKEY:   "IPSECKEY",
--	TypeISDN:       "ISDN",
--	TypeIXFR:       "IXFR", // Meta RR
--	TypeKEY:        "KEY",
--	TypeKX:         "KX",
--	TypeL32:        "L32",
--	TypeL64:        "L64",
--	TypeLOC:        "LOC",
--	TypeLP:         "LP",
--	TypeMB:         "MB",
--	TypeMD:         "MD",
--	TypeMF:         "MF",
--	TypeMG:         "MG",
--	TypeMINFO:      "MINFO",
--	TypeMR:         "MR",
--	TypeMX:         "MX",
--	TypeNAPTR:      "NAPTR",
--	TypeNID:        "NID",
--	TypeNINFO:      "NINFO",
--	TypeNIMLOC:     "NIMLOC",
--	TypeNS:         "NS",
--	TypeNSAP:       "NSAP",
--	TypeNSAPPTR:    "NSAP-PTR",
--	TypeNSEC3:      "NSEC3",
--	TypeNSEC3PARAM: "NSEC3PARAM",
--	TypeNSEC:       "NSEC",
--	TypeNULL:       "NULL",
--	TypeOPT:        "OPT",
--	TypeOPENPGPKEY: "OPENPGPKEY",
--	TypePTR:        "PTR",
--	TypeRKEY:       "RKEY",
--	TypeRP:         "RP",
--	TypeRRSIG:      "RRSIG",
--	TypeRT:         "RT",
--	TypeSIG:        "SIG",
--	TypeSOA:        "SOA",
--	TypeSPF:        "SPF",
--	TypeSRV:        "SRV",
--	TypeSSHFP:      "SSHFP",
--	TypeTA:         "TA",
--	TypeTALINK:     "TALINK",
--	TypeTKEY:       "TKEY", // Meta RR
--	TypeTLSA:       "TLSA",
--	TypeTSIG:       "TSIG", // Meta RR
--	TypeTXT:        "TXT",
--	TypePX:         "PX",
--	TypeUID:        "UID",
--	TypeUINFO:      "UINFO",
--	TypeUNSPEC:     "UNSPEC",
--	TypeURI:        "URI",
--	TypeWKS:        "WKS",
--	TypeX25:        "X25",
--}
--
--// Reverse, needed for string parsing.
--var StringToType = reverseInt16(TypeToString)
--var StringToClass = reverseInt16(ClassToString)
--
--// Map of opcodes strings.
--var StringToOpcode = reverseInt(OpcodeToString)
--
--// Map of rcodes strings.
--var StringToRcode = reverseInt(RcodeToString)
--
--// Map of strings for each CLASS wire type.
--var ClassToString = map[uint16]string{
--	ClassINET:   "IN",
--	ClassCSNET:  "CS",
--	ClassCHAOS:  "CH",
--	ClassHESIOD: "HS",
--	ClassNONE:   "NONE",
--	ClassANY:    "ANY",
--}
--
--// Map of strings for opcodes.
--var OpcodeToString = map[int]string{
--	OpcodeQuery:  "QUERY",
--	OpcodeIQuery: "IQUERY",
--	OpcodeStatus: "STATUS",
--	OpcodeNotify: "NOTIFY",
--	OpcodeUpdate: "UPDATE",
--}
--
--// Map of strings for rcodes.
--var RcodeToString = map[int]string{
--	RcodeSuccess:        "NOERROR",
--	RcodeFormatError:    "FORMERR",
--	RcodeServerFailure:  "SERVFAIL",
--	RcodeNameError:      "NXDOMAIN",
--	RcodeNotImplemented: "NOTIMPL",
--	RcodeRefused:        "REFUSED",
--	RcodeYXDomain:       "YXDOMAIN", // From RFC 2136
--	RcodeYXRrset:        "YXRRSET",
--	RcodeNXRrset:        "NXRRSET",
--	RcodeNotAuth:        "NOTAUTH",
--	RcodeNotZone:        "NOTZONE",
--	RcodeBadSig:         "BADSIG", // Also known as RcodeBadVers, see RFC 6891
--	//	RcodeBadVers:        "BADVERS",
--	RcodeBadKey:   "BADKEY",
--	RcodeBadTime:  "BADTIME",
--	RcodeBadMode:  "BADMODE",
--	RcodeBadName:  "BADNAME",
--	RcodeBadAlg:   "BADALG",
--	RcodeBadTrunc: "BADTRUNC",
--}
--
--// Rather than write the usual handful of routines to pack and
--// unpack every message that can appear on the wire, we use
--// reflection to write a generic pack/unpack for structs and then
--// use it. Thus, if in the future we need to define new message
--// structs, no new pack/unpack/printing code needs to be written.
--
--// Domain names are a sequence of counted strings
--// split at the dots. They end with a zero-length string.
--
--// PackDomainName packs a domain name s into msg[off:].
--// If compression is wanted compress must be true and the compression
--// map needs to hold a mapping between domain names and offsets
--// pointing into msg[].
--func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
--	off1, _, err = packDomainName(s, msg, off, compression, compress)
--	return
--}
--
--func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
--	// special case if msg == nil
--	lenmsg := 256
--	if msg != nil {
--		lenmsg = len(msg)
--	}
--	ls := len(s)
--	if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
--		return off, 0, nil
--	}
--	// If not fully qualified, error out, but only if msg != nil #ugly
--	switch {
--	case msg == nil:
--		if s[ls-1] != '.' {
--			s += "."
--			ls++
--		}
--	case msg != nil:
--		if s[ls-1] != '.' {
--			return lenmsg, 0, ErrFqdn
--		}
--	}
--	// Each dot ends a segment of the name.
--	// We trade each dot byte for a length byte.
--	// Except for escaped dots (\.), which are normal dots.
--	// There is also a trailing zero.
--
--	// Compression
--	nameoffset := -1
--	pointer := -1
--	// Emit sequence of counted strings, chopping at dots.
--	begin := 0
--	bs := []byte(s)
--	ro_bs, bs_fresh, escaped_dot := s, true, false
--	for i := 0; i < ls; i++ {
--		if bs[i] == '\\' {
--			for j := i; j < ls-1; j++ {
--				bs[j] = bs[j+1]
--			}
--			ls--
--			if off+1 > lenmsg {
--				return lenmsg, labels, ErrBuf
--			}
--			// check for \DDD
--			if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
--				bs[i] = dddToByte(bs[i:])
--				for j := i + 1; j < ls-2; j++ {
--					bs[j] = bs[j+2]
--				}
--				ls -= 2
--			} else if bs[i] == 't' {
--				bs[i] = '\t'
--			} else if bs[i] == 'r' {
--				bs[i] = '\r'
--			} else if bs[i] == 'n' {
--				bs[i] = '\n'
--			}
--			escaped_dot = bs[i] == '.'
--			bs_fresh = false
--			continue
--		}
--
--		if bs[i] == '.' {
--			if i > 0 && bs[i-1] == '.' && !escaped_dot {
--				// two dots back to back is not legal
--				return lenmsg, labels, ErrRdata
--			}
--			if i-begin >= 1<<6 { // top two bits of length must be clear
--				return lenmsg, labels, ErrRdata
--			}
--			// off can already (we're in a loop) be bigger than len(msg)
--			// this happens when a name isn't fully qualified
--			if off+1 > lenmsg {
--				return lenmsg, labels, ErrBuf
--			}
--			if msg != nil {
--				msg[off] = byte(i - begin)
--			}
--			offset := off
--			off++
--			for j := begin; j < i; j++ {
--				if off+1 > lenmsg {
--					return lenmsg, labels, ErrBuf
--				}
--				if msg != nil {
--					msg[off] = bs[j]
--				}
--				off++
--			}
--			if compress && !bs_fresh {
--				ro_bs = string(bs)
--				bs_fresh = true
--			}
--			// Don't try to compress '.'
--			if compress && ro_bs[begin:] != "." {
--				if p, ok := compression[ro_bs[begin:]]; !ok {
--					// Only offsets smaller than this can be used.
--					if offset < maxCompressionOffset {
--						compression[ro_bs[begin:]] = offset
--					}
--				} else {
--					// The first hit is the longest matching dname
--					// keep the pointer offset we get back and store
--					// the offset of the current name, because that's
--					// where we need to insert the pointer later
--
--					// If compress is true, we're allowed to compress this dname
--					if pointer == -1 && compress {
--						pointer = p         // Where to point to
--						nameoffset = offset // Where to point from
--						break
--					}
--				}
--			}
--			labels++
--			begin = i + 1
--		}
--		escaped_dot = false
--	}
--	// Root label is special
--	if len(bs) == 1 && bs[0] == '.' {
--		return off, labels, nil
--	}
--	// If we did compression and we find something add the pointer here
--	if pointer != -1 {
--		// We have two bytes (14 bits) to put the pointer in
--		// if msg == nil, we will never do compression
--		msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000))
--		off = nameoffset + 1
--		goto End
--	}
--	if msg != nil {
--		msg[off] = 0
--	}
--End:
--	off++
--	return off, labels, nil
--}
--
--// Unpack a domain name.
--// In addition to the simple sequences of counted strings above,
--// domain names are allowed to refer to strings elsewhere in the
--// packet, to avoid repeating common suffixes when returning
--// many entries in a single domain.  The pointers are marked
--// by a length byte with the top two bits set.  Ignoring those
--// two bits, that byte and the next give a 14 bit offset from msg[0]
--// where we should pick up the trail.
--// Note that if we jump elsewhere in the packet,
--// we return off1 == the offset after the first pointer we found,
--// which is where the next record will start.
--// In theory, the pointers are only allowed to jump backward.
--// We let them jump anywhere and stop jumping after a while.
--
--// UnpackDomainName unpacks a domain name into a string.
--func UnpackDomainName(msg []byte, off int) (string, int, error) {
--	s := make([]byte, 0, 64)
--	off1 := 0
--	lenmsg := len(msg)
--	ptr := 0 // number of pointers followed
--Loop:
--	for {
--		if off >= lenmsg {
--			return "", lenmsg, ErrBuf
--		}
--		c := int(msg[off])
--		off++
--		switch c & 0xC0 {
--		case 0x00:
--			if c == 0x00 {
--				// end of name
--				if len(s) == 0 {
--					return ".", off, nil
--				}
--				break Loop
--			}
--			// literal string
--			if off+c > lenmsg {
--				return "", lenmsg, ErrBuf
--			}
--			for j := off; j < off+c; j++ {
--				switch b := msg[j]; b {
--				case '.', '(', ')', ';', ' ', '@':
--					fallthrough
--				case '"', '\\':
--					s = append(s, '\\', b)
--				case '\t':
--					s = append(s, '\\', 't')
--				case '\r':
--					s = append(s, '\\', 'r')
--				default:
--					if b < 32 || b >= 127 { // unprintable use \DDD
--						var buf [3]byte
--						bufs := strconv.AppendInt(buf[:0], int64(b), 10)
--						s = append(s, '\\')
--						for i := 0; i < 3-len(bufs); i++ {
--							s = append(s, '0')
--						}
--						for _, r := range bufs {
--							s = append(s, r)
--						}
--					} else {
--						s = append(s, b)
--					}
--				}
--			}
--			s = append(s, '.')
--			off += c
--		case 0xC0:
--			// pointer to somewhere else in msg.
--			// remember location after first ptr,
--			// since that's how many bytes we consumed.
--			// also, don't follow too many pointers --
--			// maybe there's a loop.
--			if off >= lenmsg {
--				return "", lenmsg, ErrBuf
--			}
--			c1 := msg[off]
--			off++
--			if ptr == 0 {
--				off1 = off
--			}
--			if ptr++; ptr > 10 {
--				return "", lenmsg, &Error{err: "too many compression pointers"}
--			}
--			off = (c^0xC0)<<8 | int(c1)
--		default:
--			// 0x80 and 0x40 are reserved
--			return "", lenmsg, ErrRdata
--		}
--	}
--	if ptr == 0 {
--		off1 = off
--	}
--	return string(s), off1, nil
--}
--
--func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
--	var err error
--	if len(txt) == 0 {
--		if offset >= len(msg) {
--			return offset, ErrBuf
--		}
--		msg[offset] = 0
--		return offset, nil
--	}
--	for i := range txt {
--		if len(txt[i]) > len(tmp) {
--			return offset, ErrBuf
--		}
--		offset, err = packTxtString(txt[i], msg, offset, tmp)
--		if err != nil {
--			return offset, err
--		}
--	}
--	return offset, err
--}
--
--func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
--	lenByteOffset := offset
--	if offset >= len(msg) {
--		return offset, ErrBuf
--	}
--	offset++
--	bs := tmp[:len(s)]
--	copy(bs, s)
--	for i := 0; i < len(bs); i++ {
--		if len(msg) <= offset {
--			return offset, ErrBuf
--		}
--		if bs[i] == '\\' {
--			i++
--			if i == len(bs) {
--				break
--			}
--			// check for \DDD
--			if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
--				msg[offset] = dddToByte(bs[i:])
--				i += 2
--			} else if bs[i] == 't' {
--				msg[offset] = '\t'
--			} else if bs[i] == 'r' {
--				msg[offset] = '\r'
--			} else if bs[i] == 'n' {
--				msg[offset] = '\n'
--			} else {
--				msg[offset] = bs[i]
--			}
--		} else {
--			msg[offset] = bs[i]
--		}
--		offset++
--	}
--	l := offset - lenByteOffset - 1
--	if l > 255 {
--		return offset, &Error{err: "string exceeded 255 bytes in txt"}
--	}
--	msg[lenByteOffset] = byte(l)
--	return offset, nil
--}
--
--func unpackTxt(msg []byte, offset, rdend int) ([]string, int, error) {
--	var err error
--	var ss []string
--	var s string
--	for offset < rdend && err == nil {
--		s, offset, err = unpackTxtString(msg, offset)
--		if err == nil {
--			ss = append(ss, s)
--		}
--	}
--	return ss, offset, err
--}
--
--func unpackTxtString(msg []byte, offset int) (string, int, error) {
--	if offset+1 > len(msg) {
--		return "", offset, &Error{err: "overflow unpacking txt"}
--	}
--	l := int(msg[offset])
--	if offset+l+1 > len(msg) {
--		return "", offset, &Error{err: "overflow unpacking txt"}
--	}
--	s := make([]byte, 0, l)
--	for _, b := range msg[offset+1 : offset+1+l] {
--		switch b {
--		case '"', '\\':
--			s = append(s, '\\', b)
--		case '\t':
--			s = append(s, `\t`...)
--		case '\r':
--			s = append(s, `\r`...)
--		case '\n':
--			s = append(s, `\n`...)
--		default:
--			if b < 32 || b > 127 { // unprintable
--				var buf [3]byte
--				bufs := strconv.AppendInt(buf[:0], int64(b), 10)
--				s = append(s, '\\')
--				for i := 0; i < 3-len(bufs); i++ {
--					s = append(s, '0')
--				}
--				for _, r := range bufs {
--					s = append(s, r)
--				}
--			} else {
--				s = append(s, b)
--			}
--		}
--	}
--	offset += 1 + l
--	return string(s), offset, nil
--}
--
--// Pack a reflect.StructValue into msg.  Struct members can only be uint8, uint16, uint32, string,
--// slices and other (often anonymous) structs.
--func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
--	var txtTmp []byte
--	lenmsg := len(msg)
--	numfield := val.NumField()
--	for i := 0; i < numfield; i++ {
--		typefield := val.Type().Field(i)
--		if typefield.Tag == `dns:"-"` {
--			continue
--		}
--		switch fv := val.Field(i); fv.Kind() {
--		default:
--			return lenmsg, &Error{err: "bad kind packing"}
--		case reflect.Interface:
--			// PrivateRR is the only RR implementation that has an interface field;
--			// therefore it's expected that this interface would be PrivateRdata
--			switch data := fv.Interface().(type) {
--			case PrivateRdata:
--				n, err := data.Pack(msg[off:])
--				if err != nil {
--					return lenmsg, err
--				}
--				off += n
--			default:
--				return lenmsg, &Error{err: "bad kind interface packing"}
--			}
--		case reflect.Slice:
--			switch typefield.Tag {
--			default:
--				return lenmsg, &Error{"bad tag packing slice: " + typefield.Tag.Get("dns")}
--			case `dns:"domain-name"`:
--				for j := 0; j < val.Field(i).Len(); j++ {
--					element := val.Field(i).Index(j).String()
--					off, err = PackDomainName(element, msg, off, compression, false && compress)
--					if err != nil {
--						return lenmsg, err
--					}
--				}
--			case `dns:"txt"`:
--				if txtTmp == nil {
--					txtTmp = make([]byte, 256*4+1)
--				}
--				off, err = packTxt(fv.Interface().([]string), msg, off, txtTmp)
--				if err != nil {
--					return lenmsg, err
--				}
--			case `dns:"opt"`: // edns
--				for j := 0; j < val.Field(i).Len(); j++ {
--					element := val.Field(i).Index(j).Interface()
--					b, e := element.(EDNS0).pack()
--					if e != nil {
--						return lenmsg, &Error{err: "overflow packing opt"}
--					}
--					// Option code
--					msg[off], msg[off+1] = packUint16(element.(EDNS0).Option())
--					// Length
--					msg[off+2], msg[off+3] = packUint16(uint16(len(b)))
--					off += 4
--					if off+len(b) > lenmsg {
--						copy(msg[off:], b)
--						off = lenmsg
--						continue
--					}
--					// Actual data
--					copy(msg[off:off+len(b)], b)
--					off += len(b)
--				}
--			case `dns:"a"`:
--				// It must be a slice of 4, even if it is 16, we encode
--				// only the first 4
--				if off+net.IPv4len > lenmsg {
--					return lenmsg, &Error{err: "overflow packing a"}
--				}
--				switch fv.Len() {
--				case net.IPv6len:
--					msg[off] = byte(fv.Index(12).Uint())
--					msg[off+1] = byte(fv.Index(13).Uint())
--					msg[off+2] = byte(fv.Index(14).Uint())
--					msg[off+3] = byte(fv.Index(15).Uint())
--					off += net.IPv4len
--				case net.IPv4len:
--					msg[off] = byte(fv.Index(0).Uint())
--					msg[off+1] = byte(fv.Index(1).Uint())
--					msg[off+2] = byte(fv.Index(2).Uint())
--					msg[off+3] = byte(fv.Index(3).Uint())
--					off += net.IPv4len
--				case 0:
--					// Allowed, for dynamic updates
--				default:
--					return lenmsg, &Error{err: "overflow packing a"}
--				}
--			case `dns:"aaaa"`:
--				if fv.Len() == 0 {
--					break
--				}
--				if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg {
--					return lenmsg, &Error{err: "overflow packing aaaa"}
--				}
--				for j := 0; j < net.IPv6len; j++ {
--					msg[off] = byte(fv.Index(j).Uint())
--					off++
--				}
--			case `dns:"wks"`:
--				// TODO(miek): this is wrong should be lenrd
--				if off == lenmsg {
--					break // dyn. updates
--				}
--				if val.Field(i).Len() == 0 {
--					break
--				}
--				var bitmapbyte uint16
--				for j := 0; j < val.Field(i).Len(); j++ {
--					serv := uint16((fv.Index(j).Uint()))
--					bitmapbyte = uint16(serv / 8)
--					if int(bitmapbyte) > lenmsg {
--						return lenmsg, &Error{err: "overflow packing wks"}
--					}
--					bit := uint16(serv) - bitmapbyte*8
--					msg[bitmapbyte] = byte(1 << (7 - bit))
--				}
--				off += int(bitmapbyte)
--			case `dns:"nsec"`: // NSEC/NSEC3
--				// This is the uint16 type bitmap
--				if val.Field(i).Len() == 0 {
--					// Do absolutely nothing
--					break
--				}
--
--				lastwindow := uint16(0)
--				length := uint16(0)
--				if off+2 > lenmsg {
--					return lenmsg, &Error{err: "overflow packing nsecx"}
--				}
--				for j := 0; j < val.Field(i).Len(); j++ {
--					t := uint16((fv.Index(j).Uint()))
--					window := uint16(t / 256)
--					if lastwindow != window {
--						// New window, jump to the new offset
--						off += int(length) + 3
--						if off > lenmsg {
--							return lenmsg, &Error{err: "overflow packing nsecx bitmap"}
--						}
--					}
--					length = (t - window*256) / 8
--					bit := t - (window * 256) - (length * 8)
--					if off+2+int(length) > lenmsg {
--						return lenmsg, &Error{err: "overflow packing nsecx bitmap"}
--					}
--
--					// Setting the window #
--					msg[off] = byte(window)
--					// Setting the octets length
--					msg[off+1] = byte(length + 1)
--					// Setting the bit value for the type in the right octet
--					msg[off+2+int(length)] |= byte(1 << (7 - bit))
--					lastwindow = window
--				}
--				off += 2 + int(length)
--				off++
--				if off > lenmsg {
--					return lenmsg, &Error{err: "overflow packing nsecx bitmap"}
--				}
--			}
--		case reflect.Struct:
--			off, err = packStructValue(fv, msg, off, compression, compress)
--			if err != nil {
--				return lenmsg, err
--			}
--		case reflect.Uint8:
--			if off+1 > lenmsg {
--				return lenmsg, &Error{err: "overflow packing uint8"}
--			}
--			msg[off] = byte(fv.Uint())
--			off++
--		case reflect.Uint16:
--			if off+2 > lenmsg {
--				return lenmsg, &Error{err: "overflow packing uint16"}
--			}
--			i := fv.Uint()
--			msg[off] = byte(i >> 8)
--			msg[off+1] = byte(i)
--			off += 2
--		case reflect.Uint32:
--			if off+4 > lenmsg {
--				return lenmsg, &Error{err: "overflow packing uint32"}
--			}
--			i := fv.Uint()
--			msg[off] = byte(i >> 24)
--			msg[off+1] = byte(i >> 16)
--			msg[off+2] = byte(i >> 8)
--			msg[off+3] = byte(i)
--			off += 4
--		case reflect.Uint64:
--			switch typefield.Tag {
--			default:
--				if off+8 > lenmsg {
--					return lenmsg, &Error{err: "overflow packing uint64"}
--				}
--				i := fv.Uint()
--				msg[off] = byte(i >> 56)
--				msg[off+1] = byte(i >> 48)
--				msg[off+2] = byte(i >> 40)
--				msg[off+3] = byte(i >> 32)
--				msg[off+4] = byte(i >> 24)
--				msg[off+5] = byte(i >> 16)
--				msg[off+6] = byte(i >> 8)
--				msg[off+7] = byte(i)
--				off += 8
--			case `dns:"uint48"`:
--				// Used in TSIG, where it stops at 48 bits, so we discard the upper 16
--				if off+6 > lenmsg {
--					return lenmsg, &Error{err: "overflow packing uint64 as uint48"}
--				}
--				i := fv.Uint()
--				msg[off] = byte(i >> 40)
--				msg[off+1] = byte(i >> 32)
--				msg[off+2] = byte(i >> 24)
--				msg[off+3] = byte(i >> 16)
--				msg[off+4] = byte(i >> 8)
--				msg[off+5] = byte(i)
--				off += 6
--			}
--		case reflect.String:
--			// There are multiple string encodings.
--			// The tag distinguishes ordinary strings from domain names.
--			s := fv.String()
--			switch typefield.Tag {
--			default:
--				return lenmsg, &Error{"bad tag packing string: " + typefield.Tag.Get("dns")}
--			case `dns:"base64"`:
--				b64, e := fromBase64([]byte(s))
--				if e != nil {
--					return lenmsg, e
--				}
--				copy(msg[off:off+len(b64)], b64)
--				off += len(b64)
--			case `dns:"domain-name"`:
--				if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil {
--					return lenmsg, err
--				}
--			case `dns:"cdomain-name"`:
--				if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil {
--					return lenmsg, err
--				}
--			case `dns:"size-base32"`:
--				// This is purely for NSEC3 atm, the previous byte must
--				// holds the length of the encoded string. As NSEC3
--				// hold the length of the encoded string. As NSEC3
--				msg[off-1] = 20
--				fallthrough
--			case `dns:"base32"`:
--				b32, e := fromBase32([]byte(s))
--				if e != nil {
--					return lenmsg, e
--				}
--				copy(msg[off:off+len(b32)], b32)
--				off += len(b32)
--			case `dns:"size-hex"`:
--				fallthrough
--			case `dns:"hex"`:
--				// There is no length encoded here
--				h, e := hex.DecodeString(s)
--				if e != nil {
--					return lenmsg, e
--				}
--				if off+hex.DecodedLen(len(s)) > lenmsg {
--					return lenmsg, &Error{err: "overflow packing hex"}
--				}
--				copy(msg[off:off+hex.DecodedLen(len(s))], h)
--				off += hex.DecodedLen(len(s))
--			case `dns:"size"`:
--				// the size is already encoded in the RR, we can safely use the
--				// length of string. String is RAW (not encoded in hex, nor base64)
--				copy(msg[off:off+len(s)], s)
--				off += len(s)
--			case `dns:"txt"`:
--				fallthrough
--			case "":
--				if txtTmp == nil {
--					txtTmp = make([]byte, 256*4+1)
--				}
--				off, err = packTxtString(fv.String(), msg, off, txtTmp)
--				if err != nil {
--					return lenmsg, err
--				}
--			}
--		}
--	}
--	return off, nil
--}
--
--func structValue(any interface{}) reflect.Value {
--	return reflect.ValueOf(any).Elem()
--}
--
--// PackStruct packs any structure to wire format.
--func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
--	off, err = packStructValue(structValue(any), msg, off, nil, false)
--	return off, err
--}
--
--func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
--	off, err = packStructValue(structValue(any), msg, off, compression, compress)
--	return off, err
--}
--
--// TODO(miek): Fix use of rdlength here
--
--// Unpack a reflect.StructValue from msg.
--// Same restrictions as packStructValue.
--func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) {
--	var lenrd int
--	lenmsg := len(msg)
--	for i := 0; i < val.NumField(); i++ {
--		if lenrd != 0 && lenrd == off {
--			break
--		}
--		if off > lenmsg {
--			return lenmsg, &Error{"bad offset unpacking"}
--		}
--		switch fv := val.Field(i); fv.Kind() {
--		default:
--			return lenmsg, &Error{err: "bad kind unpacking"}
--		case reflect.Interface:
--			// PrivateRR is the only RR implementation that has an interface field;
--			// therefore it's expected that this interface would be PrivateRdata
--			switch data := fv.Interface().(type) {
--			case PrivateRdata:
--				n, err := data.Unpack(msg[off:lenrd])
--				if err != nil {
--					return lenmsg, err
--				}
--				off += n
--			default:
--				return lenmsg, &Error{err: "bad kind interface unpacking"}
--			}
--		case reflect.Slice:
--			switch val.Type().Field(i).Tag {
--			default:
--				return lenmsg, &Error{"bad tag unpacking slice: " + val.Type().Field(i).Tag.Get("dns")}
--			case `dns:"domain-name"`:
--				// HIP record slice of name (or none)
--				servers := make([]string, 0)
--				var s string
--				for off < lenrd {
--					s, off, err = UnpackDomainName(msg, off)
--					if err != nil {
--						return lenmsg, err
--					}
--					servers = append(servers, s)
--				}
--				fv.Set(reflect.ValueOf(servers))
--			case `dns:"txt"`:
--				if off == lenmsg || lenrd == off {
--					break
--				}
--				var txt []string
--				txt, off, err = unpackTxt(msg, off, lenrd)
--				if err != nil {
--					return lenmsg, err
--				}
--				fv.Set(reflect.ValueOf(txt))
--			case `dns:"opt"`: // edns0
--				if off == lenrd {
--					// This is an EDNS0 (OPT Record) with no rdata
--					// We can safely return here.
--					break
--				}
--				edns := make([]EDNS0, 0)
--			Option:
--				code := uint16(0)
--				if off+2 > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking opt"}
--				}
--				code, off = unpackUint16(msg, off)
--				optlen, off1 := unpackUint16(msg, off)
--				if off1+int(optlen) > lenrd {
--					return lenmsg, &Error{err: "overflow unpacking opt"}
--				}
--				switch code {
--				case EDNS0NSID:
--					e := new(EDNS0_NSID)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				case EDNS0SUBNET, EDNS0SUBNETDRAFT:
--					e := new(EDNS0_SUBNET)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--					if code == EDNS0SUBNETDRAFT {
--						e.DraftOption = true
--					}
--				case EDNS0UL:
--					e := new(EDNS0_UL)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				case EDNS0LLQ:
--					e := new(EDNS0_LLQ)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				case EDNS0DAU:
--					e := new(EDNS0_DAU)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				case EDNS0DHU:
--					e := new(EDNS0_DHU)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				case EDNS0N3U:
--					e := new(EDNS0_N3U)
--					if err := e.unpack(msg[off1 : off1+int(optlen)]); err != nil {
--						return lenmsg, err
--					}
--					edns = append(edns, e)
--					off = off1 + int(optlen)
--				default:
--					// do nothing?
--					off = off1 + int(optlen)
--				}
--				if off < lenrd {
--					goto Option
--				}
--				fv.Set(reflect.ValueOf(edns))
--			case `dns:"a"`:
--				if off == lenrd {
--					break // dyn. update
--				}
--				if off+net.IPv4len > lenrd || off+net.IPv4len > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking a"}
--				}
--				fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3])))
--				off += net.IPv4len
--			case `dns:"aaaa"`:
--				if off == lenrd {
--					break
--				}
--				if off+net.IPv6len > lenrd || off+net.IPv6len > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking aaaa"}
--				}
--				fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4],
--					msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10],
--					msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]}))
--				off += net.IPv6len
--			case `dns:"wks"`:
--				// Rest of the record is the bitmap
--				serv := make([]uint16, 0)
--				j := 0
--				for off < lenrd {
--					if off+1 > lenmsg {
--						return lenmsg, &Error{err: "overflow unpacking wks"}
--					}
--					b := msg[off]
--					// Check the bits one by one, and set the type
--					if b&0x80 == 0x80 {
--						serv = append(serv, uint16(j*8+0))
--					}
--					if b&0x40 == 0x40 {
--						serv = append(serv, uint16(j*8+1))
--					}
--					if b&0x20 == 0x20 {
--						serv = append(serv, uint16(j*8+2))
--					}
--					if b&0x10 == 0x10 {
--						serv = append(serv, uint16(j*8+3))
--					}
--					if b&0x8 == 0x8 {
--						serv = append(serv, uint16(j*8+4))
--					}
--					if b&0x4 == 0x4 {
--						serv = append(serv, uint16(j*8+5))
--					}
--					if b&0x2 == 0x2 {
--						serv = append(serv, uint16(j*8+6))
--					}
--					if b&0x1 == 0x1 {
--						serv = append(serv, uint16(j*8+7))
--					}
--					j++
--					off++
--				}
--				fv.Set(reflect.ValueOf(serv))
--			case `dns:"nsec"`: // NSEC/NSEC3
--				if off == lenrd {
--					break
--				}
--				// Rest of the record is the type bitmap
--				if off+2 > lenrd || off+2 > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking nsecx"}
--				}
--				nsec := make([]uint16, 0)
--				length := 0
--				window := 0
--				for off+2 < lenrd {
--					window = int(msg[off])
--					length = int(msg[off+1])
--					//println("off, windows, length, end", off, window, length, endrr)
--					if length == 0 {
--						// A window length of zero is strange: if there are no types
--						// in this window, the window should not have been specified. Bail out
--						// println("dns: length == 0 when unpacking NSEC")
--						return lenmsg, &Error{err: "overflow unpacking nsecx"}
--					}
--					if length > 32 {
--						return lenmsg, &Error{err: "overflow unpacking nsecx"}
--					}
--
--					// Walk the bytes in the window - and check the bit settings...
--					off += 2
--					for j := 0; j < length; j++ {
--						if off+j+1 > lenmsg {
--							return lenmsg, &Error{err: "overflow unpacking nsecx"}
--						}
--						b := msg[off+j]
--						// Check the bits one by one, and set the type
--						if b&0x80 == 0x80 {
--							nsec = append(nsec, uint16(window*256+j*8+0))
--						}
--						if b&0x40 == 0x40 {
--							nsec = append(nsec, uint16(window*256+j*8+1))
--						}
--						if b&0x20 == 0x20 {
--							nsec = append(nsec, uint16(window*256+j*8+2))
--						}
--						if b&0x10 == 0x10 {
--							nsec = append(nsec, uint16(window*256+j*8+3))
--						}
--						if b&0x8 == 0x8 {
--							nsec = append(nsec, uint16(window*256+j*8+4))
--						}
--						if b&0x4 == 0x4 {
--							nsec = append(nsec, uint16(window*256+j*8+5))
--						}
--						if b&0x2 == 0x2 {
--							nsec = append(nsec, uint16(window*256+j*8+6))
--						}
--						if b&0x1 == 0x1 {
--							nsec = append(nsec, uint16(window*256+j*8+7))
--						}
--					}
--					off += length
--				}
--				fv.Set(reflect.ValueOf(nsec))
--			}
--		case reflect.Struct:
--			off, err = unpackStructValue(fv, msg, off)
--			if err != nil {
--				return lenmsg, err
--			}
--			if val.Type().Field(i).Name == "Hdr" {
--				lenrd = off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
--			}
--		case reflect.Uint8:
--			if off == lenmsg {
--				break
--			}
--			if off+1 > lenmsg {
--				return lenmsg, &Error{err: "overflow unpacking uint8"}
--			}
--			fv.SetUint(uint64(uint8(msg[off])))
--			off++
--		case reflect.Uint16:
--			if off == lenmsg {
--				break
--			}
--			var i uint16
--			if off+2 > lenmsg {
--				return lenmsg, &Error{err: "overflow unpacking uint16"}
--			}
--			i, off = unpackUint16(msg, off)
--			fv.SetUint(uint64(i))
--		case reflect.Uint32:
--			if off == lenmsg {
--				break
--			}
--			if off+4 > lenmsg {
--				return lenmsg, &Error{err: "overflow unpacking uint32"}
--			}
--			fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])))
--			off += 4
--		case reflect.Uint64:
--			switch val.Type().Field(i).Tag {
--			default:
--				if off+8 > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking uint64"}
--				}
--				fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 |
--					uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7])))
--				off += 8
--			case `dns:"uint48"`:
--				// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
--				if off+6 > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking uint64 as uint48"}
--				}
--				fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
--					uint64(msg[off+4])<<8 | uint64(msg[off+5])))
--				off += 6
--			}
--		case reflect.String:
--			var s string
--			if off == lenmsg {
--				break
--			}
--			switch val.Type().Field(i).Tag {
--			default:
--				return lenmsg, &Error{"bad tag unpacking string: " + val.Type().Field(i).Tag.Get("dns")}
--			case `dns:"hex"`:
--				hexend := lenrd
--				if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
--					hexend = off + int(val.FieldByName("HitLength").Uint())
--				}
--				if hexend > lenrd || hexend > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking hex"}
--				}
--				s = hex.EncodeToString(msg[off:hexend])
--				off = hexend
--			case `dns:"base64"`:
--				// Rest of the RR is base64 encoded value
--				b64end := lenrd
--				if val.FieldByName("Hdr").FieldByName("Rrtype").Uint() == uint64(TypeHIP) {
--					b64end = off + int(val.FieldByName("PublicKeyLength").Uint())
--				}
--				if b64end > lenrd || b64end > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking base64"}
--				}
--				s = toBase64(msg[off:b64end])
--				off = b64end
--			case `dns:"cdomain-name"`:
--				fallthrough
--			case `dns:"domain-name"`:
--				if off == lenmsg {
--					// zero rdata foo, OK for dyn. updates
--					break
--				}
--				s, off, err = UnpackDomainName(msg, off)
--				if err != nil {
--					return lenmsg, err
--				}
--			case `dns:"size-base32"`:
--				var size int
--				switch val.Type().Name() {
--				case "NSEC3":
--					switch val.Type().Field(i).Name {
--					case "NextDomain":
--						name := val.FieldByName("HashLength")
--						size = int(name.Uint())
--					}
--				}
--				if off+size > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking base32"}
--				}
--				s = toBase32(msg[off : off+size])
--				off += size
--			case `dns:"size-hex"`:
--				// a "size" string, but it must be encoded in hex in the string
--				var size int
--				switch val.Type().Name() {
--				case "NSEC3":
--					switch val.Type().Field(i).Name {
--					case "Salt":
--						name := val.FieldByName("SaltLength")
--						size = int(name.Uint())
--					case "NextDomain":
--						name := val.FieldByName("HashLength")
--						size = int(name.Uint())
--					}
--				case "TSIG":
--					switch val.Type().Field(i).Name {
--					case "MAC":
--						name := val.FieldByName("MACSize")
--						size = int(name.Uint())
--					case "OtherData":
--						name := val.FieldByName("OtherLen")
--						size = int(name.Uint())
--					}
--				}
--				if off+size > lenmsg {
--					return lenmsg, &Error{err: "overflow unpacking hex"}
--				}
--				s = hex.EncodeToString(msg[off : off+size])
--				off += size
--			case `dns:"txt"`:
--				fallthrough
--			case "":
--				s, off, err = unpackTxtString(msg, off)
--			}
--			fv.SetString(s)
--		}
--	}
--	return off, nil
--}
--
--// Helpers for dealing with escaped bytes
--func isDigit(b byte) bool { return b >= '0' && b <= '9' }
--
--func dddToByte(s []byte) byte {
--	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
--}
--
--// UnpackStruct unpacks a binary message from offset off to the interface
--// value given.
--func UnpackStruct(any interface{}, msg []byte, off int) (int, error) {
--	return unpackStructValue(structValue(any), msg, off)
--}
--
--// Helper function for packing and unpacking
--func intToBytes(i *big.Int, length int) []byte {
--	buf := i.Bytes()
--	if len(buf) < length {
--		b := make([]byte, length)
--		copy(b[length-len(buf):], buf)
--		return b
--	}
--	return buf
--}
--
--func unpackUint16(msg []byte, off int) (uint16, int) {
--	return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2
--}
--
--func packUint16(i uint16) (byte, byte) {
--	return byte(i >> 8), byte(i)
--}
--
--func toBase32(b []byte) string {
--	return base32.HexEncoding.EncodeToString(b)
--}
--
--func fromBase32(s []byte) (buf []byte, err error) {
--	buflen := base32.HexEncoding.DecodedLen(len(s))
--	buf = make([]byte, buflen)
--	n, err := base32.HexEncoding.Decode(buf, s)
--	buf = buf[:n]
--	return
--}
--
--func toBase64(b []byte) string {
--	return base64.StdEncoding.EncodeToString(b)
--}
--
--func fromBase64(s []byte) (buf []byte, err error) {
--	buflen := base64.StdEncoding.DecodedLen(len(s))
--	buf = make([]byte, buflen)
--	n, err := base64.StdEncoding.Decode(buf, s)
--	buf = buf[:n]
--	return
--}
--
--// PackRR packs a resource record rr into msg[off:].
--// See PackDomainName for documentation about the compression.
--func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
--	if rr == nil {
--		return len(msg), &Error{err: "nil rr"}
--	}
--
--	off1, err = packStructCompress(rr, msg, off, compression, compress)
--	if err != nil {
--		return len(msg), err
--	}
--	if rawSetRdlength(msg, off, off1) {
--		return off1, nil
--	}
--	return off, ErrRdata
--}
--
--// UnpackRR unpacks msg[off:] into an RR.
--func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
--	// unpack just the header, to find the rr type and length
--	var h RR_Header
--	off0 := off
--	if off, err = UnpackStruct(&h, msg, off); err != nil {
--		return nil, len(msg), err
--	}
--	end := off + int(h.Rdlength)
--	// make an rr of that type and re-unpack.
--	mk, known := typeToRR[h.Rrtype]
--	if !known {
--		rr = new(RFC3597)
--	} else {
--		rr = mk()
--	}
--	off, err = UnpackStruct(rr, msg, off0)
--	if off != end {
--		return &h, end, &Error{err: "bad rdlength"}
--	}
--	return rr, off, err
--}
--
--// Reverse a map
--func reverseInt8(m map[uint8]string) map[string]uint8 {
--	n := make(map[string]uint8)
--	for u, s := range m {
--		n[s] = u
--	}
--	return n
--}
--
--func reverseInt16(m map[uint16]string) map[string]uint16 {
--	n := make(map[string]uint16)
--	for u, s := range m {
--		n[s] = u
--	}
--	return n
--}
--
--func reverseInt(m map[int]string) map[string]int {
--	n := make(map[string]int)
--	for u, s := range m {
--		n[s] = u
--	}
--	return n
--}
--
--// Convert a MsgHdr to a string, with dig-like headers:
--//
--//;; opcode: QUERY, status: NOERROR, id: 48404
--//
--//;; flags: qr aa rd ra;
--func (h *MsgHdr) String() string {
--	if h == nil {
--		return "<nil> MsgHdr"
--	}
--
--	s := ";; opcode: " + OpcodeToString[h.Opcode]
--	s += ", status: " + RcodeToString[h.Rcode]
--	s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
--
--	s += ";; flags:"
--	if h.Response {
--		s += " qr"
--	}
--	if h.Authoritative {
--		s += " aa"
--	}
--	if h.Truncated {
--		s += " tc"
--	}
--	if h.RecursionDesired {
--		s += " rd"
--	}
--	if h.RecursionAvailable {
--		s += " ra"
--	}
--	if h.Zero { // Hmm
--		s += " z"
--	}
--	if h.AuthenticatedData {
--		s += " ad"
--	}
--	if h.CheckingDisabled {
--		s += " cd"
--	}
--
--	s += ";"
--	return s
--}
--
--// Pack packs a Msg: it is converted to wire format.
--// If dns.Compress is true, the message will be in compressed wire format.
--func (dns *Msg) Pack() (msg []byte, err error) {
--	return dns.PackBuffer(nil)
--}
--
--// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
--// a new buffer is allocated.
--func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
--	var dh Header
--	var compression map[string]int
--	if dns.Compress {
--		compression = make(map[string]int) // Compression pointer mappings
--	}
--
--	if dns.Rcode < 0 || dns.Rcode > 0xFFF {
--		return nil, ErrRcode
--	}
--	if dns.Rcode > 0xF {
--		// Regular RCODE field is 4 bits
--		opt := dns.IsEdns0()
--		if opt == nil {
--			return nil, ErrExtendedRcode
--		}
--		opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
--		dns.Rcode &= 0xF
--	}
--
--	// Convert convenient Msg into wire-like Header.
--	dh.Id = dns.Id
--	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
--	if dns.Response {
--		dh.Bits |= _QR
--	}
--	if dns.Authoritative {
--		dh.Bits |= _AA
--	}
--	if dns.Truncated {
--		dh.Bits |= _TC
--	}
--	if dns.RecursionDesired {
--		dh.Bits |= _RD
--	}
--	if dns.RecursionAvailable {
--		dh.Bits |= _RA
--	}
--	if dns.Zero {
--		dh.Bits |= _Z
--	}
--	if dns.AuthenticatedData {
--		dh.Bits |= _AD
--	}
--	if dns.CheckingDisabled {
--		dh.Bits |= _CD
--	}
--
--	// Prepare variable sized arrays.
--	question := dns.Question
--	answer := dns.Answer
--	ns := dns.Ns
--	extra := dns.Extra
--
--	dh.Qdcount = uint16(len(question))
--	dh.Ancount = uint16(len(answer))
--	dh.Nscount = uint16(len(ns))
--	dh.Arcount = uint16(len(extra))
--
--	// We need the uncompressed length here, because we first pack it and then compress it.
--	msg = buf
--	compress := dns.Compress
--	dns.Compress = false
--	if packLen := dns.Len() + 1; len(msg) < packLen {
--		msg = make([]byte, packLen)
--	}
--	dns.Compress = compress
--
--	// Pack it in: header and then the pieces.
--	off := 0
--	off, err = packStructCompress(&dh, msg, off, compression, dns.Compress)
--	if err != nil {
--		return nil, err
--	}
--	for i := 0; i < len(question); i++ {
--		off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress)
--		if err != nil {
--			return nil, err
--		}
--	}
--	for i := 0; i < len(answer); i++ {
--		off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
--		if err != nil {
--			return nil, err
--		}
--	}
--	for i := 0; i < len(ns); i++ {
--		off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
--		if err != nil {
--			return nil, err
--		}
--	}
--	for i := 0; i < len(extra); i++ {
--		off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
--		if err != nil {
--			return nil, err
--		}
--	}
--	return msg[:off], nil
--}
--
--// Unpack unpacks a binary message to a Msg structure.
--func (dns *Msg) Unpack(msg []byte) (err error) {
--	// Header.
--	var dh Header
--	off := 0
--	if off, err = UnpackStruct(&dh, msg, off); err != nil {
--		return err
--	}
--	dns.Id = dh.Id
--	dns.Response = (dh.Bits & _QR) != 0
--	dns.Opcode = int(dh.Bits>>11) & 0xF
--	dns.Authoritative = (dh.Bits & _AA) != 0
--	dns.Truncated = (dh.Bits & _TC) != 0
--	dns.RecursionDesired = (dh.Bits & _RD) != 0
--	dns.RecursionAvailable = (dh.Bits & _RA) != 0
--	dns.Zero = (dh.Bits & _Z) != 0
--	dns.AuthenticatedData = (dh.Bits & _AD) != 0
--	dns.CheckingDisabled = (dh.Bits & _CD) != 0
--	dns.Rcode = int(dh.Bits & 0xF)
--
--	// Arrays.
--	dns.Question = make([]Question, dh.Qdcount)
--	dns.Answer = make([]RR, dh.Ancount)
--	dns.Ns = make([]RR, dh.Nscount)
--	dns.Extra = make([]RR, dh.Arcount)
--
--	for i := 0; i < len(dns.Question); i++ {
--		off, err = UnpackStruct(&dns.Question[i], msg, off)
--		if err != nil {
--			return err
--		}
--	}
--	// If we see a TC bit being set we return here, without
--	// an error, because technically it isn't an error. So return
--	// without parsing the potentially corrupt packet and hitting an error.
--	// TODO(miek): this isn't the best strategy!
--	if dns.Truncated {
--		dns.Answer = nil
--		dns.Ns = nil
--		dns.Extra = nil
--		return nil
--	}
--	for i := 0; i < len(dns.Answer); i++ {
--		dns.Answer[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return err
--		}
--	}
--	for i := 0; i < len(dns.Ns); i++ {
--		dns.Ns[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return err
--		}
--	}
--	for i := 0; i < len(dns.Extra); i++ {
--		dns.Extra[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return err
--		}
--	}
--	if off != len(msg) {
--		// TODO(miek) make this an error?
--		// use PackOpt to let people tell how detailed the error reporting should be?
--		// println("dns: extra bytes in dns packet", off, "<", len(msg))
--	}
--	return nil
--}
--
--// Convert a complete message to a string with dig-like output.
--func (dns *Msg) String() string {
--	if dns == nil {
--		return "<nil> MsgHdr"
--	}
--	s := dns.MsgHdr.String() + " "
--	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
--	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
--	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
--	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
--	if len(dns.Question) > 0 {
--		s += "\n;; QUESTION SECTION:\n"
--		for i := 0; i < len(dns.Question); i++ {
--			s += dns.Question[i].String() + "\n"
--		}
--	}
--	if len(dns.Answer) > 0 {
--		s += "\n;; ANSWER SECTION:\n"
--		for i := 0; i < len(dns.Answer); i++ {
--			if dns.Answer[i] != nil {
--				s += dns.Answer[i].String() + "\n"
--			}
--		}
--	}
--	if len(dns.Ns) > 0 {
--		s += "\n;; AUTHORITY SECTION:\n"
--		for i := 0; i < len(dns.Ns); i++ {
--			if dns.Ns[i] != nil {
--				s += dns.Ns[i].String() + "\n"
--			}
--		}
--	}
--	if len(dns.Extra) > 0 {
--		s += "\n;; ADDITIONAL SECTION:\n"
--		for i := 0; i < len(dns.Extra); i++ {
--			if dns.Extra[i] != nil {
--				s += dns.Extra[i].String() + "\n"
--			}
--		}
--	}
--	return s
--}
--
--// Len returns the message length when in (un)compressed wire format.
--// If dns.Compress is true, compression is taken into account. Len()
--// is provided as a faster way to get the size of the resulting packet
--// than packing it, measuring the size and discarding the buffer.
--func (dns *Msg) Len() int {
--	// We always return one more than needed.
--	l := 12 // Message header is always 12 bytes
--	var compression map[string]int
--	if dns.Compress {
--		compression = make(map[string]int)
--	}
--	for i := 0; i < len(dns.Question); i++ {
--		l += dns.Question[i].len()
--		if dns.Compress {
--			compressionLenHelper(compression, dns.Question[i].Name)
--		}
--	}
--	for i := 0; i < len(dns.Answer); i++ {
--		l += dns.Answer[i].len()
--		if dns.Compress {
--			k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelper(compression, dns.Answer[i].Header().Name)
--			k, ok = compressionLenSearchType(compression, dns.Answer[i])
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelperType(compression, dns.Answer[i])
--		}
--	}
--	for i := 0; i < len(dns.Ns); i++ {
--		l += dns.Ns[i].len()
--		if dns.Compress {
--			k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelper(compression, dns.Ns[i].Header().Name)
--			k, ok = compressionLenSearchType(compression, dns.Ns[i])
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelperType(compression, dns.Ns[i])
--		}
--	}
--	for i := 0; i < len(dns.Extra); i++ {
--		l += dns.Extra[i].len()
--		if dns.Compress {
--			k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelper(compression, dns.Extra[i].Header().Name)
--			k, ok = compressionLenSearchType(compression, dns.Extra[i])
--			if ok {
--				l += 1 - k
--			}
--			compressionLenHelperType(compression, dns.Extra[i])
--		}
--	}
--	return l
--}
--
--// Put the parts of the name in the compression map.
--func compressionLenHelper(c map[string]int, s string) {
--	pref := ""
--	lbs := Split(s)
--	for j := len(lbs) - 1; j >= 0; j-- {
--		pref = s[lbs[j]:]
--		if _, ok := c[pref]; !ok {
--			c[pref] = len(pref)
--		}
--	}
--}
--
--// Look for each part in the compression map and return its length;
--// keep searching so we get the longest match.
--func compressionLenSearch(c map[string]int, s string) (int, bool) {
--	off := 0
--	end := false
--	if s == "" { // don't bork on bogus data
--		return 0, false
--	}
--	for {
--		if _, ok := c[s[off:]]; ok {
--			return len(s[off:]), true
--		}
--		if end {
--			break
--		}
--		off, end = NextLabel(s, off)
--	}
--	return 0, false
--}
--
--// TODO(miek): should add all types, because they all can be *used* for compression.
--func compressionLenHelperType(c map[string]int, r RR) {
--	switch x := r.(type) {
--	case *NS:
--		compressionLenHelper(c, x.Ns)
--	case *MX:
--		compressionLenHelper(c, x.Mx)
--	case *CNAME:
--		compressionLenHelper(c, x.Target)
--	case *PTR:
--		compressionLenHelper(c, x.Ptr)
--	case *SOA:
--		compressionLenHelper(c, x.Ns)
--		compressionLenHelper(c, x.Mbox)
--	case *MB:
--		compressionLenHelper(c, x.Mb)
--	case *MG:
--		compressionLenHelper(c, x.Mg)
--	case *MR:
--		compressionLenHelper(c, x.Mr)
--	case *MF:
--		compressionLenHelper(c, x.Mf)
--	case *MD:
--		compressionLenHelper(c, x.Md)
--	case *RT:
--		compressionLenHelper(c, x.Host)
--	case *MINFO:
--		compressionLenHelper(c, x.Rmail)
--		compressionLenHelper(c, x.Email)
--	case *AFSDB:
--		compressionLenHelper(c, x.Hostname)
--	}
--}
--
--// Only search on compressing these types.
--func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
--	switch x := r.(type) {
--	case *NS:
--		return compressionLenSearch(c, x.Ns)
--	case *MX:
--		return compressionLenSearch(c, x.Mx)
--	case *CNAME:
--		return compressionLenSearch(c, x.Target)
--	case *PTR:
--		return compressionLenSearch(c, x.Ptr)
--	case *SOA:
--		k, ok := compressionLenSearch(c, x.Ns)
--		k1, ok1 := compressionLenSearch(c, x.Mbox)
--		if !ok && !ok1 {
--			return 0, false
--		}
--		return k + k1, true
--	case *MB:
--		return compressionLenSearch(c, x.Mb)
--	case *MG:
--		return compressionLenSearch(c, x.Mg)
--	case *MR:
--		return compressionLenSearch(c, x.Mr)
--	case *MF:
--		return compressionLenSearch(c, x.Mf)
--	case *MD:
--		return compressionLenSearch(c, x.Md)
--	case *RT:
--		return compressionLenSearch(c, x.Host)
--	case *MINFO:
--		k, ok := compressionLenSearch(c, x.Rmail)
--		k1, ok1 := compressionLenSearch(c, x.Email)
--		if !ok && !ok1 {
--			return 0, false
--		}
--		return k + k1, true
--	case *AFSDB:
--		return compressionLenSearch(c, x.Hostname)
--	}
--	return 0, false
--}
--
--// id returns a 16-bit random number to be used as a
--// message id. The randomness provided should be good enough.
--func id() uint16 {
--	return uint16(rand.Int()) ^ uint16(time.Now().Nanosecond())
--}
--
--// Copy returns a new RR which is a deep-copy of r.
--func Copy(r RR) RR {
--	r1 := r.copy()
--	return r1
--}
--
--// Copy returns a new *Msg which is a deep-copy of dns.
--func (dns *Msg) Copy() *Msg {
--	r1 := new(Msg)
--	r1.MsgHdr = dns.MsgHdr
--	r1.Compress = dns.Compress
--
--	if len(dns.Question) > 0 {
--		r1.Question = make([]Question, len(dns.Question))
--		copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
--	}
--
--	if len(dns.Answer) > 0 {
--		r1.Answer = make([]RR, len(dns.Answer))
--		for i := 0; i < len(dns.Answer); i++ {
--			r1.Answer[i] = dns.Answer[i].copy()
--		}
--	}
--
--	if len(dns.Ns) > 0 {
--		r1.Ns = make([]RR, len(dns.Ns))
--		for i := 0; i < len(dns.Ns); i++ {
--			r1.Ns[i] = dns.Ns[i].copy()
--		}
--	}
--
--	if len(dns.Extra) > 0 {
--		r1.Extra = make([]RR, len(dns.Extra))
--		for i := 0; i < len(dns.Extra); i++ {
--			r1.Extra[i] = dns.Extra[i].copy()
--		}
--	}
--
--	return r1
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go b/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go
-deleted file mode 100644
-index ac48da0..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/nsecx.go
-+++ /dev/null
-@@ -1,110 +0,0 @@
--package dns
--
--import (
--	"crypto/sha1"
--	"hash"
--	"io"
--	"strings"
--)
--
--type saltWireFmt struct {
--	Salt string `dns:"size-hex"`
--}
--
--// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in
--// uppercase.
--func HashName(label string, ha uint8, iter uint16, salt string) string {
--	saltwire := new(saltWireFmt)
--	saltwire.Salt = salt
--	wire := make([]byte, DefaultMsgSize)
--	n, err := PackStruct(saltwire, wire, 0)
--	if err != nil {
--		return ""
--	}
--	wire = wire[:n]
--	name := make([]byte, 255)
--	off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
--	if err != nil {
--		return ""
--	}
--	name = name[:off]
--	var s hash.Hash
--	switch ha {
--	case SHA1:
--		s = sha1.New()
--	default:
--		return ""
--	}
--
--	// k = 0
--	name = append(name, wire...)
--	io.WriteString(s, string(name))
--	nsec3 := s.Sum(nil)
--	// k > 0
--	for k := uint16(0); k < iter; k++ {
--		s.Reset()
--		nsec3 = append(nsec3, wire...)
--		io.WriteString(s, string(nsec3))
--		nsec3 = s.Sum(nil)
--	}
--	return toBase32(nsec3)
--}
--
--type Denialer interface {
--	// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
--	Cover(name string) bool
--	// Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
--	Match(name string) bool
--}
--
--// Cover implements the Denialer interface.
--func (rr *NSEC) Cover(name string) bool {
--	return true
--}
--
--// Match implements the Denialer interface.
--func (rr *NSEC) Match(name string) bool {
--	return true
--}
--
--// Cover implements the Denialer interface.
--func (rr *NSEC3) Cover(name string) bool {
--	// FIXME(miek): check if the zones match
--	// FIXME(miek): check if we're not dealing with parent nsec3
--	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
--	labels := Split(rr.Hdr.Name)
--	if len(labels) < 2 {
--		return false
--	}
--	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
--	if hash == rr.NextDomain {
--		return false // empty interval
--	}
--	if hash > rr.NextDomain { // last name, points to apex
--		// hname > hash
--		// hname > rr.NextDomain
--		// TODO(miek)
--	}
--	if hname <= hash {
--		return false
--	}
--	if hname >= rr.NextDomain {
--		return false
--	}
--	return true
--}
--
--// Match implements the Denialer interface.
--func (rr *NSEC3) Match(name string) bool {
--	// FIXME(miek): Check if we are in the same zone
--	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
--	labels := Split(rr.Hdr.Name)
--	if len(labels) < 2 {
--		return false
--	}
--	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
--	if hash == hname {
--		return true
--	}
--	return false
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go b/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go
-deleted file mode 100644
-index 72f641a..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/nsecx_test.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package dns
--
--import (
--	"testing"
--)
--
--func TestPackNsec3(t *testing.T) {
--	nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
--	if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
--		t.Logf("%v\n", nsec3)
--		t.Fail()
--	}
--
--	nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
--	if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
--		t.Logf("%v\n", nsec3)
--		t.Fail()
--	}
--}
--
--func TestNsec3(t *testing.T) {
--	// examples taken from .nl
--	nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
--	if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
--		t.Logf("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
--		t.Fail()
--	}
--	nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
--	if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
--		t.Logf("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
--		t.Fail()
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go b/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
-deleted file mode 100644
-index dd2799d..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
-+++ /dev/null
-@@ -1,1276 +0,0 @@
--package dns
--
--import (
--	"bytes"
--	"crypto/rsa"
--	"encoding/hex"
--	"fmt"
--	"math/rand"
--	"net"
--	"reflect"
--	"strconv"
--	"strings"
--	"testing"
--	"testing/quick"
--	"time"
--)
--
--func TestDotInName(t *testing.T) {
--	buf := make([]byte, 20)
--	PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
--	// index 3 must be a real dot
--	if buf[3] != '.' {
--		t.Log("dot should be a real dot")
--		t.Fail()
--	}
--
--	if buf[6] != 2 {
--		t.Log("this must have the value 2")
--		t.Fail()
--	}
--	dom, _, _ := UnpackDomainName(buf, 0)
--	// printing it should yield the backslash again
--	if dom != "aa\\.bb.nl." {
--		t.Log("dot should have been escaped: " + dom)
--		t.Fail()
--	}
--}
--
--func TestDotLastInLabel(t *testing.T) {
--	sample := "aa\\..au."
--	buf := make([]byte, 20)
--	_, err := PackDomainName(sample, buf, 0, nil, false)
--	if err != nil {
--		t.Fatalf("unexpected error packing domain: %s", err)
--	}
--	dom, _, _ := UnpackDomainName(buf, 0)
--	if dom != sample {
--		t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom)
--	}
--}
--
--func TestTooLongDomainName(t *testing.T) {
--	l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
--	dom := l + l + l + l + l + l + l
--	_, e := NewRR(dom + " IN A 127.0.0.1")
--	if e == nil {
--		t.Log("should be too long")
--		t.Fail()
--	} else {
--		t.Logf("error is %s", e.Error())
--	}
--	_, e = NewRR("..com. IN A 127.0.0.1")
--	if e == nil {
--		t.Log("should fail")
--		t.Fail()
--	} else {
--		t.Logf("error is %s", e.Error())
--	}
--}
--
--func TestDomainName(t *testing.T) {
--	tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.",
--		"www.*.miek.nl.", "www.*.miek.nl.",
--	}
--	dbuff := make([]byte, 40)
--
--	for _, ts := range tests {
--		if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil {
--			t.Log("not a valid domain name")
--			t.Fail()
--			continue
--		}
--		n, _, err := UnpackDomainName(dbuff, 0)
--		if err != nil {
--			t.Log("failed to unpack packed domain name")
--			t.Fail()
--			continue
--		}
--		if ts != n {
--			t.Logf("must be equal: in: %s, out: %s\n", ts, n)
--			t.Fail()
--		}
--	}
--}
--
--func TestDomainNameAndTXTEscapes(t *testing.T) {
--	tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255}
--	for _, b := range tests {
--		rrbytes := []byte{
--			1, b, 0, // owner
--			byte(TypeTXT >> 8), byte(TypeTXT),
--			byte(ClassINET >> 8), byte(ClassINET),
--			0, 0, 0, 1, // TTL
--			0, 2, 1, b, // Data
--		}
--		rr1, _, err := UnpackRR(rrbytes, 0)
--		if err != nil {
--			panic(err)
--		}
--		s := rr1.String()
--		rr2, err := NewRR(s)
--		if err != nil {
--			t.Logf("Error parsing unpacked RR's string: %v", err)
--			t.Logf(" Bytes: %v\n", rrbytes)
--			t.Logf("String: %v\n", s)
--			t.Fail()
--		}
--		repacked := make([]byte, len(rrbytes))
--		if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil {
--			t.Logf("error packing parsed RR: %v", err)
--			t.Logf(" original Bytes: %v\n", rrbytes)
--			t.Logf("unpacked Struct: %V\n", rr1)
--			t.Logf("  parsed Struct: %V\n", rr2)
--			t.Fail()
--		}
--		if !bytes.Equal(repacked, rrbytes) {
--			t.Log("packed bytes don't match original bytes")
--			t.Logf(" original bytes: %v", rrbytes)
--			t.Logf("   packed bytes: %v", repacked)
--			t.Logf("unpacked struct: %V", rr1)
--			t.Logf("  parsed struct: %V", rr2)
--			t.Fail()
--		}
--	}
--}
--
--func TestTXTEscapeParsing(t *testing.T) {
--	test := [][]string{
--		{`";"`, `";"`},
--		{`\;`, `";"`},
--		{`"\t"`, `"\t"`},
--		{`"\r"`, `"\r"`},
--		{`"\ "`, `" "`},
--		{`"\;"`, `";"`},
--		{`"\;\""`, `";\""`},
--		{`"\(a\)"`, `"(a)"`},
--		{`"\(a)"`, `"(a)"`},
--		{`"(a\)"`, `"(a)"`},
--		{`"(a)"`, `"(a)"`},
--		{`"\048"`, `"0"`},
--		{`"\` + "\n" + `"`, `"\n"`},
--		{`"\` + "\r" + `"`, `"\r"`},
--		{`"\` + "\x11" + `"`, `"\017"`},
--		{`"\'"`, `"'"`},
--	}
--	for _, s := range test {
--		rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0]))
--		if err != nil {
--			t.Errorf("Could not parse %v TXT: %s", s[0], err)
--			continue
--		}
--
--		txt := sprintTxt(rr.(*TXT).Txt)
--		if txt != s[1] {
--			t.Errorf("Mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1])
--		}
--	}
--}
--
--func GenerateDomain(r *rand.Rand, size int) []byte {
--	dnLen := size % 70 // artificially limit size so there's less to interpret if a failure occurs
--	var dn []byte
--	done := false
--	for i := 0; i < dnLen && !done; {
--		max := dnLen - i
--		if max > 63 {
--			max = 63
--		}
--		lLen := max
--		if lLen != 0 {
--			lLen = int(r.Int31()) % max
--		}
--		done = lLen == 0
--		if done {
--			continue
--		}
--		l := make([]byte, lLen+1)
--		l[0] = byte(lLen)
--		for j := 0; j < lLen; j++ {
--			l[j+1] = byte(rand.Int31())
--		}
--		dn = append(dn, l...)
--		i += 1 + lLen
--	}
--	return append(dn, 0)
--}
--
--func TestDomainQuick(t *testing.T) {
--	r := rand.New(rand.NewSource(0))
--	f := func(l int) bool {
--		db := GenerateDomain(r, l)
--		ds, _, err := UnpackDomainName(db, 0)
--		if err != nil {
--			panic(err)
--		}
--		buf := make([]byte, 255)
--		off, err := PackDomainName(ds, buf, 0, nil, false)
--		if err != nil {
--			t.Logf("error packing domain: %s", err.Error())
--			t.Logf(" bytes: %v\n", db)
--			t.Logf("string: %v\n", ds)
--			return false
--		}
--		if !bytes.Equal(db, buf[:off]) {
--			t.Logf("repacked domain doesn't match original:")
--			t.Logf("src bytes: %v", db)
--			t.Logf("   string: %v", ds)
--			t.Logf("out bytes: %v", buf[:off])
--			return false
--		}
--		return true
--	}
--	if err := quick.Check(f, nil); err != nil {
--		t.Error(err)
--	}
--}
--
--func GenerateTXT(r *rand.Rand, size int) []byte {
--	rdLen := size % 300 // artificially limit size so there's less to interpret if a failure occurs
--	var rd []byte
--	for i := 0; i < rdLen; {
--		max := rdLen - 1
--		if max > 255 {
--			max = 255
--		}
--		sLen := max
--		if max != 0 {
--			sLen = int(r.Int31()) % max
--		}
--		s := make([]byte, sLen+1)
--		s[0] = byte(sLen)
--		for j := 0; j < sLen; j++ {
--			s[j+1] = byte(rand.Int31())
--		}
--		rd = append(rd, s...)
--		i += 1 + sLen
--	}
--	return rd
--}
--
--func TestTXTRRQuick(t *testing.T) {
--	s := rand.NewSource(0)
--	r := rand.New(s)
--	typeAndClass := []byte{
--		byte(TypeTXT >> 8), byte(TypeTXT),
--		byte(ClassINET >> 8), byte(ClassINET),
--		0, 0, 0, 1, // TTL
--	}
--	f := func(l int) bool {
--		owner := GenerateDomain(r, l)
--		rdata := GenerateTXT(r, l)
--		rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata))
--		rrbytes = append(rrbytes, owner...)
--		rrbytes = append(rrbytes, typeAndClass...)
--		rrbytes = append(rrbytes, byte(len(rdata)>>8))
--		rrbytes = append(rrbytes, byte(len(rdata)))
--		rrbytes = append(rrbytes, rdata...)
--		rr, _, err := UnpackRR(rrbytes, 0)
--		if err != nil {
--			panic(err)
--		}
--		buf := make([]byte, len(rrbytes)*3)
--		off, err := PackRR(rr, buf, 0, nil, false)
--		if err != nil {
--			t.Logf("pack Error: %s\nRR: %V", err.Error(), rr)
--			return false
--		}
--		buf = buf[:off]
--		if !bytes.Equal(buf, rrbytes) {
--			t.Logf("packed bytes don't match original bytes")
--			t.Logf("src bytes: %v", rrbytes)
--			t.Logf("   struct: %V", rr)
--			t.Logf("oUt bytes: %v", buf)
--			return false
--		}
--		if len(rdata) == 0 {
--			// string'ing won't produce any data to parse
--			return true
--		}
--		rrString := rr.String()
--		rr2, err := NewRR(rrString)
--		if err != nil {
--			t.Logf("error parsing own output: %s", err.Error())
--			t.Logf("struct: %V", rr)
--			t.Logf("string: %v", rrString)
--			return false
--		}
--		if rr2.String() != rrString {
--			t.Logf("parsed rr.String() doesn't match original string")
--			t.Logf("original: %v", rrString)
--			t.Logf("  parsed: %v", rr2.String())
--			return false
--		}
--
--		buf = make([]byte, len(rrbytes)*3)
--		off, err = PackRR(rr2, buf, 0, nil, false)
--		if err != nil {
--			t.Logf("error packing parsed rr: %s", err.Error())
--			t.Logf("unpacked Struct: %V", rr)
--			t.Logf("         string: %v", rrString)
--			t.Logf("  parsed Struct: %V", rr2)
--			return false
--		}
--		buf = buf[:off]
--		if !bytes.Equal(buf, rrbytes) {
--			t.Logf("parsed packed bytes don't match original bytes")
--			t.Logf("   source bytes: %v", rrbytes)
--			t.Logf("unpacked struct: %V", rr)
--			t.Logf("         string: %v", rrString)
--			t.Logf("  parsed struct: %V", rr2)
--			t.Logf(" repacked bytes: %v", buf)
--			return false
--		}
--		return true
--	}
--	c := &quick.Config{MaxCountScale: 10}
--	if err := quick.Check(f, c); err != nil {
--		t.Error(err)
--	}
--}
--
--func TestParseDirectiveMisc(t *testing.T) {
--	tests := map[string]string{
--		"$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.",
--		"$TTL 2H\nmiek.nl. IN NS b.":  "miek.nl.\t7200\tIN\tNS\tb.",
--		"miek.nl. 1D IN NS b.":        "miek.nl.\t86400\tIN\tNS\tb.",
--		`name. IN SOA  a6.nstld.com. hostmaster.nic.name. (
--        203362132 ; serial
--        5m        ; refresh (5 minutes)
--        5m        ; retry (5 minutes)
--        2w        ; expire (2 weeks)
--        300       ; minimum (5 minutes)
--)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300",
--		". 3600000  IN  NS ONE.MY-ROOTS.NET.":        ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.",
--		"ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1",
--	}
--	for i, o := range tests {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestNSEC(t *testing.T) {
--	nsectests := map[string]string{
--		"nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F":                                                                                                 "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F",
--		"p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM",
--		"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC":                                                                                 "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC",
--		"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534":                                                                       "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
--		"localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534":                                                                       "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534",
--	}
--	for i, o := range nsectests {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestParseLOC(t *testing.T) {
--	lt := map[string]string{
--		"SW1A2AA.find.me.uk.	LOC	51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
--		"SW1A2AA.find.me.uk.	LOC	51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m",
--	}
--	for i, o := range lt {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestParseDS(t *testing.T) {
--	dt := map[string]string{
--		"example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F",
--	}
--	for i, o := range dt {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestQuotes(t *testing.T) {
--	tests := map[string]string{
--		`t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"",
--		`t.example.com. IN TXT "a
-- bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"",
--		`t.example.com. IN TXT ""`:                                                           "t.example.com.\t3600\tIN\tTXT\t\"\"",
--		`t.example.com. IN TXT "a"`:                                                          "t.example.com.\t3600\tIN\tTXT\t\"a\"",
--		`t.example.com. IN TXT "aa"`:                                                         "t.example.com.\t3600\tIN\tTXT\t\"aa\"",
--		`t.example.com. IN TXT "aaa" ;`:                                                      "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
--		`t.example.com. IN TXT "abc" "DEF"`:                                                  "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
--		`t.example.com. IN TXT "abc" ( "DEF" )`:                                              "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"",
--		`t.example.com. IN TXT aaa ;`:                                                        "t.example.com.\t3600\tIN\tTXT\t\"aaa \"",
--		`t.example.com. IN TXT aaa aaa;`:                                                     "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
--		`t.example.com. IN TXT aaa aaa`:                                                      "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"",
--		`t.example.com. IN TXT aaa`:                                                          "t.example.com.\t3600\tIN\tTXT\t\"aaa\"",
--		"cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\"    \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.",
--		"cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\"         \"\" _rcds._udp.gatech.edu.":  "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.",
--		"cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.":  "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.",
--		"cid.urn.arpa. NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .":     "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .",
--	}
--	for i, o := range tests {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestParseClass(t *testing.T) {
--	tests := map[string]string{
--		"t.example.com. IN A 127.0.0.1": "t.example.com.	3600	IN	A	127.0.0.1",
--		"t.example.com. CS A 127.0.0.1": "t.example.com.	3600	CS	A	127.0.0.1",
--		"t.example.com. CH A 127.0.0.1": "t.example.com.	3600	CH	A	127.0.0.1",
--		// ClassANY can not occur in zone files
--		// "t.example.com. ANY A 127.0.0.1": "t.example.com.	3600	ANY	A	127.0.0.1",
--		"t.example.com. NONE A 127.0.0.1": "t.example.com.	3600	NONE	A	127.0.0.1",
--	}
--	for i, o := range tests {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is\n`%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestBrace(t *testing.T) {
--	tests := map[string]string{
--		"(miek.nl.) 3600 IN A 127.0.1.1":                 "miek.nl.\t3600\tIN\tA\t127.0.1.1",
--		"miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.",
--		`miek.nl. IN (
--                        3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1",
--		"(miek.nl.) (A) (127.0.2.1)":                          "miek.nl.\t3600\tIN\tA\t127.0.2.1",
--		"miek.nl A 127.0.3.1":                                 "miek.nl.\t3600\tIN\tA\t127.0.3.1",
--		"_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.",
--		"miek.nl. NS ns.miek.nl":                              "miek.nl.\t3600\tIN\tNS\tns.miek.nl.",
--		`(miek.nl.) (
--                        (IN)
--                        (AAAA)
--                        (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1",
--		`(miek.nl.) (
--                        (IN)
--                        (AAAA)
--                        (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1",
--		"miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2",
--		`((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) (
--                                2009032802 ; serial
--                                21600      ; refresh (6 hours)
--                                7(2)00       ; retry (2 hours)
--                                604()800     ; expire (1 week)
--                                3600       ; minimum (1 hour)
--                        )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600",
--		"miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10",
--		"miek.nl. IN A 127.0.0.11":   "miek.nl.\t3600\tIN\tA\t127.0.0.11",
--		"miek.nl. A 127.0.0.12":      "miek.nl.\t3600\tIN\tA\t127.0.0.12",
--		`miek.nl.       86400 IN SOA elektron.atoom.net. miekg.atoom.net. (
--                                2009032802 ; serial
--                                21600      ; refresh (6 hours)
--                                7200       ; retry (2 hours)
--                                604800     ; expire (1 week)
--                                3600       ; minimum (1 hour)
--                        )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600",
--	}
--	for i, o := range tests {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error() + "\n\t" + i)
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestParseFailure(t *testing.T) {
--	tests := []string{"miek.nl. IN A 327.0.0.1",
--		"miek.nl. IN AAAA ::x",
--		"miek.nl. IN MX a0 miek.nl.",
--		"miek.nl aap IN MX mx.miek.nl.",
--		"miek.nl 200 IN mxx 10 mx.miek.nl.",
--		"miek.nl. inn MX 10 mx.miek.nl.",
--		// "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata
--		"miek.nl. IN CNAME ..",
--		"miek.nl. PA MX 10 miek.nl.",
--		"miek.nl. ) IN MX 10 miek.nl.",
--	}
--
--	for _, s := range tests {
--		_, err := NewRR(s)
--		if err == nil {
--			t.Logf("should have triggered an error: \"%s\"", s)
--			t.Fail()
--		}
--	}
--}
--
--func TestZoneParsing(t *testing.T) {
--	// parse_test.db
--	db := `
--a.example.com.                IN A 127.0.0.1
--8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG
--$ORIGIN a.example.com.
--test                          IN A 127.0.0.1
--$ORIGIN b.example.com.
--test                          IN CNAME test.a.example.com.
--`
--	start := time.Now().UnixNano()
--	to := ParseZone(strings.NewReader(db), "", "parse_test.db")
--	var i int
--	for x := range to {
--		i++
--		if x.Error != nil {
--			t.Logf("%s\n", x.Error)
--			t.Fail()
--			continue
--		}
--		t.Logf("%s\n", x.RR)
--	}
--	delta := time.Now().UnixNano() - start
--	t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9))
--}
--
--func ExampleZone() {
--	zone := `$ORIGIN .
--$TTL 3600       ; 1 hour
--name                    IN SOA  a6.nstld.com. hostmaster.nic.name. (
--                                203362132  ; serial
--                                300        ; refresh (5 minutes)
--                                300        ; retry (5 minutes)
--                                1209600    ; expire (2 weeks)
--                                300        ; minimum (5 minutes)
--                                )
--$TTL 10800      ; 3 hours
--name.	10800	IN	NS	name.
--               IN       NS      g6.nstld.com.
--               7200     NS      h6.nstld.com.
--             3600 IN    NS      j6.nstld.com.
--             IN 3600    NS      k6.nstld.com.
--                        NS      l6.nstld.com.
--                        NS      a6.nstld.com.
--                        NS      c6.nstld.com.
--                        NS      d6.nstld.com.
--                        NS      f6.nstld.com.
--                        NS      m6.nstld.com.
--(
--			NS	m7.nstld.com.
--)
--$ORIGIN name.
--0-0onlus                NS      ns7.ehiweb.it.
--                        NS      ns8.ehiweb.it.
--0-g                     MX      10 mx01.nic
--                        MX      10 mx02.nic
--                        MX      10 mx03.nic
--                        MX      10 mx04.nic
--$ORIGIN 0-g.name
--moutamassey             NS      ns01.yahoodomains.jp.
--                        NS      ns02.yahoodomains.jp.
--`
--	to := ParseZone(strings.NewReader(zone), "", "testzone")
--	for x := range to {
--		fmt.Printf("%s\n", x.RR)
--	}
--	// Output:
--	// name.	3600	IN	SOA	a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300
--	// name.	10800	IN	NS	name.
--	// name.	10800	IN	NS	g6.nstld.com.
--	// name.	7200	IN	NS	h6.nstld.com.
--	// name.	3600	IN	NS	j6.nstld.com.
--	// name.	3600	IN	NS	k6.nstld.com.
--	// name.	10800	IN	NS	l6.nstld.com.
--	// name.	10800	IN	NS	a6.nstld.com.
--	// name.	10800	IN	NS	c6.nstld.com.
--	// name.	10800	IN	NS	d6.nstld.com.
--	// name.	10800	IN	NS	f6.nstld.com.
--	// name.	10800	IN	NS	m6.nstld.com.
--	// name.	10800	IN	NS	m7.nstld.com.
--	// 0-0onlus.name.	10800	IN	NS	ns7.ehiweb.it.
--	// 0-0onlus.name.	10800	IN	NS	ns8.ehiweb.it.
--	// 0-g.name.	10800	IN	MX	10 mx01.nic.name.
--	// 0-g.name.	10800	IN	MX	10 mx02.nic.name.
--	// 0-g.name.	10800	IN	MX	10 mx03.nic.name.
--	// 0-g.name.	10800	IN	MX	10 mx04.nic.name.
--	// moutamassey.0-g.name.name.	10800	IN	NS	ns01.yahoodomains.jp.
--	// moutamassey.0-g.name.name.	10800	IN	NS	ns02.yahoodomains.jp.
--}
--
--func ExampleHIP() {
--	h := `www.example.com     IN  HIP ( 2 200100107B1A74DF365639CC39F1D578
--                AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p
--9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ
--b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
--        rvs.example.com. )`
--	if hip, err := NewRR(h); err == nil {
--		fmt.Printf("%s\n", hip.String())
--	}
--	// Output:
--	// www.example.com.	3600	IN	HIP	2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com.
--}
--
--func TestHIP(t *testing.T) {
--	h := `www.example.com.      IN  HIP ( 2 200100107B1A74DF365639CC39F1D578
--                                AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p
--9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ
--b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D
--                                rvs1.example.com.
--                                rvs2.example.com. )`
--	rr, err := NewRR(h)
--	if err != nil {
--		t.Fatalf("failed to parse RR: %s", err)
--	}
--	t.Logf("RR: %s", rr)
--	msg := new(Msg)
--	msg.Answer = []RR{rr, rr}
--	bytes, err := msg.Pack()
--	if err != nil {
--		t.Fatalf("failed to pack msg: %s", err)
--	}
--	if err := msg.Unpack(bytes); err != nil {
--		t.Fatalf("failed to unpack msg: %s", err)
--	}
--	if len(msg.Answer) != 2 {
--		t.Fatalf("2 answers expected: %V", msg)
--	}
--	for i, rr := range msg.Answer {
--		rr := rr.(*HIP)
--		t.Logf("RR: %s", rr)
--		if l := len(rr.RendezvousServers); l != 2 {
--			t.Fatalf("2 servers expected, only %d in record %d:\n%V", l, i, msg)
--		}
--		for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} {
--			if rr.RendezvousServers[j] != s {
--				t.Fatalf("expected server %d of record %d to be %s:\n%V", j, i, s, msg)
--			}
--		}
--	}
--}
--
--func ExampleSOA() {
--	s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100"
--	if soa, err := NewRR(s); err == nil {
--		fmt.Printf("%s\n", soa.String())
--	}
--	// Output:
--	// example.com.	1000	IN	SOA	master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100
--}
--
--func TestLineNumberError(t *testing.T) {
--	s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100"
--	if _, err := NewRR(s); err != nil {
--		if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" {
--			t.Logf("not expecting this error: " + err.Error())
--			t.Fail()
--		}
--	}
--}
--
--// Test with no known RR on the line
--func TestLineNumberError2(t *testing.T) {
--	tests := map[string]string{
--		"example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21",
--		"example.com 1000 IN TALINK a.example.com. b..example.com.":                                          "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57",
--		"example.com 1000 IN TALINK ( a.example.com. b..example.com. )":                                      "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60",
--		`example.com 1000 IN TALINK ( a.example.com.
--	bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18",
--		// This is a bug, it should report an error on line 1, but the newline is already processed.
--		`example.com 1000 IN TALINK ( a.example.com.  b...example.com.
--	)`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"}
--
--	for in, err := range tests {
--		_, e := NewRR(in)
--		if e == nil {
--			t.Fail()
--		} else {
--			if e.Error() != err {
--				t.Logf("%s\n", in)
--				t.Logf("error should be %s is %s\n", err, e.Error())
--				t.Fail()
--			}
--		}
--	}
--}
--
--// Test if the calculations are correct
--func TestRfc1982(t *testing.T) {
--	// If the current time and the timestamp are more than 68 years apart
--	// it means the date has wrapped. 0 is 1970
--
--	// fall in the current 68 year span
--	strtests := []string{"20120525134203", "19700101000000", "20380119031408"}
--	for _, v := range strtests {
--		if x, _ := StringToTime(v); v != TimeToString(x) {
--			t.Logf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x)
--			t.Fail()
--		}
--	}
--
--	inttests := map[uint32]string{0: "19700101000000",
--		1 << 31:   "20380119031408",
--		1<<32 - 1: "21060207062815",
--	}
--	for i, v := range inttests {
--		if TimeToString(i) != v {
--			t.Logf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i))
--			t.Fail()
--		}
--	}
--
--	// Future tests, these dates get parsed to a date within the current 136 year span
--	future := map[string]string{"22680119031408": "20631123173144",
--		"19010101121212": "20370206184028",
--		"19210101121212": "20570206184028",
--		"19500101121212": "20860206184028",
--		"19700101000000": "19700101000000",
--		"19690101000000": "21050207062816",
--		"29210101121212": "21040522212236",
--	}
--	for from, to := range future {
--		x, _ := StringToTime(from)
--		y := TimeToString(x)
--		if y != to {
--			t.Logf("1982 arithmetic future failure %s:%s (%s)", from, to, y)
--			t.Fail()
--		}
--	}
--}
--
--func TestEmpty(t *testing.T) {
--	for _ = range ParseZone(strings.NewReader(""), "", "") {
--		t.Logf("should be empty")
--		t.Fail()
--	}
--}
--
--func TestLowercaseTokens(t *testing.T) {
--	var testrecords = []string{
--		"example.org. 300 IN a 1.2.3.4",
--		"example.org. 300 in A 1.2.3.4",
--		"example.org. 300 in a 1.2.3.4",
--		"example.org. 300 a 1.2.3.4",
--		"example.org. 300 A 1.2.3.4",
--		"example.org. IN a 1.2.3.4",
--		"example.org. in A 1.2.3.4",
--		"example.org. in a 1.2.3.4",
--		"example.org. a 1.2.3.4",
--		"example.org. A 1.2.3.4",
--		"example.org. a 1.2.3.4",
--		"$ORIGIN example.org.\n a 1.2.3.4",
--		"$Origin example.org.\n a 1.2.3.4",
--		"$origin example.org.\n a 1.2.3.4",
--		"example.org. Class1 Type1 1.2.3.4",
--	}
--	for _, testrr := range testrecords {
--		_, err := NewRR(testrr)
--		if err != nil {
--			t.Errorf("failed to parse %#v, got %s", testrr, err.Error())
--		}
--	}
--}
--
--func ExampleGenerate() {
--	// From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761
--	zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0"
--	to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "")
--	for x := range to {
--		if x.Error == nil {
--			fmt.Printf("%s\n", x.RR.String())
--		}
--	}
--	// Output:
--	// 0.0.0.192.IN-ADDR.ARPA.	3600	IN	NS	SERVER1.EXAMPLE.
--	// 0.0.0.192.IN-ADDR.ARPA.	3600	IN	NS	SERVER2.EXAMPLE.
--	// 1.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	1.0.0.0.192.IN-ADDR.ARPA.
--	// 2.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	2.0.0.0.192.IN-ADDR.ARPA.
--	// 3.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	3.0.0.0.192.IN-ADDR.ARPA.
--	// 4.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	4.0.0.0.192.IN-ADDR.ARPA.
--	// 5.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	5.0.0.0.192.IN-ADDR.ARPA.
--	// 6.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	6.0.0.0.192.IN-ADDR.ARPA.
--	// 7.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	7.0.0.0.192.IN-ADDR.ARPA.
--	// 8.0.0.192.IN-ADDR.ARPA.	3600	IN	CNAME	8.0.0.0.192.IN-ADDR.ARPA.
--}
--
--func TestSRVPacking(t *testing.T) {
--	msg := Msg{}
--
--	things := []string{"1.2.3.4:8484",
--		"45.45.45.45:8484",
--		"84.84.84.84:8484",
--	}
--
--	for i, n := range things {
--		h, p, err := net.SplitHostPort(n)
--		if err != nil {
--			continue
--		}
--		port := 8484
--		tmp, err := strconv.Atoi(p)
--		if err == nil {
--			port = tmp
--		}
--
--		rr := &SRV{
--			Hdr: RR_Header{Name: "somename.",
--				Rrtype: TypeSRV,
--				Class:  ClassINET,
--				Ttl:    5},
--			Priority: uint16(i),
--			Weight:   5,
--			Port:     uint16(port),
--			Target:   h + ".",
--		}
--
--		msg.Answer = append(msg.Answer, rr)
--	}
--
--	_, err := msg.Pack()
--	if err != nil {
--		t.Fatalf("couldn't pack %v\n", msg)
--	}
--}
--
--func TestParseBackslash(t *testing.T) {
--	if r, e := NewRR("nul\\000gap.test.globnix.net. 600 IN	A 192.0.2.10"); e != nil {
--		t.Fatalf("could not create RR with \\000 in it")
--	} else {
--		t.Logf("parsed %s\n", r.String())
--	}
--	if r, e := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); e != nil {
--		t.Fatalf("could not create RR with \\000 in it")
--	} else {
--		t.Logf("parsed %s\n", r.String())
--	}
--	if r, e := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); e != nil {
--		t.Fatalf("could not create RR with \\ and \\@ in it")
--	} else {
--		t.Logf("parsed %s\n", r.String())
--	}
--}
--
--func TestILNP(t *testing.T) {
--	tests := []string{
--		"host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64",
--		"host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65",
--		"host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66",
--		"host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0",
--		"host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0",
--		"host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0",
--		"host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000",
--		"host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000",
--		"host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000",
--		"host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.",
--		"host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.",
--		"host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.",
--	}
--	for _, t1 := range tests {
--		r, e := NewRR(t1)
--		if e != nil {
--			t.Fatalf("an error occurred: %s\n", e.Error())
--		} else {
--			if t1 != r.String() {
--				t.Fatalf("strings should be equal %s %s", t1, r.String())
--			}
--		}
--	}
--}
--
--func TestNsapGposEidNimloc(t *testing.T) {
--	dt := map[string]string{
--		"foo.bar.com.    IN  NSAP   21 47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t21 47000580ffff000000321099991111222233334444",
--		"host.school.de  IN  NSAP   17 39276f3100111100002222333344449876":         "host.school.de.\t3600\tIN\tNSAP\t17 39276f3100111100002222333344449876",
--		"444433332222111199990123000000ff. NSAP-PTR foo.bar.com.":                  "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.",
--		"lillee. IN  GPOS -32.6882 116.8652 10.0":                                  "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0",
--		"hinault. IN GPOS -22.6882 116.8652 250.0":                                 "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0",
--		"VENERA.   IN NIMLOC  75234159EAC457800920":                                "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920",
--		"VAXA.     IN EID     3141592653589793":                                    "VAXA.\t3600\tIN\tEID\t3141592653589793",
--	}
--	for i, o := range dt {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestPX(t *testing.T) {
--	dt := map[string]string{
--		"*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.":      "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.",
--		"ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.",
--	}
--	for i, o := range dt {
--		rr, e := NewRR(i)
--		if e != nil {
--			t.Log("failed to parse RR: " + e.Error())
--			t.Fail()
--			continue
--		}
--		if rr.String() != o {
--			t.Logf("`%s' should be equal to\n`%s', but is     `%s'\n", i, o, rr.String())
--			t.Fail()
--		} else {
--			t.Logf("RR is OK: `%s'", rr.String())
--		}
--	}
--}
--
--func TestComment(t *testing.T) {
--	// Comments we must see
--	comments := map[string]bool{"; this is comment 1": true,
--		"; this is comment 4": true, "; this is comment 6": true,
--		"; this is comment 7": true, "; this is comment 8": true}
--	zone := `
--foo. IN A 10.0.0.1 ; this is comment 1
--foo. IN A (
--	10.0.0.2 ; this is comment2
--)
--; this is comment3
--foo. IN A 10.0.0.3
--foo. IN A ( 10.0.0.4 ); this is comment 4
--
--foo. IN A 10.0.0.5
--; this is comment5
--
--foo. IN A 10.0.0.6
--
--foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6
--foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7
--foo. IN TXT "THIS IS TEXT MAN"; this is comment 8
--`
--	for x := range ParseZone(strings.NewReader(zone), ".", "") {
--		if x.Error == nil {
--			if x.Comment != "" {
--				if _, ok := comments[x.Comment]; !ok {
--					t.Logf("wrong comment %s", x.Comment)
--					t.Fail()
--				}
--			}
--		}
--	}
--}
--
--func TestEUIxx(t *testing.T) {
--	tests := map[string]string{
--		"host.example. IN EUI48 00-00-5e-90-01-2a":       "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a",
--		"host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a",
--	}
--	for i, o := range tests {
--		r, e := NewRR(i)
--		if e != nil {
--			t.Logf("failed to parse %s: %s\n", i, e.Error())
--			t.Fail()
--		}
--		if r.String() != o {
--			t.Logf("want %s, got %s\n", o, r.String())
--			t.Fail()
--		}
--	}
--}
--
--func TestUserRR(t *testing.T) {
--	tests := map[string]string{
--		"host.example. IN UID 1234":              "host.example.\t3600\tIN\tUID\t1234",
--		"host.example. IN GID 1234556":           "host.example.\t3600\tIN\tGID\t1234556",
--		"host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"",
--	}
--	for i, o := range tests {
--		r, e := NewRR(i)
--		if e != nil {
--			t.Logf("failed to parse %s: %s\n", i, e.Error())
--			t.Fail()
--		}
--		if r.String() != o {
--			t.Logf("want %s, got %s\n", o, r.String())
--			t.Fail()
--		}
--	}
--}
--
--func TestTXT(t *testing.T) {
--	// Test single entry TXT record
--	rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`)
--	if err != nil {
--		t.Error("failed to parse single value TXT record", err)
--	} else if rr, ok := rr.(*TXT); !ok {
--		t.Error("wrong type, record should be of type TXT")
--	} else {
--		if len(rr.Txt) != 1 {
--			t.Error("bad size of TXT value:", len(rr.Txt))
--		} else if rr.Txt[0] != "single value" {
--			t.Error("bad single value")
--		}
--		if rr.String() != `_raop._tcp.local.	60	IN	TXT	"single value"` {
--			t.Error("bad representation of TXT record:", rr.String())
--		}
--		if rr.len() != 28+1+12 {
--			t.Error("bad size of serialized record:", rr.len())
--		}
--	}
--
--	// Test multi entries TXT record
--	rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`)
--	if err != nil {
--		t.Error("failed to parse multi-values TXT record", err)
--	} else if rr, ok := rr.(*TXT); !ok {
--		t.Error("wrong type, record should be of type TXT")
--	} else {
--		if len(rr.Txt) != 4 {
--			t.Error("bad size of TXT multi-value:", len(rr.Txt))
--		} else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" {
--			t.Error("bad values in TXT records")
--		}
--		if rr.String() != `_raop._tcp.local.	60	IN	TXT	"a=1" "b=2" "c=3" "d=4"` {
--			t.Error("bad representation of TXT multi value record:", rr.String())
--		}
--		if rr.len() != 28+1+3+1+3+1+3+1+3 {
--			t.Error("bad size of serialized multi value record:", rr.len())
--		}
--	}
--
--	// Test empty-string in TXT record
--	rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`)
--	if err != nil {
--		t.Error("failed to parse empty-string TXT record", err)
--	} else if rr, ok := rr.(*TXT); !ok {
--		t.Error("wrong type, record should be of type TXT")
--	} else {
--		if len(rr.Txt) != 1 {
--			t.Error("bad size of TXT empty-string value:", len(rr.Txt))
--		} else if rr.Txt[0] != "" {
--			t.Error("bad value for empty-string TXT record")
--		}
--		if rr.String() != `_raop._tcp.local.	60	IN	TXT	""` {
--			t.Error("bad representation of empty-string TXT record:", rr.String())
--		}
--		if rr.len() != 28+1 {
--			t.Error("bad size of serialized record:", rr.len())
--		}
--	}
--}
--
--func TestTypeXXXX(t *testing.T) {
--	_, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd")
--	if err != nil {
--		t.Logf("failed to parse TYPE1234 RR: %s", err.Error())
--		t.Fail()
--	}
--	_, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd")
--	if err == nil {
--		t.Logf("this should not work, for TYPE655341")
--		t.Fail()
--	}
--	_, err = NewRR("example.com IN TYPE1 \\# 4 0a000001")
--	if err == nil {
--		t.Logf("this should not work")
--		t.Fail()
--	}
--}
--
--func TestPTR(t *testing.T) {
--	_, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.")
--	if err != nil {
--		t.Error("failed to parse ", err.Error())
--	}
--}
--
--func TestDigit(t *testing.T) {
--	tests := map[string]byte{
--		"miek\\000.nl. 100 IN TXT \"A\"": 0,
--		"miek\\001.nl. 100 IN TXT \"A\"": 1,
--		"miek\\254.nl. 100 IN TXT \"A\"": 254,
--		"miek\\255.nl. 100 IN TXT \"A\"": 255,
--		"miek\\256.nl. 100 IN TXT \"A\"": 0,
--		"miek\\257.nl. 100 IN TXT \"A\"": 1,
--		"miek\\004.nl. 100 IN TXT \"A\"": 4,
--	}
--	for s, i := range tests {
--		r, e := NewRR(s)
--		buf := make([]byte, 40)
--		if e != nil {
--			t.Fatalf("failed to parse %s\n", e.Error())
--		}
--		PackRR(r, buf, 0, nil, false)
--		t.Logf("%v\n", buf)
--		if buf[5] != i {
--			t.Fatalf("5 pos must be %d, is %d", i, buf[5])
--		}
--		r1, _, _ := UnpackRR(buf, 0)
--		if r1.Header().Ttl != 100 {
--			t.Fatalf("TTL should be %d, is %d", 100, r1.Header().Ttl)
--		}
--	}
--}
--
--func TestParseRRSIGTimestamp(t *testing.T) {
--	tests := map[string]bool{
--		`miek.nl.  IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true,
--		`miek.nl.  IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`:          true,
--	}
--	for r := range tests {
--		_, e := NewRR(r)
--		if e != nil {
--			t.Fail()
--			t.Logf("%s\n", e.Error())
--		}
--	}
--}
--
--func TestTxtEqual(t *testing.T) {
--	rr1 := new(TXT)
--	rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
--	rr1.Txt = []string{"a\"a", "\"", "b"}
--	rr2, _ := NewRR(rr1.String())
--	if rr1.String() != rr2.String() {
--		t.Logf("these two TXT records should match")
--		t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
--		t.Fail() // This is not an error, but keep this test.
--	}
--	t.Logf("\n%s\n%s\n", rr1.String(), rr2.String())
--}
--
--func TestTxtLong(t *testing.T) {
--	rr1 := new(TXT)
--	rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}
--	// Make a long txt record, this breaks when sending the packet,
--	// but not earlier.
--	rr1.Txt = []string{"start-"}
--	for i := 0; i < 200; i++ {
--		rr1.Txt[0] += "start-"
--	}
--	str := rr1.String()
--	if len(str) < len(rr1.Txt[0]) {
--		t.Logf("string conversion should work")
--		t.Fail()
--	}
--}
--
--// Basically, don't crash.
--func TestMalformedPackets(t *testing.T) {
--	var packets = []string{
--		"0021641c0000000100000000000078787878787878787878787303636f6d0000100001",
--	}
--
--	// com = 63 6f 6d
--	for _, packet := range packets {
--		data, _ := hex.DecodeString(packet)
--		//		for _, v := range data {
--		//			t.Logf("%s ", string(v))
--		//		}
--		var msg Msg
--		msg.Unpack(data)
--		//		println(msg.String())
--	}
--}
--
--type algorithm struct {
--	name uint8
--	bits int
--}
--
--func TestNewPrivateKey(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	algorithms := []algorithm{
--		algorithm{ECDSAP256SHA256, 256},
--		algorithm{ECDSAP384SHA384, 384},
--		algorithm{RSASHA1, 1024},
--		algorithm{RSASHA256, 2048},
--		algorithm{DSA, 1024},
--	}
--
--	for _, algo := range algorithms {
--		key := new(DNSKEY)
--		key.Hdr.Rrtype = TypeDNSKEY
--		key.Hdr.Name = "miek.nl."
--		key.Hdr.Class = ClassINET
--		key.Hdr.Ttl = 14400
--		key.Flags = 256
--		key.Protocol = 3
--		key.Algorithm = algo.name
--		privkey, err := key.Generate(algo.bits)
--		if err != nil {
--			t.Fatal(err.Error())
--		}
--
--		newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey))
--		if err != nil {
--			t.Log(key.String())
--			t.Log(key.PrivateKeyString(privkey))
--
--			t.Fatal(err.Error())
--		}
--
--		switch newPrivKey := newPrivKey.(type) {
--		case *rsa.PrivateKey:
--			newPrivKey.Precompute()
--		}
--
--		if !reflect.DeepEqual(privkey, newPrivKey) {
--			t.Errorf("[%v] Private keys differ:\n%#v\n%#v\n", AlgorithmToString[algo.name], privkey, newPrivKey)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go b/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go
-deleted file mode 100644
-index 2257137..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/privaterr.go
-+++ /dev/null
-@@ -1,122 +0,0 @@
--/*
--PRIVATE RR
--
--RFC 6895 sets aside a range of type codes for private use. This range
--is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
--can be used, before requesting an official type code from IANA.
--*/
--package dns
--
--import (
--	"fmt"
--	"strings"
--)
--
--// PrivateRdata is an interface used for implementing "Private Use" RR types, see
--// RFC 6895. This allows one to experiment with new RR types, without requesting an
--// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
--type PrivateRdata interface {
--	// String returns the text presentation of the Rdata of the Private RR.
--	String() string
--	// Parse parses the Rdata of the private RR.
--	Parse([]string) error
--	// Pack is used when packing a private RR into a buffer.
--	Pack([]byte) (int, error)
--	// Unpack is used when unpacking a private RR from a buffer.
--	// TODO(miek): diff. signature than Pack, see edns0.go for instance.
--	Unpack([]byte) (int, error)
--	// Copy copies the Rdata.
--	Copy(PrivateRdata) error
--	// Len returns the length in octets of the Rdata.
--	Len() int
--}
--
--// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
--// It mocks normal RRs and implements dns.RR interface.
--type PrivateRR struct {
--	Hdr  RR_Header
--	Data PrivateRdata
--}
--
--func mkPrivateRR(rrtype uint16) *PrivateRR {
--	// Panics if RR is not an instance of PrivateRR.
--	rrfunc, ok := typeToRR[rrtype]
--	if !ok {
--		panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
--	}
--
--	anyrr := rrfunc()
--	switch rr := anyrr.(type) {
--	case *PrivateRR:
--		return rr
--	}
--	panic(fmt.Sprintf("dns: RR is not a PrivateRR, typeToRR[%d] generator returned %T", rrtype, anyrr))
--}
--
--func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
--func (r *PrivateRR) String() string     { return r.Hdr.String() + r.Data.String() }
--
--// Private len and copy parts to satisfy RR interface.
--func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
--func (r *PrivateRR) copy() RR {
--	// make new RR like this:
--	rr := mkPrivateRR(r.Hdr.Rrtype)
--	newh := r.Hdr.copyHeader()
--	rr.Hdr = *newh
--
--	err := r.Data.Copy(rr.Data)
--	if err != nil {
--		panic("dns: got value that could not be used to copy Private rdata")
--	}
--	return rr
--}
--
--// PrivateHandle registers a private resource record type. It requires the
--// string and numeric representation of the private RR type and a generator function as arguments.
--func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
--	rtypestr = strings.ToUpper(rtypestr)
--
--	typeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
--	TypeToString[rtype] = rtypestr
--	StringToType[rtypestr] = rtype
--
--	setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--		rr := mkPrivateRR(h.Rrtype)
--		rr.Hdr = h
--
--		var l lex
--		text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
--	FETCH:
--		for {
--			// TODO(miek): we could also be returning _QUOTE, this might or might not
--			// be an issue (basically parsing TXT becomes hard)
--			switch l = <-c; l.value {
--			case _NEWLINE, _EOF:
--				break FETCH
--			case _STRING:
--				text = append(text, l.token)
--			}
--		}
--
--		err := rr.Data.Parse(text)
--		if err != nil {
--			return nil, &ParseError{f, err.Error(), l}, ""
--		}
--
--		return rr, nil, ""
--	}
--
--	typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
--}
--
--// PrivateHandleRemove removes the definitions required to support a private RR type.
--func PrivateHandleRemove(rtype uint16) {
--	rtypestr, ok := TypeToString[rtype]
--	if ok {
--		delete(typeToRR, rtype)
--		delete(TypeToString, rtype)
--		delete(typeToparserFunc, rtype)
--		delete(StringToType, rtypestr)
--	}
--	return
--}
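For orientation, here is a minimal sketch of how a caller might wire a PrivateRdata implementation into the package via PrivateHandle and the normal NewRR parsing path. The NOTE type, its 0xFF00 code and the record text are made up for illustration only; the ISBN type in the test file removed just below is the in-tree example.

    package main

    import (
        "fmt"
        "strings"

        "github.com/miekg/dns"
    )

    // NOTE is a hypothetical private RR whose rdata is free-form text.
    // 0xFF00 is the first code in the RFC 6895 private-use range.
    const TypeNOTE uint16 = 0xFF00

    type NOTE struct{ text string }

    func NewNOTE() dns.PrivateRdata { return &NOTE{} }

    func (n *NOTE) String() string           { return n.text }
    func (n *NOTE) Parse(txt []string) error { n.text = strings.Join(txt, " "); return nil }
    func (n *NOTE) Len() int                 { return len(n.text) }

    func (n *NOTE) Pack(buf []byte) (int, error) {
        c := copy(buf, n.text)
        if c != len(n.text) {
            return c, dns.ErrBuf
        }
        return c, nil
    }

    func (n *NOTE) Unpack(buf []byte) (int, error) { n.text = string(buf); return len(buf), nil }

    func (n *NOTE) Copy(dest dns.PrivateRdata) error {
        d, ok := dest.(*NOTE)
        if !ok {
            return dns.ErrRdata
        }
        d.text = n.text
        return nil
    }

    func main() {
        // Register the type, parse a record through the normal NewRR path, unregister on exit.
        dns.PrivateHandle("NOTE", TypeNOTE, NewNOTE)
        defer dns.PrivateHandleRemove(TypeNOTE)

        rr, err := dns.NewRR("example.org. 3600 IN NOTE hello private rdata")
        if err != nil {
            panic(err)
        }
        fmt.Println(rr.String())
    }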
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go b/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go
-deleted file mode 100644
-index 6f1dff8..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/privaterr_test.go
-+++ /dev/null
-@@ -1,169 +0,0 @@
--package dns_test
--
--import (
--	"github.com/miekg/dns"
--	"strings"
--	"testing"
--)
--
--const TypeISBN uint16 = 0x0F01
--
--// A crazy new RR type :)
--type ISBN struct {
--	x string // rdata with 10 or 13 numbers, dashes or spaces allowed
--}
--
--func NewISBN() dns.PrivateRdata { return &ISBN{""} }
--
--func (rd *ISBN) Len() int       { return len([]byte(rd.x)) }
--func (rd *ISBN) String() string { return rd.x }
--
--func (rd *ISBN) Parse(txt []string) error {
--	rd.x = strings.TrimSpace(strings.Join(txt, " "))
--	return nil
--}
--
--func (rd *ISBN) Pack(buf []byte) (int, error) {
--	b := []byte(rd.x)
--	n := copy(buf, b)
--	if n != len(b) {
--		return n, dns.ErrBuf
--	}
--	return n, nil
--}
--
--func (rd *ISBN) Unpack(buf []byte) (int, error) {
--	rd.x = string(buf)
--	return len(buf), nil
--}
--
--func (rd *ISBN) Copy(dest dns.PrivateRdata) error {
--	isbn, ok := dest.(*ISBN)
--	if !ok {
--		return dns.ErrRdata
--	}
--	isbn.x = rd.x
--	return nil
--}
--
--var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t")
--
--func TestPrivateText(t *testing.T) {
--	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
--	defer dns.PrivateHandleRemove(TypeISBN)
--
--	rr, err := dns.NewRR(testrecord)
--	if err != nil {
--		t.Fatal(err)
--	}
--	if rr.String() != testrecord {
--		t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord)
--	} else {
--		t.Log(rr.String())
--	}
--}
--
--func TestPrivateByteSlice(t *testing.T) {
--	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
--	defer dns.PrivateHandleRemove(TypeISBN)
--
--	rr, err := dns.NewRR(testrecord)
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	buf := make([]byte, 100)
--	off, err := dns.PackRR(rr, buf, 0, nil, false)
--	if err != nil {
--		t.Errorf("got error packing ISBN: %s", err)
--	}
--
--	custrr := rr.(*dns.PrivateRR)
--	if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off {
--		t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln)
--	}
--
--	rr1, off1, err := dns.UnpackRR(buf[:off], 0)
--	if err != nil {
--		t.Errorf("got error unpacking ISBN: %s", err)
--	}
--
--	if off1 != off {
--		t.Errorf("Offset after unpacking differs: %d != %d", off1, off)
--	}
--
--	if rr1.String() != testrecord {
--		t.Errorf("Record string representation did not match original %#v != %#v", rr1.String(), testrecord)
--	} else {
--		t.Log(rr1.String())
--	}
--}
--
--const TypeVERSION uint16 = 0x0F02
--
--type VERSION struct {
--	x string
--}
--
--func NewVersion() dns.PrivateRdata { return &VERSION{""} }
--
--func (rd *VERSION) String() string { return rd.x }
--func (rd *VERSION) Parse(txt []string) error {
--	rd.x = strings.TrimSpace(strings.Join(txt, " "))
--	return nil
--}
--
--func (rd *VERSION) Pack(buf []byte) (int, error) {
--	b := []byte(rd.x)
--	n := copy(buf, b)
--	if n != len(b) {
--		return n, dns.ErrBuf
--	}
--	return n, nil
--}
--
--func (rd *VERSION) Unpack(buf []byte) (int, error) {
--	rd.x = string(buf)
--	return len(buf), nil
--}
--
--func (rd *VERSION) Copy(dest dns.PrivateRdata) error {
--	isbn, ok := dest.(*VERSION)
--	if !ok {
--		return dns.ErrRdata
--	}
--	isbn.x = rd.x
--	return nil
--}
--
--func (rd *VERSION) Len() int {
--	return len([]byte(rd.x))
--}
--
--var smallzone = `$ORIGIN example.org.
--@ SOA	sns.dns.icann.org. noc.dns.icann.org. (
--		2014091518 7200 3600 1209600 3600
--)
--    A   1.2.3.4
--ok ISBN 1231-92110-12
--go VERSION (
--	1.3.1 ; comment
--)
--www ISBN 1231-92110-16
--*  CNAME @
--`
--
--func TestPrivateZoneParser(t *testing.T) {
--	dns.PrivateHandle("ISBN", TypeISBN, NewISBN)
--	dns.PrivateHandle("VERSION", TypeVERSION, NewVersion)
--	defer dns.PrivateHandleRemove(TypeISBN)
--	defer dns.PrivateHandleRemove(TypeVERSION)
--
--	r := strings.NewReader(smallzone)
--	for x := range dns.ParseZone(r, ".", "") {
--		if err := x.Error; err != nil {
--			t.Fatal(err)
--		}
--		t.Log(x.RR)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go b/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go
-deleted file mode 100644
-index f138b77..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/rawmsg.go
-+++ /dev/null
-@@ -1,95 +0,0 @@
--package dns
--
--// These raw* functions do not use reflection; they directly set the values
--// in the buffer. They are faster than their reflection counterparts.
--
--// rawSetId sets the message id in buf.
--func rawSetId(msg []byte, i uint16) bool {
--	if len(msg) < 2 {
--		return false
--	}
--	msg[0], msg[1] = packUint16(i)
--	return true
--}
--
--// rawSetQuestionLen sets the length of the question section.
--func rawSetQuestionLen(msg []byte, i uint16) bool {
--	if len(msg) < 6 {
--		return false
--	}
--	msg[4], msg[5] = packUint16(i)
--	return true
--}
--
--// rawSetAnswerLen sets the length of the answer section.
--func rawSetAnswerLen(msg []byte, i uint16) bool {
--	if len(msg) < 8 {
--		return false
--	}
--	msg[6], msg[7] = packUint16(i)
--	return true
--}
--
--// rawSetNsLen sets the length of the authority section.
--func rawSetNsLen(msg []byte, i uint16) bool {
--	if len(msg) < 10 {
--		return false
--	}
--	msg[8], msg[9] = packUint16(i)
--	return true
--}
--
--// rawSetExtraLen sets the length of the additional section.
--func rawSetExtraLen(msg []byte, i uint16) bool {
--	if len(msg) < 12 {
--		return false
--	}
--	msg[10], msg[11] = packUint16(i)
--	return true
--}
--
--// rawSetRdlength sets the rdlength in the header of
--// the RR. The offset 'off' must be positioned at the
--// start of the header of the RR, 'end' must be the
--// end of the RR.
--func rawSetRdlength(msg []byte, off, end int) bool {
--	l := len(msg)
--Loop:
--	for {
--		if off+1 > l {
--			return false
--		}
--		c := int(msg[off])
--		off++
--		switch c & 0xC0 {
--		case 0x00:
--			if c == 0x00 {
--				// End of the domainname
--				break Loop
--			}
--			if off+c > l {
--				return false
--			}
--			off += c
--
--		case 0xC0:
--			// pointer, next byte included, ends domainname
--			off++
--			break Loop
--		}
--	}
--	// The domainname has been seen, we are at the start of the fixed part in the header.
--	// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
--	off += 2 + 2 + 4
--	if off+2 > l {
--		return false
--	}
--	// off+1 is the last byte of the header and 'end' is the end of the rr,
--	// so 'end' - (off+2) is the length of the rdata
--	rdatalen := end - (off + 2)
--	if rdatalen > 0xFFFF {
--		return false
--	}
--	msg[off], msg[off+1] = packUint16(uint16(rdatalen))
--	return true
--}
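These helpers write big-endian uint16 counters straight into the fixed 12-byte DNS header defined by RFC 1035. A standalone sketch of the same offsets, using encoding/binary because the package-internal packUint16 is unexported; the counter values are arbitrary:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Fixed DNS header layout (RFC 1035): ID, flags, QDCOUNT, ANCOUNT,
        // NSCOUNT, ARCOUNT: six big-endian uint16s, 12 bytes in total.
        hdr := make([]byte, 12)

        binary.BigEndian.PutUint16(hdr[0:2], 0x1234) // message id       (rawSetId)
        binary.BigEndian.PutUint16(hdr[4:6], 1)      // question count   (rawSetQuestionLen)
        binary.BigEndian.PutUint16(hdr[6:8], 2)      // answer count     (rawSetAnswerLen)
        binary.BigEndian.PutUint16(hdr[8:10], 0)     // authority count  (rawSetNsLen)
        binary.BigEndian.PutUint16(hdr[10:12], 1)    // additional count (rawSetExtraLen)

        fmt.Printf("% x\n", hdr)
    }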
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/scanner.go b/Godeps/_workspace/src/github.com/miekg/dns/scanner.go
-deleted file mode 100644
-index c29bc2f..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/scanner.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--package dns
--
--// Implement a simple scanner that returns a byte stream from an io.Reader.
--
--import (
--	"bufio"
--	"io"
--	"text/scanner"
--)
--
--type scan struct {
--	src      *bufio.Reader
--	position scanner.Position
--	eof      bool // Have we just seen an eof
--}
--
--func scanInit(r io.Reader) *scan {
--	s := new(scan)
--	s.src = bufio.NewReader(r)
--	s.position.Line = 1
--	return s
--}
--
--// tokenText returns the next byte from the input
--func (s *scan) tokenText() (byte, error) {
--	c, err := s.src.ReadByte()
--	if err != nil {
--		return c, err
--	}
--	// delay the newline handling until the next token is delivered,
--	// fixes off-by-one errors when reporting a parse error.
--	if s.eof {
--		s.position.Line++
--		s.position.Column = 0
--		s.eof = false
--	}
--	if c == '\n' {
--		s.eof = true
--		return c, nil
--	}
--	s.position.Column++
--	return c, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server.go b/Godeps/_workspace/src/github.com/miekg/dns/server.go
-deleted file mode 100644
-index c250ccc..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/server.go
-+++ /dev/null
-@@ -1,626 +0,0 @@
--// DNS server implementation.
--
--package dns
--
--import (
--	"bytes"
--	"io"
--	"net"
--	"sync"
--	"time"
--)
--
--type Handler interface {
--	ServeDNS(w ResponseWriter, r *Msg)
--}
--
--// A ResponseWriter interface is used by a DNS handler to
--// construct a DNS response.
--type ResponseWriter interface {
--	// LocalAddr returns the net.Addr of the server
--	LocalAddr() net.Addr
--	// RemoteAddr returns the net.Addr of the client that sent the current request.
--	RemoteAddr() net.Addr
--	// WriteMsg writes a reply back to the client.
--	WriteMsg(*Msg) error
--	// Write writes a raw buffer back to the client.
--	Write([]byte) (int, error)
--	// Close closes the connection.
--	Close() error
--	// TsigStatus returns the status of the Tsig.
--	TsigStatus() error
--	// TsigTimersOnly sets the tsig timers only boolean.
--	TsigTimersOnly(bool)
--	// Hijack lets the caller take over the connection.
--	// After a call to Hijack(), the DNS package will not do anything with the connection.
--	Hijack()
--}
--
--type response struct {
--	hijacked       bool // connection has been hijacked by handler
--	tsigStatus     error
--	tsigTimersOnly bool
--	tsigRequestMAC string
--	tsigSecret     map[string]string // the tsig secrets
--	udp            *net.UDPConn      // i/o connection if UDP was used
--	tcp            *net.TCPConn      // i/o connection if TCP was used
--	udpSession     *sessionUDP       // oob data to get egress interface right
--	remoteAddr     net.Addr          // address of the client
--}
--
--// ServeMux is a DNS request multiplexer. It matches the
--// zone name of each incoming request against a list of
--// registered patterns and calls the handler for the pattern
--// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
--// that queries for the DS record are redirected to the parent zone (if that
--// is also registered), otherwise the child gets the query.
--// ServeMux is also safe for concurrent access from multiple goroutines.
--type ServeMux struct {
--	z map[string]Handler
--	m *sync.RWMutex
--}
--
--// NewServeMux allocates and returns a new ServeMux.
--func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
--
--// DefaultServeMux is the default ServeMux used by Serve.
--var DefaultServeMux = NewServeMux()
--
--// The HandlerFunc type is an adapter to allow the use of
--// ordinary functions as DNS handlers.  If f is a function
--// with the appropriate signature, HandlerFunc(f) is a
--// Handler object that calls f.
--type HandlerFunc func(ResponseWriter, *Msg)
--
--// ServeDNS calls f(w, r).
--func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
--	f(w, r)
--}
--
--// HandleFailed returns a SERVFAIL for every request it gets.
--func HandleFailed(w ResponseWriter, r *Msg) {
--	m := new(Msg)
--	m.SetRcode(r, RcodeServerFailure)
--	// does not matter if this write fails
--	w.WriteMsg(m)
--}
--
--func failedHandler() Handler { return HandlerFunc(HandleFailed) }
--
--// ListenAndServe starts a server on the address and network specified. It invokes the handler
--// for incoming queries.
--func ListenAndServe(addr string, network string, handler Handler) error {
--	server := &Server{Addr: addr, Net: network, Handler: handler}
--	return server.ListenAndServe()
--}
--
--// ActivateAndServe activates a server with a listener from systemd,
--// l and p should not both be non-nil.
--// If both l and p are non-nil, only p will be used.
--// Invoke handler for incoming queries.
--func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
--	server := &Server{Listener: l, PacketConn: p, Handler: handler}
--	return server.ActivateAndServe()
--}
--
--func (mux *ServeMux) match(q string, t uint16) Handler {
--	mux.m.RLock()
--	defer mux.m.RUnlock()
--	var handler Handler
--	b := make([]byte, len(q)) // worst case, one label of length q
--	off := 0
--	end := false
--	for {
--		l := len(q[off:])
--		for i := 0; i < l; i++ {
--			b[i] = q[off+i]
--			if b[i] >= 'A' && b[i] <= 'Z' {
--				b[i] |= ('a' - 'A')
--			}
--		}
--		if h, ok := mux.z[string(b[:l])]; ok { // 'causes garbage, might want to change the map key
--			if t != TypeDS {
--				return h
--			} else {
--				// Continue for DS to see if we have a parent too, if so delegate to the parent
--				handler = h
--			}
--		}
--		off, end = NextLabel(q, off)
--		if end {
--			break
--		}
--	}
--	// Wildcard match, if we have found nothing try the root zone as a last resort.
--	if h, ok := mux.z["."]; ok {
--		return h
--	}
--	return handler
--}
--
--// Handle adds a handler to the ServeMux for pattern.
--func (mux *ServeMux) Handle(pattern string, handler Handler) {
--	if pattern == "" {
--		panic("dns: invalid pattern " + pattern)
--	}
--	mux.m.Lock()
--	mux.z[Fqdn(pattern)] = handler
--	mux.m.Unlock()
--}
--
--// HandleFunc adds a handler function to the ServeMux for pattern.
--func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
--	mux.Handle(pattern, HandlerFunc(handler))
--}
--
--// HandleRemove deregistrars the handler specific for pattern from the ServeMux.
--func (mux *ServeMux) HandleRemove(pattern string) {
--	if pattern == "" {
--		panic("dns: invalid pattern " + pattern)
--	}
--	// don't need a mutex here, because deleting is OK, even if the
--	// entry is not there.
--	delete(mux.z, Fqdn(pattern))
--}
--
--// ServeDNS dispatches the request to the handler whose
--// pattern most closely matches the request message. If DefaultServeMux
--// is used the correct thing for DS queries is done: a possible parent
--// is sought.
--// If no handler is found a standard SERVFAIL message is returned.
--// If the request message does not have exactly one question in the
--// question section a SERVFAIL is returned, unless Unsafe is true.
--func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
--	var h Handler
--	if len(request.Question) < 1 { // allow more than one question
--		h = failedHandler()
--	} else {
--		if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
--			h = failedHandler()
--		}
--	}
--	h.ServeDNS(w, request)
--}
--
--// Handle registers the handler with the given pattern
--// in the DefaultServeMux. The documentation for
--// ServeMux explains how patterns are matched.
--func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
--
--// HandleRemove deregisters the handle with the given pattern
--// in the DefaultServeMux.
--func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
--
--// HandleFunc registers the handler function with the given pattern
--// in the DefaultServeMux.
--func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
--	DefaultServeMux.HandleFunc(pattern, handler)
--}
--
--// A Server defines parameters for running a DNS server.
--type Server struct {
--	// Address to listen on, ":dns" if empty.
--	Addr string
--	// if "tcp" it will invoke a TCP listener, otherwise a UDP one.
--	Net string
--	// TCP Listener to use, this is to aid in systemd's socket activation.
--	Listener net.Listener
--	// UDP "Listener" to use, this is to aid in systemd's socket activation.
--	PacketConn net.PacketConn
--	// Handler to invoke, dns.DefaultServeMux if nil.
--	Handler Handler
--	// Default buffer size to use to read incoming UDP messages. If not set
--	// it defaults to MinMsgSize (512 B).
--	UDPSize int
--	// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
--	ReadTimeout time.Duration
--	// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
--	WriteTimeout time.Duration
--	// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
--	IdleTimeout func() time.Duration
--	// Secret(s) for Tsig map[<zonename>]<base64 secret>.
--	TsigSecret map[string]string
--	// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
--	// the handler. It will specifically not check whether the query has the QR bit set.
--	Unsafe bool
--	// If NotifyStartedFunc is set it is called once the server has started listening.
--	NotifyStartedFunc func()
--
--	// For graceful shutdown.
--	stopUDP chan bool
--	stopTCP chan bool
--	wgUDP   sync.WaitGroup
--	wgTCP   sync.WaitGroup
--
--	// make start/shutdown not racy
--	lock    sync.Mutex
--	started bool
--}
--
--// ListenAndServe starts a nameserver on the configured address in *Server.
--func (srv *Server) ListenAndServe() error {
--	srv.lock.Lock()
--	if srv.started {
--		return &Error{err: "server already started"}
--	}
--	srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
--	srv.started = true
--	srv.lock.Unlock()
--	addr := srv.Addr
--	if addr == "" {
--		addr = ":domain"
--	}
--	if srv.UDPSize == 0 {
--		srv.UDPSize = MinMsgSize
--	}
--	switch srv.Net {
--	case "tcp", "tcp4", "tcp6":
--		a, e := net.ResolveTCPAddr(srv.Net, addr)
--		if e != nil {
--			return e
--		}
--		l, e := net.ListenTCP(srv.Net, a)
--		if e != nil {
--			return e
--		}
--		return srv.serveTCP(l)
--	case "udp", "udp4", "udp6":
--		a, e := net.ResolveUDPAddr(srv.Net, addr)
--		if e != nil {
--			return e
--		}
--		l, e := net.ListenUDP(srv.Net, a)
--		if e != nil {
--			return e
--		}
--		if e := setUDPSocketOptions(l); e != nil {
--			return e
--		}
--		return srv.serveUDP(l)
--	}
--	return &Error{err: "bad network"}
--}
--
--// ActivateAndServe starts a nameserver with the PacketConn or Listener
--// configured in *Server. Its main use is to start a server from systemd.
--func (srv *Server) ActivateAndServe() error {
--	srv.lock.Lock()
--	if srv.started {
--		return &Error{err: "server already started"}
--	}
--	srv.stopUDP, srv.stopTCP = make(chan bool), make(chan bool)
--	srv.started = true
--	srv.lock.Unlock()
--	if srv.PacketConn != nil {
--		if srv.UDPSize == 0 {
--			srv.UDPSize = MinMsgSize
--		}
--		if t, ok := srv.PacketConn.(*net.UDPConn); ok {
--			if e := setUDPSocketOptions(t); e != nil {
--				return e
--			}
--			return srv.serveUDP(t)
--		}
--	}
--	if srv.Listener != nil {
--		if t, ok := srv.Listener.(*net.TCPListener); ok {
--			return srv.serveTCP(t)
--		}
--	}
--	return &Error{err: "bad listeners"}
--}
--
--// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
--// ActivateAndServe will return. All in progress queries are completed before the server
--// is taken down. If the Shutdown takes longer than the read timeout an error
--// is returned.
--func (srv *Server) Shutdown() error {
--	srv.lock.Lock()
--	if !srv.started {
--		return &Error{err: "server not started"}
--	}
--	srv.started = false
--	srv.lock.Unlock()
--	net, addr := srv.Net, srv.Addr
--	switch {
--	case srv.Listener != nil:
--		a := srv.Listener.Addr()
--		net, addr = a.Network(), a.String()
--	case srv.PacketConn != nil:
--		a := srv.PacketConn.LocalAddr()
--		net, addr = a.Network(), a.String()
--	}
--
--	fin := make(chan bool)
--	switch net {
--	case "tcp", "tcp4", "tcp6":
--		go func() {
--			srv.stopTCP <- true
--			srv.wgTCP.Wait()
--			fin <- true
--		}()
--
--	case "udp", "udp4", "udp6":
--		go func() {
--			srv.stopUDP <- true
--			srv.wgUDP.Wait()
--			fin <- true
--		}()
--	}
--
--	c := &Client{Net: net}
--	go c.Exchange(new(Msg), addr) // extra query to help ReadXXX loop to pass
--
--	select {
--	case <-time.After(srv.getReadTimeout()):
--		return &Error{err: "server shutdown is pending"}
--	case <-fin:
--		return nil
--	}
--}
--
--// getReadTimeout is a helper func that returns the default timeout unless the server set its own ReadTimeout.
--func (srv *Server) getReadTimeout() time.Duration {
--	rtimeout := dnsTimeout
--	if srv.ReadTimeout != 0 {
--		rtimeout = srv.ReadTimeout
--	}
--	return rtimeout
--}
--
--// serveTCP starts a TCP listener for the server.
--// Each request is handled in a separate goroutine.
--func (srv *Server) serveTCP(l *net.TCPListener) error {
--	defer l.Close()
--
--	if srv.NotifyStartedFunc != nil {
--		srv.NotifyStartedFunc()
--	}
--
--	handler := srv.Handler
--	if handler == nil {
--		handler = DefaultServeMux
--	}
--	rtimeout := srv.getReadTimeout()
--	// deadline is not used here
--	for {
--		rw, e := l.AcceptTCP()
--		if e != nil {
--			continue
--		}
--		m, e := srv.readTCP(rw, rtimeout)
--		select {
--		case <-srv.stopTCP:
--			return nil
--		default:
--		}
--		if e != nil {
--			continue
--		}
--		srv.wgTCP.Add(1)
--		go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
--	}
--	panic("dns: not reached")
--}
--
--// serveUDP starts a UDP listener for the server.
--// Each request is handled in a separate goroutine.
--func (srv *Server) serveUDP(l *net.UDPConn) error {
--	defer l.Close()
--
--	if srv.NotifyStartedFunc != nil {
--		srv.NotifyStartedFunc()
--	}
--
--	handler := srv.Handler
--	if handler == nil {
--		handler = DefaultServeMux
--	}
--	rtimeout := srv.getReadTimeout()
--	// deadline is not used here
--	for {
--		m, s, e := srv.readUDP(l, rtimeout)
--		select {
--		case <-srv.stopUDP:
--			return nil
--		default:
--		}
--		if e != nil {
--			continue
--		}
--		srv.wgUDP.Add(1)
--		go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
--	}
--	panic("dns: not reached")
--}
--
--// Serve a new connection.
--func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *sessionUDP, t *net.TCPConn) {
--	w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
--	q := 0
--	defer func() {
--		if u != nil {
--			srv.wgUDP.Done()
--		}
--		if t != nil {
--			srv.wgTCP.Done()
--		}
--	}()
--Redo:
--	// Ideally we want to use isMsg here before we allocate memory to actually parse the packet.
--	req := new(Msg)
--	err := req.Unpack(m)
--	if err != nil { // Send a FormatError back
--		x := new(Msg)
--		x.SetRcodeFormatError(req)
--		w.WriteMsg(x)
--		goto Exit
--	}
--	if !srv.Unsafe && req.Response {
--		goto Exit
--	}
--
--	w.tsigStatus = nil
--	if w.tsigSecret != nil {
--		if t := req.IsTsig(); t != nil {
--			secret := t.Hdr.Name
--			if _, ok := w.tsigSecret[secret]; !ok {
--				w.tsigStatus = ErrKeyAlg
--			}
--			w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
--			w.tsigTimersOnly = false
--			w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
--		}
--	}
--	h.ServeDNS(w, req) // Writes back to the client
--
--Exit:
--	if w.hijacked {
--		return // client calls Close()
--	}
--	if u != nil { // UDP, "close" and return
--		w.Close()
--		return
--	}
--	idleTimeout := tcpIdleTimeout
--	if srv.IdleTimeout != nil {
--		idleTimeout = srv.IdleTimeout()
--	}
--	m, e := srv.readTCP(w.tcp, idleTimeout)
--	if e == nil {
--		q++
--		// TODO(miek): make this number configurable?
--		if q > 128 { // close socket after this many queries
--			w.Close()
--			return
--		}
--		goto Redo
--	}
--	w.Close()
--	return
--}
--
--func (srv *Server) readTCP(conn *net.TCPConn, timeout time.Duration) ([]byte, error) {
--	conn.SetReadDeadline(time.Now().Add(timeout))
--	l := make([]byte, 2)
--	n, err := conn.Read(l)
--	if err != nil || n != 2 {
--		if err != nil {
--			return nil, err
--		}
--		return nil, ErrShortRead
--	}
--	length, _ := unpackUint16(l, 0)
--	if length == 0 {
--		return nil, ErrShortRead
--	}
--	m := make([]byte, int(length))
--	n, err = conn.Read(m[:int(length)])
--	if err != nil || n == 0 {
--		if err != nil {
--			return nil, err
--		}
--		return nil, ErrShortRead
--	}
--	i := n
--	for i < int(length) {
--		j, err := conn.Read(m[i:int(length)])
--		if err != nil {
--			return nil, err
--		}
--		i += j
--	}
--	n = i
--	m = m[:n]
--	return m, nil
--}
--
--func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *sessionUDP, error) {
--	conn.SetReadDeadline(time.Now().Add(timeout))
--	m := make([]byte, srv.UDPSize)
--	n, s, e := readFromSessionUDP(conn, m)
--	if e != nil || n == 0 {
--		if e != nil {
--			return nil, nil, e
--		}
--		return nil, nil, ErrShortRead
--	}
--	m = m[:n]
--	return m, s, nil
--}
--
--// WriteMsg implements the ResponseWriter.WriteMsg method.
--func (w *response) WriteMsg(m *Msg) (err error) {
--	var data []byte
--	if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
--		if t := m.IsTsig(); t != nil {
--			data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
--			if err != nil {
--				return err
--			}
--			_, err = w.Write(data)
--			return err
--		}
--	}
--	data, err = m.Pack()
--	if err != nil {
--		return err
--	}
--	_, err = w.Write(data)
--	return err
--}
--
--// Write implements the ResponseWriter.Write method.
--func (w *response) Write(m []byte) (int, error) {
--	switch {
--	case w.udp != nil:
--		n, err := writeToSessionUDP(w.udp, m, w.udpSession)
--		return n, err
--	case w.tcp != nil:
--		lm := len(m)
--		if lm < 2 {
--			return 0, io.ErrShortBuffer
--		}
--		if lm > MaxMsgSize {
--			return 0, &Error{err: "message too large"}
--		}
--		l := make([]byte, 2, 2+lm)
--		l[0], l[1] = packUint16(uint16(lm))
--		m = append(l, m...)
--
--		n, err := io.Copy(w.tcp, bytes.NewReader(m))
--		return int(n), err
--	}
--	panic("not reached")
--}
--
--// LocalAddr implements the ResponseWriter.LocalAddr method.
--func (w *response) LocalAddr() net.Addr {
--	if w.tcp != nil {
--		return w.tcp.LocalAddr()
--	}
--	return w.udp.LocalAddr()
--}
--
--// RemoteAddr implements the ResponseWriter.RemoteAddr method.
--func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
--
--// TsigStatus implements the ResponseWriter.TsigStatus method.
--func (w *response) TsigStatus() error { return w.tsigStatus }
--
--// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
--func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
--
--// Hijack implements the ResponseWriter.Hijack method.
--func (w *response) Hijack() { w.hijacked = true }
--
--// Close implements the ResponseWriter.Close method
--func (w *response) Close() error {
--	// Can't close the udp conn, as that is actually the listener.
--	if w.tcp != nil {
--		e := w.tcp.Close()
--		w.tcp = nil
--		return e
--	}
--	return nil
--}
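Taken together, a minimal usage sketch of the server API removed here: HandleFunc registers a handler on DefaultServeMux, ListenAndServe binds the socket, and Shutdown stops it gracefully. The address 127.0.0.1:8053, the zone example.org. and the fixed TXT payload are placeholders chosen for the sketch, and it assumes that local port is free.

    package main

    import (
        "log"
        "time"

        "github.com/miekg/dns"
    )

    func main() {
        // Answer every query under example.org. with a fixed TXT record in the additional section.
        dns.HandleFunc("example.org.", func(w dns.ResponseWriter, req *dns.Msg) {
            m := new(dns.Msg)
            m.SetReply(req)
            m.Extra = append(m.Extra, &dns.TXT{
                Hdr: dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET},
                Txt: []string{"hello"},
            })
            w.WriteMsg(m)
        })

        // 127.0.0.1:8053 is an arbitrary local address; a nil Handler means DefaultServeMux.
        srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}
        go func() {
            if err := srv.ListenAndServe(); err != nil {
                log.Println(err)
            }
        }()

        time.Sleep(time.Second) // serve briefly, then stop; in-flight queries finish first
        if err := srv.Shutdown(); err != nil {
            log.Println(err)
        }
    }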
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go b/Godeps/_workspace/src/github.com/miekg/dns/server_test.go
-deleted file mode 100644
-index 5efb1cf..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/server_test.go
-+++ /dev/null
-@@ -1,401 +0,0 @@
--package dns
--
--import (
--	"fmt"
--	"net"
--	"runtime"
--	"sync"
--	"testing"
--)
--
--func HelloServer(w ResponseWriter, req *Msg) {
--	m := new(Msg)
--	m.SetReply(req)
--
--	m.Extra = make([]RR, 1)
--	m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
--	w.WriteMsg(m)
--}
--
--func AnotherHelloServer(w ResponseWriter, req *Msg) {
--	m := new(Msg)
--	m.SetReply(req)
--
--	m.Extra = make([]RR, 1)
--	m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}}
--	w.WriteMsg(m)
--}
--
--func RunLocalUDPServer(laddr string) (*Server, string, error) {
--	pc, err := net.ListenPacket("udp", laddr)
--	if err != nil {
--		return nil, "", err
--	}
--	server := &Server{PacketConn: pc}
--
--	waitLock := sync.Mutex{}
--	waitLock.Lock()
--	server.NotifyStartedFunc = waitLock.Unlock
--
--	go func() {
--		server.ActivateAndServe()
--		pc.Close()
--	}()
--
--	waitLock.Lock()
--	return server, pc.LocalAddr().String(), nil
--}
--
--func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) {
--	pc, err := net.ListenPacket("udp", laddr)
--	if err != nil {
--		return nil, "", err
--	}
--	server := &Server{PacketConn: pc, Unsafe: true}
--
--	waitLock := sync.Mutex{}
--	waitLock.Lock()
--	server.NotifyStartedFunc = waitLock.Unlock
--
--	go func() {
--		server.ActivateAndServe()
--		pc.Close()
--	}()
--
--	waitLock.Lock()
--	return server, pc.LocalAddr().String(), nil
--}
--
--func RunLocalTCPServer(laddr string) (*Server, string, error) {
--	l, err := net.Listen("tcp", laddr)
--	if err != nil {
--		return nil, "", err
--	}
--
--	server := &Server{Listener: l}
--
--	waitLock := sync.Mutex{}
--	waitLock.Lock()
--	server.NotifyStartedFunc = waitLock.Unlock
--
--	go func() {
--		server.ActivateAndServe()
--		l.Close()
--	}()
--
--	waitLock.Lock()
--	return server, l.Addr().String(), nil
--}
--
--func TestServing(t *testing.T) {
--	HandleFunc("miek.nl.", HelloServer)
--	HandleFunc("example.com.", AnotherHelloServer)
--	defer HandleRemove("miek.nl.")
--	defer HandleRemove("example.com.")
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	c := new(Client)
--	m := new(Msg)
--	m.SetQuestion("miek.nl.", TypeTXT)
--	r, _, err := c.Exchange(m, addrstr)
--	if err != nil || len(r.Extra) == 0 {
--		t.Log("failed to exchange miek.nl", err)
--		t.Fatal()
--	}
--	txt := r.Extra[0].(*TXT).Txt[0]
--	if txt != "Hello world" {
--		t.Log("Unexpected result for miek.nl", txt, "!= Hello world")
--		t.Fail()
--	}
--
--	m.SetQuestion("example.com.", TypeTXT)
--	r, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Log("failed to exchange example.com", err)
--		t.Fatal()
--	}
--	txt = r.Extra[0].(*TXT).Txt[0]
--	if txt != "Hello example" {
--		t.Log("Unexpected result for example.com", txt, "!= Hello example")
--		t.Fail()
--	}
--
--	// Test mixed case, as noticed by Ask.
--	m.SetQuestion("eXaMplE.cOm.", TypeTXT)
--	r, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Log("failed to exchange eXaMplE.cOm", err)
--		t.Fail()
--	}
--	txt = r.Extra[0].(*TXT).Txt[0]
--	if txt != "Hello example" {
--		t.Log("Unexpected result for example.com", txt, "!= Hello example")
--		t.Fail()
--	}
--}
--
--func BenchmarkServe(b *testing.B) {
--	b.StopTimer()
--	HandleFunc("miek.nl.", HelloServer)
--	defer HandleRemove("miek.nl.")
--	a := runtime.GOMAXPROCS(4)
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		b.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	c := new(Client)
--	m := new(Msg)
--	m.SetQuestion("miek.nl", TypeSOA)
--
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		c.Exchange(m, addrstr)
--	}
--	runtime.GOMAXPROCS(a)
--}
--
--func benchmarkServe6(b *testing.B) {
--	b.StopTimer()
--	HandleFunc("miek.nl.", HelloServer)
--	defer HandleRemove("miek.nl.")
--	a := runtime.GOMAXPROCS(4)
--	s, addrstr, err := RunLocalUDPServer("[::1]:0")
--	if err != nil {
--		b.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	c := new(Client)
--	m := new(Msg)
--	m.SetQuestion("miek.nl", TypeSOA)
--
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		c.Exchange(m, addrstr)
--	}
--	runtime.GOMAXPROCS(a)
--}
--
--func HelloServerCompress(w ResponseWriter, req *Msg) {
--	m := new(Msg)
--	m.SetReply(req)
--	m.Extra = make([]RR, 1)
--	m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
--	m.Compress = true
--	w.WriteMsg(m)
--}
--
--func BenchmarkServeCompress(b *testing.B) {
--	b.StopTimer()
--	HandleFunc("miek.nl.", HelloServerCompress)
--	defer HandleRemove("miek.nl.")
--	a := runtime.GOMAXPROCS(4)
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		b.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	c := new(Client)
--	m := new(Msg)
--	m.SetQuestion("miek.nl", TypeSOA)
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		c.Exchange(m, addrstr)
--	}
--	runtime.GOMAXPROCS(a)
--}
--
--func TestDotAsCatchAllWildcard(t *testing.T) {
--	mux := NewServeMux()
--	mux.Handle(".", HandlerFunc(HelloServer))
--	mux.Handle("example.com.", HandlerFunc(AnotherHelloServer))
--
--	handler := mux.match("www.miek.nl.", TypeTXT)
--	if handler == nil {
--		t.Error("wildcard match failed")
--	}
--
--	handler = mux.match("www.example.com.", TypeTXT)
--	if handler == nil {
--		t.Error("example.com match failed")
--	}
--
--	handler = mux.match("a.www.example.com.", TypeTXT)
--	if handler == nil {
--		t.Error("a.www.example.com match failed")
--	}
--
--	handler = mux.match("boe.", TypeTXT)
--	if handler == nil {
--		t.Error("boe. match failed")
--	}
--}
--
--func TestCaseFolding(t *testing.T) {
--	mux := NewServeMux()
--	mux.Handle("_udp.example.com.", HandlerFunc(HelloServer))
--
--	handler := mux.match("_dns._udp.example.com.", TypeSRV)
--	if handler == nil {
--		t.Error("case sensitive characters folded")
--	}
--
--	handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV)
--	if handler == nil {
--		t.Error("case insensitive characters not folded")
--	}
--}
--
--func TestRootServer(t *testing.T) {
--	mux := NewServeMux()
--	mux.Handle(".", HandlerFunc(HelloServer))
--
--	handler := mux.match(".", TypeNS)
--	if handler == nil {
--		t.Error("root match failed")
--	}
--}
--
--type maxRec struct {
--	max int
--	sync.RWMutex
--}
--
--var M = new(maxRec)
--
--func HelloServerLargeResponse(resp ResponseWriter, req *Msg) {
--	m := new(Msg)
--	m.SetReply(req)
--	m.Authoritative = true
--	m1 := 0
--	M.RLock()
--	m1 = M.max
--	M.RUnlock()
--	for i := 0; i < m1; i++ {
--		aRec := &A{
--			Hdr: RR_Header{
--				Name:   req.Question[0].Name,
--				Rrtype: TypeA,
--				Class:  ClassINET,
--				Ttl:    0,
--			},
--			A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(),
--		}
--		m.Answer = append(m.Answer, aRec)
--	}
--	resp.WriteMsg(m)
--}
--
--func TestServingLargeResponses(t *testing.T) {
--	HandleFunc("example.", HelloServerLargeResponse)
--	defer HandleRemove("example.")
--
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	// Create request
--	m := new(Msg)
--	m.SetQuestion("web.service.example.", TypeANY)
--
--	c := new(Client)
--	c.Net = "udp"
--	M.Lock()
--	M.max = 2
--	M.Unlock()
--	_, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Logf("failed to exchange: %s", err.Error())
--		t.Fail()
--	}
--	// This must fail
--	M.Lock()
--	M.max = 20
--	M.Unlock()
--	_, _, err = c.Exchange(m, addrstr)
--	if err == nil {
--		t.Logf("failed to fail exchange, this should generate packet error")
--		t.Fail()
--	}
--	// But this must work again
--	c.UDPSize = 7000
--	_, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Logf("failed to exchange: %s", err.Error())
--		t.Fail()
--	}
--}
--
--func TestServingResponse(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	HandleFunc("miek.nl.", HelloServer)
--	s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--
--	c := new(Client)
--	m := new(Msg)
--	m.SetQuestion("miek.nl.", TypeTXT)
--	m.Response = false
--	_, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Log("failed to exchange", err)
--		t.Fatal()
--	}
--	m.Response = true
--	_, _, err = c.Exchange(m, addrstr)
--	if err == nil {
--		t.Log("exchanged response message")
--		t.Fatal()
--	}
--
--	s.Shutdown()
--	s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	defer s.Shutdown()
--
--	m.Response = true
--	_, _, err = c.Exchange(m, addrstr)
--	if err != nil {
--		t.Log("could not exchange response message in Unsafe mode")
--		t.Fatal()
--	}
--}
--
--func TestShutdownTCP(t *testing.T) {
--	s, _, err := RunLocalTCPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	err = s.Shutdown()
--	if err != nil {
--		t.Errorf("Could not shutdown test TCP server, %s", err)
--	}
--}
--
--func TestShutdownUDP(t *testing.T) {
--	s, _, err := RunLocalUDPServer("127.0.0.1:0")
--	if err != nil {
--		t.Fatalf("Unable to run test server: %s", err)
--	}
--	err = s.Shutdown()
--	if err != nil {
--		t.Errorf("Could not shutdown test UDP server, %s", err)
--	}
--}
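The tests above drive the server through Client.Exchange; for completeness, a standalone client-side sketch of the same call. The resolver address 8.8.8.8:53 and the queried name example.org. are placeholders, and the sketch assumes network access to some resolver.

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        // Ask a resolver for the TXT records of a name and print the answers.
        c := new(dns.Client)
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeTXT)

        // 8.8.8.8:53 is only an example resolver; any reachable DNS server works.
        r, rtt, err := c.Exchange(m, "8.8.8.8:53")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("round trip:", rtt)
        for _, a := range r.Answer {
            fmt.Println(a.String())
        }
    }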
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0.go b/Godeps/_workspace/src/github.com/miekg/dns/sig0.go
-deleted file mode 100644
-index d96b31b..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/sig0.go
-+++ /dev/null
-@@ -1,262 +0,0 @@
--// SIG(0)
--//
--// From RFC 2931:
--//
--//     SIG(0) provides protection for DNS transactions and requests ....
--//     ... protection for glue records, DNS requests, protection for message headers
--//     on requests and responses, and protection of the overall integrity of a response.
--//
--// It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
--// secret approach in TSIG.
--// Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
--// RSASHA512.
--//
--// Signing subsequent messages in multi-message sessions is not implemented.
--//
--package dns
--
--import (
--	"crypto"
--	"crypto/dsa"
--	"crypto/ecdsa"
--	"crypto/rand"
--	"crypto/rsa"
--	"math/big"
--	"strings"
--	"time"
--)
--
--// Sign signs a dns.Msg. It fills the signature with the appropriate data.
--// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
--// and Expiration set.
--func (rr *SIG) Sign(k PrivateKey, m *Msg) ([]byte, error) {
--	if k == nil {
--		return nil, ErrPrivKey
--	}
--	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
--		return nil, ErrKey
--	}
--	rr.Header().Rrtype = TypeSIG
--	rr.Header().Class = ClassANY
--	rr.Header().Ttl = 0
--	rr.Header().Name = "."
--	rr.OrigTtl = 0
--	rr.TypeCovered = 0
--	rr.Labels = 0
--
--	buf := make([]byte, m.Len()+rr.len())
--	mbuf, err := m.PackBuffer(buf)
--	if err != nil {
--		return nil, err
--	}
--	if &buf[0] != &mbuf[0] {
--		return nil, ErrBuf
--	}
--	off, err := PackRR(rr, buf, len(mbuf), nil, false)
--	if err != nil {
--		return nil, err
--	}
--	buf = buf[:off:cap(buf)]
--	var hash crypto.Hash
--	var intlen int
--	switch rr.Algorithm {
--	case DSA, RSASHA1:
--		hash = crypto.SHA1
--	case RSASHA256, ECDSAP256SHA256:
--		hash = crypto.SHA256
--		intlen = 32
--	case ECDSAP384SHA384:
--		hash = crypto.SHA384
--		intlen = 48
--	case RSASHA512:
--		hash = crypto.SHA512
--	default:
--		return nil, ErrAlg
--	}
--	hasher := hash.New()
--	// Write SIG rdata
--	hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
--	// Write message
--	hasher.Write(buf[:len(mbuf)])
--	hashed := hasher.Sum(nil)
--
--	var sig []byte
--	switch p := k.(type) {
--	case *dsa.PrivateKey:
--		t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
--		r1, s1, err := dsa.Sign(rand.Reader, p, hashed)
--		if err != nil {
--			return nil, err
--		}
--		sig = append(sig, byte(t))
--		sig = append(sig, intToBytes(r1, 20)...)
--		sig = append(sig, intToBytes(s1, 20)...)
--	case *rsa.PrivateKey:
--		sig, err = rsa.SignPKCS1v15(rand.Reader, p, hash, hashed)
--		if err != nil {
--			return nil, err
--		}
--	case *ecdsa.PrivateKey:
--		r1, s1, err := ecdsa.Sign(rand.Reader, p, hashed)
--		if err != nil {
--			return nil, err
--		}
--		sig = intToBytes(r1, intlen)
--		sig = append(sig, intToBytes(s1, intlen)...)
--	default:
--		return nil, ErrAlg
--	}
--	rr.Signature = toBase64(sig)
--	buf = append(buf, sig...)
--	if len(buf) > int(^uint16(0)) {
--		return nil, ErrBuf
--	}
--	// Adjust sig data length
--	rdoff := len(mbuf) + 1 + 2 + 2 + 4
--	rdlen, _ := unpackUint16(buf, rdoff)
--	rdlen += uint16(len(sig))
--	buf[rdoff], buf[rdoff+1] = packUint16(rdlen)
--	// Adjust additional count
--	adc, _ := unpackUint16(buf, 10)
--	adc += 1
--	buf[10], buf[11] = packUint16(adc)
--	return buf, nil
--}
--
--// Verify validates the message buf using the key k.
--// It's assumed that buf is a valid message from which rr was unpacked.
--func (rr *SIG) Verify(k *KEY, buf []byte) error {
--	if k == nil {
--		return ErrKey
--	}
--	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
--		return ErrKey
--	}
--
--	var hash crypto.Hash
--	switch rr.Algorithm {
--	case DSA, RSASHA1:
--		hash = crypto.SHA1
--	case RSASHA256, ECDSAP256SHA256:
--		hash = crypto.SHA256
--	case ECDSAP384SHA384:
--		hash = crypto.SHA384
--	case RSASHA512:
--		hash = crypto.SHA512
--	default:
--		return ErrAlg
--	}
--	hasher := hash.New()
--
--	buflen := len(buf)
--	qdc, _ := unpackUint16(buf, 4)
--	anc, _ := unpackUint16(buf, 6)
--	auc, _ := unpackUint16(buf, 8)
--	adc, offset := unpackUint16(buf, 10)
--	var err error
--	for i := uint16(0); i < qdc && offset < buflen; i++ {
--		_, offset, err = UnpackDomainName(buf, offset)
--		if err != nil {
--			return err
--		}
--		// Skip past Type and Class
--		offset += 2 + 2
--	}
--	for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
--		_, offset, err = UnpackDomainName(buf, offset)
--		if err != nil {
--			return err
--		}
--		// Skip past Type, Class and TTL
--		offset += 2 + 2 + 4
--		if offset+1 >= buflen {
--			continue
--		}
--		var rdlen uint16
--		rdlen, offset = unpackUint16(buf, offset)
--		offset += int(rdlen)
--	}
--	if offset >= buflen {
--		return &Error{err: "overflow unpacking signed message"}
--	}
--
--	// offset should be just prior to SIG
--	bodyend := offset
--	// owner name SHOULD be root
--	_, offset, err = UnpackDomainName(buf, offset)
--	if err != nil {
--		return err
--	}
--	// Skip Type, Class, TTL, RDLen
--	offset += 2 + 2 + 4 + 2
--	sigstart := offset
--	// Skip Type Covered, Algorithm, Labels, Original TTL
--	offset += 2 + 1 + 1 + 4
--	if offset+4+4 >= buflen {
--		return &Error{err: "overflow unpacking signed message"}
--	}
--	expire := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
--	offset += 4
--	incept := uint32(buf[offset])<<24 | uint32(buf[offset+1])<<16 | uint32(buf[offset+2])<<8 | uint32(buf[offset+3])
--	offset += 4
--	now := uint32(time.Now().Unix())
--	if now < incept || now > expire {
--		return ErrTime
--	}
--	// Skip key tag
--	offset += 2
--	var signername string
--	signername, offset, err = UnpackDomainName(buf, offset)
--	if err != nil {
--		return err
--	}
--	// If the key has come from the DNS, name compression might
--	// have mangled the case of the name.
--	if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
--		return &Error{err: "signer name doesn't match key name"}
--	}
--	sigend := offset
--	hasher.Write(buf[sigstart:sigend])
--	hasher.Write(buf[:10])
--	hasher.Write([]byte{
--		byte((adc - 1) << 8),
--		byte(adc - 1),
--	})
--	hasher.Write(buf[12:bodyend])
--
--	hashed := hasher.Sum(nil)
--	sig := buf[sigend:]
--	switch k.Algorithm {
--	case DSA:
--		pk := k.publicKeyDSA()
--		sig = sig[1:]
--		r := big.NewInt(0)
--		r.SetBytes(sig[:len(sig)/2])
--		s := big.NewInt(0)
--		s.SetBytes(sig[len(sig)/2:])
--		if pk != nil {
--			if dsa.Verify(pk, hashed, r, s) {
--				return nil
--			}
--			return ErrSig
--		}
--	case RSASHA1, RSASHA256, RSASHA512:
--		pk := k.publicKeyRSA()
--		if pk != nil {
--			return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
--		}
--	case ECDSAP256SHA256, ECDSAP384SHA384:
--		pk := k.publicKeyCurve()
--		r := big.NewInt(0)
--		r.SetBytes(sig[:len(sig)/2])
--		s := big.NewInt(0)
--		s.SetBytes(sig[len(sig)/2:])
--		if pk != nil {
--			if ecdsa.Verify(pk, hashed, r, s) {
--				return nil
--			}
--			return ErrSig
--		}
--	}
--	return ErrKeyAlg
--}
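The test file removed next exercises SIG(0) for every supported algorithm; a condensed single-algorithm sketch of the Sign/Verify round trip follows. The key name example.org. and the ten-minute validity window are arbitrary choices for the sketch.

    package main

    import (
        "log"
        "time"

        "github.com/miekg/dns"
    )

    func main() {
        // Sign a query with SIG(0) using a freshly generated ECDSA P-256 key,
        // then verify the wire form against the matching KEY record.
        key := new(dns.KEY)
        key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeKEY, Class: dns.ClassINET}
        key.Algorithm = dns.ECDSAP256SHA256
        priv, err := key.Generate(256)
        if err != nil {
            log.Fatal(err)
        }

        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeSOA)

        now := uint32(time.Now().Unix())
        sig := new(dns.SIG)
        sig.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeSIG, Class: dns.ClassANY}
        sig.Algorithm = key.Algorithm
        sig.Inception, sig.Expiration = now-300, now+300 // arbitrary validity window
        sig.KeyTag = key.KeyTag()
        sig.SignerName = key.Hdr.Name

        wire, err := sig.Sign(priv, m) // appends the SIG RR and returns the signed message
        if err != nil {
            log.Fatal(err)
        }
        if err := sig.Verify(key, wire); err != nil {
            log.Fatal(err)
        }
        log.Println("SIG(0) round trip verified")
    }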
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go b/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go
-deleted file mode 100644
-index 6ca76fb..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/sig0_test.go
-+++ /dev/null
-@@ -1,96 +0,0 @@
--package dns
--
--import (
--	"testing"
--	"time"
--)
--
--func TestSIG0(t *testing.T) {
--	if testing.Short() {
--		t.Skip("skipping test in short mode.")
--	}
--	m := new(Msg)
--	m.SetQuestion("example.org.", TypeSOA)
--	for _, alg := range []uint8{DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} {
--		algstr := AlgorithmToString[alg]
--		keyrr := new(KEY)
--		keyrr.Hdr.Name = algstr + "."
--		keyrr.Hdr.Rrtype = TypeKEY
--		keyrr.Hdr.Class = ClassINET
--		keyrr.Algorithm = alg
--		keysize := 1024
--		switch alg {
--		case ECDSAP256SHA256:
--			keysize = 256
--		case ECDSAP384SHA384:
--			keysize = 384
--		}
--		pk, err := keyrr.Generate(keysize)
--		if err != nil {
--			t.Logf("Failed to generate key for “%s”: %v", algstr, err)
--			t.Fail()
--			continue
--		}
--		now := uint32(time.Now().Unix())
--		sigrr := new(SIG)
--		sigrr.Hdr.Name = "."
--		sigrr.Hdr.Rrtype = TypeSIG
--		sigrr.Hdr.Class = ClassANY
--		sigrr.Algorithm = alg
--		sigrr.Expiration = now + 300
--		sigrr.Inception = now - 300
--		sigrr.KeyTag = keyrr.KeyTag()
--		sigrr.SignerName = keyrr.Hdr.Name
--		mb, err := sigrr.Sign(pk, m)
--		if err != nil {
--			t.Logf("Failed to sign message using “%s”: %v", algstr, err)
--			t.Fail()
--			continue
--		}
--		m := new(Msg)
--		if err := m.Unpack(mb); err != nil {
--			t.Logf("Failed to unpack message signed using “%s”: %v", algstr, err)
--			t.Fail()
--			continue
--		}
--		if len(m.Extra) != 1 {
--			t.Logf("Missing SIG for message signed using “%s”", algstr)
--			t.Fail()
--			continue
--		}
--		var sigrrwire *SIG
--		switch rr := m.Extra[0].(type) {
--		case *SIG:
--			sigrrwire = rr
--		default:
--			t.Logf("Expected SIG RR, instead: %v", rr)
--			t.Fail()
--			continue
--		}
--		for _, rr := range []*SIG{sigrr, sigrrwire} {
--			id := "sigrr"
--			if rr == sigrrwire {
--				id = "sigrrwire"
--			}
--			if err := rr.Verify(keyrr, mb); err != nil {
--				t.Logf("Failed to verify “%s” signed SIG(%s): %v", algstr, id, err)
--				t.Fail()
--				continue
--			}
--		}
--		mb[13]++
--		if err := sigrr.Verify(keyrr, mb); err == nil {
--			t.Logf("Verify succeeded on an altered message using “%s”", algstr)
--			t.Fail()
--			continue
--		}
--		sigrr.Expiration = 2
--		sigrr.Inception = 1
--		mb, _ = sigrr.Sign(pk, m)
--		if err := sigrr.Verify(keyrr, mb); err == nil {
--			t.Logf("Verify succeeded on an expired message using “%s”", algstr)
--			t.Fail()
--			continue
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go b/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go
-deleted file mode 100644
-index 9573c7d..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/singleinflight.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--// Copyright 2013 The Go Authors.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Adapted for dns package usage by Miek Gieben.
--
--package dns
--
--import "sync"
--import "time"
--
--// call is an in-flight or completed singleflight.Do call
--type call struct {
--	wg   sync.WaitGroup
--	val  *Msg
--	rtt  time.Duration
--	err  error
--	dups int
--}
--
--// singleflight represents a class of work and forms a namespace in
--// which units of work can be executed with duplicate suppression.
--type singleflight struct {
--	sync.Mutex                  // protects m
--	m          map[string]*call // lazily initialized
--}
--
--// Do executes and returns the results of the given function, making
--// sure that only one execution is in-flight for a given key at a
--// time. If a duplicate comes in, the duplicate caller waits for the
--// original to complete and receives the same results.
--// The return value shared indicates whether v was given to multiple callers.
--func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
--	g.Lock()
--	if g.m == nil {
--		g.m = make(map[string]*call)
--	}
--	if c, ok := g.m[key]; ok {
--		c.dups++
--		g.Unlock()
--		c.wg.Wait()
--		return c.val, c.rtt, c.err, true
--	}
--	c := new(call)
--	c.wg.Add(1)
--	g.m[key] = c
--	g.Unlock()
--
--	c.val, c.rtt, c.err = fn()
--	c.wg.Done()
--
--	g.Lock()
--	delete(g.m, key)
--	g.Unlock()
--
--	return c.val, c.rtt, c.err, c.dups > 0
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go b/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go
-deleted file mode 100644
-index d3bc3b0..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/tlsa.go
-+++ /dev/null
-@@ -1,84 +0,0 @@
--package dns
--
--import (
--	"crypto/sha256"
--	"crypto/sha512"
--	"crypto/x509"
--	"encoding/hex"
--	"errors"
--	"io"
--	"net"
--	"strconv"
--)
--
--// CertificateToDANE converts a certificate to a hex string as used in the TLSA record.
--func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
--	switch matchingType {
--	case 0:
--		switch selector {
--		case 0:
--			return hex.EncodeToString(cert.Raw), nil
--		case 1:
--			return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
--		}
--	case 1:
--		h := sha256.New()
--		switch selector {
--		case 0:
--			return hex.EncodeToString(cert.Raw), nil
--		case 1:
--			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
--			return hex.EncodeToString(h.Sum(nil)), nil
--		}
--	case 2:
--		h := sha512.New()
--		switch selector {
--		case 0:
--			return hex.EncodeToString(cert.Raw), nil
--		case 1:
--			io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
--			return hex.EncodeToString(h.Sum(nil)), nil
--		}
--	}
--	return "", errors.New("dns: bad TLSA MatchingType or TLSA Selector")
--}
--
--// Sign creates a TLSA record from an SSL certificate.
--func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
--	r.Hdr.Rrtype = TypeTLSA
--	r.Usage = uint8(usage)
--	r.Selector = uint8(selector)
--	r.MatchingType = uint8(matchingType)
--
--	r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
--	if err != nil {
--		return err
--	}
--	return nil
--}
--
--// Verify verifies a TLSA record against an SSL certificate. If it is OK
--// a nil error is returned.
--func (r *TLSA) Verify(cert *x509.Certificate) error {
--	c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
--	if err != nil {
--		return err // Not also ErrSig?
--	}
--	if r.Certificate == c {
--		return nil
--	}
--	return ErrSig // ErrSig, really?
--}
--
--// TLSAName returns the owner name of a TLSA resource record as per the
--// rules specified in RFC 6698, Section 3.
--func TLSAName(name, service, network string) (string, error) {
--	if !IsFqdn(name) {
--		return "", ErrFqdn
--	}
--	p, e := net.LookupPort(network, service)
--	if e != nil {
--		return "", e
--	}
--	return "_" + strconv.Itoa(p) + "_" + network + "." + name, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go b/Godeps/_workspace/src/github.com/miekg/dns/tsig.go
-deleted file mode 100644
-index 2c64ee8..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/tsig.go
-+++ /dev/null
-@@ -1,378 +0,0 @@
--// TRANSACTION SIGNATURE
--//
--// A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
--// The supported algorithms include: HmacMD5, HmacSHA1 and HmacSHA256.
--//
--// Basic use pattern when querying with a TSIG name "axfr." (note that these key names
--// must be fully qualified - as they are domain names) and the base64 secret
--// "so6ZGir4GPAqINNh9U5c3A==":
--//
--//	c := new(dns.Client)
--//	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
--//	m := new(dns.Msg)
--//	m.SetQuestion("miek.nl.", dns.TypeMX)
--//	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
--//	...
--//	// When sending the TSIG RR is calculated and filled in before sending
--//
--// When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers), with
--// TSIG, this is the basic use pattern. In this example we request an AXFR for
--// miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
--// and using the server 176.58.119.54:
--//
--//	t := new(dns.Transfer)
--//	m := new(dns.Msg)
--//	t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
--//	m.SetAxfr("miek.nl.")
--//	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
--//	c, err := t.In(m, "176.58.119.54:53")
--//	for r := range c { /* r.RR */ }
--//
--// You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
--// If something is not correct an error is returned.
--//
--// Basic use pattern validating and replying to a message that has TSIG set.
--//
--//	server := &dns.Server{Addr: ":53", Net: "udp"}
--//	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
--//	go server.ListenAndServe()
--//	dns.HandleFunc(".", handleRequest)
--//
--// 	func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
--//		m := new(Msg)
--//		m.SetReply(r)
--//		if r.IsTsig() {
--//			if w.TsigStatus() == nil {
--//			// *Msg r has a TSIG record and it was validated
--//				m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
--//			} else {
--//			// *Msg r has a TSIG record and it was not validated
--//			}
--//		}
--//		w.WriteMsg(m)
--//	}
--package dns
--
--import (
--	"crypto/hmac"
--	"crypto/md5"
--	"crypto/sha1"
--	"crypto/sha256"
--	"encoding/hex"
--	"hash"
--	"io"
--	"strconv"
--	"strings"
--	"time"
--)
--
--// HMAC hashing codes. These are transmitted as domain names.
--const (
--	HmacMD5    = "hmac-md5.sig-alg.reg.int."
--	HmacSHA1   = "hmac-sha1."
--	HmacSHA256 = "hmac-sha256."
--)
--
--type TSIG struct {
--	Hdr        RR_Header
--	Algorithm  string `dns:"domain-name"`
--	TimeSigned uint64 `dns:"uint48"`
--	Fudge      uint16
--	MACSize    uint16
--	MAC        string `dns:"size-hex"`
--	OrigId     uint16
--	Error      uint16
--	OtherLen   uint16
--	OtherData  string `dns:"size-hex"`
--}
--
--func (rr *TSIG) Header() *RR_Header {
--	return &rr.Hdr
--}
--
--// TSIG has no official presentation format, but this will suffice.
--
--func (rr *TSIG) String() string {
--	s := "\n;; TSIG PSEUDOSECTION:\n"
--	s += rr.Hdr.String() +
--		" " + rr.Algorithm +
--		" " + tsigTimeToString(rr.TimeSigned) +
--		" " + strconv.Itoa(int(rr.Fudge)) +
--		" " + strconv.Itoa(int(rr.MACSize)) +
--		" " + strings.ToUpper(rr.MAC) +
--		" " + strconv.Itoa(int(rr.OrigId)) +
--		" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
--		" " + strconv.Itoa(int(rr.OtherLen)) +
--		" " + rr.OtherData
--	return s
--}
--
--func (rr *TSIG) len() int {
--	return rr.Hdr.len() + len(rr.Algorithm) + 1 + 6 +
--		4 + len(rr.MAC)/2 + 1 + 6 + len(rr.OtherData)/2 + 1
--}
--
--func (rr *TSIG) copy() RR {
--	return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
--}
--
--// The following values must be put in wireformat, so that the MAC can be calculated.
--// RFC 2845, section 3.4.2. TSIG Variables.
--type tsigWireFmt struct {
--	// From RR_Header
--	Name  string `dns:"domain-name"`
--	Class uint16
--	Ttl   uint32
--	// Rdata of the TSIG
--	Algorithm  string `dns:"domain-name"`
--	TimeSigned uint64 `dns:"uint48"`
--	Fudge      uint16
--	// MACSize, MAC and OrigId excluded
--	Error     uint16
--	OtherLen  uint16
--	OtherData string `dns:"size-hex"`
--}
--
--// If we have the MAC use this type to convert it to wiredata.
--// Section 3.4.3. Request MAC
--type macWireFmt struct {
--	MACSize uint16
--	MAC     string `dns:"size-hex"`
--}
--
--// 3.3. Time values used in TSIG calculations
--type timerWireFmt struct {
--	TimeSigned uint64 `dns:"uint48"`
--	Fudge      uint16
--}
--
--// TsigGenerate fills out the TSIG record attached to the message.
--// The message should contain a "stub" TSIG RR with the algorithm, key name
--// (owner name of the RR), time fudge (defaults to 300 seconds) and the
--// current time. The TSIG MAC is saved in that TSIG RR.
--// When TsigGenerate is called for the first time, requestMAC is set to the
--// empty string and timersOnly is false.
--// If something goes wrong, an error is returned; otherwise it is nil.
--func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
--	if m.IsTsig() == nil {
--		panic("dns: TSIG not last RR in additional")
--	}
--	// If we barf here, the caller is to blame
--	rawsecret, err := fromBase64([]byte(secret))
--	if err != nil {
--		return nil, "", err
--	}
--
--	rr := m.Extra[len(m.Extra)-1].(*TSIG)
--	m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
--	mbuf, err := m.Pack()
--	if err != nil {
--		return nil, "", err
--	}
--	buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
--
--	t := new(TSIG)
--	var h hash.Hash
--	switch rr.Algorithm {
--	case HmacMD5:
--		h = hmac.New(md5.New, []byte(rawsecret))
--	case HmacSHA1:
--		h = hmac.New(sha1.New, []byte(rawsecret))
--	case HmacSHA256:
--		h = hmac.New(sha256.New, []byte(rawsecret))
--	default:
--		return nil, "", ErrKeyAlg
--	}
--	io.WriteString(h, string(buf))
--	t.MAC = hex.EncodeToString(h.Sum(nil))
--	t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
--
--	t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
--	t.Fudge = rr.Fudge
--	t.TimeSigned = rr.TimeSigned
--	t.Algorithm = rr.Algorithm
--	t.OrigId = m.Id
--
--	tbuf := make([]byte, t.len())
--	if off, err := PackRR(t, tbuf, 0, nil, false); err == nil {
--		tbuf = tbuf[:off] // reset to actual size used
--	} else {
--		return nil, "", err
--	}
--	mbuf = append(mbuf, tbuf...)
--	rawSetExtraLen(mbuf, uint16(len(m.Extra)+1))
--	return mbuf, t.MAC, nil
--}
--
--// TsigVerify verifies the TSIG on a message.
--// If the signature does not validate err contains the
--// error, otherwise it is nil.
--func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
--	rawsecret, err := fromBase64([]byte(secret))
--	if err != nil {
--		return err
--	}
--	// Strip the TSIG from the incoming msg
--	stripped, tsig, err := stripTsig(msg)
--	if err != nil {
--		return err
--	}
--
--	msgMAC, err := hex.DecodeString(tsig.MAC)
--	if err != nil {
--		return err
--	}
--
--	buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
--
--	// Fudge factor works both ways. A message can arrive before it was signed because
--	// of clock skew.
--	now := uint64(time.Now().Unix())
--	ti := now - tsig.TimeSigned
--	if now < tsig.TimeSigned {
--		ti = tsig.TimeSigned - now
--	}
--	if uint64(tsig.Fudge) < ti {
--		return ErrTime
--	}
--
--	var h hash.Hash
--	switch tsig.Algorithm {
--	case HmacMD5:
--		h = hmac.New(md5.New, rawsecret)
--	case HmacSHA1:
--		h = hmac.New(sha1.New, rawsecret)
--	case HmacSHA256:
--		h = hmac.New(sha256.New, rawsecret)
--	default:
--		return ErrKeyAlg
--	}
--	h.Write(buf)
--	if !hmac.Equal(h.Sum(nil), msgMAC) {
--		return ErrSig
--	}
--	return nil
--}
--
--// Create a wiredata buffer for the MAC calculation.
--func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
--	var buf []byte
--	if rr.TimeSigned == 0 {
--		rr.TimeSigned = uint64(time.Now().Unix())
--	}
--	if rr.Fudge == 0 {
--		rr.Fudge = 300 // Standard (RFC) default.
--	}
--
--	if requestMAC != "" {
--		m := new(macWireFmt)
--		m.MACSize = uint16(len(requestMAC) / 2)
--		m.MAC = requestMAC
--		buf = make([]byte, len(requestMAC)) // long enough
--		n, _ := PackStruct(m, buf, 0)
--		buf = buf[:n]
--	}
--
--	tsigvar := make([]byte, DefaultMsgSize)
--	if timersOnly {
--		tsig := new(timerWireFmt)
--		tsig.TimeSigned = rr.TimeSigned
--		tsig.Fudge = rr.Fudge
--		n, _ := PackStruct(tsig, tsigvar, 0)
--		tsigvar = tsigvar[:n]
--	} else {
--		tsig := new(tsigWireFmt)
--		tsig.Name = strings.ToLower(rr.Hdr.Name)
--		tsig.Class = ClassANY
--		tsig.Ttl = rr.Hdr.Ttl
--		tsig.Algorithm = strings.ToLower(rr.Algorithm)
--		tsig.TimeSigned = rr.TimeSigned
--		tsig.Fudge = rr.Fudge
--		tsig.Error = rr.Error
--		tsig.OtherLen = rr.OtherLen
--		tsig.OtherData = rr.OtherData
--		n, _ := PackStruct(tsig, tsigvar, 0)
--		tsigvar = tsigvar[:n]
--	}
--
--	if requestMAC != "" {
--		x := append(buf, msgbuf...)
--		buf = append(x, tsigvar...)
--	} else {
--		buf = append(msgbuf, tsigvar...)
--	}
--	return buf
--}
--
--// Strip the TSIG from the raw message.
--func stripTsig(msg []byte) ([]byte, *TSIG, error) {
--	// Copied from msg.go's Unpack()
--	// Header.
--	var dh Header
--	var err error
--	dns := new(Msg)
--	rr := new(TSIG)
--	off := 0
--	tsigoff := 0
--	if off, err = UnpackStruct(&dh, msg, off); err != nil {
--		return nil, nil, err
--	}
--	if dh.Arcount == 0 {
--		return nil, nil, ErrNoSig
--	}
--	// Rcode, see msg.go Unpack()
--	if int(dh.Bits&0xF) == RcodeNotAuth {
--		return nil, nil, ErrAuth
--	}
--
--	// Arrays.
--	dns.Question = make([]Question, dh.Qdcount)
--	dns.Answer = make([]RR, dh.Ancount)
--	dns.Ns = make([]RR, dh.Nscount)
--	dns.Extra = make([]RR, dh.Arcount)
--
--	for i := 0; i < len(dns.Question); i++ {
--		off, err = UnpackStruct(&dns.Question[i], msg, off)
--		if err != nil {
--			return nil, nil, err
--		}
--	}
--	for i := 0; i < len(dns.Answer); i++ {
--		dns.Answer[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return nil, nil, err
--		}
--	}
--	for i := 0; i < len(dns.Ns); i++ {
--		dns.Ns[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return nil, nil, err
--		}
--	}
--	for i := 0; i < len(dns.Extra); i++ {
--		tsigoff = off
--		dns.Extra[i], off, err = UnpackRR(msg, off)
--		if err != nil {
--			return nil, nil, err
--		}
--		if dns.Extra[i].Header().Rrtype == TypeTSIG {
--			rr = dns.Extra[i].(*TSIG)
--			// Adjust Arcount.
--			arcount, _ := unpackUint16(msg, 10)
--			msg[10], msg[11] = packUint16(arcount - 1)
--			break
--		}
--	}
--	if rr == nil {
--		return nil, nil, ErrNoSig
--	}
--	return msg[:tsigoff], rr, nil
--}
--
--// Translate the TSIG time signed into a date. There is no
--// need for RFC1982 calculations as this date is 48 bits.
--func tsigTimeToString(t uint64) string {
--	ti := time.Unix(int64(t), 0).UTC()
--	return ti.Format("20060102150405")
--}
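
Finally, a compact sketch of the manual TSIG round trip implemented by the file above: attach a stub TSIG RR, let TsigGenerate compute the MAC over the wire form, then check the result with TsigVerify. It assumes package dns and a caller-supplied base64 secret (for example the "so6ZGir4GPAqINNh9U5c3A==" value used in the package documentation); in normal use the client and server code paths do this automatically.

package dns

import "time"

// signAndCheck signs m with the "axfr." key and immediately verifies the
// signed wire form again, mirroring what a client/server pair would do.
func signAndCheck(m *Msg, secret string) error {
	// Attach the stub TSIG RR; TsigGenerate computes and fills in the MAC.
	m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix())
	wire, mac, err := TsigGenerate(m, secret, "", false)
	if err != nil {
		return err
	}
	_ = mac // a responder would keep this as requestMAC when signing its reply
	return TsigVerify(wire, secret, "", false)
}
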
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types.go b/Godeps/_workspace/src/github.com/miekg/dns/types.go
-deleted file mode 100644
-index 16bb181..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/types.go
-+++ /dev/null
-@@ -1,1697 +0,0 @@
--package dns
--
--import (
--	"encoding/base64"
--	"fmt"
--	"net"
--	"strconv"
--	"strings"
--	"time"
--)
--
--type (
--	Type  uint16 // Type is a DNS type.
--	Class uint16 // Class is a DNS class.
--	Name  string // Name is a DNS domain name.
--)
--
--// Packet formats
--
--// Wire constants and supported types.
--const (
--	// valid RR_Header.Rrtype and Question.qtype
--	TypeNone       uint16 = 0
--	TypeA          uint16 = 1
--	TypeNS         uint16 = 2
--	TypeMD         uint16 = 3
--	TypeMF         uint16 = 4
--	TypeCNAME      uint16 = 5
--	TypeSOA        uint16 = 6
--	TypeMB         uint16 = 7
--	TypeMG         uint16 = 8
--	TypeMR         uint16 = 9
--	TypeNULL       uint16 = 10
--	TypeWKS        uint16 = 11
--	TypePTR        uint16 = 12
--	TypeHINFO      uint16 = 13
--	TypeMINFO      uint16 = 14
--	TypeMX         uint16 = 15
--	TypeTXT        uint16 = 16
--	TypeRP         uint16 = 17
--	TypeAFSDB      uint16 = 18
--	TypeX25        uint16 = 19
--	TypeISDN       uint16 = 20
--	TypeRT         uint16 = 21
--	TypeNSAP       uint16 = 22
--	TypeNSAPPTR    uint16 = 23
--	TypeSIG        uint16 = 24
--	TypeKEY        uint16 = 25
--	TypePX         uint16 = 26
--	TypeGPOS       uint16 = 27
--	TypeAAAA       uint16 = 28
--	TypeLOC        uint16 = 29
--	TypeNXT        uint16 = 30
--	TypeEID        uint16 = 31
--	TypeNIMLOC     uint16 = 32
--	TypeSRV        uint16 = 33
--	TypeATMA       uint16 = 34
--	TypeNAPTR      uint16 = 35
--	TypeKX         uint16 = 36
--	TypeCERT       uint16 = 37
--	TypeDNAME      uint16 = 39
--	TypeOPT        uint16 = 41 // EDNS
--	TypeDS         uint16 = 43
--	TypeSSHFP      uint16 = 44
--	TypeIPSECKEY   uint16 = 45
--	TypeRRSIG      uint16 = 46
--	TypeNSEC       uint16 = 47
--	TypeDNSKEY     uint16 = 48
--	TypeDHCID      uint16 = 49
--	TypeNSEC3      uint16 = 50
--	TypeNSEC3PARAM uint16 = 51
--	TypeTLSA       uint16 = 52
--	TypeHIP        uint16 = 55
--	TypeNINFO      uint16 = 56
--	TypeRKEY       uint16 = 57
--	TypeTALINK     uint16 = 58
--	TypeCDS        uint16 = 59
--	TypeCDNSKEY    uint16 = 60
--	TypeOPENPGPKEY uint16 = 61
--	TypeSPF        uint16 = 99
--	TypeUINFO      uint16 = 100
--	TypeUID        uint16 = 101
--	TypeGID        uint16 = 102
--	TypeUNSPEC     uint16 = 103
--	TypeNID        uint16 = 104
--	TypeL32        uint16 = 105
--	TypeL64        uint16 = 106
--	TypeLP         uint16 = 107
--	TypeEUI48      uint16 = 108
--	TypeEUI64      uint16 = 109
--
--	TypeTKEY uint16 = 249
--	TypeTSIG uint16 = 250
--	// valid Question.Qtype only
--	TypeIXFR  uint16 = 251
--	TypeAXFR  uint16 = 252
--	TypeMAILB uint16 = 253
--	TypeMAILA uint16 = 254
--	TypeANY   uint16 = 255
--
--	TypeURI      uint16 = 256
--	TypeCAA      uint16 = 257
--	TypeTA       uint16 = 32768
--	TypeDLV      uint16 = 32769
--	TypeReserved uint16 = 65535
--
--	// valid Question.Qclass
--	ClassINET   = 1
--	ClassCSNET  = 2
--	ClassCHAOS  = 3
--	ClassHESIOD = 4
--	ClassNONE   = 254
--	ClassANY    = 255
--
--	// Msg.rcode
--	RcodeSuccess        = 0
--	RcodeFormatError    = 1
--	RcodeServerFailure  = 2
--	RcodeNameError      = 3
--	RcodeNotImplemented = 4
--	RcodeRefused        = 5
--	RcodeYXDomain       = 6
--	RcodeYXRrset        = 7
--	RcodeNXRrset        = 8
--	RcodeNotAuth        = 9
--	RcodeNotZone        = 10
--	RcodeBadSig         = 16 // TSIG
--	RcodeBadVers        = 16 // EDNS0
--	RcodeBadKey         = 17
--	RcodeBadTime        = 18
--	RcodeBadMode        = 19 // TKEY
--	RcodeBadName        = 20
--	RcodeBadAlg         = 21
--	RcodeBadTrunc       = 22 // TSIG
--
--	// Opcode
--	OpcodeQuery  = 0
--	OpcodeIQuery = 1
--	OpcodeStatus = 2
--	// There is no 3
--	OpcodeNotify = 4
--	OpcodeUpdate = 5
--)
--
--// The wire format for the DNS packet header.
--type Header struct {
--	Id                                 uint16
--	Bits                               uint16
--	Qdcount, Ancount, Nscount, Arcount uint16
--}
--
--const (
--	// Header.Bits
--	_QR = 1 << 15 // query/response (response=1)
--	_AA = 1 << 10 // authoritative
--	_TC = 1 << 9  // truncated
--	_RD = 1 << 8  // recursion desired
--	_RA = 1 << 7  // recursion available
--	_Z  = 1 << 6  // Z
--	_AD = 1 << 5  // authticated data
--	_CD = 1 << 4  // checking disabled
--
--	LOC_EQUATOR       = 1 << 31 // RFC 1876, Section 2.
--	LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
--
--	LOC_HOURS   = 60 * 1000
--	LOC_DEGREES = 60 * LOC_HOURS
--
--	LOC_ALTITUDEBASE = 100000
--)
--
--// RFC 4398, Section 2.1
--const (
--	CertPKIX = 1 + iota
--	CertSPKI
--	CertPGP
--	CertIPIX
--	CertISPKI
--	CertIPGP
--	CertACPKIX
--	CertIACPKIX
--	CertURI = 253
--	CertOID = 254
--)
--
--var CertTypeToString = map[uint16]string{
--	CertPKIX:    "PKIX",
--	CertSPKI:    "SPKI",
--	CertPGP:     "PGP",
--	CertIPIX:    "IPIX",
--	CertISPKI:   "ISPKI",
--	CertIPGP:    "IPGP",
--	CertACPKIX:  "ACPKIX",
--	CertIACPKIX: "IACPKIX",
--	CertURI:     "URI",
--	CertOID:     "OID",
--}
--
--var StringToCertType = reverseInt16(CertTypeToString)
--
--// DNS queries.
--type Question struct {
--	Name   string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
--	Qtype  uint16
--	Qclass uint16
--}
--
--func (q *Question) String() (s string) {
--	// prefix with ; (as in dig)
--	s = ";" + sprintName(q.Name) + "\t"
--	s += Class(q.Qclass).String() + "\t"
--	s += " " + Type(q.Qtype).String()
--	return s
--}
--
--func (q *Question) len() int {
--	l := len(q.Name) + 1
--	return l + 4
--}
--
--type ANY struct {
--	Hdr RR_Header
--	// Does not have any rdata
--}
--
--func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
--func (rr *ANY) copy() RR           { return &ANY{*rr.Hdr.copyHeader()} }
--func (rr *ANY) String() string     { return rr.Hdr.String() }
--func (rr *ANY) len() int           { return rr.Hdr.len() }
--
--type CNAME struct {
--	Hdr    RR_Header
--	Target string `dns:"cdomain-name"`
--}
--
--func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
--func (rr *CNAME) copy() RR           { return &CNAME{*rr.Hdr.copyHeader(), sprintName(rr.Target)} }
--func (rr *CNAME) String() string     { return rr.Hdr.String() + rr.Target }
--func (rr *CNAME) len() int           { return rr.Hdr.len() + len(rr.Target) + 1 }
--
--type HINFO struct {
--	Hdr RR_Header
--	Cpu string
--	Os  string
--}
--
--func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
--func (rr *HINFO) copy() RR           { return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} }
--func (rr *HINFO) String() string     { return rr.Hdr.String() + rr.Cpu + " " + rr.Os }
--func (rr *HINFO) len() int           { return rr.Hdr.len() + len(rr.Cpu) + len(rr.Os) }
--
--type MB struct {
--	Hdr RR_Header
--	Mb  string `dns:"cdomain-name"`
--}
--
--func (rr *MB) Header() *RR_Header { return &rr.Hdr }
--func (rr *MB) copy() RR           { return &MB{*rr.Hdr.copyHeader(), sprintName(rr.Mb)} }
--
--func (rr *MB) String() string { return rr.Hdr.String() + rr.Mb }
--func (rr *MB) len() int       { return rr.Hdr.len() + len(rr.Mb) + 1 }
--
--type MG struct {
--	Hdr RR_Header
--	Mg  string `dns:"cdomain-name"`
--}
--
--func (rr *MG) Header() *RR_Header { return &rr.Hdr }
--func (rr *MG) copy() RR           { return &MG{*rr.Hdr.copyHeader(), rr.Mg} }
--func (rr *MG) len() int           { l := len(rr.Mg) + 1; return rr.Hdr.len() + l }
--func (rr *MG) String() string     { return rr.Hdr.String() + sprintName(rr.Mg) }
--
--type MINFO struct {
--	Hdr   RR_Header
--	Rmail string `dns:"cdomain-name"`
--	Email string `dns:"cdomain-name"`
--}
--
--func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
--func (rr *MINFO) copy() RR           { return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} }
--
--func (rr *MINFO) String() string {
--	return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
--}
--
--func (rr *MINFO) len() int {
--	l := len(rr.Rmail) + 1
--	n := len(rr.Email) + 1
--	return rr.Hdr.len() + l + n
--}
--
--type MR struct {
--	Hdr RR_Header
--	Mr  string `dns:"cdomain-name"`
--}
--
--func (rr *MR) Header() *RR_Header { return &rr.Hdr }
--func (rr *MR) copy() RR           { return &MR{*rr.Hdr.copyHeader(), rr.Mr} }
--func (rr *MR) len() int           { l := len(rr.Mr) + 1; return rr.Hdr.len() + l }
--
--func (rr *MR) String() string {
--	return rr.Hdr.String() + sprintName(rr.Mr)
--}
--
--type MF struct {
--	Hdr RR_Header
--	Mf  string `dns:"cdomain-name"`
--}
--
--func (rr *MF) Header() *RR_Header { return &rr.Hdr }
--func (rr *MF) copy() RR           { return &MF{*rr.Hdr.copyHeader(), rr.Mf} }
--func (rr *MF) len() int           { return rr.Hdr.len() + len(rr.Mf) + 1 }
--
--func (rr *MF) String() string {
--	return rr.Hdr.String() + sprintName(rr.Mf)
--}
--
--type MD struct {
--	Hdr RR_Header
--	Md  string `dns:"cdomain-name"`
--}
--
--func (rr *MD) Header() *RR_Header { return &rr.Hdr }
--func (rr *MD) copy() RR           { return &MD{*rr.Hdr.copyHeader(), rr.Md} }
--func (rr *MD) len() int           { return rr.Hdr.len() + len(rr.Md) + 1 }
--
--func (rr *MD) String() string {
--	return rr.Hdr.String() + sprintName(rr.Md)
--}
--
--type MX struct {
--	Hdr        RR_Header
--	Preference uint16
--	Mx         string `dns:"cdomain-name"`
--}
--
--func (rr *MX) Header() *RR_Header { return &rr.Hdr }
--func (rr *MX) copy() RR           { return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} }
--func (rr *MX) len() int           { l := len(rr.Mx) + 1; return rr.Hdr.len() + l + 2 }
--
--func (rr *MX) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
--}
--
--type AFSDB struct {
--	Hdr      RR_Header
--	Subtype  uint16
--	Hostname string `dns:"cdomain-name"`
--}
--
--func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
--func (rr *AFSDB) copy() RR           { return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} }
--func (rr *AFSDB) len() int           { l := len(rr.Hostname) + 1; return rr.Hdr.len() + l + 2 }
--
--func (rr *AFSDB) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
--}
--
--type X25 struct {
--	Hdr         RR_Header
--	PSDNAddress string
--}
--
--func (rr *X25) Header() *RR_Header { return &rr.Hdr }
--func (rr *X25) copy() RR           { return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} }
--func (rr *X25) len() int           { return rr.Hdr.len() + len(rr.PSDNAddress) + 1 }
--
--func (rr *X25) String() string {
--	return rr.Hdr.String() + rr.PSDNAddress
--}
--
--type RT struct {
--	Hdr        RR_Header
--	Preference uint16
--	Host       string `dns:"cdomain-name"`
--}
--
--func (rr *RT) Header() *RR_Header { return &rr.Hdr }
--func (rr *RT) copy() RR           { return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} }
--func (rr *RT) len() int           { l := len(rr.Host) + 1; return rr.Hdr.len() + l + 2 }
--
--func (rr *RT) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
--}
--
--type NS struct {
--	Hdr RR_Header
--	Ns  string `dns:"cdomain-name"`
--}
--
--func (rr *NS) Header() *RR_Header { return &rr.Hdr }
--func (rr *NS) len() int           { l := len(rr.Ns) + 1; return rr.Hdr.len() + l }
--func (rr *NS) copy() RR           { return &NS{*rr.Hdr.copyHeader(), rr.Ns} }
--
--func (rr *NS) String() string {
--	return rr.Hdr.String() + sprintName(rr.Ns)
--}
--
--type PTR struct {
--	Hdr RR_Header
--	Ptr string `dns:"cdomain-name"`
--}
--
--func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
--func (rr *PTR) copy() RR           { return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} }
--func (rr *PTR) len() int           { l := len(rr.Ptr) + 1; return rr.Hdr.len() + l }
--
--func (rr *PTR) String() string {
--	return rr.Hdr.String() + sprintName(rr.Ptr)
--}
--
--type RP struct {
--	Hdr  RR_Header
--	Mbox string `dns:"domain-name"`
--	Txt  string `dns:"domain-name"`
--}
--
--func (rr *RP) Header() *RR_Header { return &rr.Hdr }
--func (rr *RP) copy() RR           { return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} }
--func (rr *RP) len() int           { return rr.Hdr.len() + len(rr.Mbox) + 1 + len(rr.Txt) + 1 }
--
--func (rr *RP) String() string {
--	return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt})
--}
--
--type SOA struct {
--	Hdr     RR_Header
--	Ns      string `dns:"cdomain-name"`
--	Mbox    string `dns:"cdomain-name"`
--	Serial  uint32
--	Refresh uint32
--	Retry   uint32
--	Expire  uint32
--	Minttl  uint32
--}
--
--func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
--func (rr *SOA) copy() RR {
--	return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
--}
--
--func (rr *SOA) String() string {
--	return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) +
--		" " + strconv.FormatInt(int64(rr.Serial), 10) +
--		" " + strconv.FormatInt(int64(rr.Refresh), 10) +
--		" " + strconv.FormatInt(int64(rr.Retry), 10) +
--		" " + strconv.FormatInt(int64(rr.Expire), 10) +
--		" " + strconv.FormatInt(int64(rr.Minttl), 10)
--}
--
--func (rr *SOA) len() int {
--	l := len(rr.Ns) + 1
--	n := len(rr.Mbox) + 1
--	return rr.Hdr.len() + l + n + 20
--}
--
--type TXT struct {
--	Hdr RR_Header
--	Txt []string `dns:"txt"`
--}
--
--func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
--func (rr *TXT) copy() RR {
--	cp := make([]string, len(rr.Txt), cap(rr.Txt))
--	copy(cp, rr.Txt)
--	return &TXT{*rr.Hdr.copyHeader(), cp}
--}
--
--func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
--
--func sprintName(s string) string {
--	src := []byte(s)
--	dst := make([]byte, 0, len(src))
--	for i := 0; i < len(src); {
--		if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
--			dst = append(dst, src[i:i+2]...)
--			i += 2
--		} else {
--			b, n := nextByte(src, i)
--			if n == 0 {
--				i++ // dangling back slash
--			} else if b == '.' {
--				dst = append(dst, b)
--			} else {
--				dst = appendDomainNameByte(dst, b)
--			}
--			i += n
--		}
--	}
--	return string(dst)
--}
--
--func sprintTxt(txt []string) string {
--	var out []byte
--	for i, s := range txt {
--		if i > 0 {
--			out = append(out, ` "`...)
--		} else {
--			out = append(out, '"')
--		}
--		bs := []byte(s)
--		for j := 0; j < len(bs); {
--			b, n := nextByte(bs, j)
--			if n == 0 {
--				break
--			}
--			out = appendTXTStringByte(out, b)
--			j += n
--		}
--		out = append(out, '"')
--	}
--	return string(out)
--}
--
--func appendDomainNameByte(s []byte, b byte) []byte {
--	switch b {
--	case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
--		return append(s, '\\', b)
--	}
--	return appendTXTStringByte(s, b)
--}
--
--func appendTXTStringByte(s []byte, b byte) []byte {
--	switch b {
--	case '\t':
--		return append(s, '\\', 't')
--	case '\r':
--		return append(s, '\\', 'r')
--	case '\n':
--		return append(s, '\\', 'n')
--	case '"', '\\':
--		return append(s, '\\', b)
--	}
--	if b < ' ' || b > '~' {
--		var buf [3]byte
--		bufs := strconv.AppendInt(buf[:0], int64(b), 10)
--		s = append(s, '\\')
--		for i := 0; i < 3-len(bufs); i++ {
--			s = append(s, '0')
--		}
--		for _, r := range bufs {
--			s = append(s, r)
--		}
--		return s
--
--	}
--	return append(s, b)
--}
--
--func nextByte(b []byte, offset int) (byte, int) {
--	if offset >= len(b) {
--		return 0, 0
--	}
--	if b[offset] != '\\' {
--		// not an escape sequence
--		return b[offset], 1
--	}
--	switch len(b) - offset {
--	case 1: // dangling escape
--		return 0, 0
--	case 2, 3: // too short to be \ddd
--	default: // maybe \ddd
--		if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) {
--			return dddToByte(b[offset+1:]), 4
--		}
--	}
--	// not \ddd, maybe a control char
--	switch b[offset+1] {
--	case 't':
--		return '\t', 2
--	case 'r':
--		return '\r', 2
--	case 'n':
--		return '\n', 2
--	default:
--		return b[offset+1], 2
--	}
--}
--
--func (rr *TXT) len() int {
--	l := rr.Hdr.len()
--	for _, t := range rr.Txt {
--		l += len(t) + 1
--	}
--	return l
--}
--
--type SPF struct {
--	Hdr RR_Header
--	Txt []string `dns:"txt"`
--}
--
--func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
--func (rr *SPF) copy() RR {
--	cp := make([]string, len(rr.Txt), cap(rr.Txt))
--	copy(cp, rr.Txt)
--	return &SPF{*rr.Hdr.copyHeader(), cp}
--}
--
--func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
--
--func (rr *SPF) len() int {
--	l := rr.Hdr.len()
--	for _, t := range rr.Txt {
--		l += len(t) + 1
--	}
--	return l
--}
--
--type SRV struct {
--	Hdr      RR_Header
--	Priority uint16
--	Weight   uint16
--	Port     uint16
--	Target   string `dns:"domain-name"`
--}
--
--func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
--func (rr *SRV) len() int           { l := len(rr.Target) + 1; return rr.Hdr.len() + l + 6 }
--func (rr *SRV) copy() RR {
--	return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
--}
--
--func (rr *SRV) String() string {
--	return rr.Hdr.String() +
--		strconv.Itoa(int(rr.Priority)) + " " +
--		strconv.Itoa(int(rr.Weight)) + " " +
--		strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
--}
--
--type NAPTR struct {
--	Hdr         RR_Header
--	Order       uint16
--	Preference  uint16
--	Flags       string
--	Service     string
--	Regexp      string
--	Replacement string `dns:"domain-name"`
--}
--
--func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
--func (rr *NAPTR) copy() RR {
--	return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
--}
--
--func (rr *NAPTR) String() string {
--	return rr.Hdr.String() +
--		strconv.Itoa(int(rr.Order)) + " " +
--		strconv.Itoa(int(rr.Preference)) + " " +
--		"\"" + rr.Flags + "\" " +
--		"\"" + rr.Service + "\" " +
--		"\"" + rr.Regexp + "\" " +
--		rr.Replacement
--}
--
--func (rr *NAPTR) len() int {
--	return rr.Hdr.len() + 4 + len(rr.Flags) + 1 + len(rr.Service) + 1 +
--		len(rr.Regexp) + 1 + len(rr.Replacement) + 1
--}
--
--// See RFC 4398.
--type CERT struct {
--	Hdr         RR_Header
--	Type        uint16
--	KeyTag      uint16
--	Algorithm   uint8
--	Certificate string `dns:"base64"`
--}
--
--func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
--func (rr *CERT) copy() RR {
--	return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
--}
--
--func (rr *CERT) String() string {
--	var (
--		ok                  bool
--		certtype, algorithm string
--	)
--	if certtype, ok = CertTypeToString[rr.Type]; !ok {
--		certtype = strconv.Itoa(int(rr.Type))
--	}
--	if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok {
--		algorithm = strconv.Itoa(int(rr.Algorithm))
--	}
--	return rr.Hdr.String() + certtype +
--		" " + strconv.Itoa(int(rr.KeyTag)) +
--		" " + algorithm +
--		" " + rr.Certificate
--}
--
--func (rr *CERT) len() int {
--	return rr.Hdr.len() + 5 +
--		base64.StdEncoding.DecodedLen(len(rr.Certificate))
--}
--
--// See RFC 2672.
--type DNAME struct {
--	Hdr    RR_Header
--	Target string `dns:"domain-name"`
--}
--
--func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
--func (rr *DNAME) copy() RR           { return &DNAME{*rr.Hdr.copyHeader(), rr.Target} }
--func (rr *DNAME) len() int           { l := len(rr.Target) + 1; return rr.Hdr.len() + l }
--
--func (rr *DNAME) String() string {
--	return rr.Hdr.String() + sprintName(rr.Target)
--}
--
--type A struct {
--	Hdr RR_Header
--	A   net.IP `dns:"a"`
--}
--
--func (rr *A) Header() *RR_Header { return &rr.Hdr }
--func (rr *A) copy() RR           { return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} }
--func (rr *A) len() int           { return rr.Hdr.len() + net.IPv4len }
--
--func (rr *A) String() string {
--	if rr.A == nil {
--		return rr.Hdr.String()
--	}
--	return rr.Hdr.String() + rr.A.String()
--}
--
--type AAAA struct {
--	Hdr  RR_Header
--	AAAA net.IP `dns:"aaaa"`
--}
--
--func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
--func (rr *AAAA) copy() RR           { return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} }
--func (rr *AAAA) len() int           { return rr.Hdr.len() + net.IPv6len }
--
--func (rr *AAAA) String() string {
--	if rr.AAAA == nil {
--		return rr.Hdr.String()
--	}
--	return rr.Hdr.String() + rr.AAAA.String()
--}
--
--type PX struct {
--	Hdr        RR_Header
--	Preference uint16
--	Map822     string `dns:"domain-name"`
--	Mapx400    string `dns:"domain-name"`
--}
--
--func (rr *PX) Header() *RR_Header { return &rr.Hdr }
--func (rr *PX) copy() RR           { return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} }
--func (rr *PX) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
--}
--func (rr *PX) len() int { return rr.Hdr.len() + 2 + len(rr.Map822) + 1 + len(rr.Mapx400) + 1 }
--
--type GPOS struct {
--	Hdr       RR_Header
--	Longitude string
--	Latitude  string
--	Altitude  string
--}
--
--func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
--func (rr *GPOS) copy() RR           { return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} }
--func (rr *GPOS) len() int {
--	return rr.Hdr.len() + len(rr.Longitude) + len(rr.Latitude) + len(rr.Altitude) + 3
--}
--func (rr *GPOS) String() string {
--	return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
--}
--
--type LOC struct {
--	Hdr       RR_Header
--	Version   uint8
--	Size      uint8
--	HorizPre  uint8
--	VertPre   uint8
--	Latitude  uint32
--	Longitude uint32
--	Altitude  uint32
--}
--
--func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
--func (rr *LOC) len() int           { return rr.Hdr.len() + 4 + 12 }
--func (rr *LOC) copy() RR {
--	return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
--}
--
--// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent
--// format and returns a string in m (two decimals for the cm)
--func cmToM(m, e uint8) string {
--	if e < 2 {
--		if e == 1 {
--			m *= 10
--		}
--
--		return fmt.Sprintf("0.%02d", m)
--	}
--
--	s := fmt.Sprintf("%d", m)
--	for e > 2 {
--		s += "0"
--		e -= 1
--	}
--	return s
--}
--
--// String returns a string version of a LOC
--func (rr *LOC) String() string {
--	s := rr.Hdr.String()
--
--	lat := rr.Latitude
--	ns := "N"
--	if lat > LOC_EQUATOR {
--		lat = lat - LOC_EQUATOR
--	} else {
--		ns = "S"
--		lat = LOC_EQUATOR - lat
--	}
--	h := lat / LOC_DEGREES
--	lat = lat % LOC_DEGREES
--	m := lat / LOC_HOURS
--	lat = lat % LOC_HOURS
--	s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns)
--
--	lon := rr.Longitude
--	ew := "E"
--	if lon > LOC_PRIMEMERIDIAN {
--		lon = lon - LOC_PRIMEMERIDIAN
--	} else {
--		ew = "W"
--		lon = LOC_PRIMEMERIDIAN - lon
--	}
--	h = lon / LOC_DEGREES
--	lon = lon % LOC_DEGREES
--	m = lon / LOC_HOURS
--	lon = lon % LOC_HOURS
--	s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew)
--
--	var alt float64 = float64(rr.Altitude) / 100
--	alt -= LOC_ALTITUDEBASE
--	if rr.Altitude%100 != 0 {
--		s += fmt.Sprintf("%.2fm ", alt)
--	} else {
--		s += fmt.Sprintf("%.0fm ", alt)
--	}
--
--	s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m "
--	s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m "
--	s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m"
--
--	return s
--}
--
--// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931.
--type SIG struct {
--	RRSIG
--}
--
--type RRSIG struct {
--	Hdr         RR_Header
--	TypeCovered uint16
--	Algorithm   uint8
--	Labels      uint8
--	OrigTtl     uint32
--	Expiration  uint32
--	Inception   uint32
--	KeyTag      uint16
--	SignerName  string `dns:"domain-name"`
--	Signature   string `dns:"base64"`
--}
--
--func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
--func (rr *RRSIG) copy() RR {
--	return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
--}
--
--func (rr *RRSIG) String() string {
--	s := rr.Hdr.String()
--	s += Type(rr.TypeCovered).String()
--	s += " " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + strconv.Itoa(int(rr.Labels)) +
--		" " + strconv.FormatInt(int64(rr.OrigTtl), 10) +
--		" " + TimeToString(rr.Expiration) +
--		" " + TimeToString(rr.Inception) +
--		" " + strconv.Itoa(int(rr.KeyTag)) +
--		" " + sprintName(rr.SignerName) +
--		" " + rr.Signature
--	return s
--}
--
--func (rr *RRSIG) len() int {
--	return rr.Hdr.len() + len(rr.SignerName) + 1 +
--		base64.StdEncoding.DecodedLen(len(rr.Signature)) + 18
--}
--
--type NSEC struct {
--	Hdr        RR_Header
--	NextDomain string   `dns:"domain-name"`
--	TypeBitMap []uint16 `dns:"nsec"`
--}
--
--func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
--func (rr *NSEC) copy() RR {
--	cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap))
--	copy(cp, rr.TypeBitMap)
--	return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, cp}
--}
--
--func (rr *NSEC) String() string {
--	s := rr.Hdr.String() + sprintName(rr.NextDomain)
--	for i := 0; i < len(rr.TypeBitMap); i++ {
--		s += " " + Type(rr.TypeBitMap[i]).String()
--	}
--	return s
--}
--
--func (rr *NSEC) len() int {
--	l := rr.Hdr.len() + len(rr.NextDomain) + 1
--	lastwindow := uint32(256) // sentinel: windows are t/256 <= 255, so this never matches
--	for _, t := range rr.TypeBitMap {
--		window := t / 256
--		if uint32(window) != lastwindow {
--			l += 1 + 32
--		}
--		lastwindow = uint32(window)
--	}
--	return l
--}
--
--type DLV struct {
--	DS
--}
--
--type CDS struct {
--	DS
--}
--
--type DS struct {
--	Hdr        RR_Header
--	KeyTag     uint16
--	Algorithm  uint8
--	DigestType uint8
--	Digest     string `dns:"hex"`
--}
--
--func (rr *DS) Header() *RR_Header { return &rr.Hdr }
--func (rr *DS) len() int           { return rr.Hdr.len() + 4 + len(rr.Digest)/2 }
--func (rr *DS) copy() RR {
--	return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
--}
--
--func (rr *DS) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
--		" " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + strconv.Itoa(int(rr.DigestType)) +
--		" " + strings.ToUpper(rr.Digest)
--}
--
--type KX struct {
--	Hdr        RR_Header
--	Preference uint16
--	Exchanger  string `dns:"domain-name"`
--}
--
--func (rr *KX) Header() *RR_Header { return &rr.Hdr }
--func (rr *KX) len() int           { return rr.Hdr.len() + 2 + len(rr.Exchanger) + 1 }
--func (rr *KX) copy() RR           { return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} }
--
--func (rr *KX) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
--		" " + sprintName(rr.Exchanger)
--}
--
--type TA struct {
--	Hdr        RR_Header
--	KeyTag     uint16
--	Algorithm  uint8
--	DigestType uint8
--	Digest     string `dns:"hex"`
--}
--
--func (rr *TA) Header() *RR_Header { return &rr.Hdr }
--func (rr *TA) len() int           { return rr.Hdr.len() + 4 + len(rr.Digest)/2 }
--func (rr *TA) copy() RR {
--	return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
--}
--
--func (rr *TA) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
--		" " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + strconv.Itoa(int(rr.DigestType)) +
--		" " + strings.ToUpper(rr.Digest)
--}
--
--type TALINK struct {
--	Hdr          RR_Header
--	PreviousName string `dns:"domain-name"`
--	NextName     string `dns:"domain-name"`
--}
--
--func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
--func (rr *TALINK) copy() RR           { return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} }
--func (rr *TALINK) len() int           { return rr.Hdr.len() + len(rr.PreviousName) + len(rr.NextName) + 2 }
--
--func (rr *TALINK) String() string {
--	return rr.Hdr.String() +
--		sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
--}
--
--type SSHFP struct {
--	Hdr         RR_Header
--	Algorithm   uint8
--	Type        uint8
--	FingerPrint string `dns:"hex"`
--}
--
--func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
--func (rr *SSHFP) len() int           { return rr.Hdr.len() + 2 + len(rr.FingerPrint)/2 }
--func (rr *SSHFP) copy() RR {
--	return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
--}
--
--func (rr *SSHFP) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) +
--		" " + strconv.Itoa(int(rr.Type)) +
--		" " + strings.ToUpper(rr.FingerPrint)
--}
--
--type IPSECKEY struct {
--	Hdr         RR_Header
--	Precedence  uint8
--	GatewayType uint8
--	Algorithm   uint8
--	Gateway     string `dns:"ipseckey"`
--	PublicKey   string `dns:"base64"`
--}
--
--func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr }
--func (rr *IPSECKEY) copy() RR {
--	return &IPSECKEY{*rr.Hdr.copyHeader(), rr.Precedence, rr.GatewayType, rr.Algorithm, rr.Gateway, rr.PublicKey}
--}
--
--func (rr *IPSECKEY) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
--		" " + strconv.Itoa(int(rr.GatewayType)) +
--		" " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + rr.Gateway +
--		" " + rr.PublicKey
--}
--
--func (rr *IPSECKEY) len() int {
--	return rr.Hdr.len() + 3 + len(rr.Gateway) + 1 +
--		base64.StdEncoding.DecodedLen(len(rr.PublicKey))
--}
--
--type KEY struct {
--	DNSKEY
--}
--
--type CDNSKEY struct {
--	DNSKEY
--}
--
--type DNSKEY struct {
--	Hdr       RR_Header
--	Flags     uint16
--	Protocol  uint8
--	Algorithm uint8
--	PublicKey string `dns:"base64"`
--}
--
--func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
--func (rr *DNSKEY) len() int {
--	return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey))
--}
--func (rr *DNSKEY) copy() RR {
--	return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
--}
--
--func (rr *DNSKEY) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
--		" " + strconv.Itoa(int(rr.Protocol)) +
--		" " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + rr.PublicKey
--}
--
--type RKEY struct {
--	Hdr       RR_Header
--	Flags     uint16
--	Protocol  uint8
--	Algorithm uint8
--	PublicKey string `dns:"base64"`
--}
--
--func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
--func (rr *RKEY) len() int           { return rr.Hdr.len() + 4 + base64.StdEncoding.DecodedLen(len(rr.PublicKey)) }
--func (rr *RKEY) copy() RR {
--	return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
--}
--
--func (rr *RKEY) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
--		" " + strconv.Itoa(int(rr.Protocol)) +
--		" " + strconv.Itoa(int(rr.Algorithm)) +
--		" " + rr.PublicKey
--}
--
--type NSAP struct {
--	Hdr    RR_Header
--	Length uint8
--	Nsap   string
--}
--
--func (rr *NSAP) Header() *RR_Header { return &rr.Hdr }
--func (rr *NSAP) copy() RR           { return &NSAP{*rr.Hdr.copyHeader(), rr.Length, rr.Nsap} }
--func (rr *NSAP) String() string     { return rr.Hdr.String() + strconv.Itoa(int(rr.Length)) + " " + rr.Nsap }
--func (rr *NSAP) len() int           { return rr.Hdr.len() + 1 + len(rr.Nsap) + 1 }
--
--type NSAPPTR struct {
--	Hdr RR_Header
--	Ptr string `dns:"domain-name"`
--}
--
--func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
--func (rr *NSAPPTR) copy() RR           { return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} }
--func (rr *NSAPPTR) String() string     { return rr.Hdr.String() + sprintName(rr.Ptr) }
--func (rr *NSAPPTR) len() int           { return rr.Hdr.len() + len(rr.Ptr) }
--
--type NSEC3 struct {
--	Hdr        RR_Header
--	Hash       uint8
--	Flags      uint8
--	Iterations uint16
--	SaltLength uint8
--	Salt       string `dns:"size-hex"`
--	HashLength uint8
--	NextDomain string   `dns:"size-base32"`
--	TypeBitMap []uint16 `dns:"nsec"`
--}
--
--func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
--func (rr *NSEC3) copy() RR {
--	cp := make([]uint16, len(rr.TypeBitMap), cap(rr.TypeBitMap))
--	copy(cp, rr.TypeBitMap)
--	return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, cp}
--}
--
--func (rr *NSEC3) String() string {
--	s := rr.Hdr.String()
--	s += strconv.Itoa(int(rr.Hash)) +
--		" " + strconv.Itoa(int(rr.Flags)) +
--		" " + strconv.Itoa(int(rr.Iterations)) +
--		" " + saltToString(rr.Salt) +
--		" " + rr.NextDomain
--	for i := 0; i < len(rr.TypeBitMap); i++ {
--		s += " " + Type(rr.TypeBitMap[i]).String()
--	}
--	return s
--}
--
--func (rr *NSEC3) len() int {
--	l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
--	lastwindow := uint32(256) // sentinel: windows are t/256 <= 255, so this never matches
--	for _, t := range rr.TypeBitMap {
--		window := t / 256
--		if uint32(window) != lastwindow {
--			l += 1 + 32
--		}
--		lastwindow = uint32(window)
--	}
--	return l
--}
--
--type NSEC3PARAM struct {
--	Hdr        RR_Header
--	Hash       uint8
--	Flags      uint8
--	Iterations uint16
--	SaltLength uint8
--	Salt       string `dns:"hex"`
--}
--
--func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
--func (rr *NSEC3PARAM) len() int           { return rr.Hdr.len() + 2 + 4 + 1 + len(rr.Salt)/2 }
--func (rr *NSEC3PARAM) copy() RR {
--	return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
--}
--
--func (rr *NSEC3PARAM) String() string {
--	s := rr.Hdr.String()
--	s += strconv.Itoa(int(rr.Hash)) +
--		" " + strconv.Itoa(int(rr.Flags)) +
--		" " + strconv.Itoa(int(rr.Iterations)) +
--		" " + saltToString(rr.Salt)
--	return s
--}
--
--type TKEY struct {
--	Hdr        RR_Header
--	Algorithm  string `dns:"domain-name"`
--	Inception  uint32
--	Expiration uint32
--	Mode       uint16
--	Error      uint16
--	KeySize    uint16
--	Key        string
--	OtherLen   uint16
--	OtherData  string
--}
--
--func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
--func (rr *TKEY) copy() RR {
--	return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
--}
--
--func (rr *TKEY) String() string {
--	// It has no presentation format
--	return ""
--}
--
--func (rr *TKEY) len() int {
--	return rr.Hdr.len() + len(rr.Algorithm) + 1 + 4 + 4 + 6 +
--		len(rr.Key) + 2 + len(rr.OtherData)
--}
--
--// RFC3597 represents an unknown/generic RR.
--type RFC3597 struct {
--	Hdr   RR_Header
--	Rdata string `dns:"hex"`
--}
--
--func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
--func (rr *RFC3597) copy() RR           { return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} }
--func (rr *RFC3597) len() int           { return rr.Hdr.len() + len(rr.Rdata)/2 + 2 }
--
--func (rr *RFC3597) String() string {
--	// Let's call it a hack
--	s := rfc3597Header(rr.Hdr)
--
--	s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata
--	return s
--}
--
--func rfc3597Header(h RR_Header) string {
--	var s string
--
--	s += sprintName(h.Name) + "\t"
--	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
--	s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
--	s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
--	return s
--}
--
--type URI struct {
--	Hdr      RR_Header
--	Priority uint16
--	Weight   uint16
--	Target   []string `dns:"txt"`
--}
--
--func (rr *URI) Header() *RR_Header { return &rr.Hdr }
--func (rr *URI) copy() RR {
--	cp := make([]string, len(rr.Target), cap(rr.Target))
--	copy(cp, rr.Target)
--	return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, cp}
--}
--
--func (rr *URI) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
--		" " + strconv.Itoa(int(rr.Weight)) + sprintTxt(rr.Target)
--}
--
--func (rr *URI) len() int {
--	l := rr.Hdr.len() + 4
--	for _, t := range rr.Target {
--		l += len(t) + 1
--	}
--	return l
--}
--
--type DHCID struct {
--	Hdr    RR_Header
--	Digest string `dns:"base64"`
--}
--
--func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
--func (rr *DHCID) copy() RR           { return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} }
--func (rr *DHCID) String() string     { return rr.Hdr.String() + rr.Digest }
--func (rr *DHCID) len() int           { return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.Digest)) }
--
--type TLSA struct {
--	Hdr          RR_Header
--	Usage        uint8
--	Selector     uint8
--	MatchingType uint8
--	Certificate  string `dns:"hex"`
--}
--
--func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
--func (rr *TLSA) len() int           { return rr.Hdr.len() + 3 + len(rr.Certificate)/2 }
--
--func (rr *TLSA) copy() RR {
--	return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
--}
--
--func (rr *TLSA) String() string {
--	return rr.Hdr.String() +
--		strconv.Itoa(int(rr.Usage)) +
--		" " + strconv.Itoa(int(rr.Selector)) +
--		" " + strconv.Itoa(int(rr.MatchingType)) +
--		" " + rr.Certificate
--}
--
--type HIP struct {
--	Hdr                RR_Header
--	HitLength          uint8
--	PublicKeyAlgorithm uint8
--	PublicKeyLength    uint16
--	Hit                string   `dns:"hex"`
--	PublicKey          string   `dns:"base64"`
--	RendezvousServers  []string `dns:"domain-name"`
--}
--
--func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
--func (rr *HIP) copy() RR {
--	cp := make([]string, len(rr.RendezvousServers), cap(rr.RendezvousServers))
--	copy(cp, rr.RendezvousServers)
--	return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, cp}
--}
--
--func (rr *HIP) String() string {
--	s := rr.Hdr.String() +
--		strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
--		" " + rr.Hit +
--		" " + rr.PublicKey
--	for _, d := range rr.RendezvousServers {
--		s += " " + sprintName(d)
--	}
--	return s
--}
--
--func (rr *HIP) len() int {
--	l := rr.Hdr.len() + 4 +
--		len(rr.Hit)/2 +
--		base64.StdEncoding.DecodedLen(len(rr.PublicKey))
--	for _, d := range rr.RendezvousServers {
--		l += len(d) + 1
--	}
--	return l
--}
--
--type NINFO struct {
--	Hdr    RR_Header
--	ZSData []string `dns:"txt"`
--}
--
--func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
--func (rr *NINFO) copy() RR {
--	cp := make([]string, len(rr.ZSData), cap(rr.ZSData))
--	copy(cp, rr.ZSData)
--	return &NINFO{*rr.Hdr.copyHeader(), cp}
--}
--
--func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
--
--func (rr *NINFO) len() int {
--	l := rr.Hdr.len()
--	for _, t := range rr.ZSData {
--		l += len(t) + 1
--	}
--	return l
--}
--
--type WKS struct {
--	Hdr      RR_Header
--	Address  net.IP `dns:"a"`
--	Protocol uint8
--	BitMap   []uint16 `dns:"wks"`
--}
--
--func (rr *WKS) Header() *RR_Header { return &rr.Hdr }
--func (rr *WKS) len() int           { return rr.Hdr.len() + net.IPv4len + 1 }
--
--func (rr *WKS) copy() RR {
--	cp := make([]uint16, len(rr.BitMap), cap(rr.BitMap))
--	copy(cp, rr.BitMap)
--	return &WKS{*rr.Hdr.copyHeader(), copyIP(rr.Address), rr.Protocol, cp}
--}
--
--func (rr *WKS) String() (s string) {
--	s = rr.Hdr.String()
--	if rr.Address != nil {
--		s += rr.Address.String()
--	}
--	for i := 0; i < len(rr.BitMap); i++ {
--		// should lookup the port
--		s += " " + strconv.Itoa(int(rr.BitMap[i]))
--	}
--	return s
--}
--
--type NID struct {
--	Hdr        RR_Header
--	Preference uint16
--	NodeID     uint64
--}
--
--func (rr *NID) Header() *RR_Header { return &rr.Hdr }
--func (rr *NID) copy() RR           { return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} }
--func (rr *NID) len() int           { return rr.Hdr.len() + 2 + 8 }
--
--func (rr *NID) String() string {
--	s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
--	node := fmt.Sprintf("%0.16x", rr.NodeID)
--	s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
--	return s
--}
--
--type L32 struct {
--	Hdr        RR_Header
--	Preference uint16
--	Locator32  net.IP `dns:"a"`
--}
--
--func (rr *L32) Header() *RR_Header { return &rr.Hdr }
--func (rr *L32) copy() RR           { return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} }
--func (rr *L32) len() int           { return rr.Hdr.len() + net.IPv4len }
--
--func (rr *L32) String() string {
--	if rr.Locator32 == nil {
--		return rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
--	}
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
--		" " + rr.Locator32.String()
--}
--
--type L64 struct {
--	Hdr        RR_Header
--	Preference uint16
--	Locator64  uint64
--}
--
--func (rr *L64) Header() *RR_Header { return &rr.Hdr }
--func (rr *L64) copy() RR           { return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} }
--func (rr *L64) len() int           { return rr.Hdr.len() + 2 + 8 }
--
--func (rr *L64) String() string {
--	s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
--	node := fmt.Sprintf("%0.16X", rr.Locator64)
--	s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
--	return s
--}
--
--type LP struct {
--	Hdr        RR_Header
--	Preference uint16
--	Fqdn       string `dns:"domain-name"`
--}
--
--func (rr *LP) Header() *RR_Header { return &rr.Hdr }
--func (rr *LP) copy() RR           { return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} }
--func (rr *LP) len() int           { return rr.Hdr.len() + 2 + len(rr.Fqdn) + 1 }
--
--func (rr *LP) String() string {
--	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
--}
--
--type EUI48 struct {
--	Hdr     RR_Header
--	Address uint64 `dns:"uint48"`
--}
--
--func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
--func (rr *EUI48) copy() RR           { return &EUI48{*rr.Hdr.copyHeader(), rr.Address} }
--func (rr *EUI48) String() string     { return rr.Hdr.String() + euiToString(rr.Address, 48) }
--func (rr *EUI48) len() int           { return rr.Hdr.len() + 6 }
--
--type EUI64 struct {
--	Hdr     RR_Header
--	Address uint64
--}
--
--func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
--func (rr *EUI64) copy() RR           { return &EUI64{*rr.Hdr.copyHeader(), rr.Address} }
--func (rr *EUI64) String() string     { return rr.Hdr.String() + euiToString(rr.Address, 64) }
--func (rr *EUI64) len() int           { return rr.Hdr.len() + 8 }
--
--// Support is incomplete - just handle it as an unknown record
--/*
--type CAA struct {
--	Hdr   RR_Header
--	Flag  uint8
--	Tag   string
--	Value string `dns:"octet"`
--}
--
--func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
--func (rr *CAA) copy() RR           { return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} }
--func (rr *CAA) len() int           { return rr.Hdr.len() + 1 + len(rr.Tag) + 1 + len(rr.Value) }
--
--func (rr *CAA) String() string {
--	s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Flag), 10) + " " + rr.Tag
--	s += strconv.QuoteToASCII(rr.Value)
--	return s
--}
--*/
--
--type UID struct {
--	Hdr RR_Header
--	Uid uint32
--}
--
--func (rr *UID) Header() *RR_Header { return &rr.Hdr }
--func (rr *UID) copy() RR           { return &UID{*rr.Hdr.copyHeader(), rr.Uid} }
--func (rr *UID) String() string     { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }
--func (rr *UID) len() int           { return rr.Hdr.len() + 4 }
--
--type GID struct {
--	Hdr RR_Header
--	Gid uint32
--}
--
--func (rr *GID) Header() *RR_Header { return &rr.Hdr }
--func (rr *GID) copy() RR           { return &GID{*rr.Hdr.copyHeader(), rr.Gid} }
--func (rr *GID) String() string     { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }
--func (rr *GID) len() int           { return rr.Hdr.len() + 4 }
--
--type UINFO struct {
--	Hdr   RR_Header
--	Uinfo string
--}
--
--func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
--func (rr *UINFO) copy() RR           { return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} }
--func (rr *UINFO) String() string     { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }
--func (rr *UINFO) len() int           { return rr.Hdr.len() + len(rr.Uinfo) + 1 }
--
--type EID struct {
--	Hdr      RR_Header
--	Endpoint string `dns:"hex"`
--}
--
--func (rr *EID) Header() *RR_Header { return &rr.Hdr }
--func (rr *EID) copy() RR           { return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} }
--func (rr *EID) String() string     { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }
--func (rr *EID) len() int           { return rr.Hdr.len() + len(rr.Endpoint)/2 }
--
--type NIMLOC struct {
--	Hdr     RR_Header
--	Locator string `dns:"hex"`
--}
--
--func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
--func (rr *NIMLOC) copy() RR           { return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} }
--func (rr *NIMLOC) String() string     { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }
--func (rr *NIMLOC) len() int           { return rr.Hdr.len() + len(rr.Locator)/2 }
--
--type OPENPGPKEY struct {
--	Hdr       RR_Header
--	PublicKey string `dns:"base64"`
--}
--
--func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
--func (rr *OPENPGPKEY) copy() RR           { return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} }
--func (rr *OPENPGPKEY) String() string     { return rr.Hdr.String() + rr.PublicKey }
--func (rr *OPENPGPKEY) len() int {
--	return rr.Hdr.len() + base64.StdEncoding.DecodedLen(len(rr.PublicKey))
--}
--
--// TimeToString translates the RRSIG's incep. and expir. times to the
--// string representation used when printing the record.
--// It takes serial arithmetic (RFC 1982) into account.
--func TimeToString(t uint32) string {
--	mod := ((int64(t) - time.Now().Unix()) / year68) - 1
--	if mod < 0 {
--		mod = 0
--	}
--	ti := time.Unix(int64(t)-(mod*year68), 0).UTC()
--	return ti.Format("20060102150405")
--}
--
--// StringToTime translates the RRSIG's incep. and expir. times from
--// string values like "20110403154150" to a 32 bit integer.
--// It takes serial arithmetic (RFC 1982) into account.
--func StringToTime(s string) (uint32, error) {
--	t, e := time.Parse("20060102150405", s)
--	if e != nil {
--		return 0, e
--	}
--	mod := (t.Unix() / year68) - 1
--	if mod < 0 {
--		mod = 0
--	}
--	return uint32(t.Unix() - (mod * year68)), nil
--}
--
--// saltToString converts a NSECX salt to uppercase and
--// returns "-" when it is empty
--func saltToString(s string) string {
--	if len(s) == 0 {
--		return "-"
--	}
--	return strings.ToUpper(s)
--}
--
--func euiToString(eui uint64, bits int) (hex string) {
--	switch bits {
--	case 64:
--		hex = fmt.Sprintf("%16.16x", eui)
--		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
--			"-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
--	case 48:
--		hex = fmt.Sprintf("%12.12x", eui)
--		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
--			"-" + hex[8:10] + "-" + hex[10:12]
--	}
--	return
--}
--
--// copyIP returns a copy of ip.
--func copyIP(ip net.IP) net.IP {
--	p := make(net.IP, len(ip))
--	copy(p, ip)
--	return p
--}
--
--// Map of constructors for each RR type.
--var typeToRR = map[uint16]func() RR{
--	TypeA:     func() RR { return new(A) },
--	TypeAAAA:  func() RR { return new(AAAA) },
--	TypeAFSDB: func() RR { return new(AFSDB) },
--	//	TypeCAA:        func() RR { return new(CAA) },
--	TypeCDS:        func() RR { return new(CDS) },
--	TypeCERT:       func() RR { return new(CERT) },
--	TypeCNAME:      func() RR { return new(CNAME) },
--	TypeDHCID:      func() RR { return new(DHCID) },
--	TypeDLV:        func() RR { return new(DLV) },
--	TypeDNAME:      func() RR { return new(DNAME) },
--	TypeKEY:        func() RR { return new(KEY) },
--	TypeDNSKEY:     func() RR { return new(DNSKEY) },
--	TypeDS:         func() RR { return new(DS) },
--	TypeEUI48:      func() RR { return new(EUI48) },
--	TypeEUI64:      func() RR { return new(EUI64) },
--	TypeGID:        func() RR { return new(GID) },
--	TypeGPOS:       func() RR { return new(GPOS) },
--	TypeEID:        func() RR { return new(EID) },
--	TypeHINFO:      func() RR { return new(HINFO) },
--	TypeHIP:        func() RR { return new(HIP) },
--	TypeKX:         func() RR { return new(KX) },
--	TypeL32:        func() RR { return new(L32) },
--	TypeL64:        func() RR { return new(L64) },
--	TypeLOC:        func() RR { return new(LOC) },
--	TypeLP:         func() RR { return new(LP) },
--	TypeMB:         func() RR { return new(MB) },
--	TypeMD:         func() RR { return new(MD) },
--	TypeMF:         func() RR { return new(MF) },
--	TypeMG:         func() RR { return new(MG) },
--	TypeMINFO:      func() RR { return new(MINFO) },
--	TypeMR:         func() RR { return new(MR) },
--	TypeMX:         func() RR { return new(MX) },
--	TypeNAPTR:      func() RR { return new(NAPTR) },
--	TypeNID:        func() RR { return new(NID) },
--	TypeNINFO:      func() RR { return new(NINFO) },
--	TypeNIMLOC:     func() RR { return new(NIMLOC) },
--	TypeNS:         func() RR { return new(NS) },
--	TypeNSAP:       func() RR { return new(NSAP) },
--	TypeNSAPPTR:    func() RR { return new(NSAPPTR) },
--	TypeNSEC3:      func() RR { return new(NSEC3) },
--	TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
--	TypeNSEC:       func() RR { return new(NSEC) },
--	TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
--	TypeOPT:        func() RR { return new(OPT) },
--	TypePTR:        func() RR { return new(PTR) },
--	TypeRKEY:       func() RR { return new(RKEY) },
--	TypeRP:         func() RR { return new(RP) },
--	TypePX:         func() RR { return new(PX) },
--	TypeSIG:        func() RR { return new(SIG) },
--	TypeRRSIG:      func() RR { return new(RRSIG) },
--	TypeRT:         func() RR { return new(RT) },
--	TypeSOA:        func() RR { return new(SOA) },
--	TypeSPF:        func() RR { return new(SPF) },
--	TypeSRV:        func() RR { return new(SRV) },
--	TypeSSHFP:      func() RR { return new(SSHFP) },
--	TypeTA:         func() RR { return new(TA) },
--	TypeTALINK:     func() RR { return new(TALINK) },
--	TypeTKEY:       func() RR { return new(TKEY) },
--	TypeTLSA:       func() RR { return new(TLSA) },
--	TypeTSIG:       func() RR { return new(TSIG) },
--	TypeTXT:        func() RR { return new(TXT) },
--	TypeUID:        func() RR { return new(UID) },
--	TypeUINFO:      func() RR { return new(UINFO) },
--	TypeURI:        func() RR { return new(URI) },
--	TypeWKS:        func() RR { return new(WKS) },
--	TypeX25:        func() RR { return new(X25) },
--}
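
(For illustration: the record types removed above each implement String(), which renders the
RFC presentation format, and NewRR parses it back. A minimal sketch of that round trip,
assuming the vendored github.com/miekg/dns package is importable; the TLSA data is made up.)

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	// Parse the presentation format into a concrete RR type.
    	rr, err := dns.NewRR("_443._tcp.example.org. 3600 IN TLSA 3 1 1 a0b1c2d3")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(rr)        // rendered via the record's String() method
    	fmt.Printf("%T\n", rr) // *dns.TLSA
    }
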
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/types_test.go b/Godeps/_workspace/src/github.com/miekg/dns/types_test.go
-deleted file mode 100644
-index 1186129..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/types_test.go
-+++ /dev/null
-@@ -1,42 +0,0 @@
--package dns
--
--import (
--	"testing"
--)
--
--func TestCmToM(t *testing.T) {
--	s := cmToM(0, 0)
--	if s != "0.00" {
--		t.Error("0, 0")
--	}
--
--	s = cmToM(1, 0)
--	if s != "0.01" {
--		t.Error("1, 0")
--	}
--
--	s = cmToM(3, 1)
--	if s != "0.30" {
--		t.Error("3, 1")
--	}
--
--	s = cmToM(4, 2)
--	if s != "4" {
--		t.Error("4, 2")
--	}
--
--	s = cmToM(5, 3)
--	if s != "50" {
--		t.Error("5, 3")
--	}
--
--	s = cmToM(7, 5)
--	if s != "7000" {
--		t.Error("7, 5")
--	}
--
--	s = cmToM(9, 9)
--	if s != "90000000" {
--		t.Error("9, 9")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp.go b/Godeps/_workspace/src/github.com/miekg/dns/udp.go
-deleted file mode 100644
-index 0342543..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/udp.go
-+++ /dev/null
-@@ -1,55 +0,0 @@
--// +build !windows
--
--package dns
--
--import (
--	"net"
--	"syscall"
--)
--
--type sessionUDP struct {
--	raddr   *net.UDPAddr
--	context []byte
--}
--
--func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
--
--// setUDPSocketOptions sets the UDP socket options.
--// This function is implemented on a per platform basis. See udp_*.go for more details
--func setUDPSocketOptions(conn *net.UDPConn) error {
--	sa, err := getUDPSocketName(conn)
--	if err != nil {
--		return err
--	}
--	switch sa.(type) {
--	case *syscall.SockaddrInet6:
--		v6only, err := getUDPSocketOptions6Only(conn)
--		if err != nil {
--			return err
--		}
--		setUDPSocketOptions6(conn)
--		if !v6only {
--			setUDPSocketOptions4(conn)
--		}
--	case *syscall.SockaddrInet4:
--		setUDPSocketOptions4(conn)
--	}
--	return nil
--}
--
--// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
--// net.UDPAddr.
--func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
--	oob := make([]byte, 40)
--	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
--	if err != nil {
--		return n, nil, err
--	}
--	return n, &sessionUDP{raddr, oob[:oobn]}, err
--}
--
--// writeToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *sessionUDP instead of a net.Addr.
--func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
--	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
--	return n, err
--}
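
(For illustration: the session helpers above pair net.UDPConn.ReadMsgUDP with WriteMsgUDP so
the reply can carry the control data the query arrived with. A standalone sketch of that
pattern using only the standard library; the port is arbitrary and error handling is minimal.)

    package main

    import (
    	"log"
    	"net"
    )

    func main() {
    	conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 8053})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	buf := make([]byte, 512)
    	oob := make([]byte, 40) // same control-data buffer size the helper above uses
    	n, oobn, _, raddr, err := conn.ReadMsgUDP(buf, oob)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Echo the payload, handing back the captured control data so the kernel can pick
    	// the same source address the datagram arrived on (where the platform supports it).
    	if _, _, err := conn.WriteMsgUDP(buf[:n], oob[:oobn], raddr); err != nil {
    		log.Fatal(err)
    	}
    }
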
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go
-deleted file mode 100644
-index 7a10785..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/udp_linux.go
-+++ /dev/null
-@@ -1,63 +0,0 @@
--// +build linux
--
--package dns
--
--// See:
--// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
--// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
--//
--// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides what the
--// outgoing interface is, and this might not always be the correct one. This code makes sure
--// the egress packet's interface matches the ingress one.
--
--import (
--	"net"
--	"syscall"
--)
--
--// setUDPSocketOptions4 prepares the v4 socket for sessions.
--func setUDPSocketOptions4(conn *net.UDPConn) error {
--	file, err := conn.File()
--	if err != nil {
--		return err
--	}
--	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
--		return err
--	}
--	return nil
--}
--
--// setUDPSocketOptions6 prepares the v6 socket for sessions.
--func setUDPSocketOptions6(conn *net.UDPConn) error {
--	file, err := conn.File()
--	if err != nil {
--		return err
--	}
--	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
--		return err
--	}
--	return nil
--}
--
--// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is v4/v6 combined
--// (dualstack).
--func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
--	file, err := conn.File()
--	if err != nil {
--		return false, err
--	}
--	// dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
--	v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
--	if err != nil {
--		return false, err
--	}
--	return v6only == 1, nil
--}
--
--func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
--	file, err := conn.File()
--	if err != nil {
--		return nil, err
--	}
--	return syscall.Getsockname(int(file.Fd()))
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go
-deleted file mode 100644
-index c38dd3e..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/udp_other.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--// +build !linux
--
--package dns
--
--import (
--	"net"
--	"syscall"
--)
--
--// These do nothing. See udp_linux.go for an example of how to implement this.
--
--// We tried to adhere to some kind of naming scheme.
--
--func setUDPSocketOptions4(conn *net.UDPConn) error                 { return nil }
--func setUDPSocketOptions6(conn *net.UDPConn) error                 { return nil }
--func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error)     { return false, nil }
--func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go b/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go
-deleted file mode 100644
-index 4c48723..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/udp_windows.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--// +build windows
--
--package dns
--
--import "net"
--
--type sessionUDP struct {
--	raddr *net.UDPAddr
--}
--
--// readFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
--// net.UDPAddr.
--func readFromSessionUDP(conn *net.UDPConn, b []byte) (int, *sessionUDP, error) {
--	n, raddr, err := conn.ReadFrom(b)
--	if err != nil {
--		return n, nil, err
--	}
--	session := &sessionUDP{raddr.(*net.UDPAddr)}
--	return n, session, err
--}
--
--// writeToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *sessionUDP instead of a net.Addr.
--func writeToSessionUDP(conn *net.UDPConn, b []byte, session *sessionUDP) (int, error) {
--	n, err := conn.WriteTo(b, session.raddr)
--	return n, err
--}
--
--func (s *sessionUDP) RemoteAddr() net.Addr { return s.raddr }
--
--// setUDPSocketOptions sets the UDP socket options.
--// This function is implemented on a per platform basis. See udp_*.go for more details
--func setUDPSocketOptions(conn *net.UDPConn) error {
--	return nil
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update.go b/Godeps/_workspace/src/github.com/miekg/dns/update.go
-deleted file mode 100644
-index 275d4e6..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/update.go
-+++ /dev/null
-@@ -1,138 +0,0 @@
--// DYNAMIC UPDATES
--//
--// Dynamic updates reuse the DNS message format, but rename three of
--// the sections. Question is Zone, Answer is Prerequisite, Authority is
--// Update, only the Additional is not renamed. See RFC 2136 for the gory details.
--//
--// You can set a rather complex set of rules for the existence or absence of
--// certain resource records or names in a zone to specify if resource records
--// should be added or removed. The table from RFC 2136, supplemented with the Go
--// DNS functions, shows which functions exist to specify the prerequisites.
--//
--// 3.2.4 - Table Of Metavalues Used In Prerequisite Section
--//
--//   CLASS    TYPE     RDATA    Meaning                    Function
--//   --------------------------------------------------------------
--//   ANY      ANY      empty    Name is in use             dns.NameUsed
--//   ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
--//   NONE     ANY      empty    Name is not in use         dns.NameNotUsed
--//   NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
--//   zone     rrset    rr       RRset exists (value dep)   dns.Used
--//
--// The prerequisite section can also be left empty.
--// If you have decided on the prerequisites you can tell what RRs should
--// be added or deleted. The next table shows the options you have and
--// what functions to call.
--//
--// 3.4.2.6 - Table Of Metavalues Used In Update Section
--//
--//   CLASS    TYPE     RDATA    Meaning                     Function
--//   ---------------------------------------------------------------
--//   ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
--//   ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
--//   NONE     rrset    rr       Delete an RR from RRset     dns.Remove
--//   zone     rrset    rr       Add to an RRset             dns.Insert
--//
--package dns
--
--// NameUsed sets the RRs in the prereq section to
--// "Name is in use" RRs. RFC 2136 section 2.4.4.
--func (u *Msg) NameUsed(rr []RR) {
--	u.Answer = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
--	}
--}
--
--// NameNotUsed sets the RRs in the prereq section to
--// "Name is in not use" RRs. RFC 2136 section 2.4.5.
--func (u *Msg) NameNotUsed(rr []RR) {
--	u.Answer = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Answer[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}
--	}
--}
--
--// Used sets the RRs in the prereq section to
--// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
--func (u *Msg) Used(rr []RR) {
--	if len(u.Question) == 0 {
--		panic("dns: empty question section")
--	}
--	u.Answer = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Answer[i] = r
--		u.Answer[i].Header().Class = u.Question[0].Qclass
--	}
--}
--
--// RRsetUsed sets the RRs in the prereq section to
--// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
--func (u *Msg) RRsetUsed(rr []RR) {
--	u.Answer = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Answer[i] = r
--		u.Answer[i].Header().Class = ClassANY
--		u.Answer[i].Header().Ttl = 0
--		u.Answer[i].Header().Rdlength = 0
--	}
--}
--
--// RRsetNotUsed sets the RRs in the prereq section to
--// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
--func (u *Msg) RRsetNotUsed(rr []RR) {
--	u.Answer = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Answer[i] = r
--		u.Answer[i].Header().Class = ClassNONE
--		u.Answer[i].Header().Rdlength = 0
--		u.Answer[i].Header().Ttl = 0
--	}
--}
--
--// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
--func (u *Msg) Insert(rr []RR) {
--	if len(u.Question) == 0 {
--		panic("dns: empty question section")
--	}
--	u.Ns = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Ns[i] = r
--		u.Ns[i].Header().Class = u.Question[0].Qclass
--	}
--}
--
--// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
--func (u *Msg) RemoveRRset(rr []RR) {
--	m := make(map[RR_Header]struct{})
--	u.Ns = make([]RR, 0, len(rr))
--	for _, r := range rr {
--		h := *r.Header().copyHeader()
--		h.Class = ClassANY
--		h.Ttl = 0
--		h.Rdlength = 0
--		if _, ok := m[h]; ok {
--			continue
--		}
--		m[h] = struct{}{}
--		u.Ns = append(u.Ns, &ANY{h})
--	}
--}
--
--// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
--func (u *Msg) RemoveName(rr []RR) {
--	u.Ns = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Ns[i] = &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}
--	}
--}
--
--// Remove creates a dynamic update packet that deletes an RR from the RRset, see RFC 2136 section 2.5.4.
--func (u *Msg) Remove(rr []RR) {
--	u.Ns = make([]RR, len(rr))
--	for i, r := range rr {
--		u.Ns[i] = r
--		u.Ns[i].Header().Class = ClassNONE
--		u.Ns[i].Header().Ttl = 0
--	}
--}
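
(For illustration: a minimal sketch of building and sending an RFC 2136 update with the
helpers above. The zone, record and server address are placeholders, the update is unsigned,
and the vendored dns package is assumed to be importable.)

    package main

    import (
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	m.Id = dns.Id()
    	m.Opcode = dns.OpcodeUpdate
    	// The zone section of an update travels in the question section.
    	m.Question = []dns.Question{{Name: "example.org.", Qtype: dns.TypeSOA, Qclass: dns.ClassINET}}

    	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
    	if err != nil {
    		log.Fatal(err)
    	}
    	m.Insert([]dns.RR{rr}) // the "Add to an RRset" row of the table above

    	c := new(dns.Client)
    	if _, _, err := c.Exchange(m, "192.0.2.53:53"); err != nil {
    		log.Fatal(err)
    	}
    }
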
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/update_test.go b/Godeps/_workspace/src/github.com/miekg/dns/update_test.go
-deleted file mode 100644
-index fc22536..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/update_test.go
-+++ /dev/null
-@@ -1,105 +0,0 @@
--package dns
--
--import (
--	"bytes"
--	"testing"
--)
--
--func TestDynamicUpdateParsing(t *testing.T) {
--	prefix := "example.com. IN "
--	for _, typ := range TypeToString {
--		if typ == "CAA" || typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" ||
--			typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" {
--			continue
--		}
--		r, e := NewRR(prefix + typ)
--		if e != nil {
--			t.Log("failure to parse: " + prefix + typ)
--			t.Fail()
--		} else {
--			t.Logf("parsed: %s", r.String())
--		}
--	}
--}
--
--func TestDynamicUpdateUnpack(t *testing.T) {
--	// From https://github.com/miekg/dns/issues/150#issuecomment-62296803
--	// It should be an update message for the zone "example.",
--	// deleting the A RRset "example." and then adding an A record at "example.".
--	// class ANY, TYPE A
--	buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1}
--	msg := new(Msg)
--	err := msg.Unpack(buf)
--	if err != nil {
--		t.Log("failed to unpack: " + err.Error() + "\n" + msg.String())
--		t.Fail()
--	}
--}
--
--func TestDynamicUpdateZeroRdataUnpack(t *testing.T) {
--	m := new(Msg)
--	rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0}
--	m.Answer = []RR{rr, rr, rr, rr, rr}
--	m.Ns = m.Answer
--	for n, s := range TypeToString {
--		rr.Rrtype = n
--		bytes, err := m.Pack()
--		if err != nil {
--			t.Logf("failed to pack %s: %v", s, err)
--			t.Fail()
--			continue
--		}
--		if err := new(Msg).Unpack(bytes); err != nil {
--			t.Logf("failed to unpack %s: %v", s, err)
--			t.Fail()
--		}
--	}
--}
--
--func TestRemoveRRset(t *testing.T) {
--	// Should add a zero data RR in Class ANY with a TTL of 0
--	// for each set mentioned in the RRs provided to it.
--	rr, err := NewRR(". 100 IN A 127.0.0.1")
--	if err != nil {
--		t.Fatalf("Error constructing RR: %v", err)
--	}
--	m := new(Msg)
--	m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}}
--	expectstr := m.String()
--	expect, err := m.Pack()
--	if err != nil {
--		t.Fatalf("Error packing expected msg: %v", err)
--	}
--
--	m.Ns = nil
--	m.RemoveRRset([]RR{rr})
--	actual, err := m.Pack()
--	if err != nil {
--		t.Fatalf("Error packing actual msg: %v", err)
--	}
--	if !bytes.Equal(actual, expect) {
--		tmp := new(Msg)
--		if err := tmp.Unpack(actual); err != nil {
--			t.Fatalf("Error unpacking actual msg: %v", err)
--		}
--		t.Logf("Expected msg:\n%s", expectstr)
--		t.Logf("Actual msg:\n%v", tmp)
--		t.Fail()
--	}
--
--	m.Ns = nil
--	m.RemoveRRset([]RR{rr, rr})
--	actual, err = m.Pack()
--	if err != nil {
--		t.Fatalf("Error packing actual msg: %v", err)
--	}
--	if !bytes.Equal(actual, expect) {
--		tmp := new(Msg)
--		if err := tmp.Unpack(actual); err != nil {
--			t.Fatalf("Error unpacking actual msg: %v", err)
--		}
--		t.Logf("Expected msg:\n%v", expectstr)
--		t.Logf("Actual msg:\n%v", tmp)
--		t.Fail()
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go b/Godeps/_workspace/src/github.com/miekg/dns/xfr.go
-deleted file mode 100644
-index 57bfb16..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/xfr.go
-+++ /dev/null
-@@ -1,236 +0,0 @@
--package dns
--
--import (
--	"time"
--)
--
--// Envelope is used when doing a zone transfer with a remote server.
--type Envelope struct {
--	RR    []RR  // The set of RRs in the answer section of the xfr reply message.
--	Error error // If something went wrong, this contains the error.
--}
--
--// A Transfer defines parameters that are used during a zone transfer.
--type Transfer struct {
--	*Conn
--	DialTimeout    time.Duration     // net.DialTimeout (ns), defaults to 2 * 1e9
--	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections (ns), defaults to 2 * 1e9
--	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections (ns), defaults to 2 * 1e9
--	TsigSecret     map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
--	tsigTimersOnly bool
--}
--
--// Think we need a way to stop the transfer
--
--// In performs an incoming transfer with the server in a.
--func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
--	timeout := dnsTimeout
--	if t.DialTimeout != 0 {
--		timeout = t.DialTimeout
--	}
--	t.Conn, err = DialTimeout("tcp", a, timeout)
--	if err != nil {
--		return nil, err
--	}
--	if err := t.WriteMsg(q); err != nil {
--		return nil, err
--	}
--	env = make(chan *Envelope)
--	go func() {
--		if q.Question[0].Qtype == TypeAXFR {
--			go t.inAxfr(q.Id, env)
--			return
--		}
--		if q.Question[0].Qtype == TypeIXFR {
--			go t.inIxfr(q.Id, env)
--			return
--		}
--	}()
--	return env, nil
--}
--
--func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
--	first := true
--	defer t.Close()
--	defer close(c)
--	timeout := dnsTimeout
--	if t.ReadTimeout != 0 {
--		timeout = t.ReadTimeout
--	}
--	for {
--		t.Conn.SetReadDeadline(time.Now().Add(timeout))
--		in, err := t.ReadMsg()
--		if err != nil {
--			c <- &Envelope{nil, err}
--			return
--		}
--		if id != in.Id {
--			c <- &Envelope{in.Answer, ErrId}
--			return
--		}
--		if first {
--			if !isSOAFirst(in) {
--				c <- &Envelope{in.Answer, ErrSoa}
--				return
--			}
--			first = !first
--			// only one answer that is SOA, receive more
--			if len(in.Answer) == 1 {
--				t.tsigTimersOnly = true
--				c <- &Envelope{in.Answer, nil}
--				continue
--			}
--		}
--
--		if !first {
--			t.tsigTimersOnly = true // Subsequent envelopes use this.
--			if isSOALast(in) {
--				c <- &Envelope{in.Answer, nil}
--				return
--			}
--			c <- &Envelope{in.Answer, nil}
--		}
--	}
--	panic("dns: not reached")
--}
--
--func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
--	serial := uint32(0) // The first serial seen is the current server serial
--	first := true
--	defer t.Close()
--	defer close(c)
--	timeout := dnsTimeout
--	if t.ReadTimeout != 0 {
--		timeout = t.ReadTimeout
--	}
--	for {
--		t.SetReadDeadline(time.Now().Add(timeout))
--		in, err := t.ReadMsg()
--		if err != nil {
--			c <- &Envelope{in.Answer, err}
--			return
--		}
--		if id != in.Id {
--			c <- &Envelope{in.Answer, ErrId}
--			return
--		}
--		if first {
--			// A single SOA RR signals "no changes"
--			if len(in.Answer) == 1 && isSOAFirst(in) {
--				c <- &Envelope{in.Answer, nil}
--				return
--			}
--
--			// Check if the returned answer is ok
--			if !isSOAFirst(in) {
--				c <- &Envelope{in.Answer, ErrSoa}
--				return
--			}
--			// This serial is important
--			serial = in.Answer[0].(*SOA).Serial
--			first = !first
--		}
--
--		// Now we need to check each message for SOA records, to see what we need to do
--		if !first {
--			t.tsigTimersOnly = true
--			// If the last record in the IXFR contains the server's SOA, we should quit
--			if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok {
--				if v.Serial == serial {
--					c <- &Envelope{in.Answer, nil}
--					return
--				}
--			}
--			c <- &Envelope{in.Answer, nil}
--		}
--	}
--}
--
--// Out performs an outgoing transfer with the client connecting in w.
--// Basic use pattern:
--//
--//	ch := make(chan *dns.Envelope)
--//	tr := new(dns.Transfer)
--//	tr.Out(w, r, ch)
--//	c <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
--//	close(ch)
--//	w.Hijack()
--//	// w.Close() // Client closes connection
--//
--// The server is responsible for sending the correct sequence of RRs through the
--// channel ch.
--func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
--	r := new(Msg)
--	// Compress?
--	r.SetReply(q)
--	r.Authoritative = true
--
--	go func() {
--		for x := range ch {
--			// assume it fits TODO(miek): fix
--			r.Answer = append(r.Answer, x.RR...)
--			if err := w.WriteMsg(r); err != nil {
--				return
--			}
--		}
--		w.TsigTimersOnly(true)
--		r.Answer = nil
--	}()
--	return nil
--}
--
--// ReadMsg reads a message from the transfer connection t.
--func (t *Transfer) ReadMsg() (*Msg, error) {
--	m := new(Msg)
--	p := make([]byte, MaxMsgSize)
--	n, err := t.Read(p)
--	if err != nil && n == 0 {
--		return nil, err
--	}
--	p = p[:n]
--	if err := m.Unpack(p); err != nil {
--		return nil, err
--	}
--	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
--		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
--			return m, ErrSecret
--		}
--		// Need to work on the original message p, as that was used to calculate the tsig.
--		err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
--	}
--	return m, err
--}
--
--// WriteMsg writes a message through the transfer connection t.
--func (t *Transfer) WriteMsg(m *Msg) (err error) {
--	var out []byte
--	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
--		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
--			return ErrSecret
--		}
--		out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
--	} else {
--		out, err = m.Pack()
--	}
--	if err != nil {
--		return err
--	}
--	if _, err = t.Write(out); err != nil {
--		return err
--	}
--	return nil
--}
--
--func isSOAFirst(in *Msg) bool {
--	if len(in.Answer) > 0 {
--		return in.Answer[0].Header().Rrtype == TypeSOA
--	}
--	return false
--}
--
--func isSOALast(in *Msg) bool {
--	if len(in.Answer) > 0 {
--		return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
--	}
--	return false
--}
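
(For illustration: a minimal sketch of driving Transfer.In above as an AXFR client. The zone
and server address are placeholders and the server must permit the transfer.)

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	q := new(dns.Msg)
    	q.Id = dns.Id()
    	q.Question = []dns.Question{{Name: "example.org.", Qtype: dns.TypeAXFR, Qclass: dns.ClassINET}}

    	tr := new(dns.Transfer)
    	env, err := tr.In(q, "192.0.2.53:53")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for e := range env { // each Envelope carries a batch of RRs or an error
    		if e.Error != nil {
    			log.Fatal(e.Error)
    		}
    		for _, rr := range e.RR {
    			fmt.Println(rr)
    		}
    	}
    }
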
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go b/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go
-deleted file mode 100644
-index 7f1183e..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/zgenerate.go
-+++ /dev/null
-@@ -1,157 +0,0 @@
--package dns
--
--import (
--	"fmt"
--	"strconv"
--	"strings"
--)
--
--// Parse the $GENERATE statement as used in BIND9 zones.
--// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
--// We are called after '$GENERATE '. After which we expect:
--// * the range (12-24/2)
--// * lhs (ownername)
--// * [[ttl][class]]
--// * type
--// * rhs (rdata)
--// But we are lazy here: only the range is parsed; *all* occurrences
--// of $ after that are interpreted.
--// Any errors are returned as a string value; the empty string signals
--// "no error".
--func generate(l lex, c chan lex, t chan *Token, o string) string {
--	step := 1
--	if i := strings.IndexAny(l.token, "/"); i != -1 {
--		if i+1 == len(l.token) {
--			return "bad step in $GENERATE range"
--		}
--		if s, e := strconv.Atoi(l.token[i+1:]); e != nil {
--			return "bad step in $GENERATE range"
--		} else {
--			if s < 0 {
--				return "bad step in $GENERATE range"
--			}
--			step = s
--		}
--		l.token = l.token[:i]
--	}
--	sx := strings.SplitN(l.token, "-", 2)
--	if len(sx) != 2 {
--		return "bad start-stop in $GENERATE range"
--	}
--	start, err := strconv.Atoi(sx[0])
--	if err != nil {
--		return "bad start in $GENERATE range"
--	}
--	end, err := strconv.Atoi(sx[1])
--	if err != nil {
--		return "bad stop in $GENERATE range"
--	}
--	if end < 0 || start < 0 || end <= start {
--		return "bad range in $GENERATE range"
--	}
--
--	<-c // _BLANK
--	// Create a complete new string, which we then parse again.
--	s := ""
--BuildRR:
--	l = <-c
--	if l.value != _NEWLINE && l.value != _EOF {
--		s += l.token
--		goto BuildRR
--	}
--	for i := start; i <= end; i += step {
--		var (
--			escape bool
--			dom    string
--			mod    string
--			err    string
--			offset int
--		)
--
--		for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
--			switch s[j] {
--			case '\\':
--				if escape {
--					dom += "\\"
--					escape = false
--					continue
--				}
--				escape = true
--			case '$':
--				mod = "%d"
--				offset = 0
--				if escape {
--					dom += "$"
--					escape = false
--					continue
--				}
--				escape = false
--				if j+1 >= len(s) { // End of the string
--					dom += fmt.Sprintf(mod, i+offset)
--					continue
--				} else {
--					if s[j+1] == '$' {
--						dom += "$"
--						j++
--						continue
--					}
--				}
--				// Search for { and }
--				if s[j+1] == '{' { // Modifier block
--					sep := strings.Index(s[j+2:], "}")
--					if sep == -1 {
--						return "bad modifier in $GENERATE"
--					}
--					mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
--					if err != "" {
--						return err
--					}
--					j += 2 + sep // Jump to it
--				}
--				dom += fmt.Sprintf(mod, i+offset)
--			default:
--				if escape { // Pretty useless here
--					escape = false
--					continue
--				}
--				dom += string(s[j])
--			}
--		}
--		// Re-parse the RR and send it on the current channel t
--		rx, e := NewRR("$ORIGIN " + o + "\n" + dom)
--		if e != nil {
--			return e.(*ParseError).err
--		}
--		t <- &Token{RR: rx}
--		// It's more efficient to first build the rrlist and then parse it in
--		// one go! But is this a problem?
--	}
--	return ""
--}
--
--// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
--func modToPrintf(s string) (string, int, string) {
--	xs := strings.SplitN(s, ",", 3)
--	if len(xs) != 3 {
--		return "", 0, "bad modifier in $GENERATE"
--	}
--	// xs[0] is offset, xs[1] is width, xs[2] is base
--	if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
--		return "", 0, "bad base in $GENERATE"
--	}
--	offset, err := strconv.Atoi(xs[0])
--	if err != nil {
--		return "", 0, "bad offset in $GENERATE"
--	}
--	width, err := strconv.Atoi(xs[1])
--	if err != nil {
--		return "", offset, "bad width in $GENERATE"
--	}
--	switch {
--	case width < 0:
--		return "", offset, "bad width in $GENERATE"
--	case width == 0:
--		return "%" + xs[1] + xs[2], offset, ""
--	}
--	return "%0" + xs[1] + xs[2], offset, ""
--}
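
(For illustration: a small sketch of how a $GENERATE line is expanded by the code above when
read through ParseZone. The zone text is made up; the vendored dns package is assumed.)

    package main

    import (
    	"fmt"
    	"log"
    	"strings"

    	"github.com/miekg/dns"
    )

    func main() {
    	zone := "$GENERATE 1-3 host-$ A 192.0.2.$\n"
    	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "") {
    		if tok.Error != nil {
    			log.Fatal(tok.Error)
    		}
    		fmt.Println(tok.RR) // host-1.example.org. ... A 192.0.2.1, then host-2, host-3
    	}
    }
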
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan.go
-deleted file mode 100644
-index 7ba5198..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/zscan.go
-+++ /dev/null
-@@ -1,956 +0,0 @@
--package dns
--
--import (
--	"io"
--	"log"
--	"os"
--	"strconv"
--	"strings"
--)
--
--type debugging bool
--
--const debug debugging = false
--
--func (d debugging) Printf(format string, args ...interface{}) {
--	if d {
--		log.Printf(format, args...)
--	}
--}
--
--const maxTok = 2048 // Largest token we can return.
--const maxUint16 = 1<<16 - 1
--
--// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
--// * Add ownernames if they are left blank;
--// * Suppress sequences of spaces;
--// * Make each RR fit on one line (_NEWLINE is sent last)
--// * Handle comments: ;
--// * Handle braces - anywhere.
--const (
--	// Zonefile
--	_EOF = iota
--	_STRING
--	_BLANK
--	_QUOTE
--	_NEWLINE
--	_RRTYPE
--	_OWNER
--	_CLASS
--	_DIRORIGIN   // $ORIGIN
--	_DIRTTL      // $TTL
--	_DIRINCLUDE  // $INCLUDE
--	_DIRGENERATE // $GENERATE
--
--	// Privatekey file
--	_VALUE
--	_KEY
--
--	_EXPECT_OWNER_DIR      // Ownername
--	_EXPECT_OWNER_BL       // Whitespace after the ownername
--	_EXPECT_ANY            // Expect rrtype, ttl or class
--	_EXPECT_ANY_NOCLASS    // Expect rrtype or ttl
--	_EXPECT_ANY_NOCLASS_BL // The whitespace after _EXPECT_ANY_NOCLASS
--	_EXPECT_ANY_NOTTL      // Expect rrtype or class
--	_EXPECT_ANY_NOTTL_BL   // Whitespace after _EXPECT_ANY_NOTTL
--	_EXPECT_RRTYPE         // Expect rrtype
--	_EXPECT_RRTYPE_BL      // Whitespace BEFORE rrtype
--	_EXPECT_RDATA          // The first element of the rdata
--	_EXPECT_DIRTTL_BL      // Space after directive $TTL
--	_EXPECT_DIRTTL         // Directive $TTL
--	_EXPECT_DIRORIGIN_BL   // Space after directive $ORIGIN
--	_EXPECT_DIRORIGIN      // Directive $ORIGIN
--	_EXPECT_DIRINCLUDE_BL  // Space after directive $INCLUDE
--	_EXPECT_DIRINCLUDE     // Directive $INCLUDE
--	_EXPECT_DIRGENERATE    // Directive $GENERATE
--	_EXPECT_DIRGENERATE_BL // Space after directive $GENERATE
--)
--
--// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
--// where the error occurred.
--type ParseError struct {
--	file string
--	err  string
--	lex  lex
--}
--
--func (e *ParseError) Error() (s string) {
--	if e.file != "" {
--		s = e.file + ": "
--	}
--	s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
--		strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
--	return
--}
--
--type lex struct {
--	token      string // text of the token
--	tokenUpper string // uppercase text of the token
--	length     int    // length of the token
--	err        bool   // when true, token text has lexer error
--	value      uint8  // value: _STRING, _BLANK, etc.
--	line       int    // line in the file
--	column     int    // column in the file
--	torc       uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
--	comment    string // any comment text seen
--}
--
--// *Tokens are returned when a zone file is parsed.
--type Token struct {
--	RR                  // the scanned resource record when Error is nil
--	Error   *ParseError // when an error occurred, this has the error specifics
--	Comment string      // a potential comment positioned after the RR and on the same line
--}
--
--// NewRR reads the RR contained in the string s. Only the first RR is returned.
--// The class defaults to IN and TTL defaults to 3600. The full zone file
--// syntax like $TTL, $ORIGIN, etc. is supported.
--// All fields of the returned RR are set, except RR.Header().Rdlength which is set to 0.
--func NewRR(s string) (RR, error) {
--	if s[len(s)-1] != '\n' { // We need a closing newline
--		return ReadRR(strings.NewReader(s+"\n"), "")
--	}
--	return ReadRR(strings.NewReader(s), "")
--}
--
--// ReadRR reads the RR contained in q.
--// See NewRR for more documentation.
--func ReadRR(q io.Reader, filename string) (RR, error) {
--	r := <-parseZoneHelper(q, ".", filename, 1)
--	if r.Error != nil {
--		return nil, r.Error
--	}
--	return r.RR, nil
--}
--
--// ParseZone reads an RFC 1035 style zone file from r. It returns *Tokens on the
--// returned channel, which consist of the parsed RR, a potential comment or an error.
--// If there is an error the RR is nil. The string file is only used
--// in error reporting. The string origin is used as the initial origin, as
--// if the file started with: $ORIGIN origin .
--// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
--// The channel t is closed by ParseZone when the end of r is reached.
--//
--// Basic usage pattern when reading from a string (z) containing the
--// zone data:
--//
--//	for x := range dns.ParseZone(strings.NewReader(z), "", "") {
--//		if x.Error == nil {
--//			// Do something with x.RR
--//		}
--//	}
--//
--// Comments specified after an RR (and on the same line!) are returned too:
--//
--//	foo. IN A 10.0.0.1 ; this is a comment
--//
--// The text "; this is comment" is returned in Token.Comment . Comments inside the
--// RR are discarded. Comments on a line by themselves are discarded too.
--func ParseZone(r io.Reader, origin, file string) chan *Token {
--	return parseZoneHelper(r, origin, file, 10000)
--}
--
--func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
--	t := make(chan *Token, chansize)
--	go parseZone(r, origin, file, t, 0)
--	return t
--}
--
--func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
--	defer func() {
--		if include == 0 {
--			close(t)
--		}
--	}()
--	s := scanInit(r)
--	c := make(chan lex, 1000)
--	// Start the lexer
--	go zlexer(s, c)
--	// 6 possible beginnings of a line, _ is a space
--	// 0. _RRTYPE                              -> all omitted until the rrtype
--	// 1. _OWNER _ _RRTYPE                     -> class/ttl omitted
--	// 2. _OWNER _ _STRING _ _RRTYPE           -> class omitted
--	// 3. _OWNER _ _STRING _ _CLASS  _ _RRTYPE -> ttl/class
--	// 4. _OWNER _ _CLASS  _ _RRTYPE           -> ttl omitted
--	// 5. _OWNER _ _CLASS  _ _STRING _ _RRTYPE -> class/ttl (reversed)
--	// After detecting these, we know the _RRTYPE so we can jump to functions
--	// handling the rdata for each of these types.
--
--	if origin == "" {
--		origin = "."
--	}
--	origin = Fqdn(origin)
--	if _, ok := IsDomainName(origin); !ok {
--		t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
--		return
--	}
--
--	st := _EXPECT_OWNER_DIR // initial state
--	var h RR_Header
--	var defttl uint32 = defaultTtl
--	var prevName string
--	for l := range c {
--		// Lexer spotted an error already
--		if l.err == true {
--			t <- &Token{Error: &ParseError{f, l.token, l}}
--			return
--
--		}
--		switch st {
--		case _EXPECT_OWNER_DIR:
--			// We can also expect a directive, like $TTL or $ORIGIN
--			h.Ttl = defttl
--			h.Class = ClassINET
--			switch l.value {
--			case _NEWLINE: // Empty line
--				st = _EXPECT_OWNER_DIR
--			case _OWNER:
--				h.Name = l.token
--				if l.token[0] == '@' {
--					h.Name = origin
--					prevName = h.Name
--					st = _EXPECT_OWNER_BL
--					break
--				}
--				if h.Name[l.length-1] != '.' {
--					h.Name = appendOrigin(h.Name, origin)
--				}
--				_, ok := IsDomainName(l.token)
--				if !ok {
--					t <- &Token{Error: &ParseError{f, "bad owner name", l}}
--					return
--				}
--				prevName = h.Name
--				st = _EXPECT_OWNER_BL
--			case _DIRTTL:
--				st = _EXPECT_DIRTTL_BL
--			case _DIRORIGIN:
--				st = _EXPECT_DIRORIGIN_BL
--			case _DIRINCLUDE:
--				st = _EXPECT_DIRINCLUDE_BL
--			case _DIRGENERATE:
--				st = _EXPECT_DIRGENERATE_BL
--			case _RRTYPE: // Everything has been omitted, this is the first thing on the line
--				h.Name = prevName
--				h.Rrtype = l.torc
--				st = _EXPECT_RDATA
--			case _CLASS: // First thing on the line is the class
--				h.Name = prevName
--				h.Class = l.torc
--				st = _EXPECT_ANY_NOCLASS_BL
--			case _BLANK:
--				// Discard, can happen when there is nothing on the
--				// line except the RR type
--			case _STRING: // First thing on the line is the TTL
--				if ttl, ok := stringToTtl(l.token); !ok {
--					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
--					return
--				} else {
--					h.Ttl = ttl
--					// Don't change the defttl, we should take the $TTL value
--					// defttl = ttl
--				}
--				st = _EXPECT_ANY_NOTTL_BL
--
--			default:
--				t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
--				return
--			}
--		case _EXPECT_DIRINCLUDE_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
--				return
--			}
--			st = _EXPECT_DIRINCLUDE
--		case _EXPECT_DIRINCLUDE:
--			if l.value != _STRING {
--				t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
--				return
--			}
--			neworigin := origin // There may optionally be a new origin set after the filename; if not, use the current one
--			l := <-c
--			switch l.value {
--			case _BLANK:
--				l := <-c
--				if l.value == _STRING {
--					if _, ok := IsDomainName(l.token); !ok {
--						t <- &Token{Error: &ParseError{f, "bad origin name", l}}
--						return
--					}
--					// a new origin is specified.
--					if l.token[l.length-1] != '.' {
--						if origin != "." { // Prevent .. endings
--							neworigin = l.token + "." + origin
--						} else {
--							neworigin = l.token + origin
--						}
--					} else {
--						neworigin = l.token
--					}
--				}
--			case _NEWLINE, _EOF:
--				// Ok
--			default:
--				t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
--				return
--			}
--			// Start with the new file
--			r1, e1 := os.Open(l.token)
--			if e1 != nil {
--				t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
--				return
--			}
--			if include+1 > 7 {
--				t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
--				return
--			}
--			parseZone(r1, neworigin, l.token, t, include+1)
--			st = _EXPECT_OWNER_DIR
--		case _EXPECT_DIRTTL_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
--				return
--			}
--			st = _EXPECT_DIRTTL
--		case _EXPECT_DIRTTL:
--			if l.value != _STRING {
--				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
--				return
--			}
--			if e, _ := slurpRemainder(c, f); e != nil {
--				t <- &Token{Error: e}
--				return
--			}
--			if ttl, ok := stringToTtl(l.token); !ok {
--				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
--				return
--			} else {
--				defttl = ttl
--			}
--			st = _EXPECT_OWNER_DIR
--		case _EXPECT_DIRORIGIN_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
--				return
--			}
--			st = _EXPECT_DIRORIGIN
--		case _EXPECT_DIRORIGIN:
--			if l.value != _STRING {
--				t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
--				return
--			}
--			if e, _ := slurpRemainder(c, f); e != nil {
--				t <- &Token{Error: e}
--			}
--			if _, ok := IsDomainName(l.token); !ok {
--				t <- &Token{Error: &ParseError{f, "bad origin name", l}}
--				return
--			}
--			if l.token[l.length-1] != '.' {
--				if origin != "." { // Prevent .. endings
--					origin = l.token + "." + origin
--				} else {
--					origin = l.token + origin
--				}
--			} else {
--				origin = l.token
--			}
--			st = _EXPECT_OWNER_DIR
--		case _EXPECT_DIRGENERATE_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
--				return
--			}
--			st = _EXPECT_DIRGENERATE
--		case _EXPECT_DIRGENERATE:
--			if l.value != _STRING {
--				t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
--				return
--			}
--			if e := generate(l, c, t, origin); e != "" {
--				t <- &Token{Error: &ParseError{f, e, l}}
--				return
--			}
--			st = _EXPECT_OWNER_DIR
--		case _EXPECT_OWNER_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
--				return
--			}
--			st = _EXPECT_ANY
--		case _EXPECT_ANY:
--			switch l.value {
--			case _RRTYPE:
--				h.Rrtype = l.torc
--				st = _EXPECT_RDATA
--			case _CLASS:
--				h.Class = l.torc
--				st = _EXPECT_ANY_NOCLASS_BL
--			case _STRING: // TTL in this case
--				if ttl, ok := stringToTtl(l.token); !ok {
--					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
--					return
--				} else {
--					h.Ttl = ttl
--					// defttl = ttl // don't set the defttl here
--				}
--				st = _EXPECT_ANY_NOTTL_BL
--			default:
--				t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
--				return
--			}
--		case _EXPECT_ANY_NOCLASS_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank before class", l}}
--				return
--			}
--			st = _EXPECT_ANY_NOCLASS
--		case _EXPECT_ANY_NOTTL_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
--				return
--			}
--			st = _EXPECT_ANY_NOTTL
--		case _EXPECT_ANY_NOTTL:
--			switch l.value {
--			case _CLASS:
--				h.Class = l.torc
--				st = _EXPECT_RRTYPE_BL
--			case _RRTYPE:
--				h.Rrtype = l.torc
--				st = _EXPECT_RDATA
--			default:
--				t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
--				return
--			}
--		case _EXPECT_ANY_NOCLASS:
--			switch l.value {
--			case _STRING: // TTL
--				if ttl, ok := stringToTtl(l.token); !ok {
--					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
--					return
--				} else {
--					h.Ttl = ttl
--					// defttl = ttl // don't set the def ttl anymore
--				}
--				st = _EXPECT_RRTYPE_BL
--			case _RRTYPE:
--				h.Rrtype = l.torc
--				st = _EXPECT_RDATA
--			default:
--				t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
--				return
--			}
--		case _EXPECT_RRTYPE_BL:
--			if l.value != _BLANK {
--				t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
--				return
--			}
--			st = _EXPECT_RRTYPE
--		case _EXPECT_RRTYPE:
--			if l.value != _RRTYPE {
--				t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
--				return
--			}
--			h.Rrtype = l.torc
--			st = _EXPECT_RDATA
--		case _EXPECT_RDATA:
--			r, e, c1 := setRR(h, c, origin, f)
--			if e != nil {
--				// If e.lex is nil then we have encountered an unknown RR type;
--				// in that case we substitute our current lex token
--				if e.lex.token == "" && e.lex.value == 0 {
--					e.lex = l // Uh, dirty
--				}
--				t <- &Token{Error: e}
--				return
--			}
--			t <- &Token{RR: r, Comment: c1}
--			st = _EXPECT_OWNER_DIR
--		}
--	}
--	// If we get here and h.Rrtype is still zero, we haven't parsed anything; this
--	// is not an error, because an empty zone file is still a zone file.
--}
--
--// zlexer scans the sourcefile and returns tokens on the channel c.
--func zlexer(s *scan, c chan lex) {
--	var l lex
--	str := make([]byte, maxTok) // Should be enough for any token
--	stri := 0                   // Offset in str (0 means empty)
--	com := make([]byte, maxTok) // Hold comment text
--	comi := 0
--	quote := false
--	escape := false
--	space := false
--	commt := false
--	rrtype := false
--	owner := true
--	brace := 0
--	x, err := s.tokenText()
--	defer close(c)
--	for err == nil {
--		l.column = s.position.Column
--		l.line = s.position.Line
--		if stri > maxTok {
--			l.token = "token length insufficient for parsing"
--			l.err = true
--			debug.Printf("[%+v]", l.token)
--			c <- l
--			return
--		}
--		if comi > maxTok {
--			l.token = "comment length insufficient for parsing"
--			l.err = true
--			debug.Printf("[%+v]", l.token)
--			c <- l
--			return
--		}
--
--		switch x {
--		case ' ', '\t':
--			if escape {
--				escape = false
--				str[stri] = x
--				stri++
--				break
--			}
--			if quote {
--				// Inside quotes this is legal
--				str[stri] = x
--				stri++
--				break
--			}
--			if commt {
--				com[comi] = x
--				comi++
--				break
--			}
--			if stri == 0 {
--				// Space directly in the beginning, handled in the grammar
--			} else if owner {
--				// If we have a string and its the first, make it an owner
--				l.value = _OWNER
--				l.token = string(str[:stri])
--				l.tokenUpper = strings.ToUpper(l.token)
--				l.length = stri
--				// escaped $... starts with a \ not a $, so this will work
--				switch l.tokenUpper {
--				case "$TTL":
--					l.value = _DIRTTL
--				case "$ORIGIN":
--					l.value = _DIRORIGIN
--				case "$INCLUDE":
--					l.value = _DIRINCLUDE
--				case "$GENERATE":
--					l.value = _DIRGENERATE
--				}
--				debug.Printf("[7 %+v]", l.token)
--				c <- l
--			} else {
--				l.value = _STRING
--				l.token = string(str[:stri])
--				l.tokenUpper = strings.ToUpper(l.token)
--				l.length = stri
--				if !rrtype {
--					if t, ok := StringToType[l.tokenUpper]; ok {
--						l.value = _RRTYPE
--						l.torc = t
--						rrtype = true
--					} else {
--						if strings.HasPrefix(l.tokenUpper, "TYPE") {
--							if t, ok := typeToInt(l.token); !ok {
--								l.token = "unknown RR type"
--								l.err = true
--								c <- l
--								return
--							} else {
--								l.value = _RRTYPE
--								l.torc = t
--							}
--						}
--					}
--					if t, ok := StringToClass[l.tokenUpper]; ok {
--						l.value = _CLASS
--						l.torc = t
--					} else {
--						if strings.HasPrefix(l.tokenUpper, "CLASS") {
--							if t, ok := classToInt(l.token); !ok {
--								l.token = "unknown class"
--								l.err = true
--								c <- l
--								return
--							} else {
--								l.value = _CLASS
--								l.torc = t
--							}
--						}
--					}
--				}
--				debug.Printf("[6 %+v]", l.token)
--				c <- l
--			}
--			stri = 0
--			// I reverse space stuff here
--			if !space && !commt {
--				l.value = _BLANK
--				l.token = " "
--				l.length = 1
--				debug.Printf("[5 %+v]", l.token)
--				c <- l
--			}
--			owner = false
--			space = true
--		case ';':
--			if escape {
--				escape = false
--				str[stri] = x
--				stri++
--				break
--			}
--			if quote {
--				// Inside quotes this is legal
--				str[stri] = x
--				stri++
--				break
--			}
--			if stri > 0 {
--				l.value = _STRING
--				l.token = string(str[:stri])
--				l.length = stri
--				debug.Printf("[4 %+v]", l.token)
--				c <- l
--				stri = 0
--			}
--			commt = true
--			com[comi] = ';'
--			comi++
--		case '\r':
--			escape = false
--			if quote {
--				str[stri] = x
--				stri++
--				break
--			}
--			// discard if outside of quotes
--		case '\n':
--			escape = false
--			// Escaped newline
--			if quote {
--				str[stri] = x
--				stri++
--				break
--			}
--			// inside quotes this is legal
--			if commt {
--				// Reset a comment
--				commt = false
--				rrtype = false
--				stri = 0
--				// If not in a brace this ends the comment AND the RR
--				if brace == 0 {
--					owner = true
--					l.value = _NEWLINE
--					l.token = "\n"
--					l.length = 1
--					l.comment = string(com[:comi])
--					debug.Printf("[3 %+v %+v]", l.token, l.comment)
--					c <- l
--					l.comment = ""
--					comi = 0
--					break
--				}
--				com[comi] = ' ' // convert newline to space
--				comi++
--				break
--			}
--
--			if brace == 0 {
--				// If there is previous text, we should output it here
--				if stri != 0 {
--					l.value = _STRING
--					l.token = string(str[:stri])
--					l.tokenUpper = strings.ToUpper(l.token)
--
--					l.length = stri
--					if !rrtype {
--						if t, ok := StringToType[l.tokenUpper]; ok {
--							l.value = _RRTYPE
--							l.torc = t
--							rrtype = true
--						}
--					}
--					debug.Printf("[2 %+v]", l.token)
--					c <- l
--				}
--				l.value = _NEWLINE
--				l.token = "\n"
--				l.length = 1
--				debug.Printf("[1 %+v]", l.token)
--				c <- l
--				stri = 0
--				commt = false
--				rrtype = false
--				owner = true
--				comi = 0
--			}
--		case '\\':
--			// comments do not get escaped chars, everything is copied
--			if commt {
--				com[comi] = x
--				comi++
--				break
--			}
--			// something already escaped must be in string
--			if escape {
--				str[stri] = x
--				stri++
--				escape = false
--				break
--			}
--			// something escaped outside of string gets added to string
--			str[stri] = x
--			stri++
--			escape = true
--		case '"':
--			if commt {
--				com[comi] = x
--				comi++
--				break
--			}
--			if escape {
--				str[stri] = x
--				stri++
--				escape = false
--				break
--			}
--			space = false
--			// send previously gathered text and the quote
--			if stri != 0 {
--				l.value = _STRING
--				l.token = string(str[:stri])
--				l.length = stri
--
--				debug.Printf("[%+v]", l.token)
--				c <- l
--				stri = 0
--			}
--
--			// send quote itself as separate token
--			l.value = _QUOTE
--			l.token = "\""
--			l.length = 1
--			c <- l
--			quote = !quote
--		case '(', ')':
--			if commt {
--				com[comi] = x
--				comi++
--				break
--			}
--			if escape {
--				str[stri] = x
--				stri++
--				escape = false
--				break
--			}
--			if quote {
--				str[stri] = x
--				stri++
--				break
--			}
--			switch x {
--			case ')':
--				brace--
--				if brace < 0 {
--					l.token = "extra closing brace"
--					l.err = true
--					debug.Printf("[%+v]", l.token)
--					c <- l
--					return
--				}
--			case '(':
--				brace++
--			}
--		default:
--			escape = false
--			if commt {
--				com[comi] = x
--				comi++
--				break
--			}
--			str[stri] = x
--			stri++
--			space = false
--		}
--		x, err = s.tokenText()
--	}
--	if stri > 0 {
--		// Send remainder
--		l.token = string(str[:stri])
--		l.length = stri
--		l.value = _STRING
--		debug.Printf("[%+v]", l.token)
--		c <- l
--	}
--}
--
--// Extract the class number from CLASSxx
--func classToInt(token string) (uint16, bool) {
--	class, err := strconv.Atoi(token[5:])
--	if err != nil || class > maxUint16 {
--		return 0, false
--	}
--	return uint16(class), true
--}
--
--// Extract the rr number from TYPExxx
--func typeToInt(token string) (uint16, bool) {
--	typ, err := strconv.Atoi(token[4:])
--	if err != nil || typ > maxUint16 {
--		return 0, false
--	}
--	return uint16(typ), true
--}
--
--// Parse TTL strings like 2w, 2m, etc. and return the time in seconds.
--func stringToTtl(token string) (uint32, bool) {
--	s := uint32(0)
--	i := uint32(0)
--	for _, c := range token {
--		switch c {
--		case 's', 'S':
--			s += i
--			i = 0
--		case 'm', 'M':
--			s += i * 60
--			i = 0
--		case 'h', 'H':
--			s += i * 60 * 60
--			i = 0
--		case 'd', 'D':
--			s += i * 60 * 60 * 24
--			i = 0
--		case 'w', 'W':
--			s += i * 60 * 60 * 24 * 7
--			i = 0
--		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
--			i *= 10
--			i += uint32(c) - '0'
--		default:
--			return 0, false
--		}
--	}
--	return s + i, true
--}
--
--// Parse LOC records' <digits>[.<digits>][mM] into a
--// mantissa exponent format. Token should contain the entire
--// string (i.e. no spaces allowed)
--func stringToCm(token string) (e, m uint8, ok bool) {
--	if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
--		token = token[0 : len(token)-1]
--	}
--	s := strings.SplitN(token, ".", 2)
--	var meters, cmeters, val int
--	var err error
--	switch len(s) {
--	case 2:
--		if cmeters, err = strconv.Atoi(s[1]); err != nil {
--			return
--		}
--		fallthrough
--	case 1:
--		if meters, err = strconv.Atoi(s[0]); err != nil {
--			return
--		}
--	case 0:
--		// unreachable: strings.SplitN with n=2 never returns an empty slice
--		return 0, 0, false
--	}
--	ok = true
--	if meters > 0 {
--		e = 2
--		val = meters
--	} else {
--		e = 0
--		val = cmeters
--	}
--	for val > 10 {
--		e++
--		val /= 10
--	}
--	if e > 9 {
--		ok = false
--	}
--	m = uint8(val)
--	return
--}
--
--func appendOrigin(name, origin string) string {
--	if origin == "." {
--		return name + origin
--	}
--	return name + "." + origin
--}
--
--// LOC record helper function
--func locCheckNorth(token string, latitude uint32) (uint32, bool) {
--	switch token {
--	case "n", "N":
--		return LOC_EQUATOR + latitude, true
--	case "s", "S":
--		return LOC_EQUATOR - latitude, true
--	}
--	return latitude, false
--}
--
--// LOC record helper function
--func locCheckEast(token string, longitude uint32) (uint32, bool) {
--	switch token {
--	case "e", "E":
--		return LOC_EQUATOR + longitude, true
--	case "w", "W":
--		return LOC_EQUATOR - longitude, true
--	}
--	return longitude, false
--}
--
--// "Eat" the rest of the "line". Return potential comments
--func slurpRemainder(c chan lex, f string) (*ParseError, string) {
--	l := <-c
--	com := ""
--	switch l.value {
--	case _BLANK:
--		l = <-c
--		com = l.comment
--		if l.value != _NEWLINE && l.value != _EOF {
--			return &ParseError{f, "garbage after rdata", l}, ""
--		}
--	case _NEWLINE:
--		com = l.comment
--	case _EOF:
--	default:
--		return &ParseError{f, "garbage after rdata", l}, ""
--	}
--	return nil, com
--}
--
--// Parse a 64-bit value written in IPv6-like colon notation, e.g. "0014:4fff:ff20:ee64".
--// Used for the NID and L64 records.
--func stringToNodeID(l lex) (uint64, *ParseError) {
--	if len(l.token) < 19 {
--		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
--	}
--	// There must be three colons at fixed positions; if not, it's a parse error
--	if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
--		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
--	}
--	s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
--	u, e := strconv.ParseUint(s, 16, 64)
--	if e != nil {
--		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
--	}
--	return u, nil
--}
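For context: the zscan.go helpers deleted above convert human-readable zone-file fields into numeric values; stringToTtl, for instance, folds unit-suffixed digits such as "2w" or "1h30m" into a count of seconds. The following is a minimal, self-contained sketch of that accumulation loop under an invented name (stringToTTL); it is illustrative only and is not part of the removed patch:

    package main

    import "fmt"

    // stringToTTL is a hypothetical re-implementation of the accumulation idea
    // used by the removed stringToTtl: digits build up in i, and a unit suffix
    // folds i into the running total s (in seconds).
    func stringToTTL(token string) (uint32, bool) {
    	var s, i uint32
    	for _, c := range token {
    		switch c {
    		case 's', 'S':
    			s, i = s+i, 0
    		case 'm', 'M':
    			s, i = s+i*60, 0
    		case 'h', 'H':
    			s, i = s+i*3600, 0
    		case 'd', 'D':
    			s, i = s+i*86400, 0
    		case 'w', 'W':
    			s, i = s+i*604800, 0
    		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
    			i = i*10 + uint32(c-'0')
    		default:
    			return 0, false
    		}
    	}
    	return s + i, true // a bare trailing number counts as seconds
    }

    func main() {
    	v, ok := stringToTTL("1h30m")
    	fmt.Println(v, ok) // prints: 5400 true
    }
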
-diff --git a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go b/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
-deleted file mode 100644
-index 5088cdb..0000000
---- a/Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
-+++ /dev/null
-@@ -1,2155 +0,0 @@
--package dns
--
--import (
--	"encoding/base64"
--	"net"
--	"strconv"
--	"strings"
--)
--
--type parserFunc struct {
--	// Func defines the function that parses the tokens and returns the RR
--	// or an error. The last string contains any comments in the line, as
--	// returned by the lexer.
--	Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string)
--	// Signals if the RR ending is of variable length, like TXT or records
--	// that have hexadecimal or base64 as their last element in the Rdata. Records
--	// with a fixed ending include, for instance, A, AAAA and SOA.
--	Variable bool
--}
--
--// Parse the rdata of each rrtype.
--// All data from the channel c is either _STRING or _BLANK.
--// After the rdata there may come a _BLANK and then a _NEWLINE
--// or immediately a _NEWLINE. If this is not the case we flag
--// an *ParseError: garbage after rdata.
--func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	parserfunc, ok := typeToparserFunc[h.Rrtype]
--	if ok {
--		r, e, cm := parserfunc.Func(h, c, o, f)
--		if parserfunc.Variable {
--			return r, e, cm
--		}
--		if e != nil {
--			return nil, e, ""
--		}
--		e, cm = slurpRemainder(c, f)
--		if e != nil {
--			return nil, e, ""
--		}
--		return r, nil, cm
--	}
--	// RFC 3597 RR (unknown RR handling)
--	return setRFC3597(h, c, o, f)
--}
--
--// Consume the remainder of the rdata, which may contain embedded spaces, and return
--// the parsed string (sans the spaces) or an error.
--func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) {
--	s := ""
--	l := <-c // _STRING
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _STRING:
--			s += l.token
--		case _BLANK: // Ok
--		default:
--			return "", &ParseError{f, errstr, l}, ""
--		}
--		l = <-c
--	}
--	return s, nil, l.comment
--}
--
--// Consume the remainder of the rdata, which may contain embedded spaces, and return
--// the parsed string slice (sans the spaces) or an error.
--func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) {
--	// Get the remaining data until we see a NEWLINE
--	quote := false
--	l := <-c
--	var s []string
--	switch l.value == _QUOTE {
--	case true: // A number of quoted strings
--		s = make([]string, 0)
--		empty := true
--		for l.value != _NEWLINE && l.value != _EOF {
--			switch l.value {
--			case _STRING:
--				empty = false
--				s = append(s, l.token)
--			case _BLANK:
--				if quote {
--					// _BLANK can only be seen in between txt parts.
--					return nil, &ParseError{f, errstr, l}, ""
--				}
--			case _QUOTE:
--				if empty && quote {
--					s = append(s, "")
--				}
--				quote = !quote
--				empty = true
--			default:
--				return nil, &ParseError{f, errstr, l}, ""
--			}
--			l = <-c
--		}
--		if quote {
--			return nil, &ParseError{f, errstr, l}, ""
--		}
--	case false: // Unquoted text record
--		s = make([]string, 1)
--		for l.value != _NEWLINE && l.value != _EOF {
--			s[0] += l.token
--			l = <-c
--		}
--	}
--	return s, nil, l.comment
--}
--
--func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(A)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 { // Dynamic updates.
--		return rr, nil, ""
--	}
--	rr.A = net.ParseIP(l.token)
--	if rr.A == nil {
--		return nil, &ParseError{f, "bad A A", l}, ""
--	}
--	return rr, nil, ""
--}
--
--func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(AAAA)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	rr.AAAA = net.ParseIP(l.token)
--	if rr.AAAA == nil {
--		return nil, &ParseError{f, "bad AAAA AAAA", l}, ""
--	}
--	return rr, nil, ""
--}
--
--func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NS)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Ns = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Ns = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad NS Ns", l}, ""
--	}
--	if rr.Ns[l.length-1] != '.' {
--		rr.Ns = appendOrigin(rr.Ns, o)
--	}
--	return rr, nil, ""
--}
--
--func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(PTR)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Ptr = l.token
--	if l.length == 0 { // dynamic update rr.
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Ptr = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad PTR Ptr", l}, ""
--	}
--	if rr.Ptr[l.length-1] != '.' {
--		rr.Ptr = appendOrigin(rr.Ptr, o)
--	}
--	return rr, nil, ""
--}
--
--func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NSAPPTR)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Ptr = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Ptr = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, ""
--	}
--	if rr.Ptr[l.length-1] != '.' {
--		rr.Ptr = appendOrigin(rr.Ptr, o)
--	}
--	return rr, nil, ""
--}
--
--func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(RP)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Mbox = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Mbox = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok {
--			return nil, &ParseError{f, "bad RP Mbox", l}, ""
--		}
--		if rr.Mbox[l.length-1] != '.' {
--			rr.Mbox = appendOrigin(rr.Mbox, o)
--		}
--	}
--	<-c // _BLANK
--	l = <-c
--	rr.Txt = l.token
--	if l.token == "@" {
--		rr.Txt = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad RP Txt", l}, ""
--	}
--	if rr.Txt[l.length-1] != '.' {
--		rr.Txt = appendOrigin(rr.Txt, o)
--	}
--	return rr, nil, ""
--}
--
--func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MR)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Mr = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Mr = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MR Mr", l}, ""
--	}
--	if rr.Mr[l.length-1] != '.' {
--		rr.Mr = appendOrigin(rr.Mr, o)
--	}
--	return rr, nil, ""
--}
--
--func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MB)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Mb = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Mb = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MB Mb", l}, ""
--	}
--	if rr.Mb[l.length-1] != '.' {
--		rr.Mb = appendOrigin(rr.Mb, o)
--	}
--	return rr, nil, ""
--}
--
--func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MG)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Mg = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Mg = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MG Mg", l}, ""
--	}
--	if rr.Mg[l.length-1] != '.' {
--		rr.Mg = appendOrigin(rr.Mg, o)
--	}
--	return rr, nil, ""
--}
--
--func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(HINFO)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Cpu = l.token
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Os = l.token
--
--	return rr, nil, ""
--}
--
--func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MINFO)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Rmail = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Rmail = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok {
--			return nil, &ParseError{f, "bad MINFO Rmail", l}, ""
--		}
--		if rr.Rmail[l.length-1] != '.' {
--			rr.Rmail = appendOrigin(rr.Rmail, o)
--		}
--	}
--	<-c // _BLANK
--	l = <-c
--	rr.Email = l.token
--	if l.token == "@" {
--		rr.Email = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MINFO Email", l}, ""
--	}
--	if rr.Email[l.length-1] != '.' {
--		rr.Email = appendOrigin(rr.Email, o)
--	}
--	return rr, nil, ""
--}
--
--func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MF)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Mf = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Mf = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MF Mf", l}, ""
--	}
--	if rr.Mf[l.length-1] != '.' {
--		rr.Mf = appendOrigin(rr.Mf, o)
--	}
--	return rr, nil, ""
--}
--
--func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MD)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Md = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Md = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad MD Md", l}, ""
--	}
--	if rr.Md[l.length-1] != '.' {
--		rr.Md = appendOrigin(rr.Md, o)
--	}
--	return rr, nil, ""
--}
--
--func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(MX)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad MX Pref", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Mx = l.token
--	if l.token == "@" {
--		rr.Mx = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad MX Mx", l}, ""
--	}
--	if rr.Mx[l.length-1] != '.' {
--		rr.Mx = appendOrigin(rr.Mx, o)
--	}
--	return rr, nil, ""
--}
--
--func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(RT)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad RT Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Host = l.token
--	if l.token == "@" {
--		rr.Host = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad RT Host", l}, ""
--	}
--	if rr.Host[l.length-1] != '.' {
--		rr.Host = appendOrigin(rr.Host, o)
--	}
--	return rr, nil, ""
--}
--
--func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(AFSDB)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
--	} else {
--		rr.Subtype = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Hostname = l.token
--	if l.token == "@" {
--		rr.Hostname = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad AFSDB Hostname", l}, ""
--	}
--	if rr.Hostname[l.length-1] != '.' {
--		rr.Hostname = appendOrigin(rr.Hostname, o)
--	}
--	return rr, nil, ""
--}
--
--func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(X25)
--	rr.Hdr = h
--
--	l := <-c
--	rr.PSDNAddress = l.token
--	return rr, nil, ""
--}
--
--func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(KX)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad KX Pref", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Exchanger = l.token
--	if l.token == "@" {
--		rr.Exchanger = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad KX Exchanger", l}, ""
--	}
--	if rr.Exchanger[l.length-1] != '.' {
--		rr.Exchanger = appendOrigin(rr.Exchanger, o)
--	}
--	return rr, nil, ""
--}
--
--func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(CNAME)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Target = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Target = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad CNAME Target", l}, ""
--	}
--	if rr.Target[l.length-1] != '.' {
--		rr.Target = appendOrigin(rr.Target, o)
--	}
--	return rr, nil, ""
--}
--
--func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(DNAME)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Target = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Target = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad DNAME Target", l}, ""
--	}
--	if rr.Target[l.length-1] != '.' {
--		rr.Target = appendOrigin(rr.Target, o)
--	}
--	return rr, nil, ""
--}
--
--func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(SOA)
--	rr.Hdr = h
--
--	l := <-c
--	rr.Ns = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	<-c // _BLANK
--	if l.token == "@" {
--		rr.Ns = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok {
--			return nil, &ParseError{f, "bad SOA Ns", l}, ""
--		}
--		if rr.Ns[l.length-1] != '.' {
--			rr.Ns = appendOrigin(rr.Ns, o)
--		}
--	}
--
--	l = <-c
--	rr.Mbox = l.token
--	if l.token == "@" {
--		rr.Mbox = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok || l.length == 0 {
--			return nil, &ParseError{f, "bad SOA Mbox", l}, ""
--		}
--		if rr.Mbox[l.length-1] != '.' {
--			rr.Mbox = appendOrigin(rr.Mbox, o)
--		}
--	}
--	<-c // _BLANK
--
--	var (
--		v  uint32
--		ok bool
--	)
--	for i := 0; i < 5; i++ {
--		l = <-c
--		if j, e := strconv.Atoi(l.token); e != nil {
--			if i == 0 {
--				// Serial should be a number
--				return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
--			}
--			if v, ok = stringToTtl(l.token); !ok {
--				return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
--
--			}
--		} else {
--			v = uint32(j)
--		}
--		switch i {
--		case 0:
--			rr.Serial = v
--			<-c // _BLANK
--		case 1:
--			rr.Refresh = v
--			<-c // _BLANK
--		case 2:
--			rr.Retry = v
--			<-c // _BLANK
--		case 3:
--			rr.Expire = v
--			<-c // _BLANK
--		case 4:
--			rr.Minttl = v
--		}
--	}
--	return rr, nil, ""
--}
--
--func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(SRV)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad SRV Priority", l}, ""
--	} else {
--		rr.Priority = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad SRV Weight", l}, ""
--	} else {
--		rr.Weight = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad SRV Port", l}, ""
--	} else {
--		rr.Port = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Target = l.token
--	if l.token == "@" {
--		rr.Target = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad SRV Target", l}, ""
--	}
--	if rr.Target[l.length-1] != '.' {
--		rr.Target = appendOrigin(rr.Target, o)
--	}
--	return rr, nil, ""
--}
--
--func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NAPTR)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NAPTR Order", l}, ""
--	} else {
--		rr.Order = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	// Flags
--	<-c     // _BLANK
--	l = <-c // _QUOTE
--	if l.value != _QUOTE {
--		return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
--	}
--	l = <-c // Either String or Quote
--	if l.value == _STRING {
--		rr.Flags = l.token
--		l = <-c // _QUOTE
--		if l.value != _QUOTE {
--			return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
--		}
--	} else if l.value == _QUOTE {
--		rr.Flags = ""
--	} else {
--		return nil, &ParseError{f, "bad NAPTR Flags", l}, ""
--	}
--
--	// Service
--	<-c     // _BLANK
--	l = <-c // _QUOTE
--	if l.value != _QUOTE {
--		return nil, &ParseError{f, "bad NAPTR Service", l}, ""
--	}
--	l = <-c // Either String or Quote
--	if l.value == _STRING {
--		rr.Service = l.token
--		l = <-c // _QUOTE
--		if l.value != _QUOTE {
--			return nil, &ParseError{f, "bad NAPTR Service", l}, ""
--		}
--	} else if l.value == _QUOTE {
--		rr.Service = ""
--	} else {
--		return nil, &ParseError{f, "bad NAPTR Service", l}, ""
--	}
--
--	// Regexp
--	<-c     // _BLANK
--	l = <-c // _QUOTE
--	if l.value != _QUOTE {
--		return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
--	}
--	l = <-c // Either String or Quote
--	if l.value == _STRING {
--		rr.Regexp = l.token
--		l = <-c // _QUOTE
--		if l.value != _QUOTE {
--			return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
--		}
--	} else if l.value == _QUOTE {
--		rr.Regexp = ""
--	} else {
--		return nil, &ParseError{f, "bad NAPTR Regexp", l}, ""
--	}
--	// TODO: should whitespace be allowed directly after the closing quote?
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Replacement = l.token
--	if l.token == "@" {
--		rr.Replacement = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad NAPTR Replacement", l}, ""
--	}
--	if rr.Replacement[l.length-1] != '.' {
--		rr.Replacement = appendOrigin(rr.Replacement, o)
--	}
--	return rr, nil, ""
--}
--
--func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(TALINK)
--	rr.Hdr = h
--
--	l := <-c
--	rr.PreviousName = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.PreviousName = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok {
--			return nil, &ParseError{f, "bad TALINK PreviousName", l}, ""
--		}
--		if rr.PreviousName[l.length-1] != '.' {
--			rr.PreviousName = appendOrigin(rr.PreviousName, o)
--		}
--	}
--	<-c // _BLANK
--	l = <-c
--	rr.NextName = l.token
--	if l.token == "@" {
--		rr.NextName = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad TALINK NextName", l}, ""
--	}
--	if rr.NextName[l.length-1] != '.' {
--		rr.NextName = appendOrigin(rr.NextName, o)
--	}
--	return rr, nil, ""
--}
--
--func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(LOC)
--	rr.Hdr = h
--	// Non zero defaults for LOC record, see RFC 1876, Section 3.
--	rr.HorizPre = 165 // 10000
--	rr.VertPre = 162  // 10
--	rr.Size = 18      // 1
--	ok := false
--	// North
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad LOC Latitude", l}, ""
--	} else {
--		rr.Latitude = 1000 * 60 * 60 * uint32(i)
--	}
--	<-c // _BLANK
--	// Either number, 'N' or 'S'
--	l = <-c
--	if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
--		goto East
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad LOC Latitude minutes", l}, ""
--	} else {
--		rr.Latitude += 1000 * 60 * uint32(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.ParseFloat(l.token, 32); e != nil {
--		return nil, &ParseError{f, "bad LOC Latitude seconds", l}, ""
--	} else {
--		rr.Latitude += uint32(1000 * i)
--	}
--	<-c // _BLANK
--	// Either number, 'N' or 'S'
--	l = <-c
--	if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
--		goto East
--	}
--	// If still alive, flag an error
--	return nil, &ParseError{f, "bad LOC Latitude North/South", l}, ""
--
--East:
--	// East
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad LOC Longitude", l}, ""
--	} else {
--		rr.Longitude = 1000 * 60 * 60 * uint32(i)
--	}
--	<-c // _BLANK
--	// Either number, 'E' or 'W'
--	l = <-c
--	if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
--		goto Altitude
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad LOC Longitude minutes", l}, ""
--	} else {
--		rr.Longitude += 1000 * 60 * uint32(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.ParseFloat(l.token, 32); e != nil {
--		return nil, &ParseError{f, "bad LOC Longitude seconds", l}, ""
--	} else {
--		rr.Longitude += uint32(1000 * i)
--	}
--	<-c // _BLANK
--	// Either number, 'E' or 'W'
--	l = <-c
--	if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
--		goto Altitude
--	}
--	// If still alive, flag an error
--	return nil, &ParseError{f, "bad LOC Longitude East/West", l}, ""
--
--Altitude:
--	<-c // _BLANK
--	l = <-c
--	if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
--		l.token = l.token[0 : len(l.token)-1]
--	}
--	if i, e := strconv.ParseFloat(l.token, 32); e != nil {
--		return nil, &ParseError{f, "bad LOC Altitude", l}, ""
--	} else {
--		rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
--	}
--
--	// And now optionally the other values
--	l = <-c
--	count := 0
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _STRING:
--			switch count {
--			case 0: // Size
--				if e, m, ok := stringToCm(l.token); !ok {
--					return nil, &ParseError{f, "bad LOC Size", l}, ""
--				} else {
--					rr.Size = (e & 0x0f) | (m << 4 & 0xf0)
--				}
--			case 1: // HorizPre
--				if e, m, ok := stringToCm(l.token); !ok {
--					return nil, &ParseError{f, "bad LOC HorizPre", l}, ""
--				} else {
--					rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0)
--				}
--			case 2: // VertPre
--				if e, m, ok := stringToCm(l.token); !ok {
--					return nil, &ParseError{f, "bad LOC VertPre", l}, ""
--				} else {
--					rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0)
--				}
--			}
--			count++
--		case _BLANK:
--			// Ok
--		default:
--			return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, ""
--		}
--		l = <-c
--	}
--	return rr, nil, ""
--}
--
--func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(HIP)
--	rr.Hdr = h
--
--	// HitLength is not represented
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
--	} else {
--		rr.PublicKeyAlgorithm = uint8(i)
--	}
--	<-c              // _BLANK
--	l = <-c          // _STRING
--	rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
--	rr.HitLength = uint8(len(rr.Hit)) / 2
--
--	<-c                    // _BLANK
--	l = <-c                // _STRING
--	rr.PublicKey = l.token // This cannot contain spaces
--	rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
--
--	// RendezvousServers (if any)
--	l = <-c
--	xs := make([]string, 0)
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _STRING:
--			if l.token == "@" {
--				xs = append(xs, o)
--				continue
--			}
--			_, ok := IsDomainName(l.token)
--			if !ok || l.length == 0 {
--				return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
--			}
--			if l.token[l.length-1] != '.' {
--				l.token = appendOrigin(l.token, o)
--			}
--			xs = append(xs, l.token)
--		case _BLANK:
--			// Ok
--		default:
--			return nil, &ParseError{f, "bad HIP RendezvousServers", l}, ""
--		}
--		l = <-c
--	}
--	rr.RendezvousServers = xs
--	return rr, nil, l.comment
--}
--
--func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(CERT)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if v, ok := StringToCertType[l.token]; ok {
--		rr.Type = v
--	} else if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad CERT Type", l}, ""
--	} else {
--		rr.Type = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad CERT KeyTag", l}, ""
--	} else {
--		rr.KeyTag = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if v, ok := StringToAlgorithm[l.token]; ok {
--		rr.Algorithm = v
--	} else if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad CERT Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad CERT Certificate", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Certificate = s
--	return rr, nil, c1
--}
--
--func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(OPENPGPKEY)
--	rr.Hdr = h
--
--	s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.PublicKey = s
--	return rr, nil, c1
--}
--
--func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setRRSIG(h, c, o, f)
--	if r != nil {
--		return &SIG{*r.(*RRSIG)}, e, s
--	}
--	return nil, e, s
--}
--
--func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(RRSIG)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if t, ok := StringToType[l.tokenUpper]; !ok {
--		if strings.HasPrefix(l.tokenUpper, "TYPE") {
--			if t, ok = typeToInt(l.tokenUpper); !ok {
--				return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
--			} else {
--				rr.TypeCovered = t
--			}
--		} else {
--			return nil, &ParseError{f, "bad RRSIG Typecovered", l}, ""
--		}
--	} else {
--		rr.TypeCovered = t
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := strconv.Atoi(l.token); err != nil {
--		return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := strconv.Atoi(l.token); err != nil {
--		return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
--	} else {
--		rr.Labels = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := strconv.Atoi(l.token); err != nil {
--		return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
--	} else {
--		rr.OrigTtl = uint32(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := StringToTime(l.token); err != nil {
--		// Try to see if all numeric and use it as epoch
--		if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
--			// TODO(miek): error out on > MAX_UINT32, same below
--			rr.Expiration = uint32(i)
--		} else {
--			return nil, &ParseError{f, "bad RRSIG Expiration", l}, ""
--		}
--	} else {
--		rr.Expiration = i
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := StringToTime(l.token); err != nil {
--		if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
--			rr.Inception = uint32(i)
--		} else {
--			return nil, &ParseError{f, "bad RRSIG Inception", l}, ""
--		}
--	} else {
--		rr.Inception = i
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, err := strconv.Atoi(l.token); err != nil {
--		return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
--	} else {
--		rr.KeyTag = uint16(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	rr.SignerName = l.token
--	if l.token == "@" {
--		rr.SignerName = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok || l.length == 0 {
--			return nil, &ParseError{f, "bad RRSIG SignerName", l}, ""
--		}
--		if rr.SignerName[l.length-1] != '.' {
--			rr.SignerName = appendOrigin(rr.SignerName, o)
--		}
--	}
--	s, e, c1 := endingToString(c, "bad RRSIG Signature", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Signature = s
--	return rr, nil, c1
--}
--
--func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NSEC)
--	rr.Hdr = h
--
--	l := <-c
--	rr.NextDomain = l.token
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if l.token == "@" {
--		rr.NextDomain = o
--	} else {
--		_, ok := IsDomainName(l.token)
--		if !ok {
--			return nil, &ParseError{f, "bad NSEC NextDomain", l}, ""
--		}
--		if rr.NextDomain[l.length-1] != '.' {
--			rr.NextDomain = appendOrigin(rr.NextDomain, o)
--		}
--	}
--
--	rr.TypeBitMap = make([]uint16, 0)
--	var (
--		k  uint16
--		ok bool
--	)
--	l = <-c
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _BLANK:
--			// Ok
--		case _STRING:
--			if k, ok = StringToType[l.tokenUpper]; !ok {
--				if k, ok = typeToInt(l.tokenUpper); !ok {
--					return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
--				}
--			}
--			rr.TypeBitMap = append(rr.TypeBitMap, k)
--		default:
--			return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, ""
--		}
--		l = <-c
--	}
--	return rr, nil, l.comment
--}
--
--func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NSEC3)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
--	} else {
--		rr.Hash = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3 Flags", l}, ""
--	} else {
--		rr.Flags = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3 Iterations", l}, ""
--	} else {
--		rr.Iterations = uint16(i)
--	}
--	<-c
--	l = <-c
--	if len(l.token) == 0 {
--		return nil, &ParseError{f, "bad NSEC3 Salt", l}, ""
--	}
--	rr.SaltLength = uint8(len(l.token)) / 2
--	rr.Salt = l.token
--
--	<-c
--	l = <-c
--	rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits)
--	rr.NextDomain = l.token
--
--	rr.TypeBitMap = make([]uint16, 0)
--	var (
--		k  uint16
--		ok bool
--	)
--	l = <-c
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _BLANK:
--			// Ok
--		case _STRING:
--			if k, ok = StringToType[l.tokenUpper]; !ok {
--				if k, ok = typeToInt(l.tokenUpper); !ok {
--					return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
--				}
--			}
--			rr.TypeBitMap = append(rr.TypeBitMap, k)
--		default:
--			return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, ""
--		}
--		l = <-c
--	}
--	return rr, nil, l.comment
--}
--
--func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NSEC3PARAM)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
--	} else {
--		rr.Hash = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, ""
--	} else {
--		rr.Flags = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, ""
--	} else {
--		rr.Iterations = uint16(i)
--	}
--	<-c
--	l = <-c
--	rr.SaltLength = uint8(len(l.token))
--	rr.Salt = l.token
--	return rr, nil, ""
--}
--
--func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(EUI48)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.length != 17 {
--		return nil, &ParseError{f, "bad EUI48 Address", l}, ""
--	}
--	addr := make([]byte, 12)
--	dash := 0
--	for i := 0; i < 10; i += 2 {
--		addr[i] = l.token[i+dash]
--		addr[i+1] = l.token[i+1+dash]
--		dash++
--		if l.token[i+1+dash] != '-' {
--			return nil, &ParseError{f, "bad EUI48 Address", l}, ""
--		}
--	}
--	addr[10] = l.token[15]
--	addr[11] = l.token[16]
--
--	if i, e := strconv.ParseUint(string(addr), 16, 48); e != nil {
--		return nil, &ParseError{f, "bad EUI48 Address", l}, ""
--	} else {
--		rr.Address = i
--	}
--	return rr, nil, ""
--}
--
--func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(EUI64)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.length != 23 {
--		return nil, &ParseError{f, "bad EUI64 Address", l}, ""
--	}
--	addr := make([]byte, 16)
--	dash := 0
--	for i := 0; i < 14; i += 2 {
--		addr[i] = l.token[i+dash]
--		addr[i+1] = l.token[i+1+dash]
--		dash++
--		if l.token[i+1+dash] != '-' {
--			return nil, &ParseError{f, "bad EUI64 Address", l}, ""
--		}
--	}
--	addr[14] = l.token[21]
--	addr[15] = l.token[22]
--
--	if i, e := strconv.ParseUint(string(addr), 16, 64); e != nil {
--		return nil, &ParseError{f, "bad EUI64 Address", l}, ""
--	} else {
--		rr.Address = uint64(i)
--	}
--	return rr, nil, ""
--}
--
--func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(WKS)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	rr.Address = net.ParseIP(l.token)
--	if rr.Address == nil {
--		return nil, &ParseError{f, "bad WKS Address", l}, ""
--	}
--
--	<-c // _BLANK
--	l = <-c
--	proto := "tcp"
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad WKS Protocol", l}, ""
--	} else {
--		rr.Protocol = uint8(i)
--		switch rr.Protocol {
--		case 17:
--			proto = "udp"
--		case 6:
--			proto = "tcp"
--		default:
--			return nil, &ParseError{f, "bad WKS Protocol", l}, ""
--		}
--	}
--
--	<-c
--	l = <-c
--	rr.BitMap = make([]uint16, 0)
--	var (
--		k   int
--		err error
--	)
--	for l.value != _NEWLINE && l.value != _EOF {
--		switch l.value {
--		case _BLANK:
--			// Ok
--		case _STRING:
--			if k, err = net.LookupPort(proto, l.token); err != nil {
--				if i, e := strconv.Atoi(l.token); e == nil { // If it is a number, use that
--					rr.BitMap = append(rr.BitMap, uint16(i))
--				} else {
--					return nil, &ParseError{f, "bad WKS BitMap", l}, ""
--				}
--			}
--			rr.BitMap = append(rr.BitMap, uint16(k))
--		default:
--			return nil, &ParseError{f, "bad WKS BitMap", l}, ""
--		}
--		l = <-c
--	}
--	return rr, nil, l.comment
--}
--
--func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(SSHFP)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad SSHFP Type", l}, ""
--	} else {
--		rr.Type = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	rr.FingerPrint = l.token
--	return rr, nil, ""
--}
--
--func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
--	rr := new(DNSKEY)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
--	} else {
--		rr.Flags = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad " + typ + " Protocol", l}, ""
--	} else {
--		rr.Protocol = uint8(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad "+typ+" PublicKey", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.PublicKey = s
--	return rr, nil, c1
--}
--
--func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDNSKEYs(h, c, o, f, "KEY")
--	if r != nil {
--		return &KEY{*r.(*DNSKEY)}, e, s
--	}
--	return nil, e, s
--}
--
--func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY")
--	return r, e, s
--}
--
--func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY")
--	if r != nil {
--		return &CDNSKEY{*r.(*DNSKEY)}, e, s
--	}
--	return nil, e, s
--}
--
--func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(RKEY)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad RKEY Flags", l}, ""
--	} else {
--		rr.Flags = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad RKEY Protocol", l}, ""
--	} else {
--		rr.Protocol = uint8(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad RKEY Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad RKEY PublicKey", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.PublicKey = s
--	return rr, nil, c1
--}
--
--func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(EID)
--	rr.Hdr = h
--	s, e, c1 := endingToString(c, "bad EID Endpoint", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Endpoint = s
--	return rr, nil, c1
--}
--
--func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NIMLOC)
--	rr.Hdr = h
--	s, e, c1 := endingToString(c, "bad NIMLOC Locator", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Locator = s
--	return rr, nil, c1
--}
--
--func setNSAP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NSAP)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NSAP Length", l}, ""
--	} else {
--		rr.Length = uint8(i)
--	}
--	<-c // _BLANK
--	s, e, c1 := endingToString(c, "bad NSAP Nsap", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Nsap = s
--	return rr, nil, c1
--}
--
--func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(GPOS)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if _, e := strconv.ParseFloat(l.token, 64); e != nil {
--		return nil, &ParseError{f, "bad GPOS Longitude", l}, ""
--	} else {
--		rr.Longitude = l.token
--	}
--	<-c // _BLANK
--	l = <-c
--	if _, e := strconv.ParseFloat(l.token, 64); e != nil {
--		return nil, &ParseError{f, "bad GPOS Latitude", l}, ""
--	} else {
--		rr.Latitude = l.token
--	}
--	<-c // _BLANK
--	l = <-c
--	if _, e := strconv.ParseFloat(l.token, 64); e != nil {
--		return nil, &ParseError{f, "bad GPOS Altitude", l}, ""
--	} else {
--		rr.Altitude = l.token
--	}
--	return rr, nil, ""
--}
--
--func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) {
--	rr := new(DS)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
--	} else {
--		rr.KeyTag = uint16(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		if i, ok := StringToAlgorithm[l.tokenUpper]; !ok {
--			return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
--		} else {
--			rr.Algorithm = i
--		}
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad " + typ + " DigestType", l}, ""
--	} else {
--		rr.DigestType = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad "+typ+" Digest", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Digest = s
--	return rr, nil, c1
--}
--
--func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDSs(h, c, o, f, "DS")
--	return r, e, s
--}
--
--func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDSs(h, c, o, f, "DLV")
--	if r != nil {
--		return &DLV{*r.(*DS)}, e, s
--	}
--	return nil, e, s
--}
--
--func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	r, e, s := setDSs(h, c, o, f, "CDS")
--	if r != nil {
--		return &CDS{*r.(*DS)}, e, s
--	}
--	return nil, e, s
--}
--
--func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(TA)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad TA KeyTag", l}, ""
--	} else {
--		rr.KeyTag = uint16(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		if i, ok := StringToAlgorithm[l.tokenUpper]; !ok {
--			return nil, &ParseError{f, "bad TA Algorithm", l}, ""
--		} else {
--			rr.Algorithm = i
--		}
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad TA DigestType", l}, ""
--	} else {
--		rr.DigestType = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad TA Digest", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Digest = s
--	return rr, nil, c1
--}
--
--func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(TLSA)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad TLSA Usage", l}, ""
--	} else {
--		rr.Usage = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad TLSA Selector", l}, ""
--	} else {
--		rr.Selector = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
--	} else {
--		rr.MatchingType = uint8(i)
--	}
--	s, e, c1 := endingToString(c, "bad TLSA Certificate", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Certificate = s
--	return rr, nil, c1
--}
--
--func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(RFC3597)
--	rr.Hdr = h
--	l := <-c
--	if l.token != "\\#" {
--		return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
--	}
--	<-c // _BLANK
--	l = <-c
--	rdlength, e := strconv.Atoi(l.token)
--	if e != nil {
--		return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, ""
--	}
--
--	s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f)
--	if e1 != nil {
--		return nil, e1, c1
--	}
--	if rdlength*2 != len(s) {
--		return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
--	}
--	rr.Rdata = s
--	return rr, nil, c1
--}
--
--func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(SPF)
--	rr.Hdr = h
--
--	s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f)
--	if e != nil {
--		return nil, e, ""
--	}
--	rr.Txt = s
--	return rr, nil, c1
--}
--
--func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(TXT)
--	rr.Hdr = h
--
--	// No _BLANK reading here, because the rdata is all TXT
--	s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f)
--	if e != nil {
--		return nil, e, ""
--	}
--	rr.Txt = s
--	return rr, nil, c1
--}
--
--// identical to setTXT
--func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NINFO)
--	rr.Hdr = h
--
--	s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f)
--	if e != nil {
--		return nil, e, ""
--	}
--	rr.ZSData = s
--	return rr, nil, c1
--}
--
--func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(URI)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad URI Priority", l}, ""
--	} else {
--		rr.Priority = uint16(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad URI Weight", l}, ""
--	} else {
--		rr.Weight = uint16(i)
--	}
--
--	<-c // _BLANK
--	s, e, c1 := endingToTxtSlice(c, "bad URI Target", f)
--	if e != nil {
--		return nil, e, ""
--	}
--	rr.Target = s
--	return rr, nil, c1
--}
--
--func setIPSECKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(IPSECKEY)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, l.comment
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad IPSECKEY Precedence", l}, ""
--	} else {
--		rr.Precedence = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad IPSECKEY GatewayType", l}, ""
--	} else {
--		rr.GatewayType = uint8(i)
--	}
--	<-c // _BLANK
--	l = <-c
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad IPSECKEY Algorithm", l}, ""
--	} else {
--		rr.Algorithm = uint8(i)
--	}
--	<-c
--	l = <-c
--	rr.Gateway = l.token
--	s, e, c1 := endingToString(c, "bad IPSECKEY PublicKey", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.PublicKey = s
--	return rr, nil, c1
--}
--
--func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	// awesome record to parse!
--	rr := new(DHCID)
--	rr.Hdr = h
--
--	s, e, c1 := endingToString(c, "bad DHCID Digest", f)
--	if e != nil {
--		return nil, e, c1
--	}
--	rr.Digest = s
--	return rr, nil, c1
--}
--
--func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(NID)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad NID Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	u, err := stringToNodeID(l)
--	if err != nil {
--		return nil, err, ""
--	}
--	rr.NodeID = u
--	return rr, nil, ""
--}
--
--func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(L32)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad L32 Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Locator32 = net.ParseIP(l.token)
--	if rr.Locator32 == nil {
--		return nil, &ParseError{f, "bad L32 Locator", l}, ""
--	}
--	return rr, nil, ""
--}
--
--func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(LP)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad LP Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Fqdn = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Fqdn = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad LP Fqdn", l}, ""
--	}
--	if rr.Fqdn[l.length-1] != '.' {
--		rr.Fqdn = appendOrigin(rr.Fqdn, o)
--	}
--	return rr, nil, ""
--}
--
--func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(L64)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad L64 Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	u, err := stringToNodeID(l)
--	if err != nil {
--		return nil, err, ""
--	}
--	rr.Locator64 = u
--	return rr, nil, ""
--}
--
--func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(UID)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad UID Uid", l}, ""
--	} else {
--		rr.Uid = uint32(i)
--	}
--	return rr, nil, ""
--}
--
--func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(GID)
--	rr.Hdr = h
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad GID Gid", l}, ""
--	} else {
--		rr.Gid = uint32(i)
--	}
--	return rr, nil, ""
--}
--
--func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(UINFO)
--	rr.Hdr = h
--	s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f)
--	if e != nil {
--		return nil, e, ""
--	}
--	rr.Uinfo = s[0] // silently discard any additional strings
--	return rr, nil, c1
--}
--
--func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
--	rr := new(PX)
--	rr.Hdr = h
--
--	l := <-c
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if i, e := strconv.Atoi(l.token); e != nil {
--		return nil, &ParseError{f, "bad PX Preference", l}, ""
--	} else {
--		rr.Preference = uint16(i)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Map822 = l.token
--	if l.length == 0 {
--		return rr, nil, ""
--	}
--	if l.token == "@" {
--		rr.Map822 = o
--		return rr, nil, ""
--	}
--	_, ok := IsDomainName(l.token)
--	if !ok {
--		return nil, &ParseError{f, "bad PX Map822", l}, ""
--	}
--	if rr.Map822[l.length-1] != '.' {
--		rr.Map822 = appendOrigin(rr.Map822, o)
--	}
--	<-c     // _BLANK
--	l = <-c // _STRING
--	rr.Mapx400 = l.token
--	if l.token == "@" {
--		rr.Mapx400 = o
--		return rr, nil, ""
--	}
--	_, ok = IsDomainName(l.token)
--	if !ok || l.length == 0 {
--		return nil, &ParseError{f, "bad PX Mapx400", l}, ""
--	}
--	if rr.Mapx400[l.length-1] != '.' {
--		rr.Mapx400 = appendOrigin(rr.Mapx400, o)
--	}
--	return rr, nil, ""
--}
--
--var typeToparserFunc = map[uint16]parserFunc{
--	TypeAAAA:       parserFunc{setAAAA, false},
--	TypeAFSDB:      parserFunc{setAFSDB, false},
--	TypeA:          parserFunc{setA, false},
--	TypeCDS:        parserFunc{setCDS, true},
--	TypeCDNSKEY:    parserFunc{setCDNSKEY, true},
--	TypeCERT:       parserFunc{setCERT, true},
--	TypeCNAME:      parserFunc{setCNAME, false},
--	TypeDHCID:      parserFunc{setDHCID, true},
--	TypeDLV:        parserFunc{setDLV, true},
--	TypeDNAME:      parserFunc{setDNAME, false},
--	TypeKEY:        parserFunc{setKEY, true},
--	TypeDNSKEY:     parserFunc{setDNSKEY, true},
--	TypeDS:         parserFunc{setDS, true},
--	TypeEID:        parserFunc{setEID, true},
--	TypeEUI48:      parserFunc{setEUI48, false},
--	TypeEUI64:      parserFunc{setEUI64, false},
--	TypeGID:        parserFunc{setGID, false},
--	TypeGPOS:       parserFunc{setGPOS, false},
--	TypeHINFO:      parserFunc{setHINFO, false},
--	TypeHIP:        parserFunc{setHIP, true},
--	TypeIPSECKEY:   parserFunc{setIPSECKEY, true},
--	TypeKX:         parserFunc{setKX, false},
--	TypeL32:        parserFunc{setL32, false},
--	TypeL64:        parserFunc{setL64, false},
--	TypeLOC:        parserFunc{setLOC, true},
--	TypeLP:         parserFunc{setLP, false},
--	TypeMB:         parserFunc{setMB, false},
--	TypeMD:         parserFunc{setMD, false},
--	TypeMF:         parserFunc{setMF, false},
--	TypeMG:         parserFunc{setMG, false},
--	TypeMINFO:      parserFunc{setMINFO, false},
--	TypeMR:         parserFunc{setMR, false},
--	TypeMX:         parserFunc{setMX, false},
--	TypeNAPTR:      parserFunc{setNAPTR, false},
--	TypeNID:        parserFunc{setNID, false},
--	TypeNIMLOC:     parserFunc{setNIMLOC, true},
--	TypeNINFO:      parserFunc{setNINFO, true},
--	TypeNSAP:       parserFunc{setNSAP, true},
--	TypeNSAPPTR:    parserFunc{setNSAPPTR, false},
--	TypeNSEC3PARAM: parserFunc{setNSEC3PARAM, false},
--	TypeNSEC3:      parserFunc{setNSEC3, true},
--	TypeNSEC:       parserFunc{setNSEC, true},
--	TypeNS:         parserFunc{setNS, false},
--	TypeOPENPGPKEY: parserFunc{setOPENPGPKEY, true},
--	TypePTR:        parserFunc{setPTR, false},
--	TypePX:         parserFunc{setPX, false},
--	TypeSIG:        parserFunc{setSIG, true},
--	TypeRKEY:       parserFunc{setRKEY, true},
--	TypeRP:         parserFunc{setRP, false},
--	TypeRRSIG:      parserFunc{setRRSIG, true},
--	TypeRT:         parserFunc{setRT, false},
--	TypeSOA:        parserFunc{setSOA, false},
--	TypeSPF:        parserFunc{setSPF, true},
--	TypeSRV:        parserFunc{setSRV, false},
--	TypeSSHFP:      parserFunc{setSSHFP, false},
--	TypeTALINK:     parserFunc{setTALINK, false},
--	TypeTA:         parserFunc{setTA, true},
--	TypeTLSA:       parserFunc{setTLSA, true},
--	TypeTXT:        parserFunc{setTXT, true},
--	TypeUID:        parserFunc{setUID, false},
--	TypeUINFO:      parserFunc{setUINFO, true},
--	TypeURI:        parserFunc{setURI, true},
--	TypeWKS:        parserFunc{setWKS, true},
--	TypeX25:        parserFunc{setX25, false},
--}
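(Not part of the patch, just orientation.) The table that closes the file above is a plain dispatch map: each RR type code points at a parse function plus a flag recording whether the record may carry free-form trailing rdata. A hedged, generic sketch of that pattern, with made-up names rather than the miekg/dns API, looks like this:

package main

import "fmt"

// entry pairs a parse function with a flag, mirroring the
// (function, allows-trailing-rdata) shape of the table above.
type entry struct {
	parse       func(token string) (string, error)
	variableLen bool
}

// registry maps a record-type code to its parser (the codes here are illustrative).
var registry = map[uint16]entry{
	1:  {parse: func(t string) (string, error) { return "A " + t, nil }},
	16: {parse: func(t string) (string, error) { return "TXT " + t, nil }, variableLen: true},
}

// dispatch looks up the parser for a type code and runs it,
// failing loudly when no parser is registered.
func dispatch(typ uint16, token string) (string, error) {
	e, ok := registry[typ]
	if !ok {
		return "", fmt.Errorf("no parser registered for type %d", typ)
	}
	return e.parse(token)
}

func main() {
	out, err := dispatch(1, "192.0.2.1")
	fmt.Println(out, err)
}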
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go
-deleted file mode 100644
-index c0654f5..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package aws
--
--import (
--	"time"
--)
--
--// AttemptStrategy represents a strategy for waiting for an action
--// to complete successfully. This is an internal type used by the
--// implementation of other goamz packages.
--type AttemptStrategy struct {
--	Total time.Duration // total duration of attempt.
--	Delay time.Duration // interval between each try in the burst.
--	Min   int           // minimum number of retries; overrides Total
--}
--
--type Attempt struct {
--	strategy AttemptStrategy
--	last     time.Time
--	end      time.Time
--	force    bool
--	count    int
--}
--
--// Start begins a new sequence of attempts for the given strategy.
--func (s AttemptStrategy) Start() *Attempt {
--	now := time.Now()
--	return &Attempt{
--		strategy: s,
--		last:     now,
--		end:      now.Add(s.Total),
--		force:    true,
--	}
--}
--
--// Next waits until it is time to perform the next attempt or returns
--// false if it is time to stop trying.
--func (a *Attempt) Next() bool {
--	now := time.Now()
--	sleep := a.nextSleep(now)
--	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
--		return false
--	}
--	a.force = false
--	if sleep > 0 && a.count > 0 {
--		time.Sleep(sleep)
--		now = time.Now()
--	}
--	a.count++
--	a.last = now
--	return true
--}
--
--func (a *Attempt) nextSleep(now time.Time) time.Duration {
--	sleep := a.strategy.Delay - now.Sub(a.last)
--	if sleep < 0 {
--		return 0
--	}
--	return sleep
--}
--
--// HasNext returns whether another attempt will be made if the current
--// one fails. If it returns true, the following call to Next is
--// guaranteed to return true.
--func (a *Attempt) HasNext() bool {
--	if a.force || a.strategy.Min > a.count {
--		return true
--	}
--	now := time.Now()
--	if now.Add(a.nextSleep(now)).Before(a.end) {
--		a.force = true
--		return true
--	}
--	return false
--}
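(Not part of the patch, just orientation.) A minimal usage sketch of the AttemptStrategy type deleted above, assuming the vendored github.com/mitchellh/goamz/aws package were still importable; the loop shape is the one the other goamz packages drive internally:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/mitchellh/goamz/aws" // the vendored package removed above
)

// flaky is a stand-in operation that fails twice before succeeding.
var calls int

func flaky() error {
	calls++
	if calls < 3 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	// Retry for up to one second, pausing 100ms between tries,
	// exactly as documented on AttemptStrategy above.
	strategy := aws.AttemptStrategy{Total: time.Second, Delay: 100 * time.Millisecond}

	var err error
	for attempt := strategy.Start(); attempt.Next(); {
		if err = flaky(); err == nil {
			break
		}
	}
	fmt.Println("result:", err, "after", calls, "calls")
}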
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt_test.go
-deleted file mode 100644
-index 1fda5bf..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/attempt_test.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--package aws_test
--
--import (
--	"github.com/mitchellh/goamz/aws"
--	. "github.com/motain/gocheck"
--	"time"
--)
--
--func (S) TestAttemptTiming(c *C) {
--	testAttempt := aws.AttemptStrategy{
--		Total: 0.25e9,
--		Delay: 0.1e9,
--	}
--	want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
--	got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
--	t0 := time.Now()
--	for a := testAttempt.Start(); a.Next(); {
--		got = append(got, time.Now().Sub(t0))
--	}
--	got = append(got, time.Now().Sub(t0))
--	c.Assert(got, HasLen, len(want))
--	const margin = 0.01e9
--	for i, got := range want {
--		lo := want[i] - margin
--		hi := want[i] + margin
--		if got < lo || got > hi {
--			c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
--		}
--	}
--}
--
--func (S) TestAttemptNextHasNext(c *C) {
--	a := aws.AttemptStrategy{}.Start()
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.Next(), Equals, false)
--
--	a = aws.AttemptStrategy{}.Start()
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.HasNext(), Equals, false)
--	c.Assert(a.Next(), Equals, false)
--
--	a = aws.AttemptStrategy{Total: 2e8}.Start()
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.HasNext(), Equals, true)
--	time.Sleep(2e8)
--	c.Assert(a.HasNext(), Equals, true)
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.Next(), Equals, false)
--
--	a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
--	time.Sleep(1e8)
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.HasNext(), Equals, true)
--	c.Assert(a.Next(), Equals, true)
--	c.Assert(a.HasNext(), Equals, false)
--	c.Assert(a.Next(), Equals, false)
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go
-deleted file mode 100644
-index c304d55..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws.go
-+++ /dev/null
-@@ -1,423 +0,0 @@
--//
--// goamz - Go packages to interact with the Amazon Web Services.
--//
--//   https://wiki.ubuntu.com/goamz
--//
--// Copyright (c) 2011 Canonical Ltd.
--//
--// Written by Gustavo Niemeyer <gustavo.niemeyer at canonical.com>
--//
--package aws
--
--import (
--	"encoding/json"
--	"errors"
--	"fmt"
--	"github.com/vaughan0/go-ini"
--	"io/ioutil"
--	"os"
--)
--
--// Region defines the URLs where AWS services may be accessed.
--//
--// See http://goo.gl/d8BP1 for more details.
--type Region struct {
--	Name                 string // the canonical name of this region.
--	EC2Endpoint          string
--	S3Endpoint           string
--	S3BucketEndpoint     string // Not needed by AWS S3. Use ${bucket} for bucket name.
--	S3LocationConstraint bool   // true if this region requires a LocationConstraint declaration.
--	S3LowercaseBucket    bool   // true if the region requires bucket names to be lower case.
--	SDBEndpoint          string
--	SNSEndpoint          string
--	SQSEndpoint          string
--	IAMEndpoint          string
--	ELBEndpoint          string
--	AutoScalingEndpoint  string
--	RdsEndpoint          string
--	Route53Endpoint      string
--}
--
--var USGovWest = Region{
--	"us-gov-west-1",
--	"https://ec2.us-gov-west-1.amazonaws.com",
--	"https://s3-fips-us-gov-west-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"",
--	"https://sns.us-gov-west-1.amazonaws.com",
--	"https://sqs.us-gov-west-1.amazonaws.com",
--	"https://iam.us-gov.amazonaws.com",
--	"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
--	"https://autoscaling.us-gov-west-1.amazonaws.com",
--	"https://rds.us-gov-west-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var USEast = Region{
--	"us-east-1",
--	"https://ec2.us-east-1.amazonaws.com",
--	"https://s3.amazonaws.com",
--	"",
--	false,
--	false,
--	"https://sdb.amazonaws.com",
--	"https://sns.us-east-1.amazonaws.com",
--	"https://sqs.us-east-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.us-east-1.amazonaws.com",
--	"https://autoscaling.us-east-1.amazonaws.com",
--	"https://rds.us-east-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var USWest = Region{
--	"us-west-1",
--	"https://ec2.us-west-1.amazonaws.com",
--	"https://s3-us-west-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.us-west-1.amazonaws.com",
--	"https://sns.us-west-1.amazonaws.com",
--	"https://sqs.us-west-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.us-west-1.amazonaws.com",
--	"https://autoscaling.us-west-1.amazonaws.com",
--	"https://rds.us-west-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var USWest2 = Region{
--	"us-west-2",
--	"https://ec2.us-west-2.amazonaws.com",
--	"https://s3-us-west-2.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.us-west-2.amazonaws.com",
--	"https://sns.us-west-2.amazonaws.com",
--	"https://sqs.us-west-2.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.us-west-2.amazonaws.com",
--	"https://autoscaling.us-west-2.amazonaws.com",
--	"https://rds.us-west-2.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var EUWest = Region{
--	"eu-west-1",
--	"https://ec2.eu-west-1.amazonaws.com",
--	"https://s3-eu-west-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.eu-west-1.amazonaws.com",
--	"https://sns.eu-west-1.amazonaws.com",
--	"https://sqs.eu-west-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.eu-west-1.amazonaws.com",
--	"https://autoscaling.eu-west-1.amazonaws.com",
--	"https://rds.eu-west-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var APSoutheast = Region{
--	"ap-southeast-1",
--	"https://ec2.ap-southeast-1.amazonaws.com",
--	"https://s3-ap-southeast-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.ap-southeast-1.amazonaws.com",
--	"https://sns.ap-southeast-1.amazonaws.com",
--	"https://sqs.ap-southeast-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
--	"https://autoscaling.ap-southeast-1.amazonaws.com",
--	"https://rds.ap-southeast-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var APSoutheast2 = Region{
--	"ap-southeast-2",
--	"https://ec2.ap-southeast-2.amazonaws.com",
--	"https://s3-ap-southeast-2.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.ap-southeast-2.amazonaws.com",
--	"https://sns.ap-southeast-2.amazonaws.com",
--	"https://sqs.ap-southeast-2.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
--	"https://autoscaling.ap-southeast-2.amazonaws.com",
--	"https://rds.ap-southeast-2.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var APNortheast = Region{
--	"ap-northeast-1",
--	"https://ec2.ap-northeast-1.amazonaws.com",
--	"https://s3-ap-northeast-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.ap-northeast-1.amazonaws.com",
--	"https://sns.ap-northeast-1.amazonaws.com",
--	"https://sqs.ap-northeast-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
--	"https://autoscaling.ap-northeast-1.amazonaws.com",
--	"https://rds.ap-northeast-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var SAEast = Region{
--	"sa-east-1",
--	"https://ec2.sa-east-1.amazonaws.com",
--	"https://s3-sa-east-1.amazonaws.com",
--	"",
--	true,
--	true,
--	"https://sdb.sa-east-1.amazonaws.com",
--	"https://sns.sa-east-1.amazonaws.com",
--	"https://sqs.sa-east-1.amazonaws.com",
--	"https://iam.amazonaws.com",
--	"https://elasticloadbalancing.sa-east-1.amazonaws.com",
--	"https://autoscaling.sa-east-1.amazonaws.com",
--	"https://rds.sa-east-1.amazonaws.com",
--	"https://route53.amazonaws.com",
--}
--
--var CNNorth = Region{
--	"cn-north-1",
--	"https://ec2.cn-north-1.amazonaws.com.cn",
--	"https://s3.cn-north-1.amazonaws.com.cn",
--	"",
--	true,
--	true,
--	"",
--	"https://sns.cn-north-1.amazonaws.com.cn",
--	"https://sqs.cn-north-1.amazonaws.com.cn",
--	"https://iam.cn-north-1.amazonaws.com.cn",
--	"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
--	"https://autoscaling.cn-north-1.amazonaws.com.cn",
--	"https://rds.cn-north-1.amazonaws.com.cn",
--	"https://route53.amazonaws.com",
--}
--
--var Regions = map[string]Region{
--	APNortheast.Name:  APNortheast,
--	APSoutheast.Name:  APSoutheast,
--	APSoutheast2.Name: APSoutheast2,
--	EUWest.Name:       EUWest,
--	USEast.Name:       USEast,
--	USWest.Name:       USWest,
--	USWest2.Name:      USWest2,
--	SAEast.Name:       SAEast,
--	USGovWest.Name:    USGovWest,
--	CNNorth.Name:      CNNorth,
--}
--
--type Auth struct {
--	AccessKey, SecretKey, Token string
--}
--
--var unreserved = make([]bool, 128)
--var hex = "0123456789ABCDEF"
--
--func init() {
--	// RFC3986
--	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
--	for _, c := range u {
--		unreserved[c] = true
--	}
--}
--
--type credentials struct {
--	Code            string
--	LastUpdated     string
--	Type            string
--	AccessKeyId     string
--	SecretAccessKey string
--	Token           string
--	Expiration      string
--}
--
--// GetMetaData retrieves instance metadata about the current machine.
--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
--func GetMetaData(path string) (contents []byte, err error) {
--	url := "http://169.254.169.254/latest/meta-data/" + path
--
--	resp, err := RetryingClient.Get(url)
--	if err != nil {
--		return
--	}
--	defer resp.Body.Close()
--
--	if resp.StatusCode != 200 {
--		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
--		return
--	}
--
--	body, err := ioutil.ReadAll(resp.Body)
--	if err != nil {
--		return
--	}
--	return []byte(body), err
--}
--
--func getInstanceCredentials() (cred credentials, err error) {
--	credentialPath := "iam/security-credentials/"
--
--	// Get the instance role
--	role, err := GetMetaData(credentialPath)
--	if err != nil {
--		return
--	}
--
--	// Get the instance role credentials
--	credentialJSON, err := GetMetaData(credentialPath + string(role))
--	if err != nil {
--		return
--	}
--
--	err = json.Unmarshal([]byte(credentialJSON), &cred)
--	return
--}
--
--// GetAuth creates an Auth based on either passed in credentials,
--// environment information or instance based role credentials.
--func GetAuth(accessKey string, secretKey string) (auth Auth, err error) {
--	// First try passed in credentials
--	if accessKey != "" && secretKey != "" {
--		return Auth{accessKey, secretKey, ""}, nil
--	}
--
--	// Next try the shared credentials file
--	auth, err = SharedAuth()
--	if err == nil {
--		// Found auth, return
--		return
--	}
--
--	// Next try to get auth from the environment
--	auth, err = EnvAuth()
--	if err == nil {
--		// Found auth, return
--		return
--	}
--
--	// Next try getting auth from the instance role
--	cred, err := getInstanceCredentials()
--	if err == nil {
--		// Found auth, return
--		auth.AccessKey = cred.AccessKeyId
--		auth.SecretKey = cred.SecretAccessKey
--		auth.Token = cred.Token
--		return
--	}
--	err = errors.New("No valid AWS authentication found")
--	return
--}
--
--// SharedAuth creates an Auth based on shared credentials stored in
--// $HOME/.aws/credentials. The AWS_PROFILE environment variable is used to
--// select the profile.
--func SharedAuth() (auth Auth, err error) {
--	var profileName = os.Getenv("AWS_PROFILE")
--
--	if profileName == "" {
--		profileName = "default"
--	}
--
--	var homeDir = os.Getenv("HOME")
--	if homeDir == "" {
--		err = errors.New("Could not get HOME")
--		return
--	}
--
--	var credentialsFile = homeDir + "/.aws/credentials"
--	file, err := ini.LoadFile(credentialsFile)
--	if err != nil {
--		err = errors.New("Couldn't parse AWS credentials file")
--		return
--	}
--
--	var profile = file[profileName]
--	if profile == nil {
--		err = errors.New("Couldn't find profile in AWS credentials file")
--		return
--	}
--
--	auth.AccessKey = profile["aws_access_key_id"]
--	auth.SecretKey = profile["aws_secret_access_key"]
--
--	if auth.AccessKey == "" {
--		err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file")
--	}
--	if auth.SecretKey == "" {
--		err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
--	}
--	return
--}
--
--// EnvAuth creates an Auth based on environment information.
--// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
--// variables are used. For accounts that require a security token,
--// it is read from AWS_SECURITY_TOKEN.
--func EnvAuth() (auth Auth, err error) {
--	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
--	if auth.AccessKey == "" {
--		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
--	}
--
--	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
--	if auth.SecretKey == "" {
--		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
--	}
--
--	auth.Token = os.Getenv("AWS_SECURITY_TOKEN")
--
--	if auth.AccessKey == "" {
--		err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
--	}
--	if auth.SecretKey == "" {
--		err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
--	}
--	return
--}
--
--// Encode takes a string and URI-encodes it in a way suitable
--// to be used in AWS signatures.
--func Encode(s string) string {
--	encode := false
--	for i := 0; i != len(s); i++ {
--		c := s[i]
--		if c > 127 || !unreserved[c] {
--			encode = true
--			break
--		}
--	}
--	if !encode {
--		return s
--	}
--	e := make([]byte, len(s)*3)
--	ei := 0
--	for i := 0; i != len(s); i++ {
--		c := s[i]
--		if c > 127 || !unreserved[c] {
--			e[ei] = '%'
--			e[ei+1] = hex[c>>4]
--			e[ei+2] = hex[c&0xF]
--			ei += 3
--		} else {
--			e[ei] = c
--			ei += 1
--		}
--	}
--	return string(e[:ei])
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws_test.go
-deleted file mode 100644
-index 78cbbaf..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/aws_test.go
-+++ /dev/null
-@@ -1,203 +0,0 @@
--package aws_test
--
--import (
--	"github.com/mitchellh/goamz/aws"
--	. "github.com/motain/gocheck"
--	"io/ioutil"
--	"os"
--	"strings"
--	"testing"
--)
--
--func Test(t *testing.T) {
--	TestingT(t)
--}
--
--var _ = Suite(&S{})
--
--type S struct {
--	environ []string
--}
--
--func (s *S) SetUpSuite(c *C) {
--	s.environ = os.Environ()
--}
--
--func (s *S) TearDownTest(c *C) {
--	os.Clearenv()
--	for _, kv := range s.environ {
--		l := strings.SplitN(kv, "=", 2)
--		os.Setenv(l[0], l[1])
--	}
--}
--
--func (s *S) TestSharedAuthNoHome(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_PROFILE", "foo")
--	_, err := aws.SharedAuth()
--	c.Assert(err, ErrorMatches, "Could not get HOME")
--}
--
--func (s *S) TestSharedAuthNoCredentialsFile(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_PROFILE", "foo")
--	os.Setenv("HOME", "/tmp")
--	_, err := aws.SharedAuth()
--	c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file")
--}
--
--func (s *S) TestSharedAuthNoProfileInFile(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_PROFILE", "foo")
--
--	d, err := ioutil.TempDir("", "")
--	if err != nil {
--		panic(err)
--	}
--	defer os.RemoveAll(d)
--
--	err = os.Mkdir(d+"/.aws", 0755)
--	if err != nil {
--		panic(err)
--	}
--
--	ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644)
--	os.Setenv("HOME", d)
--
--	_, err = aws.SharedAuth()
--	c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file")
--}
--
--func (s *S) TestSharedAuthNoKeysInProfile(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_PROFILE", "bar")
--
--	d, err := ioutil.TempDir("", "")
--	if err != nil {
--		panic(err)
--	}
--	defer os.RemoveAll(d)
--
--	err = os.Mkdir(d+"/.aws", 0755)
--	if err != nil {
--		panic(err)
--	}
--
--	ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644)
--	os.Setenv("HOME", d)
--
--	_, err = aws.SharedAuth()
--	c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file")
--}
--
--func (s *S) TestSharedAuthDefaultCredentials(c *C) {
--	os.Clearenv()
--
--	d, err := ioutil.TempDir("", "")
--	if err != nil {
--		panic(err)
--	}
--	defer os.RemoveAll(d)
--
--	err = os.Mkdir(d+"/.aws", 0755)
--	if err != nil {
--		panic(err)
--	}
--
--	ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
--	os.Setenv("HOME", d)
--
--	auth, err := aws.SharedAuth()
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestSharedAuth(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_PROFILE", "bar")
--
--	d, err := ioutil.TempDir("", "")
--	if err != nil {
--		panic(err)
--	}
--	defer os.RemoveAll(d)
--
--	err = os.Mkdir(d+"/.aws", 0755)
--	if err != nil {
--		panic(err)
--	}
--
--	ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
--	os.Setenv("HOME", d)
--
--	auth, err := aws.SharedAuth()
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestEnvAuthNoSecret(c *C) {
--	os.Clearenv()
--	_, err := aws.EnvAuth()
--	c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
--}
--
--func (s *S) TestEnvAuthNoAccess(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
--	_, err := aws.EnvAuth()
--	c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
--}
--
--func (s *S) TestEnvAuth(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
--	os.Setenv("AWS_ACCESS_KEY_ID", "access")
--	auth, err := aws.EnvAuth()
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestEnvAuthWithToken(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
--	os.Setenv("AWS_ACCESS_KEY_ID", "access")
--	os.Setenv("AWS_SECURITY_TOKEN", "token")
--	auth, err := aws.EnvAuth()
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access", Token: "token"})
--}
--
--func (s *S) TestEnvAuthAlt(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_SECRET_KEY", "secret")
--	os.Setenv("AWS_ACCESS_KEY", "access")
--	auth, err := aws.EnvAuth()
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestGetAuthStatic(c *C) {
--	auth, err := aws.GetAuth("access", "secret")
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestGetAuthEnv(c *C) {
--	os.Clearenv()
--	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
--	os.Setenv("AWS_ACCESS_KEY_ID", "access")
--	auth, err := aws.GetAuth("", "")
--	c.Assert(err, IsNil)
--	c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
--}
--
--func (s *S) TestEncode(c *C) {
--	c.Assert(aws.Encode("foo"), Equals, "foo")
--	c.Assert(aws.Encode("/"), Equals, "%2F")
--}
--
--func (s *S) TestRegionsAreNamed(c *C) {
--	for n, r := range aws.Regions {
--		c.Assert(n, Equals, r.Name)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go
-deleted file mode 100644
-index ee53238..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client.go
-+++ /dev/null
-@@ -1,125 +0,0 @@
--package aws
--
--import (
--	"math"
--	"net"
--	"net/http"
--	"time"
--)
--
--type RetryableFunc func(*http.Request, *http.Response, error) bool
--type WaitFunc func(try int)
--type DeadlineFunc func() time.Time
--
--type ResilientTransport struct {
--	// Timeout is the maximum amount of time a dial will wait for
--	// a connect to complete.
--	//
--	// The default is no timeout.
--	//
--	// With or without a timeout, the operating system may impose
--	// its own earlier timeout. For instance, TCP timeouts are
--	// often around 3 minutes.
--	DialTimeout time.Duration
--
--	// MaxTries, if non-zero, specifies the number of times we will retry on
--	// failure. Retries are only attempted for temporary network errors or known
--	// safe failures.
--	MaxTries    int
--	Deadline    DeadlineFunc
--	ShouldRetry RetryableFunc
--	Wait        WaitFunc
--	transport   *http.Transport
--}
--
--// Convenience method for creating an http client
--func NewClient(rt *ResilientTransport) *http.Client {
--	rt.transport = &http.Transport{
--		Dial: func(netw, addr string) (net.Conn, error) {
--			c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
--			if err != nil {
--				return nil, err
--			}
--			c.SetDeadline(rt.Deadline())
--			return c, nil
--		},
--		DisableKeepAlives: true,
--		Proxy:             http.ProxyFromEnvironment,
--	}
--	// TODO: Would be nice if ResilientTransport allowed clients to initialize
--	// with http.Transport attributes.
--	return &http.Client{
--		Transport: rt,
--	}
--}
--
--var retryingTransport = &ResilientTransport{
--	Deadline: func() time.Time {
--		return time.Now().Add(5 * time.Second)
--	},
--	DialTimeout: 10 * time.Second,
--	MaxTries:    3,
--	ShouldRetry: awsRetry,
--	Wait:        ExpBackoff,
--}
--
--// Exported default client
--var RetryingClient = NewClient(retryingTransport)
--
--func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
--	return t.tries(req)
--}
--
--// Retry a request a maximum of t.MaxTries times.
--// We'll only retry if the proper criteria are met.
--// If a wait function is specified, wait that amount of time
--// In between requests.
--func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
--	for try := 0; try < t.MaxTries; try += 1 {
--		res, err = t.transport.RoundTrip(req)
--
--		if !t.ShouldRetry(req, res, err) {
--			break
--		}
--		if res != nil {
--			res.Body.Close()
--		}
--		if t.Wait != nil {
--			t.Wait(try)
--		}
--	}
--
--	return
--}
--
--func ExpBackoff(try int) {
--	time.Sleep(100 * time.Millisecond *
--		time.Duration(math.Exp2(float64(try))))
--}
--
--func LinearBackoff(try int) {
--	time.Sleep(time.Duration(try*100) * time.Millisecond)
--}
--
--// Decide if we should retry a request.
--// In general, the criteria for retrying a request is described here
--// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
--func awsRetry(req *http.Request, res *http.Response, err error) bool {
--	retry := false
--
--	// Retry if there's a temporary network error.
--	if neterr, ok := err.(net.Error); ok {
--		if neterr.Temporary() {
--			retry = true
--		}
--	}
--
--	// Retry if we get a 5xx series error.
--	if res != nil {
--		if res.StatusCode >= 500 && res.StatusCode < 600 {
--			retry = true
--		}
--	}
--
--	return retry
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client_test.go
-deleted file mode 100644
-index 2f6b39c..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/aws/client_test.go
-+++ /dev/null
-@@ -1,121 +0,0 @@
--package aws_test
--
--import (
--	"fmt"
--	"github.com/mitchellh/goamz/aws"
--	"io/ioutil"
--	"net/http"
--	"net/http/httptest"
--	"strings"
--	"testing"
--	"time"
--)
--
--// Retrieve the response from handler using aws.RetryingClient
--func serveAndGet(handler http.HandlerFunc) (body string, err error) {
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--	resp, err := aws.RetryingClient.Get(ts.URL)
--	if err != nil {
--		return
--	}
--	if resp.StatusCode != 200 {
--		return "", fmt.Errorf("Bad status code: %d", resp.StatusCode)
--	}
--	greeting, err := ioutil.ReadAll(resp.Body)
--	resp.Body.Close()
--	if err != nil {
--		return
--	}
--	return strings.TrimSpace(string(greeting)), nil
--}
--
--func TestClient_expected(t *testing.T) {
--	body := "foo bar"
--
--	resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintln(w, body)
--	})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if resp != body {
--		t.Fatal("Body not as expected.")
--	}
--}
--
--func TestClient_delay(t *testing.T) {
--	body := "baz"
--	wait := 4
--	resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
--		if wait < 0 {
--			// If we dipped to zero delay and still failed.
--			t.Fatal("Never succeeded.")
--		}
--		wait -= 1
--		time.Sleep(time.Second * time.Duration(wait))
--		fmt.Fprintln(w, body)
--	})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if resp != body {
--		t.Fatal("Body not as expected.", resp)
--	}
--}
--
--func TestClient_no4xxRetry(t *testing.T) {
--	tries := 0
--
--	// Return a 404 every time; the client should not retry 4xx errors.
--	_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
--		tries += 1
--		http.Error(w, "error", 404)
--	})
--
--	if err == nil {
--		t.Fatal("should have error")
--	}
--
--	if tries != 1 {
--		t.Fatalf("should only try once: %d", tries)
--	}
--}
--
--func TestClient_retries(t *testing.T) {
--	body := "biz"
--	failed := false
--	// Fail once before succeeding.
--	resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
--		if !failed {
--			http.Error(w, "error", 500)
--			failed = true
--		} else {
--			fmt.Fprintln(w, body)
--		}
--	})
--	if failed != true {
--		t.Error("We didn't retry!")
--	}
--	if err != nil {
--		t.Fatal(err)
--	}
--	if resp != body {
--		t.Fatal("Body not as expected.")
--	}
--}
--
--func TestClient_fails(t *testing.T) {
--	tries := 0
--	// Fail 3 times and return the last error.
--	_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
--		tries += 1
--		http.Error(w, "error", 500)
--	})
--	if err == nil {
--		t.Fatal(err)
--	}
--	if tries != 3 {
--		t.Fatal("Didn't retry enough")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2.go
-deleted file mode 100644
-index 8f94ad5..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2.go
-+++ /dev/null
-@@ -1,2599 +0,0 @@
--//
--// goamz - Go packages to interact with the Amazon Web Services.
--//
--//   https://wiki.ubuntu.com/goamz
--//
--// Copyright (c) 2011 Canonical Ltd.
--//
--// Written by Gustavo Niemeyer <gustavo.niemeyer at canonical.com>
--//
--
--package ec2
--
--import (
--	"crypto/rand"
--	"encoding/base64"
--	"encoding/hex"
--	"encoding/xml"
--	"fmt"
--	"log"
--	"net/http"
--	"net/http/httputil"
--	"net/url"
--	"sort"
--	"strconv"
--	"strings"
--	"time"
--
--	"github.com/mitchellh/goamz/aws"
--)
--
--const debug = false
--
--// The EC2 type encapsulates operations with a specific EC2 region.
--type EC2 struct {
--	aws.Auth
--	aws.Region
--	httpClient *http.Client
--	private    byte // Reserve the right of using private data.
--}
--
--// New creates a new EC2.
--func NewWithClient(auth aws.Auth, region aws.Region, client *http.Client) *EC2 {
--	return &EC2{auth, region, client, 0}
--}
--
--func New(auth aws.Auth, region aws.Region) *EC2 {
--	return NewWithClient(auth, region, aws.RetryingClient)
--}
--
--// ----------------------------------------------------------------------------
--// Filtering helper.
--
--// Filter builds filtering parameters to be used in an EC2 query which supports
--// filtering.  For example:
--//
--//     filter := NewFilter()
--//     filter.Add("architecture", "i386")
--//     filter.Add("launch-index", "0")
--//     resp, err := ec2.Instances(nil, filter)
--//
--type Filter struct {
--	m map[string][]string
--}
--
--// NewFilter creates a new Filter.
--func NewFilter() *Filter {
--	return &Filter{make(map[string][]string)}
--}
--
--// Add appends a filtering parameter with the given name and value(s).
--func (f *Filter) Add(name string, value ...string) {
--	f.m[name] = append(f.m[name], value...)
--}
--
--func (f *Filter) addParams(params map[string]string) {
--	if f != nil {
--		a := make([]string, len(f.m))
--		i := 0
--		for k := range f.m {
--			a[i] = k
--			i++
--		}
--		sort.StringSlice(a).Sort()
--		for i, k := range a {
--			prefix := "Filter." + strconv.Itoa(i+1)
--			params[prefix+".Name"] = k
--			for j, v := range f.m[k] {
--				params[prefix+".Value."+strconv.Itoa(j+1)] = v
--			}
--		}
--	}
--}
--
--// ----------------------------------------------------------------------------
--// Request dispatching logic.
--
--// Error encapsulates an error returned by EC2.
--//
--// See http://goo.gl/VZGuC for more details.
--type Error struct {
--	// HTTP status code (200, 403, ...)
--	StatusCode int
--	// EC2 error code ("UnsupportedOperation", ...)
--	Code string
--	// The human-oriented error message
--	Message   string
--	RequestId string `xml:"RequestID"`
--}
--
--func (err *Error) Error() string {
--	if err.Code == "" {
--		return err.Message
--	}
--
--	return fmt.Sprintf("%s (%s)", err.Message, err.Code)
--}
--
--// For now a single error instance is being exposed. In the future it may be useful
--// to provide access to all of them, but rather than doing it as an array/slice,
--// use a *next pointer, so that it's backward compatible and it continues to be
--// easy to handle the first error, which is what most people will want.
--type xmlErrors struct {
--	RequestId string  `xml:"RequestID"`
--	Errors    []Error `xml:"Errors>Error"`
--}
--
--var timeNow = time.Now
--
--func (ec2 *EC2) query(params map[string]string, resp interface{}) error {
--	params["Version"] = "2014-05-01"
--	params["Timestamp"] = timeNow().In(time.UTC).Format(time.RFC3339)
--	endpoint, err := url.Parse(ec2.Region.EC2Endpoint)
--	if err != nil {
--		return err
--	}
--	if endpoint.Path == "" {
--		endpoint.Path = "/"
--	}
--	sign(ec2.Auth, "GET", endpoint.Path, params, endpoint.Host)
--	endpoint.RawQuery = multimap(params).Encode()
--	if debug {
--		log.Printf("get { %v } -> {\n", endpoint.String())
--	}
--
--	r, err := ec2.httpClient.Get(endpoint.String())
--	if err != nil {
--		return err
--	}
--	defer r.Body.Close()
--
--	if debug {
--		dump, _ := httputil.DumpResponse(r, true)
--		log.Printf("response:\n")
--		log.Printf("%v\n}\n", string(dump))
--	}
--	if r.StatusCode != 200 {
--		return buildError(r)
--	}
--	err = xml.NewDecoder(r.Body).Decode(resp)
--	return err
--}
--
--func multimap(p map[string]string) url.Values {
--	q := make(url.Values, len(p))
--	for k, v := range p {
--		q[k] = []string{v}
--	}
--	return q
--}
--
--func buildError(r *http.Response) error {
--	errors := xmlErrors{}
--	xml.NewDecoder(r.Body).Decode(&errors)
--	var err Error
--	if len(errors.Errors) > 0 {
--		err = errors.Errors[0]
--	}
--	err.RequestId = errors.RequestId
--	err.StatusCode = r.StatusCode
--	if err.Message == "" {
--		err.Message = r.Status
--	}
--	return &err
--}
--
--func makeParams(action string) map[string]string {
--	params := make(map[string]string)
--	params["Action"] = action
--	return params
--}
--
--func addParamsList(params map[string]string, label string, ids []string) {
--	for i, id := range ids {
--		params[label+"."+strconv.Itoa(i+1)] = id
--	}
--}
--
--func addBlockDeviceParams(prename string, params map[string]string, blockdevices []BlockDeviceMapping) {
--	for i, k := range blockdevices {
--		// Fixup index since Amazon counts these from 1
--		prefix := prename + "BlockDeviceMapping." + strconv.Itoa(i+1) + "."
--
--		if k.DeviceName != "" {
--			params[prefix+"DeviceName"] = k.DeviceName
--		}
--		if k.VirtualName != "" {
--			params[prefix+"VirtualName"] = k.VirtualName
--		}
--		if k.SnapshotId != "" {
--			params[prefix+"Ebs.SnapshotId"] = k.SnapshotId
--		}
--		if k.VolumeType != "" {
--			params[prefix+"Ebs.VolumeType"] = k.VolumeType
--		}
--		if k.IOPS != 0 {
--			params[prefix+"Ebs.Iops"] = strconv.FormatInt(k.IOPS, 10)
--		}
--		if k.VolumeSize != 0 {
--			params[prefix+"Ebs.VolumeSize"] = strconv.FormatInt(k.VolumeSize, 10)
--		}
--		if k.DeleteOnTermination {
--			params[prefix+"Ebs.DeleteOnTermination"] = "true"
--		}
--		if k.Encrypted {
--			params[prefix+"Ebs.Encrypted"] = "true"
--		}
--		if k.NoDevice {
--			params[prefix+"NoDevice"] = ""
--		}
--	}
--}
--
--// ----------------------------------------------------------------------------
--// Instance management functions and types.
--
--// The RunInstances type encapsulates options for the respective request in EC2.
--//
--// See http://goo.gl/Mcm3b for more details.
--type RunInstances struct {
--	ImageId                  string
--	MinCount                 int
--	MaxCount                 int
--	KeyName                  string
--	InstanceType             string
--	SecurityGroups           []SecurityGroup
--	IamInstanceProfile       string
--	KernelId                 string
--	RamdiskId                string
--	UserData                 []byte
--	AvailZone                string
--	PlacementGroupName       string
--	Monitoring               bool
--	SubnetId                 string
--	AssociatePublicIpAddress bool
--	DisableAPITermination    bool
--	ShutdownBehavior         string
--	PrivateIPAddress         string
--	BlockDevices             []BlockDeviceMapping
--}
--
--// Response to a RunInstances request.
--//
--// See http://goo.gl/Mcm3b for more details.
--type RunInstancesResp struct {
--	RequestId      string          `xml:"requestId"`
--	ReservationId  string          `xml:"reservationId"`
--	OwnerId        string          `xml:"ownerId"`
--	SecurityGroups []SecurityGroup `xml:"groupSet>item"`
--	Instances      []Instance      `xml:"instancesSet>item"`
--}
--
--// Instance encapsulates a running instance in EC2.
--//
--// See http://goo.gl/OCH8a for more details.
--type Instance struct {
--	InstanceId         string          `xml:"instanceId"`
--	InstanceType       string          `xml:"instanceType"`
--	ImageId            string          `xml:"imageId"`
--	PrivateDNSName     string          `xml:"privateDnsName"`
--	DNSName            string          `xml:"dnsName"`
--	KeyName            string          `xml:"keyName"`
--	AMILaunchIndex     int             `xml:"amiLaunchIndex"`
--	Hypervisor         string          `xml:"hypervisor"`
--	VirtType           string          `xml:"virtualizationType"`
--	Monitoring         string          `xml:"monitoring>state"`
--	AvailZone          string          `xml:"placement>availabilityZone"`
--	PlacementGroupName string          `xml:"placement>groupName"`
--	State              InstanceState   `xml:"instanceState"`
--	Tags               []Tag           `xml:"tagSet>item"`
--	VpcId              string          `xml:"vpcId"`
--	SubnetId           string          `xml:"subnetId"`
--	IamInstanceProfile string          `xml:"iamInstanceProfile"`
--	PrivateIpAddress   string          `xml:"privateIpAddress"`
--	PublicIpAddress    string          `xml:"ipAddress"`
--	Architecture       string          `xml:"architecture"`
--	LaunchTime         time.Time       `xml:"launchTime"`
--	SourceDestCheck    bool            `xml:"sourceDestCheck"`
--	SecurityGroups     []SecurityGroup `xml:"groupSet>item"`
--}
--
--// RunInstances starts new instances in EC2.
--// If options.MinCount and options.MaxCount are both zero, a single instance
--// will be started; otherwise if options.MaxCount is zero, options.MinCount
--// will be used instead.
--//
--// See http://goo.gl/Mcm3b for more details.
--func (ec2 *EC2) RunInstances(options *RunInstances) (resp *RunInstancesResp, err error) {
--	params := makeParams("RunInstances")
--	params["ImageId"] = options.ImageId
--	params["InstanceType"] = options.InstanceType
--	var min, max int
--	if options.MinCount == 0 && options.MaxCount == 0 {
--		min = 1
--		max = 1
--	} else if options.MaxCount == 0 {
--		min = options.MinCount
--		max = min
--	} else {
--		min = options.MinCount
--		max = options.MaxCount
--	}
--	params["MinCount"] = strconv.Itoa(min)
--	params["MaxCount"] = strconv.Itoa(max)
--	token, err := clientToken()
--	if err != nil {
--		return nil, err
--	}
--	params["ClientToken"] = token
--
--	if options.KeyName != "" {
--		params["KeyName"] = options.KeyName
--	}
--	if options.KernelId != "" {
--		params["KernelId"] = options.KernelId
--	}
--	if options.RamdiskId != "" {
--		params["RamdiskId"] = options.RamdiskId
--	}
--	if options.UserData != nil {
--		userData := make([]byte, b64.EncodedLen(len(options.UserData)))
--		b64.Encode(userData, options.UserData)
--		params["UserData"] = string(userData)
--	}
--	if options.AvailZone != "" {
--		params["Placement.AvailabilityZone"] = options.AvailZone
--	}
--	if options.PlacementGroupName != "" {
--		params["Placement.GroupName"] = options.PlacementGroupName
--	}
--	if options.Monitoring {
--		params["Monitoring.Enabled"] = "true"
--	}
--	if options.SubnetId != "" && options.AssociatePublicIpAddress {
--		// If we have a non-default VPC / Subnet specified, we can flag
--		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
--		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
--		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
--		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
--		// to avoid: Network interfaces and an instance-level security groups may not be specified on
--		// the same request
--		params["NetworkInterface.0.DeviceIndex"] = "0"
--		params["NetworkInterface.0.AssociatePublicIpAddress"] = "true"
--		params["NetworkInterface.0.SubnetId"] = options.SubnetId
--
--		i := 1
--		for _, g := range options.SecurityGroups {
--			// We only have SecurityGroupId's on NetworkInterface's, no SecurityGroup params.
--			if g.Id != "" {
--				params["NetworkInterface.0.SecurityGroupId."+strconv.Itoa(i)] = g.Id
--				i++
--			}
--		}
--	} else {
--		if options.SubnetId != "" {
--			params["SubnetId"] = options.SubnetId
--		}
--
--		i, j := 1, 1
--		for _, g := range options.SecurityGroups {
--			if g.Id != "" {
--				params["SecurityGroupId."+strconv.Itoa(i)] = g.Id
--				i++
--			} else {
--				params["SecurityGroup."+strconv.Itoa(j)] = g.Name
--				j++
--			}
--		}
--	}
--	if options.IamInstanceProfile != "" {
--		params["IamInstanceProfile.Name"] = options.IamInstanceProfile
--	}
--	if options.DisableAPITermination {
--		params["DisableApiTermination"] = "true"
--	}
--	if options.ShutdownBehavior != "" {
--		params["InstanceInitiatedShutdownBehavior"] = options.ShutdownBehavior
--	}
--	if options.PrivateIPAddress != "" {
--		params["PrivateIpAddress"] = options.PrivateIPAddress
--	}
--	addBlockDeviceParams("", params, options.BlockDevices)
--
--	resp = &RunInstancesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--func clientToken() (string, error) {
--	// Maximum EC2 client token size is 64 bytes.
--	// Each byte expands to two when hex encoded.
--	buf := make([]byte, 32)
--	_, err := rand.Read(buf)
--	if err != nil {
--		return "", err
--	}
--	return hex.EncodeToString(buf), nil
--}
--
--// ----------------------------------------------------------------------------
--// Spot Instance management functions and types.
--
--// The RequestSpotInstances type encapsulates options for the respective request in EC2.
--//
--// See http://goo.gl/GRZgCD for more details.
--type RequestSpotInstances struct {
--	SpotPrice                string
--	InstanceCount            int
--	Type                     string
--	ImageId                  string
--	KeyName                  string
--	InstanceType             string
--	SecurityGroups           []SecurityGroup
--	IamInstanceProfile       string
--	KernelId                 string
--	RamdiskId                string
--	UserData                 []byte
--	AvailZone                string
--	PlacementGroupName       string
--	Monitoring               bool
--	SubnetId                 string
--	AssociatePublicIpAddress bool
--	PrivateIPAddress         string
--	BlockDevices             []BlockDeviceMapping
--}
--
--type SpotInstanceSpec struct {
--	ImageId                  string
--	KeyName                  string
--	InstanceType             string
--	SecurityGroups           []SecurityGroup
--	IamInstanceProfile       string
--	KernelId                 string
--	RamdiskId                string
--	UserData                 []byte
--	AvailZone                string
--	PlacementGroupName       string
--	Monitoring               bool
--	SubnetId                 string
--	AssociatePublicIpAddress bool
--	PrivateIPAddress         string
--	BlockDevices             []BlockDeviceMapping
--}
--
--type SpotLaunchSpec struct {
--	ImageId            string               `xml:"imageId"`
--	KeyName            string               `xml:"keyName"`
--	InstanceType       string               `xml:"instanceType"`
--	SecurityGroups     []SecurityGroup      `xml:"groupSet>item"`
--	IamInstanceProfile string               `xml:"iamInstanceProfile"`
--	KernelId           string               `xml:"kernelId"`
--	RamdiskId          string               `xml:"ramdiskId"`
--	PlacementGroupName string               `xml:"placement>groupName"`
--	Monitoring         bool                 `xml:"monitoring>enabled"`
--	SubnetId           string               `xml:"subnetId"`
--	BlockDevices       []BlockDeviceMapping `xml:"blockDeviceMapping>item"`
--}
--
--type SpotStatus struct {
--	Code       string `xml:"code"`
--	UpdateTime string `xml:"updateTime"`
--	Message    string `xml:"message"`
--}
--
--type SpotRequestResult struct {
--	SpotRequestId  string         `xml:"spotInstanceRequestId"`
--	SpotPrice      string         `xml:"spotPrice"`
--	Type           string         `xml:"type"`
--	AvailZone      string         `xml:"launchedAvailabilityZone"`
--	InstanceId     string         `xml:"instanceId"`
--	State          string         `xml:"state"`
--	Status         SpotStatus     `xml:"status"`
--	SpotLaunchSpec SpotLaunchSpec `xml:"launchSpecification"`
--	CreateTime     string         `xml:"createTime"`
--	Tags           []Tag          `xml:"tagSet>item"`
--}
--
--// Response to a RequestSpotInstances request.
--//
--// See http://goo.gl/GRZgCD for more details.
--type RequestSpotInstancesResp struct {
--	RequestId          string              `xml:"requestId"`
--	SpotRequestResults []SpotRequestResult `xml:"spotInstanceRequestSet>item"`
--}
--
--// RequestSpotInstances requests new spot instances in EC2.
--func (ec2 *EC2) RequestSpotInstances(options *RequestSpotInstances) (resp *RequestSpotInstancesResp, err error) {
--	params := makeParams("RequestSpotInstances")
--	prefix := "LaunchSpecification" + "."
--
--	params["SpotPrice"] = options.SpotPrice
--	params[prefix+"ImageId"] = options.ImageId
--	params[prefix+"InstanceType"] = options.InstanceType
--
--	if options.InstanceCount != 0 {
--		params["InstanceCount"] = strconv.Itoa(options.InstanceCount)
--	}
--	if options.KeyName != "" {
--		params[prefix+"KeyName"] = options.KeyName
--	}
--	if options.KernelId != "" {
--		params[prefix+"KernelId"] = options.KernelId
--	}
--	if options.RamdiskId != "" {
--		params[prefix+"RamdiskId"] = options.RamdiskId
--	}
--	if options.UserData != nil {
--		userData := make([]byte, b64.EncodedLen(len(options.UserData)))
--		b64.Encode(userData, options.UserData)
--		params[prefix+"UserData"] = string(userData)
--	}
--	if options.AvailZone != "" {
--		params[prefix+"Placement.AvailabilityZone"] = options.AvailZone
--	}
--	if options.PlacementGroupName != "" {
--		params[prefix+"Placement.GroupName"] = options.PlacementGroupName
--	}
--	if options.Monitoring {
--		params[prefix+"Monitoring.Enabled"] = "true"
--	}
--	if options.SubnetId != "" && options.AssociatePublicIpAddress {
--		// If we have a non-default VPC / Subnet specified, we can flag
--		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
--		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
--		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
--		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
--		// to avoid: Network interfaces and an instance-level security groups may not be specified on
--		// the same request
--		params[prefix+"NetworkInterface.0.DeviceIndex"] = "0"
--		params[prefix+"NetworkInterface.0.AssociatePublicIpAddress"] = "true"
--		params[prefix+"NetworkInterface.0.SubnetId"] = options.SubnetId
--
--		i := 1
--		for _, g := range options.SecurityGroups {
--			// We only have SecurityGroupId's on NetworkInterface's, no SecurityGroup params.
--			if g.Id != "" {
--				params[prefix+"NetworkInterface.0.SecurityGroupId."+strconv.Itoa(i)] = g.Id
--				i++
--			}
--		}
--	} else {
--		if options.SubnetId != "" {
--			params[prefix+"SubnetId"] = options.SubnetId
--		}
--
--		i, j := 1, 1
--		for _, g := range options.SecurityGroups {
--			if g.Id != "" {
--				params[prefix+"SecurityGroupId."+strconv.Itoa(i)] = g.Id
--				i++
--			} else {
--				params[prefix+"SecurityGroup."+strconv.Itoa(j)] = g.Name
--				j++
--			}
--		}
--	}
--	if options.IamInstanceProfile != "" {
--		params[prefix+"IamInstanceProfile.Name"] = options.IamInstanceProfile
--	}
--	if options.PrivateIPAddress != "" {
--		params[prefix+"PrivateIpAddress"] = options.PrivateIPAddress
--	}
--	addBlockDeviceParams(prefix, params, options.BlockDevices)
--
--	resp = &RequestSpotInstancesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Response to a DescribeSpotInstanceRequests request.
--//
--// See http://goo.gl/KsKJJk for more details.
--type SpotRequestsResp struct {
--	RequestId          string              `xml:"requestId"`
--	SpotRequestResults []SpotRequestResult `xml:"spotInstanceRequestSet>item"`
--}
--
--// DescribeSpotRequests returns details about spot requests in EC2.  Both parameters
--// are optional, and if provided will limit the spot requests returned to those
--// matching the given spot request ids or filtering rules.
--//
--// See http://goo.gl/KsKJJk for more details.
--func (ec2 *EC2) DescribeSpotRequests(spotrequestIds []string, filter *Filter) (resp *SpotRequestsResp, err error) {
--	params := makeParams("DescribeSpotInstanceRequests")
--	addParamsList(params, "SpotInstanceRequestId", spotrequestIds)
--	filter.addParams(params)
--	resp = &SpotRequestsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Response to a CancelSpotInstanceRequests request.
--//
--// See http://goo.gl/3BKHj for more details.
--type CancelSpotRequestResult struct {
--	SpotRequestId string `xml:"spotInstanceRequestId"`
--	State         string `xml:"state"`
--}
--type CancelSpotRequestsResp struct {
--	RequestId                string                    `xml:"requestId"`
--	CancelSpotRequestResults []CancelSpotRequestResult `xml:"spotInstanceRequestSet>item"`
--}
--
--// CancelSpotRequests requests the cancellation of spot requests with the given ids.
--//
--// See http://goo.gl/3BKHj for more details.
--func (ec2 *EC2) CancelSpotRequests(spotrequestIds []string) (resp *CancelSpotRequestsResp, err error) {
--	params := makeParams("CancelSpotInstanceRequests")
--	addParamsList(params, "SpotInstanceRequestId", spotrequestIds)
--	resp = &CancelSpotRequestsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Response to a TerminateInstances request.
--//
--// See http://goo.gl/3BKHj for more details.
--type TerminateInstancesResp struct {
--	RequestId    string                `xml:"requestId"`
--	StateChanges []InstanceStateChange `xml:"instancesSet>item"`
--}
--
--// InstanceState encapsulates the state of an instance in EC2.
--//
--// See http://goo.gl/y3ZBq for more details.
--type InstanceState struct {
--	Code int    `xml:"code"` // Watch out, bits 15-8 have unpublished meaning.
--	Name string `xml:"name"`
--}
--
--// InstanceStateChange informs of the previous and current states
--// for an instance when a state change is requested.
--type InstanceStateChange struct {
--	InstanceId    string        `xml:"instanceId"`
--	CurrentState  InstanceState `xml:"currentState"`
--	PreviousState InstanceState `xml:"previousState"`
--}
--
--// TerminateInstances requests the termination of instances with the given ids.
--//
--// See http://goo.gl/3BKHj for more details.
--func (ec2 *EC2) TerminateInstances(instIds []string) (resp *TerminateInstancesResp, err error) {
--	params := makeParams("TerminateInstances")
--	addParamsList(params, "InstanceId", instIds)
--	resp = &TerminateInstancesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Response to a DescribeInstances request.
--//
--// See http://goo.gl/mLbmw for more details.
--type InstancesResp struct {
--	RequestId    string        `xml:"requestId"`
--	Reservations []Reservation `xml:"reservationSet>item"`
--}
--
--// Reservation represents details about a reservation in EC2.
--//
--// See http://goo.gl/0ItPT for more details.
--type Reservation struct {
--	ReservationId  string          `xml:"reservationId"`
--	OwnerId        string          `xml:"ownerId"`
--	RequesterId    string          `xml:"requesterId"`
--	SecurityGroups []SecurityGroup `xml:"groupSet>item"`
--	Instances      []Instance      `xml:"instancesSet>item"`
--}
--
--// Instances returns details about instances in EC2.  Both parameters
--// are optional, and if provided will limit the instances returned to those
--// matching the given instance ids or filtering rules.
--//
--// See http://goo.gl/4No7c for more details.
--func (ec2 *EC2) Instances(instIds []string, filter *Filter) (resp *InstancesResp, err error) {
--	params := makeParams("DescribeInstances")
--	addParamsList(params, "InstanceId", instIds)
--	filter.addParams(params)
--	resp = &InstancesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// ----------------------------------------------------------------------------
--// Volume management
--
--// The CreateVolume request parameters
--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVolume.html
--type CreateVolume struct {
--	AvailZone  string
--	Size       int64
--	SnapshotId string
--	VolumeType string
--	IOPS       int64
--	Encrypted  bool
--}
--
--// Response to an AttachVolume request
--type AttachVolumeResp struct {
--	RequestId  string `xml:"requestId"`
--	VolumeId   string `xml:"volumeId"`
--	InstanceId string `xml:"instanceId"`
--	Device     string `xml:"device"`
--	Status     string `xml:"status"`
--	AttachTime string `xml:"attachTime"`
--}
--
--// Response to a CreateVolume request
--type CreateVolumeResp struct {
--	RequestId  string `xml:"requestId"`
--	VolumeId   string `xml:"volumeId"`
--	Size       int64  `xml:"size"`
--	SnapshotId string `xml:"snapshotId"`
--	AvailZone  string `xml:"availabilityZone"`
--	Status     string `xml:"status"`
--	CreateTime string `xml:"createTime"`
--	VolumeType string `xml:"volumeType"`
--	IOPS       int64  `xml:"iops"`
--	Encrypted  bool   `xml:"encrypted"`
--}
--
--// Volume is a single volume.
--type Volume struct {
--	VolumeId    string             `xml:"volumeId"`
--	Size        string             `xml:"size"`
--	SnapshotId  string             `xml:"snapshotId"`
--	AvailZone   string             `xml:"availabilityZone"`
--	Status      string             `xml:"status"`
--	Attachments []VolumeAttachment `xml:"attachmentSet>item"`
--	VolumeType  string             `xml:"volumeType"`
--	IOPS        int64              `xml:"iops"`
--	Encrypted   bool               `xml:"encrypted"`
--	Tags        []Tag              `xml:"tagSet>item"`
--}
--
--type VolumeAttachment struct {
--	VolumeId   string `xml:"volumeId"`
--	InstanceId string `xml:"instanceId"`
--	Device     string `xml:"device"`
--	Status     string `xml:"status"`
--}
--
--// Response to a DescribeVolumes request
--type VolumesResp struct {
--	RequestId string   `xml:"requestId"`
--	Volumes   []Volume `xml:"volumeSet>item"`
--}
--
--// Attach a volume.
--func (ec2 *EC2) AttachVolume(volumeId string, instanceId string, device string) (resp *AttachVolumeResp, err error) {
--	params := makeParams("AttachVolume")
--	params["VolumeId"] = volumeId
--	params["InstanceId"] = instanceId
--	params["Device"] = device
--
--	resp = &AttachVolumeResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Create a new volume.
--func (ec2 *EC2) CreateVolume(options *CreateVolume) (resp *CreateVolumeResp, err error) {
--	params := makeParams("CreateVolume")
--	params["AvailabilityZone"] = options.AvailZone
--	if options.Size > 0 {
--		params["Size"] = strconv.FormatInt(options.Size, 10)
--	}
--
--	if options.SnapshotId != "" {
--		params["SnapshotId"] = options.SnapshotId
--	}
--
--	if options.VolumeType != "" {
--		params["VolumeType"] = options.VolumeType
--	}
--
--	if options.IOPS > 0 {
--		params["Iops"] = strconv.FormatInt(options.IOPS, 10)
--	}
--
--	if options.Encrypted {
--		params["Encrypted"] = "true"
--	}
--
--	resp = &CreateVolumeResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Delete an EBS volume.
--func (ec2 *EC2) DeleteVolume(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteVolume")
--	params["VolumeId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Detaches an EBS volume.
--func (ec2 *EC2) DetachVolume(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DetachVolume")
--	params["VolumeId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Finds or lists all volumes.
--func (ec2 *EC2) Volumes(volIds []string, filter *Filter) (resp *VolumesResp, err error) {
--	params := makeParams("DescribeVolumes")
--	addParamsList(params, "VolumeId", volIds)
--	filter.addParams(params)
--	resp = &VolumesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
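The volume helpers above compose in the obvious way; a hedged sketch with the same pre-built client and made-up ids (the zone, size, and volume type are illustrative only):

    func exampleVolumes(client *ec2.EC2) error {
        // Create a 10 GiB gp2 volume, then attach it to an instance as /dev/sdf.
        created, err := client.CreateVolume(&ec2.CreateVolume{
            AvailZone:  "us-east-1a",
            Size:       10,
            VolumeType: "gp2",
        })
        if err != nil {
            return err
        }
        if _, err := client.AttachVolume(created.VolumeId, "i-0123456789abcdef0", "/dev/sdf"); err != nil {
            return err
        }
        // Describe everything afterwards; ids and filter are optional.
        _, err = client.Volumes(nil, nil)
        return err
    }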
--// ----------------------------------------------------------------------------
--// ElasticIp management (for VPC)
--
--// The AllocateAddress request parameters
--//
--// see http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AllocateAddress.html
--type AllocateAddress struct {
--	Domain string
--}
--
--// Response to an AllocateAddress request
--type AllocateAddressResp struct {
--	RequestId    string `xml:"requestId"`
--	PublicIp     string `xml:"publicIp"`
--	Domain       string `xml:"domain"`
--	AllocationId string `xml:"allocationId"`
--}
--
--// The AssociateAddress request parameters
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateAddress.html
--type AssociateAddress struct {
--	InstanceId         string
--	PublicIp           string
--	AllocationId       string
--	AllowReassociation bool
--}
--
--// Response to an AssociateAddress request
--type AssociateAddressResp struct {
--	RequestId     string `xml:"requestId"`
--	Return        bool   `xml:"return"`
--	AssociationId string `xml:"associationId"`
--}
--
--// Address represents an Elastic IP Address
--// See http://goo.gl/uxCjp7 for more details
--type Address struct {
--	PublicIp                string `xml:"publicIp"`
--	AllocationId            string `xml:"allocationId"`
--	Domain                  string `xml:"domain"`
--	InstanceId              string `xml:"instanceId"`
--	AssociationId           string `xml:"associationId"`
--	NetworkInterfaceId      string `xml:"networkInterfaceId"`
--	NetworkInterfaceOwnerId string `xml:"networkInterfaceOwnerId"`
--	PrivateIpAddress        string `xml:"privateIpAddress"`
--}
--
--type DescribeAddressesResp struct {
--	RequestId string    `xml:"requestId"`
--	Addresses []Address `xml:"addressesSet>item"`
--}
--
--// Allocate a new Elastic IP.
--func (ec2 *EC2) AllocateAddress(options *AllocateAddress) (resp *AllocateAddressResp, err error) {
--	params := makeParams("AllocateAddress")
--	params["Domain"] = options.Domain
--
--	resp = &AllocateAddressResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Release an Elastic IP (VPC).
--func (ec2 *EC2) ReleaseAddress(id string) (resp *SimpleResp, err error) {
--	params := makeParams("ReleaseAddress")
--	params["AllocationId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Release an Elastic IP (Public)
--func (ec2 *EC2) ReleasePublicAddress(publicIp string) (resp *SimpleResp, err error) {
--	params := makeParams("ReleaseAddress")
--	params["PublicIp"] = publicIp
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Associate an address with a VPC instance.
--func (ec2 *EC2) AssociateAddress(options *AssociateAddress) (resp *AssociateAddressResp, err error) {
--	params := makeParams("AssociateAddress")
--	params["InstanceId"] = options.InstanceId
--	if options.PublicIp != "" {
--		params["PublicIp"] = options.PublicIp
--	}
--	if options.AllocationId != "" {
--		params["AllocationId"] = options.AllocationId
--	}
--	if options.AllowReassociation {
--		params["AllowReassociation"] = "true"
--	}
--
--	resp = &AssociateAddressResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Disassociate an address from a VPC instance.
--func (ec2 *EC2) DisassociateAddress(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DisassociateAddress")
--	params["AssociationId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// DescribeAddresses returns details about one or more
--// Elastic IP Addresses. Returned addresses can be
--// filtered by Public IP, Allocation ID or multiple filters
--//
--// See http://goo.gl/zW7J4p for more details.
--func (ec2 *EC2) Addresses(publicIps []string, allocationIds []string, filter *Filter) (resp *DescribeAddressesResp, err error) {
--	params := makeParams("DescribeAddresses")
--	addParamsList(params, "PublicIp", publicIps)
--	addParamsList(params, "AllocationId", allocationIds)
--	filter.addParams(params)
--	resp = &DescribeAddressesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
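A short sketch of the Elastic IP flow above (allocate, then associate), again with a pre-built client and a hypothetical instance id; the "vpc" domain value follows the AWS API:

    func exampleElasticIP(client *ec2.EC2) error {
        // Allocate a VPC Elastic IP and bind it to an instance by allocation id.
        allocated, err := client.AllocateAddress(&ec2.AllocateAddress{Domain: "vpc"})
        if err != nil {
            return err
        }
        _, err = client.AssociateAddress(&ec2.AssociateAddress{
            InstanceId:   "i-0123456789abcdef0",
            AllocationId: allocated.AllocationId,
        })
        return err
    }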
--// ----------------------------------------------------------------------------
--// Image and snapshot management functions and types.
--
--// The CreateImage request parameters.
--//
--// See http://goo.gl/cxU41 for more details.
--type CreateImage struct {
--	InstanceId   string
--	Name         string
--	Description  string
--	NoReboot     bool
--	BlockDevices []BlockDeviceMapping
--}
--
--// Response to a CreateImage request.
--//
--// See http://goo.gl/cxU41 for more details.
--type CreateImageResp struct {
--	RequestId string `xml:"requestId"`
--	ImageId   string `xml:"imageId"`
--}
--
--// Response to a DescribeImages request.
--//
--// See http://goo.gl/hLnyg for more details.
--type ImagesResp struct {
--	RequestId string  `xml:"requestId"`
--	Images    []Image `xml:"imagesSet>item"`
--}
--
--// Response to a DescribeImageAttribute request.
--//
--// See http://goo.gl/bHO3zT for more details.
--type ImageAttributeResp struct {
--	RequestId    string               `xml:"requestId"`
--	ImageId      string               `xml:"imageId"`
--	Kernel       string               `xml:"kernel>value"`
--	RamDisk      string               `xml:"ramdisk>value"`
--	Description  string               `xml:"description>value"`
--	Group        string               `xml:"launchPermission>item>group"`
--	UserIds      []string             `xml:"launchPermission>item>userId"`
--	ProductCodes []string             `xml:"productCodes>item>productCode"`
--	BlockDevices []BlockDeviceMapping `xml:"blockDeviceMapping>item"`
--}
--
--// The RegisterImage request parameters.
--type RegisterImage struct {
--	ImageLocation   string
--	Name            string
--	Description     string
--	Architecture    string
--	KernelId        string
--	RamdiskId       string
--	RootDeviceName  string
--	VirtType        string
--	SriovNetSupport string
--	BlockDevices    []BlockDeviceMapping
--}
--
--// Response to a RegisterImage request.
--type RegisterImageResp struct {
--	RequestId string `xml:"requestId"`
--	ImageId   string `xml:"imageId"`
--}
--
--// Response to a DeregisterImage request.

--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeregisterImage.html
--type DeregisterImageResp struct {
--	RequestId string `xml:"requestId"`
--	Return    bool   `xml:"return"`
--}
--
--// BlockDeviceMapping represents the association of a block device with an image.
--//
--// See http://goo.gl/wnDBf for more details.
--type BlockDeviceMapping struct {
--	DeviceName          string `xml:"deviceName"`
--	VirtualName         string `xml:"virtualName"`
--	SnapshotId          string `xml:"ebs>snapshotId"`
--	VolumeType          string `xml:"ebs>volumeType"`
--	VolumeSize          int64  `xml:"ebs>volumeSize"`
--	DeleteOnTermination bool   `xml:"ebs>deleteOnTermination"`
--	Encrypted           bool   `xml:"ebs>encrypted"`
--	NoDevice            bool   `xml:"noDevice"`
--
--	// The number of I/O operations per second (IOPS) that the volume supports.
--	IOPS int64 `xml:"ebs>iops"`
--}
--
--// Image represents details about an image.
--//
--// See http://goo.gl/iSqJG for more details.
--type Image struct {
--	Id                 string               `xml:"imageId"`
--	Name               string               `xml:"name"`
--	Description        string               `xml:"description"`
--	Type               string               `xml:"imageType"`
--	State              string               `xml:"imageState"`
--	Location           string               `xml:"imageLocation"`
--	Public             bool                 `xml:"isPublic"`
--	Architecture       string               `xml:"architecture"`
--	Platform           string               `xml:"platform"`
--	ProductCodes       []string             `xml:"productCode>item>productCode"`
--	KernelId           string               `xml:"kernelId"`
--	RamdiskId          string               `xml:"ramdiskId"`
--	StateReason        string               `xml:"stateReason"`
--	OwnerId            string               `xml:"imageOwnerId"`
--	OwnerAlias         string               `xml:"imageOwnerAlias"`
--	RootDeviceType     string               `xml:"rootDeviceType"`
--	RootDeviceName     string               `xml:"rootDeviceName"`
--	VirtualizationType string               `xml:"virtualizationType"`
--	Hypervisor         string               `xml:"hypervisor"`
--	BlockDevices       []BlockDeviceMapping `xml:"blockDeviceMapping>item"`
--	Tags               []Tag                `xml:"tagSet>item"`
--}
--
--// The ModifyImageAttribute request parameters.
--type ModifyImageAttribute struct {
--	AddUsers     []string
--	RemoveUsers  []string
--	AddGroups    []string
--	RemoveGroups []string
--	ProductCodes []string
--	Description  string
--}
--
--// The CopyImage request parameters.
--//
--// See http://goo.gl/hQwPCK for more details.
--type CopyImage struct {
--	SourceRegion  string
--	SourceImageId string
--	Name          string
--	Description   string
--	ClientToken   string
--}
--
--// Response to a CopyImage request.
--//
--// See http://goo.gl/hQwPCK for more details.
--type CopyImageResp struct {
--	RequestId string `xml:"requestId"`
--	ImageId   string `xml:"imageId"`
--}
--
--// Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance
--// that is either running or stopped.
--//
--// See http://goo.gl/cxU41 for more details.
--func (ec2 *EC2) CreateImage(options *CreateImage) (resp *CreateImageResp, err error) {
--	params := makeParams("CreateImage")
--	params["InstanceId"] = options.InstanceId
--	params["Name"] = options.Name
--	if options.Description != "" {
--		params["Description"] = options.Description
--	}
--	if options.NoReboot {
--		params["NoReboot"] = "true"
--	}
--	addBlockDeviceParams("", params, options.BlockDevices)
--
--	resp = &CreateImageResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Images returns details about available images.
--// The ids and filter parameters, if provided, will limit the images returned.
--// For example, to get all the private images associated with this account set
--// the boolean filter "is-public" to 0.
--// For list of filters: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeImages.html
--//
--// Note: calling this function with nil ids and filter parameters will result in
--// a very large number of images being returned.
--//
--// See http://goo.gl/SRBhW for more details.
--func (ec2 *EC2) Images(ids []string, filter *Filter) (resp *ImagesResp, err error) {
--	params := makeParams("DescribeImages")
--	for i, id := range ids {
--		params["ImageId."+strconv.Itoa(i+1)] = id
--	}
--	filter.addParams(params)
--
--	resp = &ImagesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// ImagesByOwners returns details about available images.
--// The ids, owners, and filter parameters, if provided, will limit the images returned.
--// For example, to get all the private images associated with this account set
--// the boolean filter "is-public" to 0.
--// For list of filters: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeImages.html
--//
--// Note: calling this function with nil ids and filter parameters will result in
--// a very large number of images being returned.
--//
--// See http://goo.gl/SRBhW for more details.
--func (ec2 *EC2) ImagesByOwners(ids []string, owners []string, filter *Filter) (resp *ImagesResp, err error) {
--	params := makeParams("DescribeImages")
--	for i, id := range ids {
--		params["ImageId."+strconv.Itoa(i+1)] = id
--	}
--	for i, owner := range owners {
--		params[fmt.Sprintf("Owner.%d", i+1)] = owner
--	}
--
--	filter.addParams(params)
--
--	resp = &ImagesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// ImageAttribute describes an attribute of an AMI.
--// You can specify only one attribute at a time.
--// Valid attributes are:
--//    description | kernel | ramdisk | launchPermission | productCodes | blockDeviceMapping
--//
--// See http://goo.gl/bHO3zT for more details.
--func (ec2 *EC2) ImageAttribute(imageId, attribute string) (resp *ImageAttributeResp, err error) {
--	params := makeParams("DescribeImageAttribute")
--	params["ImageId"] = imageId
--	params["Attribute"] = attribute
--
--	resp = &ImageAttributeResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// ModifyImageAttribute sets attributes for an image.
--//
--// See http://goo.gl/YUjO4G for more details.
--func (ec2 *EC2) ModifyImageAttribute(imageId string, options *ModifyImageAttribute) (resp *SimpleResp, err error) {
--	params := makeParams("ModifyImageAttribute")
--	params["ImageId"] = imageId
--	if options.Description != "" {
--		params["Description.Value"] = options.Description
--	}
--
--	if options.AddUsers != nil {
--		for i, user := range options.AddUsers {
--			p := fmt.Sprintf("LaunchPermission.Add.%d.UserId", i+1)
--			params[p] = user
--		}
--	}
--
--	if options.RemoveUsers != nil {
--		for i, user := range options.RemoveUsers {
--			p := fmt.Sprintf("LaunchPermission.Remove.%d.UserId", i+1)
--			params[p] = user
--		}
--	}
--
--	if options.AddGroups != nil {
--		for i, group := range options.AddGroups {
--			p := fmt.Sprintf("LaunchPermission.Add.%d.Group", i+1)
--			params[p] = group
--		}
--	}
--
--	if options.RemoveGroups != nil {
--		for i, group := range options.RemoveGroups {
--			p := fmt.Sprintf("LaunchPermission.Remove.%d.Group", i+1)
--			params[p] = group
--		}
--	}
--
--	if options.ProductCodes != nil {
--		addParamsList(params, "ProductCode", options.ProductCodes)
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		resp = nil
--	}
--
--	return
--}
--
--// Registers a new AMI with EC2.
--//
--// See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RegisterImage.html
--func (ec2 *EC2) RegisterImage(options *RegisterImage) (resp *RegisterImageResp, err error) {
--	params := makeParams("RegisterImage")
--	params["Name"] = options.Name
--	if options.ImageLocation != "" {
--		params["ImageLocation"] = options.ImageLocation
--	}
--
--	if options.Description != "" {
--		params["Description"] = options.Description
--	}
--
--	if options.Architecture != "" {
--		params["Architecture"] = options.Architecture
--	}
--
--	if options.KernelId != "" {
--		params["KernelId"] = options.KernelId
--	}
--
--	if options.RamdiskId != "" {
--		params["RamdiskId"] = options.RamdiskId
--	}
--
--	if options.RootDeviceName != "" {
--		params["RootDeviceName"] = options.RootDeviceName
--	}
--
--	if options.VirtType != "" {
--		params["VirtualizationType"] = options.VirtType
--	}
--
--	if options.SriovNetSupport != "" {
--		params["SriovNetSupport"] = "simple"
--	}
--
--	addBlockDeviceParams("", params, options.BlockDevices)
--
--	resp = &RegisterImageResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Deregisters an image. Note that this does not delete the backing stores of the AMI.
--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeregisterImage.html
--func (ec2 *EC2) DeregisterImage(imageId string) (resp *DeregisterImageResp, err error) {
--	params := makeParams("DeregisterImage")
--	params["ImageId"] = imageId
--
--	resp = &DeregisterImageResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Copy an image from one region to another.
--//
--// See http://goo.gl/hQwPCK for more details.
--func (ec2 *EC2) CopyImage(options *CopyImage) (resp *CopyImageResp, err error) {
--	params := makeParams("CopyImage")
--
--	if options.SourceRegion != "" {
--		params["SourceRegion"] = options.SourceRegion
--	}
--
--	if options.SourceImageId != "" {
--		params["SourceImageId"] = options.SourceImageId
--	}
--
--	if options.Name != "" {
--		params["Name"] = options.Name
--	}
--
--	if options.Description != "" {
--		params["Description"] = options.Description
--	}
--
--	if options.ClientToken != "" {
--		params["ClientToken"] = options.ClientToken
--	}
--
--	resp = &CopyImageResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Response to a CreateSnapshot request.
--//
--// See http://goo.gl/ttcda for more details.
--type CreateSnapshotResp struct {
--	RequestId string `xml:"requestId"`
--	Snapshot
--}
--
--// CreateSnapshot creates a volume snapshot and stores it in S3.
--//
--// See http://goo.gl/ttcda for more details.
--func (ec2 *EC2) CreateSnapshot(volumeId, description string) (resp *CreateSnapshotResp, err error) {
--	params := makeParams("CreateSnapshot")
--	params["VolumeId"] = volumeId
--	params["Description"] = description
--
--	resp = &CreateSnapshotResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// DeleteSnapshots deletes the volume snapshots with the given ids.
--//
--// Note: If you make periodic snapshots of a volume, the snapshots are
--// incremental so that only the blocks on the device that have changed
--// since your last snapshot are incrementally saved in the new snapshot.
--// Even though snapshots are saved incrementally, the snapshot deletion
--// process is designed so that you need to retain only the most recent
--// snapshot in order to restore the volume.
--//
--// See http://goo.gl/vwU1y for more details.
--func (ec2 *EC2) DeleteSnapshots(ids []string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteSnapshot")
--	for i, id := range ids {
--		params["SnapshotId."+strconv.Itoa(i+1)] = id
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Response to a DescribeSnapshots request.
--//
--// See http://goo.gl/nClDT for more details.
--type SnapshotsResp struct {
--	RequestId string     `xml:"requestId"`
--	Snapshots []Snapshot `xml:"snapshotSet>item"`
--}
--
--// Snapshot represents details about a volume snapshot.
--//
--// See http://goo.gl/nkovs for more details.
--type Snapshot struct {
--	Id          string `xml:"snapshotId"`
--	VolumeId    string `xml:"volumeId"`
--	VolumeSize  string `xml:"volumeSize"`
--	Status      string `xml:"status"`
--	StartTime   string `xml:"startTime"`
--	Description string `xml:"description"`
--	Progress    string `xml:"progress"`
--	OwnerId     string `xml:"ownerId"`
--	OwnerAlias  string `xml:"ownerAlias"`
--	Encrypted   bool   `xml:"encrypted"`
--	Tags        []Tag  `xml:"tagSet>item"`
--}
--
--// Snapshots returns details about volume snapshots available to the user.
--// The ids and filter parameters, if provided, limit the snapshots returned.
--//
--// See http://goo.gl/ogJL4 for more details.
--func (ec2 *EC2) Snapshots(ids []string, filter *Filter) (resp *SnapshotsResp, err error) {
--	params := makeParams("DescribeSnapshots")
--	for i, id := range ids {
--		params["SnapshotId."+strconv.Itoa(i+1)] = id
--	}
--	filter.addParams(params)
--
--	resp = &SnapshotsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
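The image and snapshot calls above fit together as in this sketch, with hypothetical instance and volume ids and an arbitrary AMI name:

    func exampleImaging(client *ec2.EC2) error {
        // Build an AMI from a running or stopped EBS-backed instance without rebooting it.
        image, err := client.CreateImage(&ec2.CreateImage{
            InstanceId: "i-0123456789abcdef0",
            Name:       "backup-2015-01-28",
            NoReboot:   true,
        })
        if err != nil {
            return err
        }
        _ = image.ImageId // newly registered AMI id
        // Separately, snapshot a single EBS volume with a description.
        _, err = client.CreateSnapshot("vol-0123456789abcdef0", "nightly backup")
        return err
    }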
--// ----------------------------------------------------------------------------
--// KeyPair management functions and types.
--
--type KeyPair struct {
--	Name        string `xml:"keyName"`
--	Fingerprint string `xml:"keyFingerprint"`
--}
--
--type KeyPairsResp struct {
--	RequestId string    `xml:"requestId"`
--	Keys      []KeyPair `xml:"keySet>item"`
--}
--
--type CreateKeyPairResp struct {
--	RequestId      string `xml:"requestId"`
--	KeyName        string `xml:"keyName"`
--	KeyFingerprint string `xml:"keyFingerprint"`
--	KeyMaterial    string `xml:"keyMaterial"`
--}
--
--type ImportKeyPairResponse struct {
--	RequestId      string `xml:"requestId"`
--	KeyName        string `xml:"keyName"`
--	KeyFingerprint string `xml:"keyFingerprint"`
--}
--
--// CreateKeyPair creates a new key pair and returns the private key contents.
--//
--// See http://goo.gl/0S6hV
--func (ec2 *EC2) CreateKeyPair(keyName string) (resp *CreateKeyPairResp, err error) {
--	params := makeParams("CreateKeyPair")
--	params["KeyName"] = keyName
--
--	resp = &CreateKeyPairResp{}
--	err = ec2.query(params, resp)
--	if err == nil {
--		resp.KeyFingerprint = strings.TrimSpace(resp.KeyFingerprint)
--	}
--	return
--}
--
--// DeleteKeyPair deletes a key pair.
--//
--// See http://goo.gl/0bqok
--func (ec2 *EC2) DeleteKeyPair(name string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteKeyPair")
--	params["KeyName"] = name
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	return
--}
--
--// KeyPairs returns a list of key pairs for this account.
--//
--// See http://goo.gl/Apzsfz
--func (ec2 *EC2) KeyPairs(keynames []string, filter *Filter) (resp *KeyPairsResp, err error) {
--	params := makeParams("DescribeKeyPairs")
--	for i, name := range keynames {
--		params["KeyName."+strconv.Itoa(i)] = name
--	}
--	filter.addParams(params)
--
--	resp = &KeyPairsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// ImportKeyPair imports a key into AWS
--//
--// See http://goo.gl/NbZUvw
--func (ec2 *EC2) ImportKeyPair(keyname string, key string) (resp *ImportKeyPairResponse, err error) {
--	params := makeParams("ImportKeyPair")
--	params["KeyName"] = keyname
--
--	// Oddly, AWS requires the key material to be base64-encoded, even if it was
--	// already encoded. So, we force another round of encoding...
--	// c.f. https://groups.google.com/forum/?fromgroups#!topic/boto-dev/IczrStO9Q8M
--	params["PublicKeyMaterial"] = base64.StdEncoding.EncodeToString([]byte(key))
--
--	resp = &ImportKeyPairResponse{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
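A sketch of the key pair helpers above; note that ImportKeyPair takes the raw public key text and base64-encodes it internally, as the comment in that function explains (key names and material here are placeholders):

    func exampleKeyPairs(client *ec2.EC2) error {
        // Create a key pair server-side and keep the returned private key material.
        created, err := client.CreateKeyPair("deploy-key")
        if err != nil {
            return err
        }
        _ = created.KeyMaterial // PEM-encoded private key, only returned once
        // Import an existing public key; no manual base64 step is needed.
        _, err = client.ImportKeyPair("imported-key", "ssh-rsa AAAA... user@host")
        return err
    }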
--// ----------------------------------------------------------------------------
--// Security group management functions and types.
--
--// SimpleResp represents a response to an EC2 request which on success will
--// return no other information besides a request id.
--type SimpleResp struct {
--	XMLName   xml.Name
--	RequestId string `xml:"requestId"`
--}
--
--// CreateSecurityGroupResp represents a response to a CreateSecurityGroup request.
--type CreateSecurityGroupResp struct {
--	SecurityGroup
--	RequestId string `xml:"requestId"`
--}
--
--// CreateSecurityGroup runs a CreateSecurityGroup request in EC2, with the provided
--// name and description.
--//
--// See http://goo.gl/Eo7Yl for more details.
--func (ec2 *EC2) CreateSecurityGroup(group SecurityGroup) (resp *CreateSecurityGroupResp, err error) {
--	params := makeParams("CreateSecurityGroup")
--	params["GroupName"] = group.Name
--	params["GroupDescription"] = group.Description
--	if group.VpcId != "" {
--		params["VpcId"] = group.VpcId
--	}
--
--	resp = &CreateSecurityGroupResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	resp.Name = group.Name
--	return resp, nil
--}
--
--// SecurityGroupsResp represents a response to a DescribeSecurityGroups
--// request in EC2.
--//
--// See http://goo.gl/k12Uy for more details.
--type SecurityGroupsResp struct {
--	RequestId string              `xml:"requestId"`
--	Groups    []SecurityGroupInfo `xml:"securityGroupInfo>item"`
--}
--
--// SecurityGroupInfo encapsulates details for a security group in EC2.
--//
--// See http://goo.gl/CIdyP for more details.
--type SecurityGroupInfo struct {
--	SecurityGroup
--	OwnerId     string   `xml:"ownerId"`
--	Description string   `xml:"groupDescription"`
--	IPPerms     []IPPerm `xml:"ipPermissions>item"`
--}
--
--// IPPerm represents an allowance within an EC2 security group.
--//
--// See http://goo.gl/4oTxv for more details.
--type IPPerm struct {
--	Protocol     string              `xml:"ipProtocol"`
--	FromPort     int                 `xml:"fromPort"`
--	ToPort       int                 `xml:"toPort"`
--	SourceIPs    []string            `xml:"ipRanges>item>cidrIp"`
--	SourceGroups []UserSecurityGroup `xml:"groups>item"`
--}
--
--// UserSecurityGroup holds a security group and the owner
--// of that group.
--type UserSecurityGroup struct {
--	Id      string `xml:"groupId"`
--	Name    string `xml:"groupName"`
--	OwnerId string `xml:"userId"`
--}
--
--// SecurityGroup represents an EC2 security group.
--// If SecurityGroup is used as a parameter, then one of Id or Name
--// may be empty. If both are set, then Id is used.
--type SecurityGroup struct {
--	Id          string `xml:"groupId"`
--	Name        string `xml:"groupName"`
--	Description string `xml:"groupDescription"`
--	VpcId       string `xml:"vpcId"`
--}
--
--// SecurityGroupNames is a convenience function that
--// returns a slice of security groups with the given names.
--func SecurityGroupNames(names ...string) []SecurityGroup {
--	g := make([]SecurityGroup, len(names))
--	for i, name := range names {
--		g[i] = SecurityGroup{Name: name}
--	}
--	return g
--}
--
--// SecurityGroupIds is a convenience function that
--// returns a slice of security groups with the given ids.
--func SecurityGroupIds(ids ...string) []SecurityGroup {
--	g := make([]SecurityGroup, len(ids))
--	for i, id := range ids {
--		g[i] = SecurityGroup{Id: id}
--	}
--	return g
--}
--
--// SecurityGroups returns details about security groups in EC2.  Both parameters
--// are optional, and if provided will limit the security groups returned to those
--// matching the given groups or filtering rules.
--//
--// See http://goo.gl/k12Uy for more details.
--func (ec2 *EC2) SecurityGroups(groups []SecurityGroup, filter *Filter) (resp *SecurityGroupsResp, err error) {
--	params := makeParams("DescribeSecurityGroups")
--	i, j := 1, 1
--	for _, g := range groups {
--		if g.Id != "" {
--			params["GroupId."+strconv.Itoa(i)] = g.Id
--			i++
--		} else {
--			params["GroupName."+strconv.Itoa(j)] = g.Name
--			j++
--		}
--	}
--	filter.addParams(params)
--
--	resp = &SecurityGroupsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// DeleteSecurityGroup removes the given security group in EC2.
--//
--// See http://goo.gl/QJJDO for more details.
--func (ec2 *EC2) DeleteSecurityGroup(group SecurityGroup) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteSecurityGroup")
--	if group.Id != "" {
--		params["GroupId"] = group.Id
--	} else {
--		params["GroupName"] = group.Name
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// AuthorizeSecurityGroup creates an allowance for clients matching the provided
--// rules to access instances within the given security group.
--//
--// See http://goo.gl/u2sDJ for more details.
--func (ec2 *EC2) AuthorizeSecurityGroup(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) {
--	return ec2.authOrRevoke("AuthorizeSecurityGroupIngress", group, perms)
--}
--
--// AuthorizeSecurityGroupEgress creates an allowance for clients matching the provided
--// rules for egress access.
--//
--// See http://goo.gl/UHnH4L for more details.
--func (ec2 *EC2) AuthorizeSecurityGroupEgress(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) {
--	return ec2.authOrRevoke("AuthorizeSecurityGroupEgress", group, perms)
--}
--
--// RevokeSecurityGroup revokes permissions from a group.
--//
--// See http://goo.gl/ZgdxA for more details.
--func (ec2 *EC2) RevokeSecurityGroup(group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) {
--	return ec2.authOrRevoke("RevokeSecurityGroupIngress", group, perms)
--}
--
--func (ec2 *EC2) authOrRevoke(op string, group SecurityGroup, perms []IPPerm) (resp *SimpleResp, err error) {
--	params := makeParams(op)
--	if group.Id != "" {
--		params["GroupId"] = group.Id
--	} else {
--		params["GroupName"] = group.Name
--	}
--
--	for i, perm := range perms {
--		prefix := "IpPermissions." + strconv.Itoa(i+1)
--		params[prefix+".IpProtocol"] = perm.Protocol
--		params[prefix+".FromPort"] = strconv.Itoa(perm.FromPort)
--		params[prefix+".ToPort"] = strconv.Itoa(perm.ToPort)
--		for j, ip := range perm.SourceIPs {
--			params[prefix+".IpRanges."+strconv.Itoa(j+1)+".CidrIp"] = ip
--		}
--		for j, g := range perm.SourceGroups {
--			subprefix := prefix + ".Groups." + strconv.Itoa(j+1)
--			if g.OwnerId != "" {
--				params[subprefix+".UserId"] = g.OwnerId
--			}
--			if g.Id != "" {
--				params[subprefix+".GroupId"] = g.Id
--			} else {
--				params[subprefix+".GroupName"] = g.Name
--			}
--		}
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
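Putting the security group types and calls above together, a hedged sketch that creates a group and opens SSH from one CIDR block (the group name, description, and CIDR are illustrative):

    func exampleSecurityGroup(client *ec2.EC2) error {
        created, err := client.CreateSecurityGroup(ec2.SecurityGroup{
            Name:        "web",
            Description: "example group",
        })
        if err != nil {
            return err
        }
        // Allow inbound TCP 22 from a single network.
        _, err = client.AuthorizeSecurityGroup(created.SecurityGroup, []ec2.IPPerm{{
            Protocol:  "tcp",
            FromPort:  22,
            ToPort:    22,
            SourceIPs: []string{"203.0.113.0/24"},
        }})
        return err
    }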
--// Tag represents key-value metadata used to classify and organize
--// EC2 instances.
--//
--// See http://goo.gl/bncl3 for more details
--type Tag struct {
--	Key   string `xml:"key"`
--	Value string `xml:"value"`
--}
--
--// CreateTags adds or overwrites one or more tags for the specified taggable resources.
--// For a list of taggable resources, see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html
--//
--// See http://goo.gl/Vmkqc for more details
--func (ec2 *EC2) CreateTags(resourceIds []string, tags []Tag) (resp *SimpleResp, err error) {
--	params := makeParams("CreateTags")
--	addParamsList(params, "ResourceId", resourceIds)
--
--	for j, tag := range tags {
--		params["Tag."+strconv.Itoa(j+1)+".Key"] = tag.Key
--		params["Tag."+strconv.Itoa(j+1)+".Value"] = tag.Value
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--type TagsResp struct {
--	RequestId string        `xml:"requestId"`
--	Tags      []ResourceTag `xml:"tagSet>item"`
--}
--
--type ResourceTag struct {
--	Tag
--	ResourceId   string `xml:"resourceId"`
--	ResourceType string `xml:"resourceType"`
--}
--
--func (ec2 *EC2) Tags(filter *Filter) (*TagsResp, error) {
--	params := makeParams("DescribeTags")
--	filter.addParams(params)
--
--	resp := &TagsResp{}
--	if err := ec2.query(params, resp); err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
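Tagging is a single call over any mix of taggable resource ids, as in this sketch with made-up ids:

    func exampleTags(client *ec2.EC2) error {
        // Apply the same pair of tags to an instance and a volume at once.
        _, err := client.CreateTags(
            []string{"i-0123456789abcdef0", "vol-0123456789abcdef0"},
            []ec2.Tag{{Key: "Name", Value: "web-1"}, {Key: "env", Value: "staging"}},
        )
        return err
    }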
--// Response to a StartInstances request.
--//
--// See http://goo.gl/awKeF for more details.
--type StartInstanceResp struct {
--	RequestId    string                `xml:"requestId"`
--	StateChanges []InstanceStateChange `xml:"instancesSet>item"`
--}
--
--// Response to a StopInstances request.
--//
--// See http://goo.gl/436dJ for more details.
--type StopInstanceResp struct {
--	RequestId    string                `xml:"requestId"`
--	StateChanges []InstanceStateChange `xml:"instancesSet>item"`
--}
--
--// StartInstances starts an Amazon EBS-backed AMI that you've previously stopped.
--//
--// See http://goo.gl/awKeF for more details.
--func (ec2 *EC2) StartInstances(ids ...string) (resp *StartInstanceResp, err error) {
--	params := makeParams("StartInstances")
--	addParamsList(params, "InstanceId", ids)
--	resp = &StartInstanceResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// StopInstances requests stopping one or more Amazon EBS-backed instances.
--//
--// See http://goo.gl/436dJ for more details.
--func (ec2 *EC2) StopInstances(ids ...string) (resp *StopInstanceResp, err error) {
--	params := makeParams("StopInstances")
--	addParamsList(params, "InstanceId", ids)
--	resp = &StopInstanceResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// RebootInstances requests a reboot of one or more instances. This operation is asynchronous;
--// it only queues a request to reboot the specified instance(s). The operation will succeed
--// if the instances are valid and belong to you.
--//
--// Requests to reboot terminated instances are ignored.
--//
--// See http://goo.gl/baoUf for more details.
--func (ec2 *EC2) RebootInstances(ids ...string) (resp *SimpleResp, err error) {
--	params := makeParams("RebootInstances")
--	addParamsList(params, "InstanceId", ids)
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
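The start, stop, and reboot wrappers above are all variadic over instance ids; a brief sketch with a hypothetical id:

    func exampleInstanceState(client *ec2.EC2) error {
        // Stop an EBS-backed instance, then start it again later.
        if _, err := client.StopInstances("i-0123456789abcdef0"); err != nil {
            return err
        }
        _, err := client.StartInstances("i-0123456789abcdef0")
        return err
    }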
--// The ModifyInstanceAttribute request parameters.
--type ModifyInstance struct {
--	InstanceType          string
--	BlockDevices          []BlockDeviceMapping
--	DisableAPITermination bool
--	EbsOptimized          bool
--	SecurityGroups        []SecurityGroup
--	ShutdownBehavior      string
--	KernelId              string
--	RamdiskId             string
--	SourceDestCheck       bool
--	SriovNetSupport       bool
--	UserData              []byte
--
--	SetSourceDestCheck bool
--}
--
--// Response to a ModifyInstanceAttribute request.
--//
--// http://goo.gl/icuXh5 for more details.
--type ModifyInstanceResp struct {
--	RequestId string `xml:"requestId"`
--	Return    bool   `xml:"return"`
--}
--
--// ModifyInstance modifies the specified attribute of the specified instance.
--// You can specify only one attribute at a time. To modify some attributes, the
--// instance must be stopped.
--//
--// See http://goo.gl/icuXh5 for more details.
--func (ec2 *EC2) ModifyInstance(instId string, options *ModifyInstance) (resp *ModifyInstanceResp, err error) {
--	params := makeParams("ModifyInstanceAttribute")
--	params["InstanceId"] = instId
--	addBlockDeviceParams("", params, options.BlockDevices)
--
--	if options.InstanceType != "" {
--		params["InstanceType.Value"] = options.InstanceType
--	}
--
--	if options.DisableAPITermination {
--		params["DisableApiTermination.Value"] = "true"
--	}
--
--	if options.EbsOptimized {
--		params["EbsOptimized"] = "true"
--	}
--
--	if options.ShutdownBehavior != "" {
--		params["InstanceInitiatedShutdownBehavior.Value"] = options.ShutdownBehavior
--	}
--
--	if options.KernelId != "" {
--		params["Kernel.Value"] = options.KernelId
--	}
--
--	if options.RamdiskId != "" {
--		params["Ramdisk.Value"] = options.RamdiskId
--	}
--
--	if options.SourceDestCheck || options.SetSourceDestCheck {
--		if options.SourceDestCheck {
--			params["SourceDestCheck.Value"] = "true"
--		} else {
--			params["SourceDestCheck.Value"] = "false"
--		}
--	}
--
--	if options.SriovNetSupport {
--		params["SriovNetSupport.Value"] = "simple"
--	}
--
--	if options.UserData != nil {
--		userData := make([]byte, b64.EncodedLen(len(options.UserData)))
--		b64.Encode(userData, options.UserData)
--		params["UserData"] = string(userData)
--	}
--
--	i := 1
--	for _, g := range options.SecurityGroups {
--		if g.Id != "" {
--			params["GroupId."+strconv.Itoa(i)] = g.Id
--			i++
--		}
--	}
--
--	resp = &ModifyInstanceResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		resp = nil
--	}
--	return
--}
--
--// ----------------------------------------------------------------------------
--// VPC management functions and types.
--
--// The CreateVpc request parameters
--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpc.html
--type CreateVpc struct {
--	CidrBlock       string
--	InstanceTenancy string
--}
--
--// Response to a CreateVpc request
--type CreateVpcResp struct {
--	RequestId string `xml:"requestId"`
--	VPC       VPC    `xml:"vpc"`
--}
--
--// The ModifyVpcAttribute request parameters.
--//
--// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details.
--type ModifyVpcAttribute struct {
--	EnableDnsSupport   bool
--	EnableDnsHostnames bool
--
--	SetEnableDnsSupport   bool
--	SetEnableDnsHostnames bool
--}
--
--// Response to a DescribeVpcAttribute request.
--//
--// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details.
--type VpcAttributeResp struct {
--	RequestId          string `xml:"requestId"`
--	VpcId              string `xml:"vpcId"`
--	EnableDnsSupport   bool   `xml:"enableDnsSupport>value"`
--	EnableDnsHostnames bool   `xml:"enableDnsHostnames>value"`
--}
--
--// CreateInternetGateway request parameters.
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateInternetGateway.html
--type CreateInternetGateway struct{}
--
--// CreateInternetGateway response
--type CreateInternetGatewayResp struct {
--	RequestId       string          `xml:"requestId"`
--	InternetGateway InternetGateway `xml:"internetGateway"`
--}
--
--// The CreateRouteTable request parameters.
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRouteTable.html
--type CreateRouteTable struct {
--	VpcId string
--}
--
--// Response to a CreateRouteTable request.
--type CreateRouteTableResp struct {
--	RequestId  string     `xml:"requestId"`
--	RouteTable RouteTable `xml:"routeTable"`
--}
--
--// CreateRoute request parameters
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRoute.html
--type CreateRoute struct {
--	RouteTableId           string
--	DestinationCidrBlock   string
--	GatewayId              string
--	InstanceId             string
--	NetworkInterfaceId     string
--	VpcPeeringConnectionId string
--}
--type ReplaceRoute struct {
--	RouteTableId           string
--	DestinationCidrBlock   string
--	GatewayId              string
--	InstanceId             string
--	NetworkInterfaceId     string
--	VpcPeeringConnectionId string
--}
--
--type AssociateRouteTableResp struct {
--	RequestId     string `xml:"requestId"`
--	AssociationId string `xml:"associationId"`
--}
--type ReassociateRouteTableResp struct {
--	RequestId     string `xml:"requestId"`
--	AssociationId string `xml:"newAssociationId"`
--}
--
--// The CreateSubnet request parameters
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateSubnet.html
--type CreateSubnet struct {
--	VpcId            string
--	CidrBlock        string
--	AvailabilityZone string
--}
--
--// Response to a CreateSubnet request
--type CreateSubnetResp struct {
--	RequestId string `xml:"requestId"`
--	Subnet    Subnet `xml:"subnet"`
--}
--
--// Response to a DescribeInternetGateways request.
--type InternetGatewaysResp struct {
--	RequestId        string            `xml:"requestId"`
--	InternetGateways []InternetGateway `xml:"internetGatewaySet>item"`
--}
--
--// Response to a DescribeRouteTables request.
--type RouteTablesResp struct {
--	RequestId   string       `xml:"requestId"`
--	RouteTables []RouteTable `xml:"routeTableSet>item"`
--}
--
--// Response to a DescribeVpcs request.
--type VpcsResp struct {
--	RequestId string `xml:"requestId"`
--	VPCs      []VPC  `xml:"vpcSet>item"`
--}
--
--// Internet Gateway
--type InternetGateway struct {
--	InternetGatewayId string                      `xml:"internetGatewayId"`
--	Attachments       []InternetGatewayAttachment `xml:"attachmentSet>item"`
--	Tags              []Tag                       `xml:"tagSet>item"`
--}
--
--type InternetGatewayAttachment struct {
--	VpcId string `xml:"vpcId"`
--	State string `xml:"state"`
--}
--
--// Routing Table
--type RouteTable struct {
--	RouteTableId string                  `xml:"routeTableId"`
--	VpcId        string                  `xml:"vpcId"`
--	Associations []RouteTableAssociation `xml:"associationSet>item"`
--	Routes       []Route                 `xml:"routeSet>item"`
--	Tags         []Tag                   `xml:"tagSet>item"`
--}
--
--type RouteTableAssociation struct {
--	AssociationId string `xml:"routeTableAssociationId"`
--	RouteTableId  string `xml:"routeTableId"`
--	SubnetId      string `xml:"subnetId"`
--	Main          bool   `xml:"main"`
--}
--
--type Route struct {
--	DestinationCidrBlock   string `xml:"destinationCidrBlock"`
--	GatewayId              string `xml:"gatewayId"`
--	InstanceId             string `xml:"instanceId"`
--	InstanceOwnerId        string `xml:"instanceOwnerId"`
--	NetworkInterfaceId     string `xml:"networkInterfaceId"`
--	State                  string `xml:"state"`
--	Origin                 string `xml:"origin"`
--	VpcPeeringConnectionId string `xml:"vpcPeeringConnectionId"`
--}
--
--// Subnet
--type Subnet struct {
--	SubnetId                string `xml:"subnetId"`
--	State                   string `xml:"state"`
--	VpcId                   string `xml:"vpcId"`
--	CidrBlock               string `xml:"cidrBlock"`
--	AvailableIpAddressCount int    `xml:"availableIpAddressCount"`
--	AvailabilityZone        string `xml:"availabilityZone"`
--	DefaultForAZ            bool   `xml:"defaultForAz"`
--	MapPublicIpOnLaunch     bool   `xml:"mapPublicIpOnLaunch"`
--	Tags                    []Tag  `xml:"tagSet>item"`
--}
--
--// VPC represents a single VPC.
--type VPC struct {
--	VpcId           string `xml:"vpcId"`
--	State           string `xml:"state"`
--	CidrBlock       string `xml:"cidrBlock"`
--	DHCPOptionsID   string `xml:"dhcpOptionsId"`
--	InstanceTenancy string `xml:"instanceTenancy"`
--	IsDefault       bool   `xml:"isDefault"`
--	Tags            []Tag  `xml:"tagSet>item"`
--}
--
--// Response to a DescribeSubnets request.
--type SubnetsResp struct {
--	RequestId string   `xml:"requestId"`
--	Subnets   []Subnet `xml:"subnetSet>item"`
--}
--
--// Create a new VPC.
--func (ec2 *EC2) CreateVpc(options *CreateVpc) (resp *CreateVpcResp, err error) {
--	params := makeParams("CreateVpc")
--	params["CidrBlock"] = options.CidrBlock
--
--	if options.InstanceTenancy != "" {
--		params["InstanceTenancy"] = options.InstanceTenancy
--	}
--
--	resp = &CreateVpcResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Delete a VPC.
--func (ec2 *EC2) DeleteVpc(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteVpc")
--	params["VpcId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// DescribeVpcs
--//
--// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVpcs.html
--func (ec2 *EC2) DescribeVpcs(ids []string, filter *Filter) (resp *VpcsResp, err error) {
--	params := makeParams("DescribeVpcs")
--	addParamsList(params, "VpcId", ids)
--	filter.addParams(params)
--	resp = &VpcsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// VpcAttribute describes an attribute of a VPC.
--// You can specify only one attribute at a time.
--// Valid attributes are:
--//    enableDnsSupport | enableDnsHostnames
--//
--// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeVpcAttribute.html for more details.
--func (ec2 *EC2) VpcAttribute(vpcId, attribute string) (resp *VpcAttributeResp, err error) {
--	params := makeParams("DescribeVpcAttribute")
--	params["VpcId"] = vpcId
--	params["Attribute"] = attribute
--
--	resp = &VpcAttributeResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// ModifyVpcAttribute modifies the specified attribute of the specified VPC.
--//
--// See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-ModifyVpcAttribute.html for more details.
--func (ec2 *EC2) ModifyVpcAttribute(vpcId string, options *ModifyVpcAttribute) (*SimpleResp, error) {
--	params := makeParams("ModifyVpcAttribute")
--
--	params["VpcId"] = vpcId
--
--	if options.SetEnableDnsSupport {
--		params["EnableDnsSupport.Value"] = strconv.FormatBool(options.EnableDnsSupport)
--	}
--
--	if options.SetEnableDnsHostnames {
--		params["EnableDnsHostnames.Value"] = strconv.FormatBool(options.EnableDnsHostnames)
--	}
--
--	resp := &SimpleResp{}
--	if err := ec2.query(params, resp); err != nil {
--		return nil, err
--	}
--
--	return resp, nil
--}
--
--// Create a new subnet.
--func (ec2 *EC2) CreateSubnet(options *CreateSubnet) (resp *CreateSubnetResp, err error) {
--	params := makeParams("CreateSubnet")
--	params["AvailabilityZone"] = options.AvailabilityZone
--	params["CidrBlock"] = options.CidrBlock
--	params["VpcId"] = options.VpcId
--
--	resp = &CreateSubnetResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Delete a Subnet.
--func (ec2 *EC2) DeleteSubnet(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteSubnet")
--	params["SubnetId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// DescribeSubnets
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSubnets.html
--func (ec2 *EC2) DescribeSubnets(ids []string, filter *Filter) (resp *SubnetsResp, err error) {
--	params := makeParams("DescribeSubnets")
--	addParamsList(params, "SubnetId", ids)
--	filter.addParams(params)
--
--	resp = &SubnetsResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Create a new internet gateway.
--func (ec2 *EC2) CreateInternetGateway(
--	options *CreateInternetGateway) (resp *CreateInternetGatewayResp, err error) {
--	params := makeParams("CreateInternetGateway")
--
--	resp = &CreateInternetGatewayResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Attach an InternetGateway.
--func (ec2 *EC2) AttachInternetGateway(id, vpcId string) (resp *SimpleResp, err error) {
--	params := makeParams("AttachInternetGateway")
--	params["InternetGatewayId"] = id
--	params["VpcId"] = vpcId
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Detach an InternetGateway.
--func (ec2 *EC2) DetachInternetGateway(id, vpcId string) (resp *SimpleResp, err error) {
--	params := makeParams("DetachInternetGateway")
--	params["InternetGatewayId"] = id
--	params["VpcId"] = vpcId
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Delete an InternetGateway.
--func (ec2 *EC2) DeleteInternetGateway(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteInternetGateway")
--	params["InternetGatewayId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// DescribeInternetGateways
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInternetGateways.html
--func (ec2 *EC2) DescribeInternetGateways(ids []string, filter *Filter) (resp *InternetGatewaysResp, err error) {
--	params := makeParams("DescribeInternetGateways")
--	addParamsList(params, "InternetGatewayId", ids)
--	filter.addParams(params)
--
--	resp = &InternetGatewaysResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Create a new routing table.
--func (ec2 *EC2) CreateRouteTable(
--	options *CreateRouteTable) (resp *CreateRouteTableResp, err error) {
--	params := makeParams("CreateRouteTable")
--	params["VpcId"] = options.VpcId
--
--	resp = &CreateRouteTableResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Delete a RouteTable.
--func (ec2 *EC2) DeleteRouteTable(id string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteRouteTable")
--	params["RouteTableId"] = id
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// DescribeRouteTables
--//
--// http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRouteTables.html
--func (ec2 *EC2) DescribeRouteTables(ids []string, filter *Filter) (resp *RouteTablesResp, err error) {
--	params := makeParams("DescribeRouteTables")
--	addParamsList(params, "RouteTableId", ids)
--	filter.addParams(params)
--
--	resp = &RouteTablesResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return
--}
--
--// Associate a routing table.
--func (ec2 *EC2) AssociateRouteTable(id, subnetId string) (*AssociateRouteTableResp, error) {
--	params := makeParams("AssociateRouteTable")
--	params["RouteTableId"] = id
--	params["SubnetId"] = subnetId
--
--	resp := &AssociateRouteTableResp{}
--	err := ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// Disassociate a routing table.
--func (ec2 *EC2) DisassociateRouteTable(id string) (*SimpleResp, error) {
--	params := makeParams("DisassociateRouteTable")
--	params["AssociationId"] = id
--
--	resp := &SimpleResp{}
--	err := ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// Re-associate a routing table.
--func (ec2 *EC2) ReassociateRouteTable(id, routeTableId string) (*ReassociateRouteTableResp, error) {
--	params := makeParams("ReplaceRouteTableAssociation")
--	params["AssociationId"] = id
--	params["RouteTableId"] = routeTableId
--
--	resp := &ReassociateRouteTableResp{}
--	err := ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return resp, nil
--}
--
--// Create a new route.
--func (ec2 *EC2) CreateRoute(options *CreateRoute) (resp *SimpleResp, err error) {
--	params := makeParams("CreateRoute")
--	params["RouteTableId"] = options.RouteTableId
--	params["DestinationCidrBlock"] = options.DestinationCidrBlock
--
--	if v := options.GatewayId; v != "" {
--		params["GatewayId"] = v
--	}
--	if v := options.InstanceId; v != "" {
--		params["InstanceId"] = v
--	}
--	if v := options.NetworkInterfaceId; v != "" {
--		params["NetworkInterfaceId"] = v
--	}
--	if v := options.VpcPeeringConnectionId; v != "" {
--		params["VpcPeeringConnectionId"] = v
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Delete a Route.
--func (ec2 *EC2) DeleteRoute(routeTableId, cidr string) (resp *SimpleResp, err error) {
--	params := makeParams("DeleteRoute")
--	params["RouteTableId"] = routeTableId
--	params["DestinationCidrBlock"] = cidr
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
--// Replace an existing route.
--func (ec2 *EC2) ReplaceRoute(options *ReplaceRoute) (resp *SimpleResp, err error) {
--	params := makeParams("ReplaceRoute")
--	params["RouteTableId"] = options.RouteTableId
--	params["DestinationCidrBlock"] = options.DestinationCidrBlock
--
--	if v := options.GatewayId; v != "" {
--		params["GatewayId"] = v
--	}
--	if v := options.InstanceId; v != "" {
--		params["InstanceId"] = v
--	}
--	if v := options.NetworkInterfaceId; v != "" {
--		params["NetworkInterfaceId"] = v
--	}
--	if v := options.VpcPeeringConnectionId; v != "" {
--		params["VpcPeeringConnectionId"] = v
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
--
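The VPC helpers above chain together to build a routable network; a sketch under the assumption of a pre-built client, with illustrative CIDR blocks and availability zone:

    func exampleVpcNetwork(client *ec2.EC2) error {
        // VPC, subnet, internet gateway, route table, and a default route.
        vpc, err := client.CreateVpc(&ec2.CreateVpc{CidrBlock: "10.0.0.0/16"})
        if err != nil {
            return err
        }
        subnet, err := client.CreateSubnet(&ec2.CreateSubnet{
            VpcId:            vpc.VPC.VpcId,
            CidrBlock:        "10.0.1.0/24",
            AvailabilityZone: "us-east-1a",
        })
        if err != nil {
            return err
        }
        gateway, err := client.CreateInternetGateway(&ec2.CreateInternetGateway{})
        if err != nil {
            return err
        }
        if _, err := client.AttachInternetGateway(gateway.InternetGateway.InternetGatewayId, vpc.VPC.VpcId); err != nil {
            return err
        }
        table, err := client.CreateRouteTable(&ec2.CreateRouteTable{VpcId: vpc.VPC.VpcId})
        if err != nil {
            return err
        }
        if _, err := client.AssociateRouteTable(table.RouteTable.RouteTableId, subnet.Subnet.SubnetId); err != nil {
            return err
        }
        _, err = client.CreateRoute(&ec2.CreateRoute{
            RouteTableId:         table.RouteTable.RouteTableId,
            DestinationCidrBlock: "0.0.0.0/0",
            GatewayId:            gateway.InternetGateway.InternetGatewayId,
        })
        return err
    }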
--// The ResetImageAttribute request parameters.
--type ResetImageAttribute struct {
--	Attribute string
--}
--
--// ResetImageAttribute resets an attribute of an AMI to its default value.
--//
--// http://goo.gl/r6ZCPm for more details.
--func (ec2 *EC2) ResetImageAttribute(imageId string, options *ResetImageAttribute) (resp *SimpleResp, err error) {
--	params := makeParams("ResetImageAttribute")
--	params["ImageId"] = imageId
--
--	if options.Attribute != "" {
--		params["Attribute"] = options.Attribute
--	}
--
--	resp = &SimpleResp{}
--	err = ec2.query(params, resp)
--	if err != nil {
--		return nil, err
--	}
--	return
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2_test.go
-deleted file mode 100644
-index 849bfe2..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2_test.go
-+++ /dev/null
-@@ -1,1243 +0,0 @@
--package ec2_test
--
--import (
--	"testing"
--
--	"github.com/mitchellh/goamz/aws"
--	"github.com/mitchellh/goamz/ec2"
--	"github.com/mitchellh/goamz/testutil"
--	. "github.com/motain/gocheck"
--)
--
--func Test(t *testing.T) {
--	TestingT(t)
--}
--
--var _ = Suite(&S{})
--
--type S struct {
--	ec2 *ec2.EC2
--}
--
--var testServer = testutil.NewHTTPServer()
--
--func (s *S) SetUpSuite(c *C) {
--	testServer.Start()
--	auth := aws.Auth{"abc", "123", ""}
--	s.ec2 = ec2.NewWithClient(
--		auth,
--		aws.Region{EC2Endpoint: testServer.URL},
--		testutil.DefaultClient,
--	)
--}
--
--func (s *S) TearDownTest(c *C) {
--	testServer.Flush()
--}
--
--func (s *S) TestRunInstancesErrorDump(c *C) {
--	testServer.Response(400, nil, ErrorDump)
--
--	options := ec2.RunInstances{
--		ImageId:      "ami-a6f504cf", // Ubuntu Maverick, i386, instance store
--		InstanceType: "t1.micro",     // Doesn't work with micro, results in 400.
--	}
--
--	msg := `AMIs with an instance-store root device are not supported for the instance type 't1\.micro'\.`
--
--	resp, err := s.ec2.RunInstances(&options)
--
--	testServer.WaitRequest()
--
--	c.Assert(resp, IsNil)
--	c.Assert(err, ErrorMatches, msg+` \(UnsupportedOperation\)`)
--
--	ec2err, ok := err.(*ec2.Error)
--	c.Assert(ok, Equals, true)
--	c.Assert(ec2err.StatusCode, Equals, 400)
--	c.Assert(ec2err.Code, Equals, "UnsupportedOperation")
--	c.Assert(ec2err.Message, Matches, msg)
--	c.Assert(ec2err.RequestId, Equals, "0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4")
--}
--
--func (s *S) TestRequestSpotInstancesErrorDump(c *C) {
--	testServer.Response(400, nil, ErrorDump)
--
--	options := ec2.RequestSpotInstances{
--		SpotPrice:    "0.01",
--		ImageId:      "ami-a6f504cf", // Ubuntu Maverick, i386, instance store
--		InstanceType: "t1.micro",     // Doesn't work with micro, results in 400.
--	}
--
--	msg := `AMIs with an instance-store root device are not supported for the instance type 't1\.micro'\.`
--
--	resp, err := s.ec2.RequestSpotInstances(&options)
--
--	testServer.WaitRequest()
--
--	c.Assert(resp, IsNil)
--	c.Assert(err, ErrorMatches, msg+` \(UnsupportedOperation\)`)
--
--	ec2err, ok := err.(*ec2.Error)
--	c.Assert(ok, Equals, true)
--	c.Assert(ec2err.StatusCode, Equals, 400)
--	c.Assert(ec2err.Code, Equals, "UnsupportedOperation")
--	c.Assert(ec2err.Message, Matches, msg)
--	c.Assert(ec2err.RequestId, Equals, "0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4")
--}
--
--func (s *S) TestRunInstancesErrorWithoutXML(c *C) {
--	testServer.Responses(5, 500, nil, "")
--	options := ec2.RunInstances{ImageId: "image-id"}
--
--	resp, err := s.ec2.RunInstances(&options)
--
--	testServer.WaitRequest()
--
--	c.Assert(resp, IsNil)
--	c.Assert(err, ErrorMatches, "500 Internal Server Error")
--
--	ec2err, ok := err.(*ec2.Error)
--	c.Assert(ok, Equals, true)
--	c.Assert(ec2err.StatusCode, Equals, 500)
--	c.Assert(ec2err.Code, Equals, "")
--	c.Assert(ec2err.Message, Equals, "500 Internal Server Error")
--	c.Assert(ec2err.RequestId, Equals, "")
--}
--
--func (s *S) TestRequestSpotInstancesErrorWithoutXML(c *C) {
--	testServer.Responses(5, 500, nil, "")
--	options := ec2.RequestSpotInstances{SpotPrice: "spot-price", ImageId: "image-id"}
--
--	resp, err := s.ec2.RequestSpotInstances(&options)
--
--	testServer.WaitRequest()
--
--	c.Assert(resp, IsNil)
--	c.Assert(err, ErrorMatches, "500 Internal Server Error")
--
--	ec2err, ok := err.(*ec2.Error)
--	c.Assert(ok, Equals, true)
--	c.Assert(ec2err.StatusCode, Equals, 500)
--	c.Assert(ec2err.Code, Equals, "")
--	c.Assert(ec2err.Message, Equals, "500 Internal Server Error")
--	c.Assert(ec2err.RequestId, Equals, "")
--}
--
--func (s *S) TestRunInstancesExample(c *C) {
--	testServer.Response(200, nil, RunInstancesExample)
--
--	options := ec2.RunInstances{
--		KeyName:               "my-keys",
--		ImageId:               "image-id",
--		InstanceType:          "inst-type",
--		SecurityGroups:        []ec2.SecurityGroup{{Name: "g1"}, {Id: "g2"}, {Name: "g3"}, {Id: "g4"}},
--		UserData:              []byte("1234"),
--		KernelId:              "kernel-id",
--		RamdiskId:             "ramdisk-id",
--		AvailZone:             "zone",
--		PlacementGroupName:    "group",
--		Monitoring:            true,
--		SubnetId:              "subnet-id",
--		DisableAPITermination: true,
--		ShutdownBehavior:      "terminate",
--		PrivateIPAddress:      "10.0.0.25",
--		BlockDevices: []ec2.BlockDeviceMapping{
--			{DeviceName: "/dev/sdb", VirtualName: "ephemeral0"},
--			{DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true},
--		},
--	}
--	resp, err := s.ec2.RunInstances(&options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"RunInstances"})
--	c.Assert(req.Form["ImageId"], DeepEquals, []string{"image-id"})
--	c.Assert(req.Form["MinCount"], DeepEquals, []string{"1"})
--	c.Assert(req.Form["MaxCount"], DeepEquals, []string{"1"})
--	c.Assert(req.Form["KeyName"], DeepEquals, []string{"my-keys"})
--	c.Assert(req.Form["InstanceType"], DeepEquals, []string{"inst-type"})
--	c.Assert(req.Form["SecurityGroup.1"], DeepEquals, []string{"g1"})
--	c.Assert(req.Form["SecurityGroup.2"], DeepEquals, []string{"g3"})
--	c.Assert(req.Form["SecurityGroupId.1"], DeepEquals, []string{"g2"})
--	c.Assert(req.Form["SecurityGroupId.2"], DeepEquals, []string{"g4"})
--	c.Assert(req.Form["UserData"], DeepEquals, []string{"MTIzNA=="})
--	c.Assert(req.Form["KernelId"], DeepEquals, []string{"kernel-id"})
--	c.Assert(req.Form["RamdiskId"], DeepEquals, []string{"ramdisk-id"})
--	c.Assert(req.Form["Placement.AvailabilityZone"], DeepEquals, []string{"zone"})
--	c.Assert(req.Form["Placement.GroupName"], DeepEquals, []string{"group"})
--	c.Assert(req.Form["Monitoring.Enabled"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["SubnetId"], DeepEquals, []string{"subnet-id"})
--	c.Assert(req.Form["DisableApiTermination"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["InstanceInitiatedShutdownBehavior"], DeepEquals, []string{"terminate"})
--	c.Assert(req.Form["PrivateIpAddress"], DeepEquals, []string{"10.0.0.25"})
--	c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"})
--	c.Assert(req.Form["BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"})
--	c.Assert(req.Form["BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"})
--	c.Assert(req.Form["BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.ReservationId, Equals, "r-47a5402e")
--	c.Assert(resp.OwnerId, Equals, "999988887777")
--	c.Assert(resp.SecurityGroups, DeepEquals, []ec2.SecurityGroup{{Name: "default", Id: "sg-67ad940e"}})
--	c.Assert(resp.Instances, HasLen, 3)
--
--	i0 := resp.Instances[0]
--	c.Assert(i0.InstanceId, Equals, "i-2ba64342")
--	c.Assert(i0.InstanceType, Equals, "m1.small")
--	c.Assert(i0.ImageId, Equals, "ami-60a54009")
--	c.Assert(i0.Monitoring, Equals, "enabled")
--	c.Assert(i0.KeyName, Equals, "example-key-name")
--	c.Assert(i0.AMILaunchIndex, Equals, 0)
--	c.Assert(i0.VirtType, Equals, "paravirtual")
--	c.Assert(i0.Hypervisor, Equals, "xen")
--
--	i1 := resp.Instances[1]
--	c.Assert(i1.InstanceId, Equals, "i-2bc64242")
--	c.Assert(i1.InstanceType, Equals, "m1.small")
--	c.Assert(i1.ImageId, Equals, "ami-60a54009")
--	c.Assert(i1.Monitoring, Equals, "enabled")
--	c.Assert(i1.KeyName, Equals, "example-key-name")
--	c.Assert(i1.AMILaunchIndex, Equals, 1)
--	c.Assert(i1.VirtType, Equals, "paravirtual")
--	c.Assert(i1.Hypervisor, Equals, "xen")
--
--	i2 := resp.Instances[2]
--	c.Assert(i2.InstanceId, Equals, "i-2be64332")
--	c.Assert(i2.InstanceType, Equals, "m1.small")
--	c.Assert(i2.ImageId, Equals, "ami-60a54009")
--	c.Assert(i2.Monitoring, Equals, "enabled")
--	c.Assert(i2.KeyName, Equals, "example-key-name")
--	c.Assert(i2.AMILaunchIndex, Equals, 2)
--	c.Assert(i2.VirtType, Equals, "paravirtual")
--	c.Assert(i2.Hypervisor, Equals, "xen")
--}
--
--func (s *S) TestRequestSpotInstancesExample(c *C) {
--	testServer.Response(200, nil, RequestSpotInstancesExample)
--
--	options := ec2.RequestSpotInstances{
--		SpotPrice:          "0.5",
--		KeyName:            "my-keys",
--		ImageId:            "image-id",
--		InstanceType:       "inst-type",
--		SecurityGroups:     []ec2.SecurityGroup{{Name: "g1"}, {Id: "g2"}, {Name: "g3"}, {Id: "g4"}},
--		UserData:           []byte("1234"),
--		KernelId:           "kernel-id",
--		RamdiskId:          "ramdisk-id",
--		AvailZone:          "zone",
--		PlacementGroupName: "group",
--		Monitoring:         true,
--		SubnetId:           "subnet-id",
--		PrivateIPAddress:   "10.0.0.25",
--		BlockDevices: []ec2.BlockDeviceMapping{
--			{DeviceName: "/dev/sdb", VirtualName: "ephemeral0"},
--			{DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true},
--		},
--	}
--	resp, err := s.ec2.RequestSpotInstances(&options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"RequestSpotInstances"})
--	c.Assert(req.Form["SpotPrice"], DeepEquals, []string{"0.5"})
--	c.Assert(req.Form["LaunchSpecification.ImageId"], DeepEquals, []string{"image-id"})
--	c.Assert(req.Form["LaunchSpecification.KeyName"], DeepEquals, []string{"my-keys"})
--	c.Assert(req.Form["LaunchSpecification.InstanceType"], DeepEquals, []string{"inst-type"})
--	c.Assert(req.Form["LaunchSpecification.SecurityGroup.1"], DeepEquals, []string{"g1"})
--	c.Assert(req.Form["LaunchSpecification.SecurityGroup.2"], DeepEquals, []string{"g3"})
--	c.Assert(req.Form["LaunchSpecification.SecurityGroupId.1"], DeepEquals, []string{"g2"})
--	c.Assert(req.Form["LaunchSpecification.SecurityGroupId.2"], DeepEquals, []string{"g4"})
--	c.Assert(req.Form["LaunchSpecification.UserData"], DeepEquals, []string{"MTIzNA=="})
--	c.Assert(req.Form["LaunchSpecification.KernelId"], DeepEquals, []string{"kernel-id"})
--	c.Assert(req.Form["LaunchSpecification.RamdiskId"], DeepEquals, []string{"ramdisk-id"})
--	c.Assert(req.Form["LaunchSpecification.Placement.AvailabilityZone"], DeepEquals, []string{"zone"})
--	c.Assert(req.Form["LaunchSpecification.Placement.GroupName"], DeepEquals, []string{"group"})
--	c.Assert(req.Form["LaunchSpecification.Monitoring.Enabled"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["LaunchSpecification.SubnetId"], DeepEquals, []string{"subnet-id"})
--	c.Assert(req.Form["LaunchSpecification.PrivateIpAddress"], DeepEquals, []string{"10.0.0.25"})
--	c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"})
--	c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"})
--	c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"})
--	c.Assert(req.Form["LaunchSpecification.BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.SpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d")
--	c.Assert(resp.SpotRequestResults[0].SpotPrice, Equals, "0.5")
--	c.Assert(resp.SpotRequestResults[0].State, Equals, "open")
--	c.Assert(resp.SpotRequestResults[0].SpotLaunchSpec.ImageId, Equals, "ami-1a2b3c4d")
--	c.Assert(resp.SpotRequestResults[0].Status.Code, Equals, "pending-evaluation")
--	c.Assert(resp.SpotRequestResults[0].Status.UpdateTime, Equals, "2008-05-07T12:51:50.000Z")
--	c.Assert(resp.SpotRequestResults[0].Status.Message, Equals, "Your Spot request has been submitted for review, and is pending evaluation.")
--}
--
--func (s *S) TestCancelSpotRequestsExample(c *C) {
--	testServer.Response(200, nil, CancelSpotRequestsExample)
--
--	resp, err := s.ec2.CancelSpotRequests([]string{"s-1", "s-2"})
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CancelSpotInstanceRequests"})
--	c.Assert(req.Form["SpotInstanceRequestId.1"], DeepEquals, []string{"s-1"})
--	c.Assert(req.Form["SpotInstanceRequestId.2"], DeepEquals, []string{"s-2"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.CancelSpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d")
--	c.Assert(resp.CancelSpotRequestResults[0].State, Equals, "cancelled")
--}
--
--func (s *S) TestTerminateInstancesExample(c *C) {
--	testServer.Response(200, nil, TerminateInstancesExample)
--
--	resp, err := s.ec2.TerminateInstances([]string{"i-1", "i-2"})
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"TerminateInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"})
--	c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"})
--	c.Assert(req.Form["UserData"], IsNil)
--	c.Assert(req.Form["KernelId"], IsNil)
--	c.Assert(req.Form["RamdiskId"], IsNil)
--	c.Assert(req.Form["Placement.AvailabilityZone"], IsNil)
--	c.Assert(req.Form["Placement.GroupName"], IsNil)
--	c.Assert(req.Form["Monitoring.Enabled"], IsNil)
--	c.Assert(req.Form["SubnetId"], IsNil)
--	c.Assert(req.Form["DisableApiTermination"], IsNil)
--	c.Assert(req.Form["InstanceInitiatedShutdownBehavior"], IsNil)
--	c.Assert(req.Form["PrivateIpAddress"], IsNil)
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.StateChanges, HasLen, 1)
--	c.Assert(resp.StateChanges[0].InstanceId, Equals, "i-3ea74257")
--	c.Assert(resp.StateChanges[0].CurrentState.Code, Equals, 32)
--	c.Assert(resp.StateChanges[0].CurrentState.Name, Equals, "shutting-down")
--	c.Assert(resp.StateChanges[0].PreviousState.Code, Equals, 16)
--	c.Assert(resp.StateChanges[0].PreviousState.Name, Equals, "running")
--}
--
--func (s *S) TestDescribeSpotRequestsExample(c *C) {
--	testServer.Response(200, nil, DescribeSpotRequestsExample)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.DescribeSpotRequests([]string{"s-1", "s-2"}, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSpotInstanceRequests"})
--	c.Assert(req.Form["SpotInstanceRequestId.1"], DeepEquals, []string{"s-1"})
--	c.Assert(req.Form["SpotInstanceRequestId.2"], DeepEquals, []string{"s-2"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "b1719f2a-5334-4479-b2f1-26926EXAMPLE")
--	c.Assert(resp.SpotRequestResults[0].SpotRequestId, Equals, "sir-1a2b3c4d")
--	c.Assert(resp.SpotRequestResults[0].State, Equals, "active")
--	c.Assert(resp.SpotRequestResults[0].SpotPrice, Equals, "0.5")
--	c.Assert(resp.SpotRequestResults[0].SpotLaunchSpec.ImageId, Equals, "ami-1a2b3c4d")
--	c.Assert(resp.SpotRequestResults[0].Status.Code, Equals, "fulfilled")
--	c.Assert(resp.SpotRequestResults[0].Status.UpdateTime, Equals, "2008-05-07T12:51:50.000Z")
--	c.Assert(resp.SpotRequestResults[0].Status.Message, Equals, "Your Spot request is fulfilled.")
--}
--
--func (s *S) TestDescribeInstancesExample1(c *C) {
--	testServer.Response(200, nil, DescribeInstancesExample1)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.Instances([]string{"i-1", "i-2"}, nil)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"})
--	c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "98e3c9a4-848c-4d6d-8e8a-b1bdEXAMPLE")
--	c.Assert(resp.Reservations, HasLen, 2)
--
--	r0 := resp.Reservations[0]
--	c.Assert(r0.ReservationId, Equals, "r-b27e30d9")
--	c.Assert(r0.OwnerId, Equals, "999988887777")
--	c.Assert(r0.RequesterId, Equals, "854251627541")
--	c.Assert(r0.SecurityGroups, DeepEquals, []ec2.SecurityGroup{{Name: "default", Id: "sg-67ad940e"}})
--	c.Assert(r0.Instances, HasLen, 1)
--
--	r0i := r0.Instances[0]
--	c.Assert(r0i.InstanceId, Equals, "i-c5cd56af")
--	c.Assert(r0i.PrivateDNSName, Equals, "domU-12-31-39-10-56-34.compute-1.internal")
--	c.Assert(r0i.DNSName, Equals, "ec2-174-129-165-232.compute-1.amazonaws.com")
--	c.Assert(r0i.AvailZone, Equals, "us-east-1b")
--}
--
--func (s *S) TestDescribeInstancesExample2(c *C) {
--	testServer.Response(200, nil, DescribeInstancesExample2)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.Instances([]string{"i-1", "i-2"}, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-1"})
--	c.Assert(req.Form["InstanceId.2"], DeepEquals, []string{"i-2"})
--	c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"})
--	c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"})
--	c.Assert(req.Form["Filter.1.Value.2"], IsNil)
--	c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"})
--	c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"})
--	c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.Reservations, HasLen, 1)
--
--	r0 := resp.Reservations[0]
--	r0i := r0.Instances[0]
--	c.Assert(r0i.State.Code, Equals, 16)
--	c.Assert(r0i.State.Name, Equals, "running")
--
--	r0t0 := r0i.Tags[0]
--	r0t1 := r0i.Tags[1]
--	c.Assert(r0t0.Key, Equals, "webserver")
--	c.Assert(r0t0.Value, Equals, "")
--	c.Assert(r0t1.Key, Equals, "stack")
--	c.Assert(r0t1.Value, Equals, "Production")
--}
--
--func (s *S) TestCreateImageExample(c *C) {
--	testServer.Response(200, nil, CreateImageExample)
--
--	options := &ec2.CreateImage{
--		InstanceId:  "i-123456",
--		Name:        "foo",
--		Description: "Test CreateImage",
--		NoReboot:    true,
--		BlockDevices: []ec2.BlockDeviceMapping{
--			{DeviceName: "/dev/sdb", VirtualName: "ephemeral0"},
--			{DeviceName: "/dev/sdc", SnapshotId: "snap-a08912c9", DeleteOnTermination: true},
--		},
--	}
--
--	resp, err := s.ec2.CreateImage(options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CreateImage"})
--	c.Assert(req.Form["InstanceId"], DeepEquals, []string{options.InstanceId})
--	c.Assert(req.Form["Name"], DeepEquals, []string{options.Name})
--	c.Assert(req.Form["Description"], DeepEquals, []string{options.Description})
--	c.Assert(req.Form["NoReboot"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sdb"})
--	c.Assert(req.Form["BlockDeviceMapping.1.VirtualName"], DeepEquals, []string{"ephemeral0"})
--	c.Assert(req.Form["BlockDeviceMapping.2.DeviceName"], DeepEquals, []string{"/dev/sdc"})
--	c.Assert(req.Form["BlockDeviceMapping.2.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"})
--	c.Assert(req.Form["BlockDeviceMapping.2.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.ImageId, Equals, "ami-4fa54026")
--}
--
--func (s *S) TestDescribeImagesExample(c *C) {
--	testServer.Response(200, nil, DescribeImagesExample)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.Images([]string{"ami-1", "ami-2"}, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeImages"})
--	c.Assert(req.Form["ImageId.1"], DeepEquals, []string{"ami-1"})
--	c.Assert(req.Form["ImageId.2"], DeepEquals, []string{"ami-2"})
--	c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"})
--	c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"})
--	c.Assert(req.Form["Filter.1.Value.2"], IsNil)
--	c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"})
--	c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"})
--	c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE")
--	c.Assert(resp.Images, HasLen, 1)
--
--	i0 := resp.Images[0]
--	c.Assert(i0.Id, Equals, "ami-a2469acf")
--	c.Assert(i0.Type, Equals, "machine")
--	c.Assert(i0.Name, Equals, "example-marketplace-amzn-ami.1")
--	c.Assert(i0.Description, Equals, "Amazon Linux AMI i386 EBS")
--	c.Assert(i0.Location, Equals, "aws-marketplace/example-marketplace-amzn-ami.1")
--	c.Assert(i0.State, Equals, "available")
--	c.Assert(i0.Public, Equals, true)
--	c.Assert(i0.OwnerId, Equals, "123456789999")
--	c.Assert(i0.OwnerAlias, Equals, "aws-marketplace")
--	c.Assert(i0.Architecture, Equals, "i386")
--	c.Assert(i0.KernelId, Equals, "aki-805ea7e9")
--	c.Assert(i0.RootDeviceType, Equals, "ebs")
--	c.Assert(i0.RootDeviceName, Equals, "/dev/sda1")
--	c.Assert(i0.VirtualizationType, Equals, "paravirtual")
--	c.Assert(i0.Hypervisor, Equals, "xen")
--
--	c.Assert(i0.BlockDevices, HasLen, 1)
--	c.Assert(i0.BlockDevices[0].DeviceName, Equals, "/dev/sda1")
--	c.Assert(i0.BlockDevices[0].SnapshotId, Equals, "snap-787e9403")
--	c.Assert(i0.BlockDevices[0].VolumeSize, Equals, int64(8))
--	c.Assert(i0.BlockDevices[0].DeleteOnTermination, Equals, true)
--
--	testServer.Response(200, nil, DescribeImagesExample)
--	resp2, err := s.ec2.ImagesByOwners([]string{"ami-1", "ami-2"}, []string{"123456789999", "id2"}, filter)
--
--	req2 := testServer.WaitRequest()
--	c.Assert(req2.Form["Action"], DeepEquals, []string{"DescribeImages"})
--	c.Assert(req2.Form["ImageId.1"], DeepEquals, []string{"ami-1"})
--	c.Assert(req2.Form["ImageId.2"], DeepEquals, []string{"ami-2"})
--	c.Assert(req2.Form["Owner.1"], DeepEquals, []string{"123456789999"})
--	c.Assert(req2.Form["Owner.2"], DeepEquals, []string{"id2"})
--	c.Assert(req2.Form["Filter.1.Name"], DeepEquals, []string{"key1"})
--	c.Assert(req2.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"})
--	c.Assert(req2.Form["Filter.1.Value.2"], IsNil)
--	c.Assert(req2.Form["Filter.2.Name"], DeepEquals, []string{"key2"})
--	c.Assert(req2.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"})
--	c.Assert(req2.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp2.RequestId, Equals, "4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE")
--	c.Assert(resp2.Images, HasLen, 1)
--
--	i1 := resp2.Images[0]
--	c.Assert(i1.Id, Equals, "ami-a2469acf")
--	c.Assert(i1.Type, Equals, "machine")
--	c.Assert(i1.Name, Equals, "example-marketplace-amzn-ami.1")
--	c.Assert(i1.Description, Equals, "Amazon Linux AMI i386 EBS")
--	c.Assert(i1.Location, Equals, "aws-marketplace/example-marketplace-amzn-ami.1")
--	c.Assert(i1.State, Equals, "available")
--	c.Assert(i1.Public, Equals, true)
--	c.Assert(i1.OwnerId, Equals, "123456789999")
--	c.Assert(i1.OwnerAlias, Equals, "aws-marketplace")
--	c.Assert(i1.Architecture, Equals, "i386")
--	c.Assert(i1.KernelId, Equals, "aki-805ea7e9")
--	c.Assert(i1.RootDeviceType, Equals, "ebs")
--	c.Assert(i1.RootDeviceName, Equals, "/dev/sda1")
--	c.Assert(i1.VirtualizationType, Equals, "paravirtual")
--	c.Assert(i1.Hypervisor, Equals, "xen")
--
--	c.Assert(i1.BlockDevices, HasLen, 1)
--	c.Assert(i1.BlockDevices[0].DeviceName, Equals, "/dev/sda1")
--	c.Assert(i1.BlockDevices[0].SnapshotId, Equals, "snap-787e9403")
--	c.Assert(i1.BlockDevices[0].VolumeSize, Equals, int64(8))
--	c.Assert(i1.BlockDevices[0].DeleteOnTermination, Equals, true)
--}
--
--func (s *S) TestImageAttributeExample(c *C) {
--	testServer.Response(200, nil, ImageAttributeExample)
--
--	resp, err := s.ec2.ImageAttribute("ami-61a54008", "launchPermission")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeImageAttribute"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.ImageId, Equals, "ami-61a54008")
--	c.Assert(resp.Group, Equals, "all")
--	c.Assert(resp.UserIds[0], Equals, "495219933132")
--}
--
--func (s *S) TestCreateSnapshotExample(c *C) {
--	testServer.Response(200, nil, CreateSnapshotExample)
--
--	resp, err := s.ec2.CreateSnapshot("vol-4d826724", "Daily Backup")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CreateSnapshot"})
--	c.Assert(req.Form["VolumeId"], DeepEquals, []string{"vol-4d826724"})
--	c.Assert(req.Form["Description"], DeepEquals, []string{"Daily Backup"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.Snapshot.Id, Equals, "snap-78a54011")
--	c.Assert(resp.Snapshot.VolumeId, Equals, "vol-4d826724")
--	c.Assert(resp.Snapshot.Status, Equals, "pending")
--	c.Assert(resp.Snapshot.StartTime, Equals, "2008-05-07T12:51:50.000Z")
--	c.Assert(resp.Snapshot.Progress, Equals, "60%")
--	c.Assert(resp.Snapshot.OwnerId, Equals, "111122223333")
--	c.Assert(resp.Snapshot.VolumeSize, Equals, "10")
--	c.Assert(resp.Snapshot.Description, Equals, "Daily Backup")
--}
--
--func (s *S) TestDeleteSnapshotsExample(c *C) {
--	testServer.Response(200, nil, DeleteSnapshotExample)
--
--	resp, err := s.ec2.DeleteSnapshots([]string{"snap-78a54011"})
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteSnapshot"})
--	c.Assert(req.Form["SnapshotId.1"], DeepEquals, []string{"snap-78a54011"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestDescribeSnapshotsExample(c *C) {
--	testServer.Response(200, nil, DescribeSnapshotsExample)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.Snapshots([]string{"snap-1", "snap-2"}, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSnapshots"})
--	c.Assert(req.Form["SnapshotId.1"], DeepEquals, []string{"snap-1"})
--	c.Assert(req.Form["SnapshotId.2"], DeepEquals, []string{"snap-2"})
--	c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"})
--	c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"})
--	c.Assert(req.Form["Filter.1.Value.2"], IsNil)
--	c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"})
--	c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"})
--	c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.Snapshots, HasLen, 1)
--
--	s0 := resp.Snapshots[0]
--	c.Assert(s0.Id, Equals, "snap-1a2b3c4d")
--	c.Assert(s0.VolumeId, Equals, "vol-8875daef")
--	c.Assert(s0.VolumeSize, Equals, "15")
--	c.Assert(s0.Status, Equals, "pending")
--	c.Assert(s0.StartTime, Equals, "2010-07-29T04:12:01.000Z")
--	c.Assert(s0.Progress, Equals, "30%")
--	c.Assert(s0.OwnerId, Equals, "111122223333")
--	c.Assert(s0.Description, Equals, "Daily Backup")
--
--	c.Assert(s0.Tags, HasLen, 1)
--	c.Assert(s0.Tags[0].Key, Equals, "Purpose")
--	c.Assert(s0.Tags[0].Value, Equals, "demo_db_14_backup")
--}
--
--func (s *S) TestModifyImageAttributeExample(c *C) {
--	testServer.Response(200, nil, ModifyImageAttributeExample)
--
--	options := ec2.ModifyImageAttribute{
--		Description: "Test Description",
--	}
--
--	resp, err := s.ec2.ModifyImageAttribute("ami-4fa54026", &options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyImageAttribute"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestModifyImageAttributeExample_complex(c *C) {
--	testServer.Response(200, nil, ModifyImageAttributeExample)
--
--	options := ec2.ModifyImageAttribute{
--		AddUsers:     []string{"u1", "u2"},
--		RemoveUsers:  []string{"u3"},
--		AddGroups:    []string{"g1", "g3"},
--		RemoveGroups: []string{"g2"},
--		Description:  "Test Description",
--	}
--
--	resp, err := s.ec2.ModifyImageAttribute("ami-4fa54026", &options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyImageAttribute"})
--	c.Assert(req.Form["LaunchPermission.Add.1.UserId"], DeepEquals, []string{"u1"})
--	c.Assert(req.Form["LaunchPermission.Add.2.UserId"], DeepEquals, []string{"u2"})
--	c.Assert(req.Form["LaunchPermission.Remove.1.UserId"], DeepEquals, []string{"u3"})
--	c.Assert(req.Form["LaunchPermission.Add.1.Group"], DeepEquals, []string{"g1"})
--	c.Assert(req.Form["LaunchPermission.Add.2.Group"], DeepEquals, []string{"g3"})
--	c.Assert(req.Form["LaunchPermission.Remove.1.Group"], DeepEquals, []string{"g2"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestCopyImageExample(c *C) {
--	testServer.Response(200, nil, CopyImageExample)
--
--	options := ec2.CopyImage{
--		SourceRegion:  "us-west-2",
--		SourceImageId: "ami-1a2b3c4d",
--		Description:   "Test Description",
--	}
--
--	resp, err := s.ec2.CopyImage(&options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CopyImage"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "60bc441d-fa2c-494d-b155-5d6a3EXAMPLE")
--}
--
--func (s *S) TestCreateKeyPairExample(c *C) {
--	testServer.Response(200, nil, CreateKeyPairExample)
--
--	resp, err := s.ec2.CreateKeyPair("foo")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CreateKeyPair"})
--	c.Assert(req.Form["KeyName"], DeepEquals, []string{"foo"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.KeyName, Equals, "foo")
--	c.Assert(resp.KeyFingerprint, Equals, "00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00")
--}
--
--func (s *S) TestDeleteKeyPairExample(c *C) {
--	testServer.Response(200, nil, DeleteKeyPairExample)
--
--	resp, err := s.ec2.DeleteKeyPair("foo")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteKeyPair"})
--	c.Assert(req.Form["KeyName"], DeepEquals, []string{"foo"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestCreateSecurityGroupExample(c *C) {
--	testServer.Response(200, nil, CreateSecurityGroupExample)
--
--	resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: "websrv", Description: "Web Servers"})
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"CreateSecurityGroup"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(req.Form["GroupDescription"], DeepEquals, []string{"Web Servers"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.Name, Equals, "websrv")
--	c.Assert(resp.Id, Equals, "sg-67ad940e")
--}
--
--func (s *S) TestDescribeSecurityGroupsExample(c *C) {
--	testServer.Response(200, nil, DescribeSecurityGroupsExample)
--
--	resp, err := s.ec2.SecurityGroups([]ec2.SecurityGroup{{Name: "WebServers"}, {Name: "RangedPortsBySource"}}, nil)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"})
--	c.Assert(req.Form["GroupName.1"], DeepEquals, []string{"WebServers"})
--	c.Assert(req.Form["GroupName.2"], DeepEquals, []string{"RangedPortsBySource"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.Groups, HasLen, 2)
--
--	g0 := resp.Groups[0]
--	c.Assert(g0.OwnerId, Equals, "999988887777")
--	c.Assert(g0.Name, Equals, "WebServers")
--	c.Assert(g0.Id, Equals, "sg-67ad940e")
--	c.Assert(g0.Description, Equals, "Web Servers")
--	c.Assert(g0.IPPerms, HasLen, 1)
--
--	g0ipp := g0.IPPerms[0]
--	c.Assert(g0ipp.Protocol, Equals, "tcp")
--	c.Assert(g0ipp.FromPort, Equals, 80)
--	c.Assert(g0ipp.ToPort, Equals, 80)
--	c.Assert(g0ipp.SourceIPs, DeepEquals, []string{"0.0.0.0/0"})
--
--	g1 := resp.Groups[1]
--	c.Assert(g1.OwnerId, Equals, "999988887777")
--	c.Assert(g1.Name, Equals, "RangedPortsBySource")
--	c.Assert(g1.Id, Equals, "sg-76abc467")
--	c.Assert(g1.Description, Equals, "Group A")
--	c.Assert(g1.IPPerms, HasLen, 1)
--
--	g1ipp := g1.IPPerms[0]
--	c.Assert(g1ipp.Protocol, Equals, "tcp")
--	c.Assert(g1ipp.FromPort, Equals, 6000)
--	c.Assert(g1ipp.ToPort, Equals, 7000)
--	c.Assert(g1ipp.SourceIPs, IsNil)
--}
--
--func (s *S) TestDescribeSecurityGroupsExampleWithFilter(c *C) {
--	testServer.Response(200, nil, DescribeSecurityGroupsExample)
--
--	filter := ec2.NewFilter()
--	filter.Add("ip-permission.protocol", "tcp")
--	filter.Add("ip-permission.from-port", "22")
--	filter.Add("ip-permission.to-port", "22")
--	filter.Add("ip-permission.group-name", "app_server_group", "database_group")
--
--	_, err := s.ec2.SecurityGroups(nil, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"})
--	c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"ip-permission.from-port"})
--	c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"22"})
--	c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"ip-permission.group-name"})
--	c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"app_server_group"})
--	c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"database_group"})
--	c.Assert(req.Form["Filter.3.Name"], DeepEquals, []string{"ip-permission.protocol"})
--	c.Assert(req.Form["Filter.3.Value.1"], DeepEquals, []string{"tcp"})
--	c.Assert(req.Form["Filter.4.Name"], DeepEquals, []string{"ip-permission.to-port"})
--	c.Assert(req.Form["Filter.4.Value.1"], DeepEquals, []string{"22"})
--
--	c.Assert(err, IsNil)
--}
--
--func (s *S) TestDescribeSecurityGroupsDumpWithGroup(c *C) {
--	testServer.Response(200, nil, DescribeSecurityGroupsDump)
--
--	resp, err := s.ec2.SecurityGroups(nil, nil)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeSecurityGroups"})
--	c.Assert(err, IsNil)
--	c.Check(resp.Groups, HasLen, 1)
--	c.Check(resp.Groups[0].IPPerms, HasLen, 2)
--
--	ipp0 := resp.Groups[0].IPPerms[0]
--	c.Assert(ipp0.SourceIPs, IsNil)
--	c.Check(ipp0.Protocol, Equals, "icmp")
--	c.Assert(ipp0.SourceGroups, HasLen, 1)
--	c.Check(ipp0.SourceGroups[0].OwnerId, Equals, "12345")
--	c.Check(ipp0.SourceGroups[0].Name, Equals, "default")
--	c.Check(ipp0.SourceGroups[0].Id, Equals, "sg-67ad940e")
--
--	ipp1 := resp.Groups[0].IPPerms[1]
--	c.Check(ipp1.Protocol, Equals, "tcp")
--	c.Assert(ipp0.SourceIPs, IsNil)
--	c.Assert(ipp0.SourceGroups, HasLen, 1)
--	c.Check(ipp1.SourceGroups[0].Id, Equals, "sg-76abc467")
--	c.Check(ipp1.SourceGroups[0].OwnerId, Equals, "12345")
--	c.Check(ipp1.SourceGroups[0].Name, Equals, "other")
--}
--
--func (s *S) TestDeleteSecurityGroupExample(c *C) {
--	testServer.Response(200, nil, DeleteSecurityGroupExample)
--
--	resp, err := s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: "websrv"})
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteSecurityGroup"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(req.Form["GroupId"], IsNil)
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestDeleteSecurityGroupExampleWithId(c *C) {
--	testServer.Response(200, nil, DeleteSecurityGroupExample)
--
--	// ignore return and error - we only want to check the parameter handling.
--	s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Id: "sg-67ad940e", Name: "ignored"})
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["GroupName"], IsNil)
--	c.Assert(req.Form["GroupId"], DeepEquals, []string{"sg-67ad940e"})
--}
--
--func (s *S) TestAuthorizeSecurityGroupExample1(c *C) {
--	testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample)
--
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  80,
--		ToPort:    80,
--		SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"},
--	}}
--	resp, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, perms)
--
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupIngress"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"})
--	c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, []string{"80"})
--	c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"80"})
--	c.Assert(req.Form["IpPermissions.1.IpRanges.1.CidrIp"], DeepEquals, []string{"205.192.0.0/16"})
--	c.Assert(req.Form["IpPermissions.1.IpRanges.2.CidrIp"], DeepEquals, []string{"205.159.0.0/16"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestAuthorizeSecurityGroupEgress(c *C) {
--	testServer.Response(200, nil, AuthorizeSecurityGroupEgressExample)
--
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  80,
--		ToPort:    80,
--		SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"},
--	}}
--	resp, err := s.ec2.AuthorizeSecurityGroupEgress(ec2.SecurityGroup{Name: "websrv"}, perms)
--
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupEgress"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"})
--	c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, []string{"80"})
--	c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"80"})
--	c.Assert(req.Form["IpPermissions.1.IpRanges.1.CidrIp"], DeepEquals, []string{"205.192.0.0/16"})
--	c.Assert(req.Form["IpPermissions.1.IpRanges.2.CidrIp"], DeepEquals, []string{"205.159.0.0/16"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestAuthorizeSecurityGroupExample1WithId(c *C) {
--	testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample)
--
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  80,
--		ToPort:    80,
--		SourceIPs: []string{"205.192.0.0/16", "205.159.0.0/16"},
--	}}
--	// ignore return and error - we only want to check the parameter handling.
--	s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Id: "sg-67ad940e", Name: "ignored"}, perms)
--
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["GroupName"], IsNil)
--	c.Assert(req.Form["GroupId"], DeepEquals, []string{"sg-67ad940e"})
--}
--
--func (s *S) TestAuthorizeSecurityGroupExample2(c *C) {
--	testServer.Response(200, nil, AuthorizeSecurityGroupIngressExample)
--
--	perms := []ec2.IPPerm{{
--		Protocol: "tcp",
--		FromPort: 80,
--		ToPort:   81,
--		SourceGroups: []ec2.UserSecurityGroup{
--			{OwnerId: "999988887777", Name: "OtherAccountGroup"},
--			{Id: "sg-67ad940e"},
--		},
--	}}
--	resp, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, perms)
--
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"AuthorizeSecurityGroupIngress"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(req.Form["IpPermissions.1.IpProtocol"], DeepEquals, []string{"tcp"})
--	c.Assert(req.Form["IpPermissions.1.FromPort"], DeepEquals, []string{"80"})
--	c.Assert(req.Form["IpPermissions.1.ToPort"], DeepEquals, []string{"81"})
--	c.Assert(req.Form["IpPermissions.1.Groups.1.UserId"], DeepEquals, []string{"999988887777"})
--	c.Assert(req.Form["IpPermissions.1.Groups.1.GroupName"], DeepEquals, []string{"OtherAccountGroup"})
--	c.Assert(req.Form["IpPermissions.1.Groups.2.UserId"], IsNil)
--	c.Assert(req.Form["IpPermissions.1.Groups.2.GroupName"], IsNil)
--	c.Assert(req.Form["IpPermissions.1.Groups.2.GroupId"], DeepEquals, []string{"sg-67ad940e"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestRevokeSecurityGroupExample(c *C) {
--	// RevokeSecurityGroup is implemented by the same code as AuthorizeSecurityGroup
--	// so there's no need to duplicate all the tests.
--	testServer.Response(200, nil, RevokeSecurityGroupIngressExample)
--
--	resp, err := s.ec2.RevokeSecurityGroup(ec2.SecurityGroup{Name: "websrv"}, nil)
--
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"RevokeSecurityGroupIngress"})
--	c.Assert(req.Form["GroupName"], DeepEquals, []string{"websrv"})
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestCreateTags(c *C) {
--	testServer.Response(200, nil, CreateTagsExample)
--
--	resp, err := s.ec2.CreateTags([]string{"ami-1a2b3c4d", "i-7f4d3a2b"}, []ec2.Tag{{"webserver", ""}, {"stack", "Production"}})
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["ResourceId.1"], DeepEquals, []string{"ami-1a2b3c4d"})
--	c.Assert(req.Form["ResourceId.2"], DeepEquals, []string{"i-7f4d3a2b"})
--	c.Assert(req.Form["Tag.1.Key"], DeepEquals, []string{"webserver"})
--	c.Assert(req.Form["Tag.1.Value"], DeepEquals, []string{""})
--	c.Assert(req.Form["Tag.2.Key"], DeepEquals, []string{"stack"})
--	c.Assert(req.Form["Tag.2.Value"], DeepEquals, []string{"Production"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestStartInstances(c *C) {
--	testServer.Response(200, nil, StartInstancesExample)
--
--	resp, err := s.ec2.StartInstances("i-10a64379")
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"StartInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--
--	s0 := resp.StateChanges[0]
--	c.Assert(s0.InstanceId, Equals, "i-10a64379")
--	c.Assert(s0.CurrentState.Code, Equals, 0)
--	c.Assert(s0.CurrentState.Name, Equals, "pending")
--	c.Assert(s0.PreviousState.Code, Equals, 80)
--	c.Assert(s0.PreviousState.Name, Equals, "stopped")
--}
--
--func (s *S) TestStopInstances(c *C) {
--	testServer.Response(200, nil, StopInstancesExample)
--
--	resp, err := s.ec2.StopInstances("i-10a64379")
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"StopInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--
--	s0 := resp.StateChanges[0]
--	c.Assert(s0.InstanceId, Equals, "i-10a64379")
--	c.Assert(s0.CurrentState.Code, Equals, 64)
--	c.Assert(s0.CurrentState.Name, Equals, "stopping")
--	c.Assert(s0.PreviousState.Code, Equals, 16)
--	c.Assert(s0.PreviousState.Name, Equals, "running")
--}
--
--func (s *S) TestRebootInstances(c *C) {
--	testServer.Response(200, nil, RebootInstancesExample)
--
--	resp, err := s.ec2.RebootInstances("i-10a64379")
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"RebootInstances"})
--	c.Assert(req.Form["InstanceId.1"], DeepEquals, []string{"i-10a64379"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestSignatureWithEndpointPath(c *C) {
--	ec2.FakeTime(true)
--	defer ec2.FakeTime(false)
--
--	testServer.Response(200, nil, RebootInstancesExample)
--
--	// https://bugs.launchpad.net/goamz/+bug/1022749
--	ec2 := ec2.NewWithClient(s.ec2.Auth, aws.Region{EC2Endpoint: testServer.URL + "/services/Cloud"}, testutil.DefaultClient)
--
--	_, err := ec2.RebootInstances("i-10a64379")
--	c.Assert(err, IsNil)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Signature"], DeepEquals, []string{"QmvgkYGn19WirCuCz/jRp3RmRgFwWR5WRkKZ5AZnyXQ="})
--}
--
--func (s *S) TestAllocateAddressExample(c *C) {
--	testServer.Response(200, nil, AllocateAddressExample)
--
--	options := &ec2.AllocateAddress{
--		Domain: "vpc",
--	}
--
--	resp, err := s.ec2.AllocateAddress(options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"AllocateAddress"})
--	c.Assert(req.Form["Domain"], DeepEquals, []string{"vpc"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.PublicIp, Equals, "198.51.100.1")
--	c.Assert(resp.Domain, Equals, "vpc")
--	c.Assert(resp.AllocationId, Equals, "eipalloc-5723d13e")
--}
--
--func (s *S) TestReleaseAddressExample(c *C) {
--	testServer.Response(200, nil, ReleaseAddressExample)
--
--	resp, err := s.ec2.ReleaseAddress("eipalloc-5723d13e")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"ReleaseAddress"})
--	c.Assert(req.Form["AllocationId"], DeepEquals, []string{"eipalloc-5723d13e"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestAssociateAddressExample(c *C) {
--	testServer.Response(200, nil, AssociateAddressExample)
--
--	options := &ec2.AssociateAddress{
--		InstanceId:         "i-4fd2431a",
--		AllocationId:       "eipalloc-5723d13e",
--		AllowReassociation: true,
--	}
--
--	resp, err := s.ec2.AssociateAddress(options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"AssociateAddress"})
--	c.Assert(req.Form["InstanceId"], DeepEquals, []string{"i-4fd2431a"})
--	c.Assert(req.Form["AllocationId"], DeepEquals, []string{"eipalloc-5723d13e"})
--	c.Assert(req.Form["AllowReassociation"], DeepEquals, []string{"true"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--	c.Assert(resp.AssociationId, Equals, "eipassoc-fc5ca095")
--}
--
--func (s *S) TestDisassociateAddressExample(c *C) {
--	testServer.Response(200, nil, DisassociateAddressExample)
--
--	resp, err := s.ec2.DisassociateAddress("eipassoc-aa7486c3")
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DisassociateAddress"})
--	c.Assert(req.Form["AssociationId"], DeepEquals, []string{"eipassoc-aa7486c3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestModifyInstance(c *C) {
--	testServer.Response(200, nil, ModifyInstanceExample)
--
--	options := ec2.ModifyInstance{
--		InstanceType:          "m1.small",
--		DisableAPITermination: true,
--		EbsOptimized:          true,
--		SecurityGroups:        []ec2.SecurityGroup{{Id: "g1"}, {Id: "g2"}},
--		ShutdownBehavior:      "terminate",
--		KernelId:              "kernel-id",
--		RamdiskId:             "ramdisk-id",
--		SourceDestCheck:       true,
--		SriovNetSupport:       true,
--		UserData:              []byte("1234"),
--		BlockDevices: []ec2.BlockDeviceMapping{
--			{DeviceName: "/dev/sda1", SnapshotId: "snap-a08912c9", DeleteOnTermination: true},
--		},
--	}
--
--	resp, err := s.ec2.ModifyInstance("i-2ba64342", &options)
--	req := testServer.WaitRequest()
--
--	c.Assert(req.Form["Action"], DeepEquals, []string{"ModifyInstanceAttribute"})
--	c.Assert(req.Form["InstanceId"], DeepEquals, []string{"i-2ba64342"})
--	c.Assert(req.Form["InstanceType.Value"], DeepEquals, []string{"m1.small"})
--	c.Assert(req.Form["BlockDeviceMapping.1.DeviceName"], DeepEquals, []string{"/dev/sda1"})
--	c.Assert(req.Form["BlockDeviceMapping.1.Ebs.SnapshotId"], DeepEquals, []string{"snap-a08912c9"})
--	c.Assert(req.Form["BlockDeviceMapping.1.Ebs.DeleteOnTermination"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["DisableApiTermination.Value"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["EbsOptimized"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["GroupId.1"], DeepEquals, []string{"g1"})
--	c.Assert(req.Form["GroupId.2"], DeepEquals, []string{"g2"})
--	c.Assert(req.Form["InstanceInitiatedShutdownBehavior.Value"], DeepEquals, []string{"terminate"})
--	c.Assert(req.Form["Kernel.Value"], DeepEquals, []string{"kernel-id"})
--	c.Assert(req.Form["Ramdisk.Value"], DeepEquals, []string{"ramdisk-id"})
--	c.Assert(req.Form["SourceDestCheck.Value"], DeepEquals, []string{"true"})
--	c.Assert(req.Form["SriovNetSupport.Value"], DeepEquals, []string{"simple"})
--	c.Assert(req.Form["UserData"], DeepEquals, []string{"MTIzNA=="})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
--
--func (s *S) TestCreateVpc(c *C) {
--	testServer.Response(200, nil, CreateVpcExample)
--
--	options := &ec2.CreateVpc{
--		CidrBlock: "foo",
--	}
--
--	resp, err := s.ec2.CreateVpc(options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["CidrBlock"], DeepEquals, []string{"foo"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE")
--	c.Assert(resp.VPC.VpcId, Equals, "vpc-1a2b3c4d")
--	c.Assert(resp.VPC.State, Equals, "pending")
--	c.Assert(resp.VPC.CidrBlock, Equals, "10.0.0.0/16")
--	c.Assert(resp.VPC.DHCPOptionsID, Equals, "dopt-1a2b3c4d2")
--	c.Assert(resp.VPC.InstanceTenancy, Equals, "default")
--}
--
--func (s *S) TestDescribeVpcs(c *C) {
--	testServer.Response(200, nil, DescribeVpcsExample)
--
--	filter := ec2.NewFilter()
--	filter.Add("key1", "value1")
--	filter.Add("key2", "value2", "value3")
--
--	resp, err := s.ec2.DescribeVpcs([]string{"id1", "id2"}, filter)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeVpcs"})
--	c.Assert(req.Form["VpcId.1"], DeepEquals, []string{"id1"})
--	c.Assert(req.Form["VpcId.2"], DeepEquals, []string{"id2"})
--	c.Assert(req.Form["Filter.1.Name"], DeepEquals, []string{"key1"})
--	c.Assert(req.Form["Filter.1.Value.1"], DeepEquals, []string{"value1"})
--	c.Assert(req.Form["Filter.1.Value.2"], IsNil)
--	c.Assert(req.Form["Filter.2.Name"], DeepEquals, []string{"key2"})
--	c.Assert(req.Form["Filter.2.Value.1"], DeepEquals, []string{"value2"})
--	c.Assert(req.Form["Filter.2.Value.2"], DeepEquals, []string{"value3"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE")
--	c.Assert(resp.VPCs, HasLen, 1)
--}
--
--func (s *S) TestCreateSubnet(c *C) {
--	testServer.Response(200, nil, CreateSubnetExample)
--
--	options := &ec2.CreateSubnet{
--		AvailabilityZone: "baz",
--		CidrBlock:        "foo",
--		VpcId:            "bar",
--	}
--
--	resp, err := s.ec2.CreateSubnet(options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["VpcId"], DeepEquals, []string{"bar"})
--	c.Assert(req.Form["CidrBlock"], DeepEquals, []string{"foo"})
--	c.Assert(req.Form["AvailabilityZone"], DeepEquals, []string{"baz"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE")
--	c.Assert(resp.Subnet.SubnetId, Equals, "subnet-9d4a7b6c")
--	c.Assert(resp.Subnet.State, Equals, "pending")
--	c.Assert(resp.Subnet.VpcId, Equals, "vpc-1a2b3c4d")
--	c.Assert(resp.Subnet.CidrBlock, Equals, "10.0.1.0/24")
--	c.Assert(resp.Subnet.AvailableIpAddressCount, Equals, 251)
--}
--
--func (s *S) TestResetImageAttribute(c *C) {
--	testServer.Response(200, nil, ResetImageAttributeExample)
--
--	options := ec2.ResetImageAttribute{Attribute: "launchPermission"}
--	resp, err := s.ec2.ResetImageAttribute("i-2ba64342", &options)
--
--	req := testServer.WaitRequest()
--	c.Assert(req.Form["Action"], DeepEquals, []string{"ResetImageAttribute"})
--
--	c.Assert(err, IsNil)
--	c.Assert(resp.RequestId, Equals, "59dbff89-35bd-4eac-99ed-be587EXAMPLE")
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2i_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2i_test.go
-deleted file mode 100644
-index 3773041..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2i_test.go
-+++ /dev/null
-@@ -1,203 +0,0 @@
--package ec2_test
--
--import (
--	"crypto/rand"
--	"fmt"
--	"github.com/mitchellh/goamz/aws"
--	"github.com/mitchellh/goamz/ec2"
--	"github.com/mitchellh/goamz/testutil"
--	. "github.com/motain/gocheck"
--)
--
--// AmazonServer represents an Amazon EC2 server.
--type AmazonServer struct {
--	auth aws.Auth
--}
--
--func (s *AmazonServer) SetUp(c *C) {
--	auth, err := aws.EnvAuth()
--	if err != nil {
--		c.Fatal(err.Error())
--	}
--	s.auth = auth
--}
--
--// Suite cost per run: 0.02 USD
--var _ = Suite(&AmazonClientSuite{})
--
--// AmazonClientSuite tests the client against a live EC2 server.
--type AmazonClientSuite struct {
--	srv AmazonServer
--	ClientTests
--}
--
--func (s *AmazonClientSuite) SetUpSuite(c *C) {
--	if !testutil.Amazon {
--		c.Skip("AmazonClientSuite tests not enabled")
--	}
--	s.srv.SetUp(c)
--	s.ec2 = ec2.NewWithClient(s.srv.auth, aws.USEast, testutil.DefaultClient)
--}
--
--// ClientTests defines integration tests designed to test the client.
--// It is not used as a test suite in itself, but embedded within
--// another type.
--type ClientTests struct {
--	ec2 *ec2.EC2
--}
--
--var imageId = "ami-ccf405a5" // Ubuntu Maverick, i386, EBS store
--
--// Cost: 0.00 USD
--func (s *ClientTests) TestRunInstancesError(c *C) {
--	options := ec2.RunInstances{
--		ImageId:      "ami-a6f504cf", // Ubuntu Maverick, i386, instance store
--		InstanceType: "t1.micro",     // Doesn't work with micro, results in 400.
--	}
--
--	resp, err := s.ec2.RunInstances(&options)
--
--	c.Assert(resp, IsNil)
--	c.Assert(err, ErrorMatches, "AMI.*root device.*not supported.*")
--
--	ec2err, ok := err.(*ec2.Error)
--	c.Assert(ok, Equals, true)
--	c.Assert(ec2err.StatusCode, Equals, 400)
--	c.Assert(ec2err.Code, Equals, "UnsupportedOperation")
--	c.Assert(ec2err.Message, Matches, "AMI.*root device.*not supported.*")
--	c.Assert(ec2err.RequestId, Matches, ".+")
--}
--
--// Cost: 0.02 USD
--func (s *ClientTests) TestRunAndTerminate(c *C) {
--	options := ec2.RunInstances{
--		ImageId:      imageId,
--		InstanceType: "t1.micro",
--	}
--	resp1, err := s.ec2.RunInstances(&options)
--	c.Assert(err, IsNil)
--	c.Check(resp1.ReservationId, Matches, "r-[0-9a-f]*")
--	c.Check(resp1.OwnerId, Matches, "[0-9]+")
--	c.Check(resp1.Instances, HasLen, 1)
--	c.Check(resp1.Instances[0].InstanceType, Equals, "t1.micro")
--
--	instId := resp1.Instances[0].InstanceId
--
--	resp2, err := s.ec2.Instances([]string{instId}, nil)
--	c.Assert(err, IsNil)
--	if c.Check(resp2.Reservations, HasLen, 1) && c.Check(len(resp2.Reservations[0].Instances), Equals, 1) {
--		inst := resp2.Reservations[0].Instances[0]
--		c.Check(inst.InstanceId, Equals, instId)
--	}
--
--	resp3, err := s.ec2.TerminateInstances([]string{instId})
--	c.Assert(err, IsNil)
--	c.Check(resp3.StateChanges, HasLen, 1)
--	c.Check(resp3.StateChanges[0].InstanceId, Equals, instId)
--	c.Check(resp3.StateChanges[0].CurrentState.Name, Equals, "shutting-down")
--	c.Check(resp3.StateChanges[0].CurrentState.Code, Equals, 32)
--}
--
--// Cost: 0.00 USD
--func (s *ClientTests) TestSecurityGroups(c *C) {
--	name := "goamz-test"
--	descr := "goamz security group for tests"
--
--	// Clean it up if a previous test left it around, and avoid leaving it around ourselves.
--	s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--	defer s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--
--	resp1, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
--	c.Assert(err, IsNil)
--	c.Assert(resp1.RequestId, Matches, ".+")
--	c.Assert(resp1.Name, Equals, name)
--	c.Assert(resp1.Id, Matches, ".+")
--
--	resp1, err = s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
--	ec2err, _ := err.(*ec2.Error)
--	c.Assert(resp1, IsNil)
--	c.Assert(ec2err, NotNil)
--	c.Assert(ec2err.Code, Equals, "InvalidGroup.Duplicate")
--
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  0,
--		ToPort:    1024,
--		SourceIPs: []string{"127.0.0.1/24"},
--	}}
--
--	resp2, err := s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms)
--	c.Assert(err, IsNil)
--	c.Assert(resp2.RequestId, Matches, ".+")
--
--	resp3, err := s.ec2.SecurityGroups(ec2.SecurityGroupNames(name), nil)
--	c.Assert(err, IsNil)
--	c.Assert(resp3.RequestId, Matches, ".+")
--	c.Assert(resp3.Groups, HasLen, 1)
--
--	g0 := resp3.Groups[0]
--	c.Assert(g0.Name, Equals, name)
--	c.Assert(g0.Description, Equals, descr)
--	c.Assert(g0.IPPerms, HasLen, 1)
--	c.Assert(g0.IPPerms[0].Protocol, Equals, "tcp")
--	c.Assert(g0.IPPerms[0].FromPort, Equals, 0)
--	c.Assert(g0.IPPerms[0].ToPort, Equals, 1024)
--	c.Assert(g0.IPPerms[0].SourceIPs, DeepEquals, []string{"127.0.0.1/24"})
--
--	resp2, err = s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--	c.Assert(err, IsNil)
--	c.Assert(resp2.RequestId, Matches, ".+")
--}
--
--var sessionId = func() string {
--	buf := make([]byte, 8)
--	// if we have no randomness, we'll just make do, so ignore the error.
--	rand.Read(buf)
--	return fmt.Sprintf("%x", buf)
--}()
--
--// sessionName returns a name that is probably
--// unique to this test session.
--func sessionName(prefix string) string {
--	return prefix + "-" + sessionId
--}
--
--var allRegions = []aws.Region{
--	aws.USEast,
--	aws.USWest,
--	aws.EUWest,
--	aws.APSoutheast,
--	aws.APNortheast,
--}
--
--// Communicate with all EC2 endpoints to see if they are alive.
--func (s *ClientTests) TestRegions(c *C) {
--	name := sessionName("goamz-region-test")
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  80,
--		ToPort:    80,
--		SourceIPs: []string{"127.0.0.1/32"},
--	}}
--	errs := make(chan error, len(allRegions))
--	for _, region := range allRegions {
--		go func(r aws.Region) {
--			e := ec2.NewWithClient(s.ec2.Auth, r, testutil.DefaultClient)
--			_, err := e.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms)
--			errs <- err
--		}(region)
--	}
--	for range allRegions {
--		err := <-errs
--		if err != nil {
--			ec2_err, ok := err.(*ec2.Error)
--			if ok {
--				c.Check(ec2_err.Code, Matches, "InvalidGroup.NotFound")
--			} else {
--				c.Errorf("Non-EC2 error: %s", err)
--			}
--		} else {
--			c.Errorf("Test should have errored but it seems to have succeeded")
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2t_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2t_test.go
-deleted file mode 100644
-index fe50356..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2t_test.go
-+++ /dev/null
-@@ -1,580 +0,0 @@
--package ec2_test
--
--import (
--	"fmt"
--	"github.com/mitchellh/goamz/aws"
--	"github.com/mitchellh/goamz/ec2"
--	"github.com/mitchellh/goamz/ec2/ec2test"
--	"github.com/mitchellh/goamz/testutil"
--	. "github.com/motain/gocheck"
--	"regexp"
--	"sort"
--)
--
--// LocalServer represents a local ec2test fake server.
--type LocalServer struct {
--	auth   aws.Auth
--	region aws.Region
--	srv    *ec2test.Server
--}
--
--func (s *LocalServer) SetUp(c *C) {
--	srv, err := ec2test.NewServer()
--	c.Assert(err, IsNil)
--	c.Assert(srv, NotNil)
--
--	s.srv = srv
--	s.region = aws.Region{EC2Endpoint: srv.URL()}
--}
--
--// LocalServerSuite defines tests that will run
--// against the local ec2test server. It includes
--// selected tests from ClientTests;
--// when the ec2test functionality is sufficient, it should
--// include all of them, and ClientTests can be simply embedded.
--type LocalServerSuite struct {
--	srv LocalServer
--	ServerTests
--	clientTests ClientTests
--}
--
--var _ = Suite(&LocalServerSuite{})
--
--func (s *LocalServerSuite) SetUpSuite(c *C) {
--	s.srv.SetUp(c)
--	s.ServerTests.ec2 = ec2.NewWithClient(s.srv.auth, s.srv.region, testutil.DefaultClient)
--	s.clientTests.ec2 = ec2.NewWithClient(s.srv.auth, s.srv.region, testutil.DefaultClient)
--}
--
--func (s *LocalServerSuite) TestRunAndTerminate(c *C) {
--	s.clientTests.TestRunAndTerminate(c)
--}
--
--func (s *LocalServerSuite) TestSecurityGroups(c *C) {
--	s.clientTests.TestSecurityGroups(c)
--}
--
--// TestUserData is not defined on ServerTests because it
--// requires the ec2test server to function.
--func (s *LocalServerSuite) TestUserData(c *C) {
--	data := make([]byte, 256)
--	for i := range data {
--		data[i] = byte(i)
--	}
--	inst, err := s.ec2.RunInstances(&ec2.RunInstances{
--		ImageId:      imageId,
--		InstanceType: "t1.micro",
--		UserData:     data,
--	})
--	c.Assert(err, IsNil)
--	c.Assert(inst, NotNil)
--	c.Assert(inst.Instances[0].DNSName, Equals, inst.Instances[0].InstanceId+".example.com")
--
--	id := inst.Instances[0].InstanceId
--
--	defer s.ec2.TerminateInstances([]string{id})
--
--	tinst := s.srv.srv.Instance(id)
--	c.Assert(tinst, NotNil)
--	c.Assert(tinst.UserData, DeepEquals, data)
--}
--
--// AmazonServerSuite runs the ec2test server tests against a live EC2 server.
--// It will only be activated if the -all flag is specified.
--type AmazonServerSuite struct {
--	srv AmazonServer
--	ServerTests
--}
--
--var _ = Suite(&AmazonServerSuite{})
--
--func (s *AmazonServerSuite) SetUpSuite(c *C) {
--	if !testutil.Amazon {
--		c.Skip("AmazonServerSuite tests not enabled")
--	}
--	s.srv.SetUp(c)
--	s.ServerTests.ec2 = ec2.NewWithClient(s.srv.auth, aws.USEast, testutil.DefaultClient)
--}
--
--// ServerTests defines a set of tests designed to test
--// the ec2test local fake ec2 server.
--// It is not used as a test suite in itself, but embedded within
--// another type.
--type ServerTests struct {
--	ec2 *ec2.EC2
--}
--
--func terminateInstances(c *C, e *ec2.EC2, insts []*ec2.Instance) {
--	var ids []string
--	for _, inst := range insts {
--		if inst != nil {
--			ids = append(ids, inst.InstanceId)
--		}
--	}
--	_, err := e.TerminateInstances(ids)
--	c.Check(err, IsNil, Commentf("%d INSTANCES LEFT RUNNING!!!", len(ids)))
--}
--
--func (s *ServerTests) makeTestGroup(c *C, name, descr string) ec2.SecurityGroup {
--	// Clean it up if a previous test left it around.
--	_, err := s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--	if err != nil && err.(*ec2.Error).Code != "InvalidGroup.NotFound" {
--		c.Fatalf("delete security group: %v", err)
--	}
--
--	resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
--	c.Assert(err, IsNil)
--	c.Assert(resp.Name, Equals, name)
--	return resp.SecurityGroup
--}
--
--func (s *ServerTests) TestIPPerms(c *C) {
--	g0 := s.makeTestGroup(c, "goamz-test0", "ec2test group 0")
--	defer s.ec2.DeleteSecurityGroup(g0)
--
--	g1 := s.makeTestGroup(c, "goamz-test1", "ec2test group 1")
--	defer s.ec2.DeleteSecurityGroup(g1)
--
--	resp, err := s.ec2.SecurityGroups([]ec2.SecurityGroup{g0, g1}, nil)
--	c.Assert(err, IsNil)
--	c.Assert(resp.Groups, HasLen, 2)
--	c.Assert(resp.Groups[0].IPPerms, HasLen, 0)
--	c.Assert(resp.Groups[1].IPPerms, HasLen, 0)
--
--	ownerId := resp.Groups[0].OwnerId
--
--	// test some invalid parameters
--	// TODO more
--	_, err = s.ec2.AuthorizeSecurityGroup(g0, []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  0,
--		ToPort:    1024,
--		SourceIPs: []string{"z127.0.0.1/24"},
--	}})
--	c.Assert(err, NotNil)
--	c.Check(err.(*ec2.Error).Code, Equals, "InvalidPermission.Malformed")
--
--	// Check that AuthorizeSecurityGroup adds the correct authorizations.
--	_, err = s.ec2.AuthorizeSecurityGroup(g0, []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  2000,
--		ToPort:    2001,
--		SourceIPs: []string{"127.0.0.0/24"},
--		SourceGroups: []ec2.UserSecurityGroup{{
--			Name: g1.Name,
--		}, {
--			Id: g0.Id,
--		}},
--	}, {
--		Protocol:  "tcp",
--		FromPort:  2000,
--		ToPort:    2001,
--		SourceIPs: []string{"200.1.1.34/32"},
--	}})
--	c.Assert(err, IsNil)
--
--	resp, err = s.ec2.SecurityGroups([]ec2.SecurityGroup{g0}, nil)
--	c.Assert(err, IsNil)
--	c.Assert(resp.Groups, HasLen, 1)
--	c.Assert(resp.Groups[0].IPPerms, HasLen, 1)
--
--	perm := resp.Groups[0].IPPerms[0]
--	srcg := perm.SourceGroups
--	c.Assert(srcg, HasLen, 2)
--
--	// Normalize so we don't care about returned order.
--	if srcg[0].Name == g1.Name {
--		srcg[0], srcg[1] = srcg[1], srcg[0]
--	}
--	c.Check(srcg[0].Name, Equals, g0.Name)
--	c.Check(srcg[0].Id, Equals, g0.Id)
--	c.Check(srcg[0].OwnerId, Equals, ownerId)
--	c.Check(srcg[1].Name, Equals, g1.Name)
--	c.Check(srcg[1].Id, Equals, g1.Id)
--	c.Check(srcg[1].OwnerId, Equals, ownerId)
--
--	sort.Strings(perm.SourceIPs)
--	c.Check(perm.SourceIPs, DeepEquals, []string{"127.0.0.0/24", "200.1.1.34/32"})
--
--	// Check that we can't delete g1 (because g0 is using it)
--	_, err = s.ec2.DeleteSecurityGroup(g1)
--	c.Assert(err, NotNil)
--	c.Check(err.(*ec2.Error).Code, Equals, "InvalidGroup.InUse")
--
--	_, err = s.ec2.RevokeSecurityGroup(g0, []ec2.IPPerm{{
--		Protocol:     "tcp",
--		FromPort:     2000,
--		ToPort:       2001,
--		SourceGroups: []ec2.UserSecurityGroup{{Id: g1.Id}},
--	}, {
--		Protocol:  "tcp",
--		FromPort:  2000,
--		ToPort:    2001,
--		SourceIPs: []string{"200.1.1.34/32"},
--	}})
--	c.Assert(err, IsNil)
--
--	resp, err = s.ec2.SecurityGroups([]ec2.SecurityGroup{g0}, nil)
--	c.Assert(err, IsNil)
--	c.Assert(resp.Groups, HasLen, 1)
--	c.Assert(resp.Groups[0].IPPerms, HasLen, 1)
--
--	perm = resp.Groups[0].IPPerms[0]
--	srcg = perm.SourceGroups
--	c.Assert(srcg, HasLen, 1)
--	c.Check(srcg[0].Name, Equals, g0.Name)
--	c.Check(srcg[0].Id, Equals, g0.Id)
--	c.Check(srcg[0].OwnerId, Equals, ownerId)
--
--	c.Check(perm.SourceIPs, DeepEquals, []string{"127.0.0.0/24"})
--
--	// We should be able to delete g1 now because we've removed its only use.
--	_, err = s.ec2.DeleteSecurityGroup(g1)
--	c.Assert(err, IsNil)
--
--	_, err = s.ec2.DeleteSecurityGroup(g0)
--	c.Assert(err, IsNil)
--
--	f := ec2.NewFilter()
--	f.Add("group-id", g0.Id, g1.Id)
--	resp, err = s.ec2.SecurityGroups(nil, f)
--	c.Assert(err, IsNil)
--	c.Assert(resp.Groups, HasLen, 0)
--}
--
--func (s *ServerTests) TestDuplicateIPPerm(c *C) {
--	name := "goamz-test"
--	descr := "goamz security group for tests"
--
--	// Clean it up if a previous test left it around, and avoid leaving it around afterwards.
--	s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--	defer s.ec2.DeleteSecurityGroup(ec2.SecurityGroup{Name: name})
--
--	resp1, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: name, Description: descr})
--	c.Assert(err, IsNil)
--	c.Assert(resp1.Name, Equals, name)
--
--	perms := []ec2.IPPerm{{
--		Protocol:  "tcp",
--		FromPort:  200,
--		ToPort:    1024,
--		SourceIPs: []string{"127.0.0.1/24"},
--	}, {
--		Protocol:  "tcp",
--		FromPort:  0,
--		ToPort:    100,
--		SourceIPs: []string{"127.0.0.1/24"},
--	}}
--
--	_, err = s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms[0:1])
--	c.Assert(err, IsNil)
--
--	_, err = s.ec2.AuthorizeSecurityGroup(ec2.SecurityGroup{Name: name}, perms[0:2])
--	c.Assert(err, ErrorMatches, `.*\(InvalidPermission.Duplicate\)`)
--}
--
--type filterSpec struct {
--	name   string
--	values []string
--}
--
--func (s *ServerTests) TestInstanceFiltering(c *C) {
--	groupResp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName("testgroup1"), Description: "testgroup one description"})
--	c.Assert(err, IsNil)
--	group1 := groupResp.SecurityGroup
--	defer s.ec2.DeleteSecurityGroup(group1)
--
--	groupResp, err = s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName("testgroup2"), Description: "testgroup two description"})
--	c.Assert(err, IsNil)
--	group2 := groupResp.SecurityGroup
--	defer s.ec2.DeleteSecurityGroup(group2)
--
--	insts := make([]*ec2.Instance, 3)
--	inst, err := s.ec2.RunInstances(&ec2.RunInstances{
--		MinCount:       2,
--		ImageId:        imageId,
--		InstanceType:   "t1.micro",
--		SecurityGroups: []ec2.SecurityGroup{group1},
--	})
--	c.Assert(err, IsNil)
--	insts[0] = &inst.Instances[0]
--	insts[1] = &inst.Instances[1]
--	defer terminateInstances(c, s.ec2, insts)
--
--	imageId2 := "ami-e358958a" // Natty server, i386, EBS store
--	inst, err = s.ec2.RunInstances(&ec2.RunInstances{
--		ImageId:        imageId2,
--		InstanceType:   "t1.micro",
--		SecurityGroups: []ec2.SecurityGroup{group2},
--	})
--	c.Assert(err, IsNil)
--	insts[2] = &inst.Instances[0]
--
--	ids := func(indices ...int) (instIds []string) {
--		for _, index := range indices {
--			instIds = append(instIds, insts[index].InstanceId)
--		}
--		return
--	}
--
--	tests := []struct {
--		about       string
--		instanceIds []string     // instanceIds argument to Instances method.
--		filters     []filterSpec // filters argument to Instances method.
--		resultIds   []string     // set of instance ids of expected results.
--		allowExtra  bool         // resultIds may be incomplete.
--		err         string       // expected error.
--	}{
--		{
--			about:      "check that Instances returns all instances",
--			resultIds:  ids(0, 1, 2),
--			allowExtra: true,
--		}, {
--			about:       "check that specifying two instance ids returns them",
--			instanceIds: ids(0, 2),
--			resultIds:   ids(0, 2),
--		}, {
--			about:       "check that specifying a non-existent instance id gives an error",
--			instanceIds: append(ids(0), "i-deadbeef"),
--			err:         `.*\(InvalidInstanceID\.NotFound\)`,
--		}, {
--			about: "check that a filter allowed both instances returns both of them",
--			filters: []filterSpec{
--				{"instance-id", ids(0, 2)},
--			},
--			resultIds: ids(0, 2),
--		}, {
--			about: "check that a filter allowing only one instance returns it",
--			filters: []filterSpec{
--				{"instance-id", ids(1)},
--			},
--			resultIds: ids(1),
--		}, {
--			about: "check that a filter allowing no instances returns none",
--			filters: []filterSpec{
--				{"instance-id", []string{"i-deadbeef12345"}},
--			},
--		}, {
--			about: "check that filtering on group id works",
--			filters: []filterSpec{
--				{"group-id", []string{group1.Id}},
--			},
--			resultIds: ids(0, 1),
--		}, {
--			about: "check that filtering on group name works",
--			filters: []filterSpec{
--				{"group-name", []string{group1.Name}},
--			},
--			resultIds: ids(0, 1),
--		}, {
--			about: "check that filtering on image id works",
--			filters: []filterSpec{
--				{"image-id", []string{imageId}},
--			},
--			resultIds:  ids(0, 1),
--			allowExtra: true,
--		}, {
--			about: "combination filters 1",
--			filters: []filterSpec{
--				{"image-id", []string{imageId, imageId2}},
--				{"group-name", []string{group1.Name}},
--			},
--			resultIds: ids(0, 1),
--		}, {
--			about: "combination filters 2",
--			filters: []filterSpec{
--				{"image-id", []string{imageId2}},
--				{"group-name", []string{group1.Name}},
--			},
--		},
--	}
--	for i, t := range tests {
--		c.Logf("%d. %s", i, t.about)
--		var f *ec2.Filter
--		if t.filters != nil {
--			f = ec2.NewFilter()
--			for _, spec := range t.filters {
--				f.Add(spec.name, spec.values...)
--			}
--		}
--		resp, err := s.ec2.Instances(t.instanceIds, f)
--		if t.err != "" {
--			c.Check(err, ErrorMatches, t.err)
--			continue
--		}
--		c.Assert(err, IsNil)
--		insts := make(map[string]*ec2.Instance)
--		for _, r := range resp.Reservations {
--			for j := range r.Instances {
--				inst := &r.Instances[j]
--				c.Check(insts[inst.InstanceId], IsNil, Commentf("duplicate instance id: %q", inst.InstanceId))
--				insts[inst.InstanceId] = inst
--			}
--		}
--		if !t.allowExtra {
--			c.Check(insts, HasLen, len(t.resultIds), Commentf("expected %d instances got %#v", len(t.resultIds), insts))
--		}
--		for j, id := range t.resultIds {
--			c.Check(insts[id], NotNil, Commentf("instance id %d (%q) not found; got %#v", j, id, insts))
--		}
--	}
--}
--
--func idsOnly(gs []ec2.SecurityGroup) []ec2.SecurityGroup {
--	for i := range gs {
--		gs[i].Name = ""
--	}
--	return gs
--}
--
--func namesOnly(gs []ec2.SecurityGroup) []ec2.SecurityGroup {
--	for i := range gs {
--		gs[i].Id = ""
--	}
--	return gs
--}
--
--func (s *ServerTests) TestGroupFiltering(c *C) {
--	g := make([]ec2.SecurityGroup, 4)
--	for i := range g {
--		resp, err := s.ec2.CreateSecurityGroup(ec2.SecurityGroup{Name: sessionName(fmt.Sprintf("testgroup%d", i)), Description: fmt.Sprintf("testdescription%d", i)})
--		c.Assert(err, IsNil)
--		g[i] = resp.SecurityGroup
--		c.Logf("group %d: %v", i, g[i])
--		defer s.ec2.DeleteSecurityGroup(g[i])
--	}
--
--	perms := [][]ec2.IPPerm{
--		{{
--			Protocol:  "tcp",
--			FromPort:  100,
--			ToPort:    200,
--			SourceIPs: []string{"1.2.3.4/32"},
--		}},
--		{{
--			Protocol:     "tcp",
--			FromPort:     200,
--			ToPort:       300,
--			SourceGroups: []ec2.UserSecurityGroup{{Id: g[1].Id}},
--		}},
--		{{
--			Protocol:     "udp",
--			FromPort:     200,
--			ToPort:       400,
--			SourceGroups: []ec2.UserSecurityGroup{{Id: g[1].Id}},
--		}},
--	}
--	for i, ps := range perms {
--		_, err := s.ec2.AuthorizeSecurityGroup(g[i], ps)
--		c.Assert(err, IsNil)
--	}
--
--	groups := func(indices ...int) (gs []ec2.SecurityGroup) {
--		for _, index := range indices {
--			gs = append(gs, g[index])
--		}
--		return
--	}
--
--	type groupTest struct {
--		about      string
--		groups     []ec2.SecurityGroup // groupIds argument to SecurityGroups method.
--		filters    []filterSpec        // filters argument to SecurityGroups method.
--		results    []ec2.SecurityGroup // set of expected result groups.
--		allowExtra bool                // specified results may be incomplete.
--		err        string              // expected error.
--	}
--	filterCheck := func(name, val string, gs []ec2.SecurityGroup) groupTest {
--		return groupTest{
--			about:      "filter check " + name,
--			filters:    []filterSpec{{name, []string{val}}},
--			results:    gs,
--			allowExtra: true,
--		}
--	}
--	tests := []groupTest{
--		{
--			about:      "check that SecurityGroups returns all groups",
--			results:    groups(0, 1, 2, 3),
--			allowExtra: true,
--		}, {
--			about:   "check that specifying two group ids returns them",
--			groups:  idsOnly(groups(0, 2)),
--			results: groups(0, 2),
--		}, {
--			about:   "check that specifying names only works",
--			groups:  namesOnly(groups(0, 2)),
--			results: groups(0, 2),
--		}, {
--			about:  "check that specifying a non-existent group id gives an error",
--			groups: append(groups(0), ec2.SecurityGroup{Id: "sg-eeeeeeeee"}),
--			err:    `.*\(InvalidGroup\.NotFound\)`,
--		}, {
--			about: "check that a filter allowed two groups returns both of them",
--			filters: []filterSpec{
--				{"group-id", []string{g[0].Id, g[2].Id}},
--			},
--			results: groups(0, 2),
--		},
--		{
--			about:  "check that the previous filter works when specifying a list of ids",
--			groups: groups(1, 2),
--			filters: []filterSpec{
--				{"group-id", []string{g[0].Id, g[2].Id}},
--			},
--			results: groups(2),
--		}, {
--			about: "check that a filter allowing no groups returns none",
--			filters: []filterSpec{
--				{"group-id", []string{"sg-eeeeeeeee"}},
--			},
--		},
--		filterCheck("description", "testdescription1", groups(1)),
--		filterCheck("group-name", g[2].Name, groups(2)),
--		filterCheck("ip-permission.cidr", "1.2.3.4/32", groups(0)),
--		filterCheck("ip-permission.group-name", g[1].Name, groups(1, 2)),
--		filterCheck("ip-permission.protocol", "udp", groups(2)),
--		filterCheck("ip-permission.from-port", "200", groups(1, 2)),
--		filterCheck("ip-permission.to-port", "200", groups(0)),
--		// TODO owner-id
--	}
--	for i, t := range tests {
--		c.Logf("%d. %s", i, t.about)
--		var f *ec2.Filter
--		if t.filters != nil {
--			f = ec2.NewFilter()
--			for _, spec := range t.filters {
--				f.Add(spec.name, spec.values...)
--			}
--		}
--		resp, err := s.ec2.SecurityGroups(t.groups, f)
--		if t.err != "" {
--			c.Check(err, ErrorMatches, t.err)
--			continue
--		}
--		c.Assert(err, IsNil)
--		groups := make(map[string]*ec2.SecurityGroup)
--		for j := range resp.Groups {
--			group := &resp.Groups[j].SecurityGroup
--			c.Check(groups[group.Id], IsNil, Commentf("duplicate group id: %q", group.Id))
--
--			groups[group.Id] = group
--		}
--		// If extra groups may be returned, eliminate all groups that
--		// we did not create in this session apart from the default group.
--		if t.allowExtra {
--			namePat := regexp.MustCompile(sessionName("testgroup[0-9]"))
--			for id, g := range groups {
--				if !namePat.MatchString(g.Name) {
--					delete(groups, id)
--				}
--			}
--		}
--		c.Check(groups, HasLen, len(t.results))
--		for j, g := range t.results {
--			rg := groups[g.Id]
--			c.Assert(rg, NotNil, Commentf("group %d (%v) not found; got %#v", j, g, groups))
--			c.Check(rg.Name, Equals, g.Name, Commentf("group %d (%v)", j, g))
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go
-deleted file mode 100644
-index 1a0c046..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/filter.go
-+++ /dev/null
-@@ -1,84 +0,0 @@
--package ec2test
--
--import (
--	"fmt"
--	"net/url"
--	"strings"
--)
--
--// filter holds an ec2 filter.  A filter maps an attribute to a set of
--// possible values for that attribute. For an item to pass through the
--// filter, every attribute of the item mentioned in the filter must match
--// at least one of its given values.
--type filter map[string][]string
--
--// newFilter creates a new filter from the Filter fields in the url form.
--//
--// The filtering is specified through a map of name=>values, where the
--// name is a well-defined key identifying the data to be matched,
--// and the list of values holds the possible values the filtered
--// item can take for the key to be included in the
--// result set. For example:
--//
--//   Filter.1.Name=instance-type
--//   Filter.1.Value.1=m1.small
--//   Filter.1.Value.2=m1.large
--//
--func newFilter(form url.Values) filter {
--	// TODO return an error if the fields are not well formed?
--	names := make(map[int]string)
--	values := make(map[int][]string)
--	maxId := 0
--	for name, fvalues := range form {
--		var rest string
--		var id int
--		if x, _ := fmt.Sscanf(name, "Filter.%d.%s", &id, &rest); x != 2 {
--			continue
--		}
--		if id > maxId {
--			maxId = id
--		}
--		if rest == "Name" {
--			names[id] = fvalues[0]
--			continue
--		}
--		if !strings.HasPrefix(rest, "Value.") {
--			continue
--		}
--		values[id] = append(values[id], fvalues[0])
--	}
--
--	f := make(filter)
--	for id, name := range names {
--		f[name] = values[id]
--	}
--	return f
--}
--
--func notDigit(r rune) bool {
--	return r < '0' || r > '9'
--}
--
--// filterable represents an object that can be passed through a filter.
--type filterable interface {
--	// matchAttr returns true if the given attribute of the
--	// object matches value. It returns an error if the
--	// attribute is not recognised or the value is malformed.
--	matchAttr(attr, value string) (bool, error)
--}
--
--// ok returns true if x passes through the filter.
--func (f filter) ok(x filterable) (bool, error) {
--next:
--	for a, vs := range f {
--		for _, v := range vs {
--			if ok, err := x.matchAttr(a, v); ok {
--				continue next
--			} else if err != nil {
--				return false, fmt.Errorf("bad attribute or value %q=%q for type %T: %v", a, v, x, err)
--			}
--		}
--		return false, nil
--	}
--	return true, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/server.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/server.go
-deleted file mode 100644
-index 2f24cb2..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/ec2test/server.go
-+++ /dev/null
-@@ -1,993 +0,0 @@
--// The ec2test package implements a fake EC2 provider with
--// the capability of inducing errors on any given operation,
--// and retrospectively determining what operations have been
--// carried out.
--package ec2test
--
--import (
--	"encoding/base64"
--	"encoding/xml"
--	"fmt"
--	"github.com/mitchellh/goamz/ec2"
--	"io"
--	"net"
--	"net/http"
--	"net/url"
--	"regexp"
--	"strconv"
--	"strings"
--	"sync"
--)
--
--var b64 = base64.StdEncoding
--
--// Action represents a request that changes the ec2 state.
--type Action struct {
--	RequestId string
--
--	// Request holds the requested action as a url.Values instance
--	Request url.Values
--
--	// If the action succeeded, Response holds the value that
--	// was marshalled to build the XML response for the request.
--	Response interface{}
--
--	// If the action failed, Err holds an error giving details of the failure.
--	Err *ec2.Error
--}
--
--// TODO possible other things:
--// - some virtual time stamp interface, so a client
--// can ask for all actions after a certain virtual time.
--
--// Server implements an EC2 simulator for use in testing.
--type Server struct {
--	url      string
--	listener net.Listener
--	mu       sync.Mutex
--	reqs     []*Action
--
--	instances            map[string]*Instance      // id -> instance
--	reservations         map[string]*reservation   // id -> reservation
--	groups               map[string]*securityGroup // id -> group
--	maxId                counter
--	reqId                counter
--	reservationId        counter
--	groupId              counter
--	initialInstanceState ec2.InstanceState
--}
--
--// reservation holds a simulated ec2 reservation.
--type reservation struct {
--	id        string
--	instances map[string]*Instance
--	groups    []*securityGroup
--}
--
--// Instance holds a simulated ec2 instance.
--type Instance struct {
--	// UserData holds the data that was passed to the RunInstances request
--	// when the instance was started.
--	UserData    []byte
--	id          string
--	imageId     string
--	reservation *reservation
--	instType    string
--	state       ec2.InstanceState
--}
--
--// permKey represents permission for a given security
--// group or IP address (but not both) to access a given range of
--// ports. Equality of permKeys is used in the implementation of
--// permission sets, relying on the uniqueness of securityGroup
--// instances.
--type permKey struct {
--	protocol string
--	fromPort int
--	toPort   int
--	group    *securityGroup
--	ipAddr   string
--}
--
--// securityGroup holds a simulated ec2 security group.
--// Instances of securityGroup should only be created through
--// Server.createSecurityGroup to ensure that groups can be
--// compared by pointer value.
--type securityGroup struct {
--	id          string
--	name        string
--	description string
--
--	perms map[permKey]bool
--}
--
--func (g *securityGroup) ec2SecurityGroup() ec2.SecurityGroup {
--	return ec2.SecurityGroup{
--		Name: g.name,
--		Id:   g.id,
--	}
--}
--
--func (g *securityGroup) matchAttr(attr, value string) (ok bool, err error) {
--	switch attr {
--	case "description":
--		return g.description == value, nil
--	case "group-id":
--		return g.id == value, nil
--	case "group-name":
--		return g.name == value, nil
--	case "ip-permission.cidr":
--		return g.hasPerm(func(k permKey) bool { return k.ipAddr == value }), nil
--	case "ip-permission.group-name":
--		return g.hasPerm(func(k permKey) bool {
--			return k.group != nil && k.group.name == value
--		}), nil
--	case "ip-permission.from-port":
--		port, err := strconv.Atoi(value)
--		if err != nil {
--			return false, err
--		}
--		return g.hasPerm(func(k permKey) bool { return k.fromPort == port }), nil
--	case "ip-permission.to-port":
--		port, err := strconv.Atoi(value)
--		if err != nil {
--			return false, err
--		}
--		return g.hasPerm(func(k permKey) bool { return k.toPort == port }), nil
--	case "ip-permission.protocol":
--		return g.hasPerm(func(k permKey) bool { return k.protocol == value }), nil
--	case "owner-id":
--		return value == ownerId, nil
--	}
--	return false, fmt.Errorf("unknown attribute %q", attr)
--}
--
--func (g *securityGroup) hasPerm(test func(k permKey) bool) bool {
--	for k := range g.perms {
--		if test(k) {
--			return true
--		}
--	}
--	return false
--}
--
--// ec2Perms returns the list of EC2 permissions granted
--// to g. It groups permissions by port range and protocol.
--func (g *securityGroup) ec2Perms() (perms []ec2.IPPerm) {
--	// The grouping is held in result. We use permKey for convenience,
--	// (ensuring that the group and ipAddr of each key is zero). For
--	// each protocol/port range combination, we build up the permission
--	// set in the associated value.
--	result := make(map[permKey]*ec2.IPPerm)
--	for k := range g.perms {
--		groupKey := k
--		groupKey.group = nil
--		groupKey.ipAddr = ""
--
--		ec2p := result[groupKey]
--		if ec2p == nil {
--			ec2p = &ec2.IPPerm{
--				Protocol: k.protocol,
--				FromPort: k.fromPort,
--				ToPort:   k.toPort,
--			}
--			result[groupKey] = ec2p
--		}
--		if k.group != nil {
--			ec2p.SourceGroups = append(ec2p.SourceGroups,
--				ec2.UserSecurityGroup{
--					Id:      k.group.id,
--					Name:    k.group.name,
--					OwnerId: ownerId,
--				})
--		} else {
--			ec2p.SourceIPs = append(ec2p.SourceIPs, k.ipAddr)
--		}
--	}
--	for _, ec2p := range result {
--		perms = append(perms, *ec2p)
--	}
--	return
--}
--
--var actions = map[string]func(*Server, http.ResponseWriter, *http.Request, string) interface{}{
--	"RunInstances":                  (*Server).runInstances,
--	"TerminateInstances":            (*Server).terminateInstances,
--	"DescribeInstances":             (*Server).describeInstances,
--	"CreateSecurityGroup":           (*Server).createSecurityGroup,
--	"DescribeSecurityGroups":        (*Server).describeSecurityGroups,
--	"DeleteSecurityGroup":           (*Server).deleteSecurityGroup,
--	"AuthorizeSecurityGroupIngress": (*Server).authorizeSecurityGroupIngress,
--	"RevokeSecurityGroupIngress":    (*Server).revokeSecurityGroupIngress,
--}
--
--const ownerId = "9876"
--
--// newAction allocates a new action and adds it to the
--// recorded list of server actions.
--func (srv *Server) newAction() *Action {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--
--	a := new(Action)
--	srv.reqs = append(srv.reqs, a)
--	return a
--}
--
--// NewServer returns a new server.
--func NewServer() (*Server, error) {
--	srv := &Server{
--		instances:            make(map[string]*Instance),
--		groups:               make(map[string]*securityGroup),
--		reservations:         make(map[string]*reservation),
--		initialInstanceState: Pending,
--	}
--
--	// Add default security group.
--	g := &securityGroup{
--		name:        "default",
--		description: "default group",
--		id:          fmt.Sprintf("sg-%d", srv.groupId.next()),
--	}
--	g.perms = map[permKey]bool{
--		permKey{
--			protocol: "icmp",
--			fromPort: -1,
--			toPort:   -1,
--			group:    g,
--		}: true,
--		permKey{
--			protocol: "tcp",
--			fromPort: 0,
--			toPort:   65535,
--			group:    g,
--		}: true,
--		permKey{
--			protocol: "udp",
--			fromPort: 0,
--			toPort:   65535,
--			group:    g,
--		}: true,
--	}
--	srv.groups[g.id] = g
--
--	l, err := net.Listen("tcp", "localhost:0")
--	if err != nil {
--		return nil, fmt.Errorf("cannot listen on localhost: %v", err)
--	}
--	srv.listener = l
--
--	srv.url = "http://" + l.Addr().String()
--
--	// we use HandlerFunc rather than *Server directly so that we
--	// can avoid exporting HandlerFunc from *Server.
--	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
--		srv.serveHTTP(w, req)
--	}))
--	return srv, nil
--}
--
--// Quit closes down the server.
--func (srv *Server) Quit() {
--	srv.listener.Close()
--}
--
--// SetInitialInstanceState sets the state that any new instances will be started in.
--func (srv *Server) SetInitialInstanceState(state ec2.InstanceState) {
--	srv.mu.Lock()
--	srv.initialInstanceState = state
--	srv.mu.Unlock()
--}
--
--// URL returns the URL of the server.
--func (srv *Server) URL() string {
--	return srv.url
--}
--
--// serveHTTP serves the EC2 protocol.
--func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
--	req.ParseForm()
--
--	a := srv.newAction()
--	a.RequestId = fmt.Sprintf("req%d", srv.reqId.next())
--	a.Request = req.Form
--
--	// Methods on Server that deal with parsing user data
--	// may fail. To save on error handling code, we allow these
--	// methods to call fatalf, which will panic with an *ec2.Error
--	// which will be caught here and returned
--	// to the client as a properly formed EC2 error.
--	defer func() {
--		switch err := recover().(type) {
--		case *ec2.Error:
--			a.Err = err
--			err.RequestId = a.RequestId
--			writeError(w, err)
--		case nil:
--		default:
--			panic(err)
--		}
--	}()
--
--	f := actions[req.Form.Get("Action")]
--	if f == nil {
--		fatalf(400, "InvalidParameterValue", "Unrecognized Action")
--	}
--
--	response := f(srv, w, req, a.RequestId)
--	a.Response = response
--
--	w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
--	xmlMarshal(w, response)
--}
--
--// Instance returns the instance for the given instance id.
--// It returns nil if there is no such instance.
--func (srv *Server) Instance(id string) *Instance {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	return srv.instances[id]
--}
--
--// writeError writes an appropriate error response.
--// TODO how should we deal with errors when the
--// error itself is potentially generated by backend-agnostic
--// code?
--func writeError(w http.ResponseWriter, err *ec2.Error) {
--	// Error encapsulates an error returned by EC2.
--	// TODO merge with ec2.Error when xml supports ignoring a field.
--	type ec2error struct {
--		Code      string // EC2 error code ("UnsupportedOperation", ...)
--		Message   string // The human-oriented error message
--		RequestId string
--	}
--
--	type Response struct {
--		RequestId string
--		Errors    []ec2error `xml:"Errors>Error"`
--	}
--
--	w.Header().Set("Content-Type", `xml version="1.0" encoding="UTF-8"`)
--	w.WriteHeader(err.StatusCode)
--	xmlMarshal(w, Response{
--		RequestId: err.RequestId,
--		Errors: []ec2error{{
--			Code:    err.Code,
--			Message: err.Message,
--		}},
--	})
--}
--
--// xmlMarshal is the same as xml.Marshal except that
--// it panics on error. The marshalling should not fail,
--// but we want to know if it does.
--func xmlMarshal(w io.Writer, x interface{}) {
--	if err := xml.NewEncoder(w).Encode(x); err != nil {
--		panic(fmt.Errorf("error marshalling %#v: %v", x, err))
--	}
--}
--
--// formToGroups parses a set of SecurityGroup form values
--// as found in a RunInstances request, and returns the resulting
--// slice of security groups.
--// It calls fatalf if a group is not found.
--func (srv *Server) formToGroups(form url.Values) []*securityGroup {
--	var groups []*securityGroup
--	for name, values := range form {
--		switch {
--		case strings.HasPrefix(name, "SecurityGroupId."):
--			if g := srv.groups[values[0]]; g != nil {
--				groups = append(groups, g)
--			} else {
--				fatalf(400, "InvalidGroup.NotFound", "unknown group id %q", values[0])
--			}
--		case strings.HasPrefix(name, "SecurityGroup."):
--			var found *securityGroup
--			for _, g := range srv.groups {
--				if g.name == values[0] {
--					found = g
--				}
--			}
--			if found == nil {
--				fatalf(400, "InvalidGroup.NotFound", "unknown group name %q", values[0])
--			}
--			groups = append(groups, found)
--		}
--	}
--	return groups
--}
--
--// runInstances implements the EC2 RunInstances entry point.
--func (srv *Server) runInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	min := atoi(req.Form.Get("MinCount"))
--	max := atoi(req.Form.Get("MaxCount"))
--	if min < 0 || max < 1 {
--		fatalf(400, "InvalidParameterValue", "bad values for MinCount or MaxCount")
--	}
--	if min > max {
--		fatalf(400, "InvalidParameterCombination", "MinCount is greater than MaxCount")
--	}
--	var userData []byte
--	if data := req.Form.Get("UserData"); data != "" {
--		var err error
--		userData, err = b64.DecodeString(data)
--		if err != nil {
--			fatalf(400, "InvalidParameterValue", "bad UserData value: %v", err)
--		}
--	}
--
--	// TODO attributes still to consider:
--	//    ImageId:                  accept anything, we can verify later
--	//    KeyName                   ?
--	//    InstanceType              ?
--	//    KernelId                  ?
--	//    RamdiskId                 ?
--	//    AvailZone                 ?
--	//    GroupName                 tag
--	//    Monitoring                ignore?
--	//    SubnetId                  ?
--	//    DisableAPITermination     bool
--	//    ShutdownBehavior          string
--	//    PrivateIPAddress          string
--
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--
--	// make sure that form fields are correct before creating the reservation.
--	instType := req.Form.Get("InstanceType")
--	imageId := req.Form.Get("ImageId")
--
--	r := srv.newReservation(srv.formToGroups(req.Form))
--
--	var resp ec2.RunInstancesResp
--	resp.RequestId = reqId
--	resp.ReservationId = r.id
--	resp.OwnerId = ownerId
--
--	for i := 0; i < max; i++ {
--		inst := srv.newInstance(r, instType, imageId, srv.initialInstanceState)
--		inst.UserData = userData
--		resp.Instances = append(resp.Instances, inst.ec2instance())
--	}
--	return &resp
--}
--
--func (srv *Server) group(group ec2.SecurityGroup) *securityGroup {
--	if group.Id != "" {
--		return srv.groups[group.Id]
--	}
--	for _, g := range srv.groups {
--		if g.name == group.Name {
--			return g
--		}
--	}
--	return nil
--}
--
--// NewInstances creates n new instances in srv with the given instance type,
--// image ID, initial state and security groups. If any group does not already
--// exist, it will be created. NewInstances returns the ids of the new instances.
--func (srv *Server) NewInstances(n int, instType string, imageId string, state ec2.InstanceState, groups []ec2.SecurityGroup) []string {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--
--	rgroups := make([]*securityGroup, len(groups))
--	for i, group := range groups {
--		g := srv.group(group)
--		if g == nil {
--			fatalf(400, "InvalidGroup.NotFound", "no such group %v", g)
--		}
--		rgroups[i] = g
--	}
--	r := srv.newReservation(rgroups)
--
--	ids := make([]string, n)
--	for i := 0; i < n; i++ {
--		inst := srv.newInstance(r, instType, imageId, state)
--		ids[i] = inst.id
--	}
--	return ids
--}
--
--func (srv *Server) newInstance(r *reservation, instType string, imageId string, state ec2.InstanceState) *Instance {
--	inst := &Instance{
--		id:          fmt.Sprintf("i-%d", srv.maxId.next()),
--		instType:    instType,
--		imageId:     imageId,
--		state:       state,
--		reservation: r,
--	}
--	srv.instances[inst.id] = inst
--	r.instances[inst.id] = inst
--	return inst
--}
--
--func (srv *Server) newReservation(groups []*securityGroup) *reservation {
--	r := &reservation{
--		id:        fmt.Sprintf("r-%d", srv.reservationId.next()),
--		instances: make(map[string]*Instance),
--		groups:    groups,
--	}
--
--	srv.reservations[r.id] = r
--	return r
--}
--
--func (srv *Server) terminateInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	var resp ec2.TerminateInstancesResp
--	resp.RequestId = reqId
--	var insts []*Instance
--	for attr, vals := range req.Form {
--		if strings.HasPrefix(attr, "InstanceId.") {
--			id := vals[0]
--			inst := srv.instances[id]
--			if inst == nil {
--				fatalf(400, "InvalidInstanceID.NotFound", "no such instance id %q", id)
--			}
--			insts = append(insts, inst)
--		}
--	}
--	for _, inst := range insts {
--		resp.StateChanges = append(resp.StateChanges, inst.terminate())
--	}
--	return &resp
--}
--
--func (inst *Instance) terminate() (d ec2.InstanceStateChange) {
--	d.PreviousState = inst.state
--	inst.state = ShuttingDown
--	d.CurrentState = inst.state
--	d.InstanceId = inst.id
--	return d
--}
--
--func (inst *Instance) ec2instance() ec2.Instance {
--	return ec2.Instance{
--		InstanceId:   inst.id,
--		InstanceType: inst.instType,
--		ImageId:      inst.imageId,
--		DNSName:      fmt.Sprintf("%s.example.com", inst.id),
--		// TODO the rest
--	}
--}
--
--func (inst *Instance) matchAttr(attr, value string) (ok bool, err error) {
--	switch attr {
--	case "architecture":
--		return value == "i386", nil
--	case "instance-id":
--		return inst.id == value, nil
--	case "group-id":
--		for _, g := range inst.reservation.groups {
--			if g.id == value {
--				return true, nil
--			}
--		}
--		return false, nil
--	case "group-name":
--		for _, g := range inst.reservation.groups {
--			if g.name == value {
--				return true, nil
--			}
--		}
--		return false, nil
--	case "image-id":
--		return value == inst.imageId, nil
--	case "instance-state-code":
--		code, err := strconv.Atoi(value)
--		if err != nil {
--			return false, err
--		}
--		return code&0xff == inst.state.Code, nil
--	case "instance-state-name":
--		return value == inst.state.Name, nil
--	}
--	return false, fmt.Errorf("unknown attribute %q", attr)
--}
--
--var (
--	Pending      = ec2.InstanceState{0, "pending"}
--	Running      = ec2.InstanceState{16, "running"}
--	ShuttingDown = ec2.InstanceState{32, "shutting-down"}
--	Terminated   = ec2.InstanceState{16, "terminated"}
--	Stopped      = ec2.InstanceState{16, "stopped"}
--)
--
--func (srv *Server) createSecurityGroup(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	name := req.Form.Get("GroupName")
--	if name == "" {
--		fatalf(400, "InvalidParameterValue", "empty security group name")
--	}
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	if srv.group(ec2.SecurityGroup{Name: name}) != nil {
--		fatalf(400, "InvalidGroup.Duplicate", "group %q already exists", name)
--	}
--	g := &securityGroup{
--		name:        name,
--		description: req.Form.Get("GroupDescription"),
--		id:          fmt.Sprintf("sg-%d", srv.groupId.next()),
--		perms:       make(map[permKey]bool),
--	}
--	srv.groups[g.id] = g
--	// we define a local type for this because ec2.CreateSecurityGroupResp
--	// contains SecurityGroup, but the response to this request
--	// should not contain the security group name.
--	type CreateSecurityGroupResponse struct {
--		RequestId string `xml:"requestId"`
--		Return    bool   `xml:"return"`
--		GroupId   string `xml:"groupId"`
--	}
--	r := &CreateSecurityGroupResponse{
--		RequestId: reqId,
--		Return:    true,
--		GroupId:   g.id,
--	}
--	return r
--}
--
--func (srv *Server) notImplemented(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	fatalf(500, "InternalError", "not implemented")
--	panic("not reached")
--}
--
--func (srv *Server) describeInstances(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	insts := make(map[*Instance]bool)
--	for name, vals := range req.Form {
--		if !strings.HasPrefix(name, "InstanceId.") {
--			continue
--		}
--		inst := srv.instances[vals[0]]
--		if inst == nil {
--			fatalf(400, "InvalidInstanceID.NotFound", "instance %q not found", vals[0])
--		}
--		insts[inst] = true
--	}
--
--	f := newFilter(req.Form)
--
--	var resp ec2.InstancesResp
--	resp.RequestId = reqId
--	for _, r := range srv.reservations {
--		var instances []ec2.Instance
--		for _, inst := range r.instances {
--			if len(insts) > 0 && !insts[inst] {
--				continue
--			}
--			ok, err := f.ok(inst)
--			if ok {
--				instances = append(instances, inst.ec2instance())
--			} else if err != nil {
--				fatalf(400, "InvalidParameterValue", "describe instances: %v", err)
--			}
--		}
--		if len(instances) > 0 {
--			var groups []ec2.SecurityGroup
--			for _, g := range r.groups {
--				groups = append(groups, g.ec2SecurityGroup())
--			}
--			resp.Reservations = append(resp.Reservations, ec2.Reservation{
--				ReservationId:  r.id,
--				OwnerId:        ownerId,
--				Instances:      instances,
--				SecurityGroups: groups,
--			})
--		}
--	}
--	return &resp
--}
--
--func (srv *Server) describeSecurityGroups(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	// BUG similar bug to describeInstances, but for GroupName and GroupId
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--
--	var groups []*securityGroup
--	for name, vals := range req.Form {
--		var g ec2.SecurityGroup
--		switch {
--		case strings.HasPrefix(name, "GroupName."):
--			g.Name = vals[0]
--		case strings.HasPrefix(name, "GroupId."):
--			g.Id = vals[0]
--		default:
--			continue
--		}
--		sg := srv.group(g)
--		if sg == nil {
--			fatalf(400, "InvalidGroup.NotFound", "no such group %v", g)
--		}
--		groups = append(groups, sg)
--	}
--	if len(groups) == 0 {
--		for _, g := range srv.groups {
--			groups = append(groups, g)
--		}
--	}
--
--	f := newFilter(req.Form)
--	var resp ec2.SecurityGroupsResp
--	resp.RequestId = reqId
--	for _, group := range groups {
--		ok, err := f.ok(group)
--		if ok {
--			resp.Groups = append(resp.Groups, ec2.SecurityGroupInfo{
--				OwnerId:       ownerId,
--				SecurityGroup: group.ec2SecurityGroup(),
--				Description:   group.description,
--				IPPerms:       group.ec2Perms(),
--			})
--		} else if err != nil {
--			fatalf(400, "InvalidParameterValue", "describe security groups: %v", err)
--		}
--	}
--	return &resp
--}
--
--func (srv *Server) authorizeSecurityGroupIngress(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	g := srv.group(ec2.SecurityGroup{
--		Name: req.Form.Get("GroupName"),
--		Id:   req.Form.Get("GroupId"),
--	})
--	if g == nil {
--		fatalf(400, "InvalidGroup.NotFound", "group not found")
--	}
--	perms := srv.parsePerms(req)
--
--	for _, p := range perms {
--		if g.perms[p] {
--			fatalf(400, "InvalidPermission.Duplicate", "Permission has already been authorized on the specified group")
--		}
--	}
--	for _, p := range perms {
--		g.perms[p] = true
--	}
--	return &ec2.SimpleResp{
--		XMLName:   xml.Name{"", "AuthorizeSecurityGroupIngressResponse"},
--		RequestId: reqId,
--	}
--}
--
--func (srv *Server) revokeSecurityGroupIngress(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	g := srv.group(ec2.SecurityGroup{
--		Name: req.Form.Get("GroupName"),
--		Id:   req.Form.Get("GroupId"),
--	})
--	if g == nil {
--		fatalf(400, "InvalidGroup.NotFound", "group not found")
--	}
--	perms := srv.parsePerms(req)
--
--	// Note EC2 does not give an error if asked to revoke an authorization
--	// that does not exist.
--	for _, p := range perms {
--		delete(g.perms, p)
--	}
--	return &ec2.SimpleResp{
--		XMLName:   xml.Name{"", "RevokeSecurityGroupIngressResponse"},
--		RequestId: reqId,
--	}
--}
--
--var secGroupPat = regexp.MustCompile(`^sg-[a-z0-9]+$`)
--var ipPat = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/[0-9]+$`)
--var ownerIdPat = regexp.MustCompile(`^[0-9]+$`)
--
--// parsePerms returns a slice of permKey values extracted
--// from the permission fields in req.
--func (srv *Server) parsePerms(req *http.Request) []permKey {
--	// perms maps an index found in the form to its associated
--	// IPPerm. For instance, the form value with key
--	// "IpPermissions.3.FromPort" will be stored in perms[3].FromPort
--	perms := make(map[int]ec2.IPPerm)
--
--	type subgroupKey struct {
--		id1, id2 int
--	}
--	// Each IPPerm can have many source security groups.  The form key
--	// for a source security group contains two indices: the index
--	// of the IPPerm and the sub-index of the security group. The
--	// sourceGroups map maps from a subgroupKey containing these
--	// two indices to the associated security group. For instance,
--	// the form value with key "IPPermissions.3.Groups.2.GroupName"
--	// will be stored in sourceGroups[subgroupKey{3, 2}].Name.
--	sourceGroups := make(map[subgroupKey]ec2.UserSecurityGroup)
--
--	// For each value in the form we store its associated information in the
--	// above maps. The maps are necessary because the form keys may
--	// arrive in any order, and the indices are not
--	// necessarily sequential or even small.
--	for name, vals := range req.Form {
--		val := vals[0]
--		var id1 int
--		var rest string
--		if x, _ := fmt.Sscanf(name, "IpPermissions.%d.%s", &id1, &rest); x != 2 {
--			continue
--		}
--		ec2p := perms[id1]
--		switch {
--		case rest == "FromPort":
--			ec2p.FromPort = atoi(val)
--		case rest == "ToPort":
--			ec2p.ToPort = atoi(val)
--		case rest == "IpProtocol":
--			switch val {
--			case "tcp", "udp", "icmp":
--				ec2p.Protocol = val
--			default:
--				// check it's a well formed number
--				atoi(val)
--				ec2p.Protocol = val
--			}
--		case strings.HasPrefix(rest, "Groups."):
--			k := subgroupKey{id1: id1}
--			if x, _ := fmt.Sscanf(rest[len("Groups."):], "%d.%s", &k.id2, &rest); x != 2 {
--				continue
--			}
--			g := sourceGroups[k]
--			switch rest {
--			case "UserId":
--				// BUG if the user id is blank, this does not conform to the
--				// way that EC2 handles it - a specified but blank owner id
--				// can cause RevokeSecurityGroupIngress to fail with
--				// "group not found" even if the security group id has been
--				// correctly specified.
--				// By failing here, we ensure that we fail early in this case.
--				if !ownerIdPat.MatchString(val) {
--					fatalf(400, "InvalidUserID.Malformed", "Invalid user ID: %q", val)
--				}
--				g.OwnerId = val
--			case "GroupName":
--				g.Name = val
--			case "GroupId":
--				if !secGroupPat.MatchString(val) {
--					fatalf(400, "InvalidGroupId.Malformed", "Invalid group ID: %q", val)
--				}
--				g.Id = val
--			default:
--				fatalf(400, "UnknownParameter", "unknown parameter %q", name)
--			}
--			sourceGroups[k] = g
--		case strings.HasPrefix(rest, "IpRanges."):
--			var id2 int
--			if x, _ := fmt.Sscanf(rest[len("IpRanges."):], "%d.%s", &id2, &rest); x != 2 {
--				continue
--			}
--			switch rest {
--			case "CidrIp":
--				if !ipPat.MatchString(val) {
--					fatalf(400, "InvalidPermission.Malformed", "Invalid IP range: %q", val)
--				}
--				ec2p.SourceIPs = append(ec2p.SourceIPs, val)
--			default:
--				fatalf(400, "UnknownParameter", "unknown parameter %q", name)
--			}
--		default:
--			fatalf(400, "UnknownParameter", "unknown parameter %q", name)
--		}
--		perms[id1] = ec2p
--	}
--	// Associate each set of source groups with its IPPerm.
--	for k, g := range sourceGroups {
--		p := perms[k.id1]
--		p.SourceGroups = append(p.SourceGroups, g)
--		perms[k.id1] = p
--	}
--
--	// Now that we have built up the IPPerms we need, we check for
--	// parameter errors and build up a permKey for each permission,
--	// looking up security groups from srv as we do so.
--	var result []permKey
--	for _, p := range perms {
--		if p.FromPort > p.ToPort {
--			fatalf(400, "InvalidParameterValue", "invalid port range")
--		}
--		k := permKey{
--			protocol: p.Protocol,
--			fromPort: p.FromPort,
--			toPort:   p.ToPort,
--		}
--		for _, g := range p.SourceGroups {
--			if g.OwnerId != "" && g.OwnerId != ownerId {
--				fatalf(400, "InvalidGroup.NotFound", "group %q not found", g.Name)
--			}
--			var ec2g ec2.SecurityGroup
--			switch {
--			case g.Id != "":
--				ec2g.Id = g.Id
--			case g.Name != "":
--				ec2g.Name = g.Name
--			}
--			k.group = srv.group(ec2g)
--			if k.group == nil {
--				fatalf(400, "InvalidGroup.NotFound", "group %v not found", g)
--			}
--			result = append(result, k)
--		}
--		k.group = nil
--		for _, ip := range p.SourceIPs {
--			k.ipAddr = ip
--			result = append(result, k)
--		}
--	}
--	return result
--}
--
--func (srv *Server) deleteSecurityGroup(w http.ResponseWriter, req *http.Request, reqId string) interface{} {
--	srv.mu.Lock()
--	defer srv.mu.Unlock()
--	g := srv.group(ec2.SecurityGroup{
--		Name: req.Form.Get("GroupName"),
--		Id:   req.Form.Get("GroupId"),
--	})
--	if g == nil {
--		fatalf(400, "InvalidGroup.NotFound", "group not found")
--	}
--	for _, r := range srv.reservations {
--		for _, h := range r.groups {
--			if h == g && r.hasRunningMachine() {
--				fatalf(500, "InvalidGroup.InUse", "group is currently in use by a running instance")
--			}
--		}
--	}
--	for _, sg := range srv.groups {
--		// If a group refers to itself, it's ok to delete it.
--		if sg == g {
--			continue
--		}
--		for k := range sg.perms {
--			if k.group == g {
--				fatalf(500, "InvalidGroup.InUse", "group is currently in use by group %q", sg.id)
--			}
--		}
--	}
--
--	delete(srv.groups, g.id)
--	return &ec2.SimpleResp{
--		XMLName:   xml.Name{"", "DeleteSecurityGroupResponse"},
--		RequestId: reqId,
--	}
--}
--
--func (r *reservation) hasRunningMachine() bool {
--	for _, inst := range r.instances {
--		if inst.state.Code != ShuttingDown.Code && inst.state.Code != Terminated.Code {
--			return true
--		}
--	}
--	return false
--}
--
--type counter int
--
--func (c *counter) next() (i int) {
--	i = int(*c)
--	(*c)++
--	return
--}
--
--// atoi is like strconv.Atoi but is fatal if the
--// string is not well formed.
--func atoi(s string) int {
--	i, err := strconv.Atoi(s)
--	if err != nil {
--		fatalf(400, "InvalidParameterValue", "bad number: %v", err)
--	}
--	return i
--}
--
--func fatalf(statusCode int, code string, f string, a ...interface{}) {
--	panic(&ec2.Error{
--		StatusCode: statusCode,
--		Code:       code,
--		Message:    fmt.Sprintf(f, a...),
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/export_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/export_test.go
-deleted file mode 100644
-index 1c24422..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/export_test.go
-+++ /dev/null
-@@ -1,22 +0,0 @@
--package ec2
--
--import (
--	"github.com/mitchellh/goamz/aws"
--	"time"
--)
--
--func Sign(auth aws.Auth, method, path string, params map[string]string, host string) {
--	sign(auth, method, path, params, host)
--}
--
--func fixedTime() time.Time {
--	return time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
--}
--
--func FakeTime(fakeIt bool) {
--	if fakeIt {
--		timeNow = fixedTime
--	} else {
--		timeNow = time.Now
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/responses_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/responses_test.go
-deleted file mode 100644
-index 0a4dbb3..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/responses_test.go
-+++ /dev/null
-@@ -1,854 +0,0 @@
--package ec2_test
--
--var ErrorDump = `
--<?xml version="1.0" encoding="UTF-8"?>
--<Response><Errors><Error><Code>UnsupportedOperation</Code>
--<Message>AMIs with an instance-store root device are not supported for the instance type 't1.micro'.</Message>
--</Error></Errors><RequestID>0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4</RequestID></Response>
--`
--
--// http://goo.gl/Mcm3b
--var RunInstancesExample = `
--<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <reservationId>r-47a5402e</reservationId>
--  <ownerId>999988887777</ownerId>
--  <groupSet>
--      <item>
--          <groupId>sg-67ad940e</groupId>
--          <groupName>default</groupName>
--      </item>
--  </groupSet>
--  <instancesSet>
--    <item>
--      <instanceId>i-2ba64342</instanceId>
--      <imageId>ami-60a54009</imageId>
--      <instanceState>
--        <code>0</code>
--        <name>pending</name>
--      </instanceState>
--      <privateDnsName></privateDnsName>
--      <dnsName></dnsName>
--      <keyName>example-key-name</keyName>
--      <amiLaunchIndex>0</amiLaunchIndex>
--      <instanceType>m1.small</instanceType>
--      <launchTime>2007-08-07T11:51:50.000Z</launchTime>
--      <placement>
--        <availabilityZone>us-east-1b</availabilityZone>
--      </placement>
--      <monitoring>
--        <state>enabled</state>
--      </monitoring>
--      <virtualizationType>paravirtual</virtualizationType>
--      <clientToken/>
--      <tagSet/>
--      <hypervisor>xen</hypervisor>
--    </item>
--    <item>
--      <instanceId>i-2bc64242</instanceId>
--      <imageId>ami-60a54009</imageId>
--      <instanceState>
--        <code>0</code>
--        <name>pending</name>
--      </instanceState>
--      <privateDnsName></privateDnsName>
--      <dnsName></dnsName>
--      <keyName>example-key-name</keyName>
--      <amiLaunchIndex>1</amiLaunchIndex>
--      <instanceType>m1.small</instanceType>
--      <launchTime>2007-08-07T11:51:50.000Z</launchTime>
--      <placement>
--         <availabilityZone>us-east-1b</availabilityZone>
--      </placement>
--      <monitoring>
--        <state>enabled</state>
--      </monitoring>
--      <virtualizationType>paravirtual</virtualizationType>
--      <clientToken/>
--      <tagSet/>
--      <hypervisor>xen</hypervisor>
--    </item>
--    <item>
--      <instanceId>i-2be64332</instanceId>
--      <imageId>ami-60a54009</imageId>
--      <instanceState>
--        <code>0</code>
--        <name>pending</name>
--      </instanceState>
--      <privateDnsName></privateDnsName>
--      <dnsName></dnsName>
--      <keyName>example-key-name</keyName>
--      <amiLaunchIndex>2</amiLaunchIndex>
--      <instanceType>m1.small</instanceType>
--      <launchTime>2007-08-07T11:51:50.000Z</launchTime>
--      <placement>
--         <availabilityZone>us-east-1b</availabilityZone>
--      </placement>
--      <monitoring>
--        <state>enabled</state>
--      </monitoring>
--      <virtualizationType>paravirtual</virtualizationType>
--      <clientToken/>
--      <tagSet/>
--      <hypervisor>xen</hypervisor>
--    </item>
--  </instancesSet>
--</RunInstancesResponse>
--`
--
--// http://goo.gl/GRZgCD
--var RequestSpotInstancesExample = `
--<RequestSpotInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2014-02-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <spotInstanceRequestSet>
--    <item>
--      <spotInstanceRequestId>sir-1a2b3c4d</spotInstanceRequestId>
--      <spotPrice>0.5</spotPrice>
--      <type>one-time</type>
--      <state>open</state>
--      <status>
--        <code>pending-evaluation</code>
--        <updateTime>2008-05-07T12:51:50.000Z</updateTime>
--        <message>Your Spot request has been submitted for review, and is pending evaluation.</message>
--      </status>
--      <availabilityZoneGroup>MyAzGroup</availabilityZoneGroup>
--      <launchSpecification>
--        <imageId>ami-1a2b3c4d</imageId>
--        <keyName>gsg-keypair</keyName>
--        <groupSet>
--          <item>
--            <groupId>sg-1a2b3c4d</groupId>
--            <groupName>websrv</groupName>
--          </item>
--        </groupSet>
--        <instanceType>m1.small</instanceType>
--        <blockDeviceMapping/>
--        <monitoring>
--          <enabled>false</enabled>
--        </monitoring>
--        <ebsOptimized>false</ebsOptimized>
--      </launchSpecification>
--      <createTime>YYYY-MM-DDTHH:MM:SS.000Z</createTime>
--      <productDescription>Linux/UNIX</productDescription>
--    </item>
-- </spotInstanceRequestSet>
--</RequestSpotInstancesResponse>
--`
--
--// http://goo.gl/KsKJJk
--var DescribeSpotRequestsExample = `
--<DescribeSpotInstanceRequestsResponse xmlns="http://ec2.amazonaws.com/doc/2014-02-01/">
--  <requestId>b1719f2a-5334-4479-b2f1-26926EXAMPLE</requestId>
--  <spotInstanceRequestSet>
--    <item>
--      <spotInstanceRequestId>sir-1a2b3c4d</spotInstanceRequestId>
--      <spotPrice>0.5</spotPrice>
--      <type>one-time</type>
--      <state>active</state>
--      <status>
--        <code>fulfilled</code>
--        <updateTime>2008-05-07T12:51:50.000Z</updateTime>
--        <message>Your Spot request is fulfilled.</message>
--      </status>
--      <launchSpecification>
--        <imageId>ami-1a2b3c4d</imageId>
--        <keyName>gsg-keypair</keyName>
--        <groupSet>
--          <item>
--            <groupId>sg-1a2b3c4d</groupId>
--            <groupName>websrv</groupName>
--          </item>
--        </groupSet>
--        <instanceType>m1.small</instanceType>
--        <monitoring>
--          <enabled>false</enabled>
--        </monitoring>
--        <ebsOptimized>false</ebsOptimized>
--      </launchSpecification>
--      <instanceId>i-1a2b3c4d</instanceId>
--      <createTime>YYYY-MM-DDTHH:MM:SS.000Z</createTime>
--      <productDescription>Linux/UNIX</productDescription>
--      <launchedAvailabilityZone>us-east-1a</launchedAvailabilityZone>
--    </item>
--  </spotInstanceRequestSet>
--</DescribeSpotInstanceRequestsResponse>
--`
--
--// http://goo.gl/DcfFgJ
--var CancelSpotRequestsExample = `
--<CancelSpotInstanceRequestsResponse xmlns="http://ec2.amazonaws.com/doc/2014-02-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <spotInstanceRequestSet>
--    <item>
--      <spotInstanceRequestId>sir-1a2b3c4d</spotInstanceRequestId>
--      <state>cancelled</state>
--    </item>
--  </spotInstanceRequestSet>
--</CancelSpotInstanceRequestsResponse>
--`
--
--// http://goo.gl/3BKHj
--var TerminateInstancesExample = `
--<TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <instancesSet>
--    <item>
--      <instanceId>i-3ea74257</instanceId>
--      <currentState>
--        <code>32</code>
--        <name>shutting-down</name>
--      </currentState>
--      <previousState>
--        <code>16</code>
--        <name>running</name>
--      </previousState>
--    </item>
--  </instancesSet>
--</TerminateInstancesResponse>
--`
--
--// http://goo.gl/mLbmw
--var DescribeInstancesExample1 = `
--<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>98e3c9a4-848c-4d6d-8e8a-b1bdEXAMPLE</requestId>
--  <reservationSet>
--    <item>
--      <reservationId>r-b27e30d9</reservationId>
--      <ownerId>999988887777</ownerId>
--      <groupSet>
--        <item>
--          <groupId>sg-67ad940e</groupId>
--          <groupName>default</groupName>
--        </item>
--      </groupSet>
--      <instancesSet>
--        <item>
--          <instanceId>i-c5cd56af</instanceId>
--          <imageId>ami-1a2b3c4d</imageId>
--          <instanceState>
--            <code>16</code>
--            <name>running</name>
--          </instanceState>
--          <privateDnsName>domU-12-31-39-10-56-34.compute-1.internal</privateDnsName>
--          <dnsName>ec2-174-129-165-232.compute-1.amazonaws.com</dnsName>
--          <reason/>
--          <keyName>GSG_Keypair</keyName>
--          <amiLaunchIndex>0</amiLaunchIndex>
--          <productCodes/>
--          <instanceType>m1.small</instanceType>
--          <launchTime>2010-08-17T01:15:18.000Z</launchTime>
--          <placement>
--            <availabilityZone>us-east-1b</availabilityZone>
--            <groupName/>
--          </placement>
--          <kernelId>aki-94c527fd</kernelId>
--          <ramdiskId>ari-96c527ff</ramdiskId>
--          <monitoring>
--            <state>disabled</state>
--          </monitoring>
--          <privateIpAddress>10.198.85.190</privateIpAddress>
--          <ipAddress>174.129.165.232</ipAddress>
--          <architecture>i386</architecture>
--          <rootDeviceType>ebs</rootDeviceType>
--          <rootDeviceName>/dev/sda1</rootDeviceName>
--          <blockDeviceMapping>
--            <item>
--              <deviceName>/dev/sda1</deviceName>
--              <ebs>
--                <volumeId>vol-a082c1c9</volumeId>
--                <status>attached</status>
--                <attachTime>2010-08-17T01:15:21.000Z</attachTime>
--                <deleteOnTermination>false</deleteOnTermination>
--              </ebs>
--            </item>
--          </blockDeviceMapping>
--          <instanceLifecycle>spot</instanceLifecycle>
--          <spotInstanceRequestId>sir-7a688402</spotInstanceRequestId>
--          <virtualizationType>paravirtual</virtualizationType>
--          <clientToken/>
--          <tagSet/>
--          <hypervisor>xen</hypervisor>
--       </item>
--      </instancesSet>
--      <requesterId>854251627541</requesterId>
--    </item>
--    <item>
--      <reservationId>r-b67e30dd</reservationId>
--      <ownerId>999988887777</ownerId>
--      <groupSet>
--        <item>
--          <groupId>sg-67ad940e</groupId>
--          <groupName>default</groupName>
--        </item>
--      </groupSet>
--      <instancesSet>
--        <item>
--          <instanceId>i-d9cd56b3</instanceId>
--          <imageId>ami-1a2b3c4d</imageId>
--          <instanceState>
--            <code>16</code>
--            <name>running</name>
--          </instanceState>
--          <privateDnsName>domU-12-31-39-10-54-E5.compute-1.internal</privateDnsName>
--          <dnsName>ec2-184-73-58-78.compute-1.amazonaws.com</dnsName>
--          <reason/>
--          <keyName>GSG_Keypair</keyName>
--          <amiLaunchIndex>0</amiLaunchIndex>
--          <productCodes/>
--          <instanceType>m1.large</instanceType>
--          <launchTime>2010-08-17T01:15:19.000Z</launchTime>
--          <placement>
--            <availabilityZone>us-east-1b</availabilityZone>
--            <groupName/>
--          </placement>
--          <kernelId>aki-94c527fd</kernelId>
--          <ramdiskId>ari-96c527ff</ramdiskId>
--          <monitoring>
--            <state>disabled</state>
--          </monitoring>
--          <privateIpAddress>10.198.87.19</privateIpAddress>
--          <ipAddress>184.73.58.78</ipAddress>
--          <architecture>i386</architecture>
--          <rootDeviceType>ebs</rootDeviceType>
--          <rootDeviceName>/dev/sda1</rootDeviceName>
--          <blockDeviceMapping>
--            <item>
--              <deviceName>/dev/sda1</deviceName>
--              <ebs>
--                <volumeId>vol-a282c1cb</volumeId>
--                <status>attached</status>
--                <attachTime>2010-08-17T01:15:23.000Z</attachTime>
--                <deleteOnTermination>false</deleteOnTermination>
--              </ebs>
--            </item>
--          </blockDeviceMapping>
--          <instanceLifecycle>spot</instanceLifecycle>
--          <spotInstanceRequestId>sir-55a3aa02</spotInstanceRequestId>
--          <virtualizationType>paravirtual</virtualizationType>
--          <clientToken/>
--          <tagSet/>
--          <hypervisor>xen</hypervisor>
--       </item>
--      </instancesSet>
--      <requesterId>854251627541</requesterId>
--    </item>
--  </reservationSet>
--</DescribeInstancesResponse>
--`
--
--// http://goo.gl/mLbmw
--var DescribeInstancesExample2 = `
--<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <reservationSet>
--    <item>
--      <reservationId>r-bc7e30d7</reservationId>
--      <ownerId>999988887777</ownerId>
--      <groupSet>
--        <item>
--          <groupId>sg-67ad940e</groupId>
--          <groupName>default</groupName>
--        </item>
--      </groupSet>
--      <instancesSet>
--        <item>
--          <instanceId>i-c7cd56ad</instanceId>
--          <imageId>ami-b232d0db</imageId>
--          <instanceState>
--            <code>16</code>
--            <name>running</name>
--          </instanceState>
--          <privateDnsName>domU-12-31-39-01-76-06.compute-1.internal</privateDnsName>
--          <dnsName>ec2-72-44-52-124.compute-1.amazonaws.com</dnsName>
--          <keyName>GSG_Keypair</keyName>
--          <amiLaunchIndex>0</amiLaunchIndex>
--          <productCodes/>
--          <instanceType>m1.small</instanceType>
--          <launchTime>2010-08-17T01:15:16.000Z</launchTime>
--          <placement>
--              <availabilityZone>us-east-1b</availabilityZone>
--          </placement>
--          <kernelId>aki-94c527fd</kernelId>
--          <ramdiskId>ari-96c527ff</ramdiskId>
--          <monitoring>
--              <state>disabled</state>
--          </monitoring>
--          <privateIpAddress>10.255.121.240</privateIpAddress>
--          <ipAddress>72.44.52.124</ipAddress>
--          <architecture>i386</architecture>
--          <rootDeviceType>ebs</rootDeviceType>
--          <rootDeviceName>/dev/sda1</rootDeviceName>
--          <blockDeviceMapping>
--              <item>
--                 <deviceName>/dev/sda1</deviceName>
--                 <ebs>
--                    <volumeId>vol-a482c1cd</volumeId>
--                    <status>attached</status>
--                    <attachTime>2010-08-17T01:15:26.000Z</attachTime>
--                    <deleteOnTermination>true</deleteOnTermination>
--                </ebs>
--             </item>
--          </blockDeviceMapping>
--          <virtualizationType>paravirtual</virtualizationType>
--          <clientToken/>
--          <tagSet>
--              <item>
--                    <key>webserver</key>
--                    <value></value>
--             </item>
--              <item>
--                    <key>stack</key>
--                    <value>Production</value>
--             </item>
--          </tagSet>
--          <hypervisor>xen</hypervisor>
--        </item>
--      </instancesSet>
--    </item>
--  </reservationSet>
--</DescribeInstancesResponse>
--`
--
--// http://goo.gl/cxU41
--var CreateImageExample = `
--<CreateImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <imageId>ami-4fa54026</imageId>
--</CreateImageResponse>
--`
--
--// http://goo.gl/V0U25
--var DescribeImagesExample = `
--<DescribeImagesResponse xmlns="http://ec2.amazonaws.com/doc/2012-08-15/">
--         <requestId>4a4a27a2-2e7c-475d-b35b-ca822EXAMPLE</requestId>
--    <imagesSet>
--        <item>
--            <imageId>ami-a2469acf</imageId>
--            <imageLocation>aws-marketplace/example-marketplace-amzn-ami.1</imageLocation>
--            <imageState>available</imageState>
--            <imageOwnerId>123456789999</imageOwnerId>
--            <isPublic>true</isPublic>
--            <productCodes>
--                <item>
--                    <productCode>a1b2c3d4e5f6g7h8i9j10k11</productCode>
--                    <type>marketplace</type>
--                </item>
--            </productCodes>
--            <architecture>i386</architecture>
--            <imageType>machine</imageType>
--            <kernelId>aki-805ea7e9</kernelId>
--            <imageOwnerAlias>aws-marketplace</imageOwnerAlias>
--            <name>example-marketplace-amzn-ami.1</name>
--            <description>Amazon Linux AMI i386 EBS</description>
--            <rootDeviceType>ebs</rootDeviceType>
--            <rootDeviceName>/dev/sda1</rootDeviceName>
--            <blockDeviceMapping>
--                <item>
--                    <deviceName>/dev/sda1</deviceName>
--                    <ebs>
--                        <snapshotId>snap-787e9403</snapshotId>
--                        <volumeSize>8</volumeSize>
--                        <deleteOnTermination>true</deleteOnTermination>
--                    </ebs>
--                </item>
--            </blockDeviceMapping>
--            <virtualizationType>paravirtual</virtualizationType>
--            <hypervisor>xen</hypervisor>
--        </item>
--    </imagesSet>
--</DescribeImagesResponse>
--`
--
--// http://goo.gl/bHO3z
--var ImageAttributeExample = `
--<DescribeImageAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <imageId>ami-61a54008</imageId>
--   <launchPermission>
--      <item>
--         <group>all</group>
--      </item>
--      <item>
--         <userId>495219933132</userId>
--      </item>
--   </launchPermission>
--</DescribeImageAttributeResponse>
--`
--
--// http://goo.gl/ttcda
--var CreateSnapshotExample = `
--<CreateSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <snapshotId>snap-78a54011</snapshotId>
--  <volumeId>vol-4d826724</volumeId>
--  <status>pending</status>
--  <startTime>2008-05-07T12:51:50.000Z</startTime>
--  <progress>60%</progress>
--  <ownerId>111122223333</ownerId>
--  <volumeSize>10</volumeSize>
--  <description>Daily Backup</description>
--</CreateSnapshotResponse>
--`
--
--// http://goo.gl/vwU1y
--var DeleteSnapshotExample = `
--<DeleteSnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</DeleteSnapshotResponse>
--`
--
--// http://goo.gl/nkovs
--var DescribeSnapshotsExample = `
--<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <snapshotSet>
--      <item>
--         <snapshotId>snap-1a2b3c4d</snapshotId>
--         <volumeId>vol-8875daef</volumeId>
--         <status>pending</status>
--         <startTime>2010-07-29T04:12:01.000Z</startTime>
--         <progress>30%</progress>
--         <ownerId>111122223333</ownerId>
--         <volumeSize>15</volumeSize>
--         <description>Daily Backup</description>
--         <tagSet>
--            <item>
--               <key>Purpose</key>
--               <value>demo_db_14_backup</value>
--            </item>
--         </tagSet>
--      </item>
--   </snapshotSet>
--</DescribeSnapshotsResponse>
--`
--
--// http://goo.gl/YUjO4G
--var ModifyImageAttributeExample = `
--<ModifyImageAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</ModifyImageAttributeResponse>
--`
--
--// http://goo.gl/hQwPCK
--var CopyImageExample = `
--<CopyImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
--   <requestId>60bc441d-fa2c-494d-b155-5d6a3EXAMPLE</requestId>
--   <imageId>ami-4d3c2b1a</imageId>
--</CopyImageResponse>
--`
--
--var CreateKeyPairExample = `
--<CreateKeyPairResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <keyName>foo</keyName>
--  <keyFingerprint>
--     00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00
--  </keyFingerprint>
--  <keyMaterial>---- BEGIN RSA PRIVATE KEY ----
--MIICiTCCAfICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMC
--VVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6
--b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAd
--BgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcN
--MTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYD
--VQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25z
--b2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFt
--YXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ
--21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9T
--rDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpE
--Ibb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4
--nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0Fkb
--FFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTb
--NYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE=
-------END RSA PRIVATE KEY-----
--</keyMaterial>
--</CreateKeyPairResponse>
--`
--
--var DeleteKeyPairExample = `
--<DeleteKeyPairResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</DeleteKeyPairResponse>
--`
--
--// http://goo.gl/Eo7Yl
--var CreateSecurityGroupExample = `
--<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--   <groupId>sg-67ad940e</groupId>
--</CreateSecurityGroupResponse>
--`
--
--// http://goo.gl/k12Uy
--var DescribeSecurityGroupsExample = `
--<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <securityGroupInfo>
--    <item>
--      <ownerId>999988887777</ownerId>
--      <groupName>WebServers</groupName>
--      <groupId>sg-67ad940e</groupId>
--      <groupDescription>Web Servers</groupDescription>
--      <ipPermissions>
--        <item>
--           <ipProtocol>tcp</ipProtocol>
--           <fromPort>80</fromPort>
--           <toPort>80</toPort>
--           <groups/>
--           <ipRanges>
--             <item>
--               <cidrIp>0.0.0.0/0</cidrIp>
--             </item>
--           </ipRanges>
--        </item>
--      </ipPermissions>
--    </item>
--    <item>
--      <ownerId>999988887777</ownerId>
--      <groupName>RangedPortsBySource</groupName>
--      <groupId>sg-76abc467</groupId>
--      <groupDescription>Group A</groupDescription>
--      <ipPermissions>
--        <item>
--           <ipProtocol>tcp</ipProtocol>
--           <fromPort>6000</fromPort>
--           <toPort>7000</toPort>
--           <groups/>
--           <ipRanges/>
--        </item>
--      </ipPermissions>
--    </item>
--  </securityGroupInfo>
--</DescribeSecurityGroupsResponse>
--`
--
--// A dump which includes groups within ip permissions.
--var DescribeSecurityGroupsDump = `
--<?xml version="1.0" encoding="UTF-8"?>
--<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--    <requestId>87b92b57-cc6e-48b2-943f-f6f0e5c9f46c</requestId>
--    <securityGroupInfo>
--        <item>
--            <ownerId>12345</ownerId>
--            <groupName>default</groupName>
--            <groupDescription>default group</groupDescription>
--            <ipPermissions>
--                <item>
--                    <ipProtocol>icmp</ipProtocol>
--                    <fromPort>-1</fromPort>
--                    <toPort>-1</toPort>
--                    <groups>
--                        <item>
--                            <userId>12345</userId>
--                            <groupName>default</groupName>
--                            <groupId>sg-67ad940e</groupId>
--                        </item>
--                    </groups>
--                    <ipRanges/>
--                </item>
--                <item>
--                    <ipProtocol>tcp</ipProtocol>
--                    <fromPort>0</fromPort>
--                    <toPort>65535</toPort>
--                    <groups>
--                        <item>
--                            <userId>12345</userId>
--                            <groupName>other</groupName>
--                            <groupId>sg-76abc467</groupId>
--                        </item>
--                    </groups>
--                    <ipRanges/>
--                </item>
--            </ipPermissions>
--        </item>
--    </securityGroupInfo>
--</DescribeSecurityGroupsResponse>
--`
--
--// http://goo.gl/QJJDO
--var DeleteSecurityGroupExample = `
--<DeleteSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--</DeleteSecurityGroupResponse>
--`
--
--// http://goo.gl/u2sDJ
--var AuthorizeSecurityGroupIngressExample = `
--<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</AuthorizeSecurityGroupIngressResponse>
--`
--
--// http://goo.gl/u2sDJ
--var AuthorizeSecurityGroupEgressExample = `
--<AuthorizeSecurityGroupEgressResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--</AuthorizeSecurityGroupEgressResponse>
--`
--
--// http://goo.gl/Mz7xr
--var RevokeSecurityGroupIngressExample = `
--<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</RevokeSecurityGroupIngressResponse>
--`
--
--// http://goo.gl/Vmkqc
--var CreateTagsExample = `
--<CreateTagsResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--</CreateTagsResponse>
--`
--
--// http://goo.gl/awKeF
--var StartInstancesExample = `
--<StartInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <instancesSet>
--    <item>
--      <instanceId>i-10a64379</instanceId>
--      <currentState>
--          <code>0</code>
--          <name>pending</name>
--      </currentState>
--      <previousState>
--          <code>80</code>
--          <name>stopped</name>
--      </previousState>
--    </item>
--  </instancesSet>
--</StartInstancesResponse>
--`
--
--// http://goo.gl/436dJ
--var StopInstancesExample = `
--<StopInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <instancesSet>
--    <item>
--      <instanceId>i-10a64379</instanceId>
--      <currentState>
--          <code>64</code>
--          <name>stopping</name>
--      </currentState>
--      <previousState>
--          <code>16</code>
--          <name>running</name>
--      </previousState>
--    </item>
--  </instancesSet>
--</StopInstancesResponse>
--`
--
--// http://goo.gl/baoUf
--var RebootInstancesExample = `
--<RebootInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2011-12-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</RebootInstancesResponse>
--`
--
--// http://goo.gl/9rprDN
--var AllocateAddressExample = `
--<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <publicIp>198.51.100.1</publicIp>
--   <domain>vpc</domain>
--   <allocationId>eipalloc-5723d13e</allocationId>
--</AllocateAddressResponse>
--`
--
--// http://goo.gl/3Q0oCc
--var ReleaseAddressExample = `
--<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--</ReleaseAddressResponse>
--`
--
--// http://goo.gl/uOSQE
--var AssociateAddressExample = `
--<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--   <associationId>eipassoc-fc5ca095</associationId>
--</AssociateAddressResponse>
--`
--
--// http://goo.gl/LrOa0
--var DisassociateAddressExample = `
--<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
--   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--   <return>true</return>
--</DisassociateAddressResponse>
--`
--
--// http://goo.gl/icuXh5
--var ModifyInstanceExample = `
--<ModifyImageAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</ModifyImageAttributeResponse>
--`
--
--var CreateVpcExample = `
--<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
--   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
--   <vpc>
--      <vpcId>vpc-1a2b3c4d</vpcId>
--      <state>pending</state>
--      <cidrBlock>10.0.0.0/16</cidrBlock>
--      <dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId>
--      <instanceTenancy>default</instanceTenancy>
--      <tagSet/>
--   </vpc>
--</CreateVpcResponse>
--`
--
--var DescribeVpcsExample = `
--<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
--  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
--  <vpcSet>
--    <item>
--      <vpcId>vpc-1a2b3c4d</vpcId>
--      <state>available</state>
--      <cidrBlock>10.0.0.0/23</cidrBlock>
--      <dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId>
--      <instanceTenancy>default</instanceTenancy>
--      <isDefault>false</isDefault>
--      <tagSet/>
--    </item>
--  </vpcSet>
--</DescribeVpcsResponse>
--`
--
--var CreateSubnetExample = `
--<CreateSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
--  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
--  <subnet>
--    <subnetId>subnet-9d4a7b6c</subnetId>
--    <state>pending</state>
--    <vpcId>vpc-1a2b3c4d</vpcId>
--    <cidrBlock>10.0.1.0/24</cidrBlock>
--    <availableIpAddressCount>251</availableIpAddressCount>
--    <availabilityZone>us-east-1a</availabilityZone>
--    <tagSet/>
--  </subnet>
--</CreateSubnetResponse>
--`
--
--// http://goo.gl/r6ZCPm
--var ResetImageAttributeExample = `
--<ResetImageAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
--  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
--  <return>true</return>
--</ResetImageAttributeResponse>
--`
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign.go
-deleted file mode 100644
-index bffc3c7..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign.go
-+++ /dev/null
-@@ -1,45 +0,0 @@
--package ec2
--
--import (
--	"crypto/hmac"
--	"crypto/sha256"
--	"encoding/base64"
--	"github.com/mitchellh/goamz/aws"
--	"sort"
--	"strings"
--)
--
--// ----------------------------------------------------------------------------
--// EC2 signing (http://goo.gl/fQmAN)
--
--var b64 = base64.StdEncoding
--
--func sign(auth aws.Auth, method, path string, params map[string]string, host string) {
--	params["AWSAccessKeyId"] = auth.AccessKey
--	params["SignatureVersion"] = "2"
--	params["SignatureMethod"] = "HmacSHA256"
--	if auth.Token != "" {
--		params["SecurityToken"] = auth.Token
--	}
--
--	// AWS specifies that the parameters in a signed request must
--	// be provided in the natural order of the keys. This is distinct
--	// from the natural order of the encoded value of key=value.
--	// Percent and equals affect the sorting order.
--	var keys, sarray []string
--	for k, _ := range params {
--		keys = append(keys, k)
--	}
--	sort.Strings(keys)
--	for _, k := range keys {
--		sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(params[k]))
--	}
--	joined := strings.Join(sarray, "&")
--	payload := method + "\n" + host + "\n" + path + "\n" + joined
--	hash := hmac.New(sha256.New, []byte(auth.SecretKey))
--	hash.Write([]byte(payload))
--	signature := make([]byte, b64.EncodedLen(hash.Size()))
--	b64.Encode(signature, hash.Sum(nil))
--
--	params["Signature"] = string(signature)
--}
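
The removed sign.go above implements AWS Signature Version 2: the query parameters are sorted by key, percent-encoded, joined with "&", concatenated with the method, host and path, and the result is HMAC-SHA256-signed with the secret key and base64-encoded. A self-contained sketch of the same scheme, for reference only (url.QueryEscape stands in for goamz's aws.Encode here, so the encoding is not byte-for-byte identical to the removed code):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/url"
	"sort"
	"strings"
)

// signV2 computes an AWS Signature Version 2 signature over the request.
func signV2(secret, method, host, path string, params map[string]string) string {
	keys := make([]string, 0, len(params))
	for k := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys) // AWS requires the natural order of the keys
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, url.QueryEscape(k)+"="+url.QueryEscape(params[k]))
	}
	payload := method + "\n" + host + "\n" + path + "\n" + strings.Join(pairs, "&")
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(payload))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	params := map[string]string{"Action": "DescribeInstances", "SignatureVersion": "2"}
	fmt.Println(signV2("secret", "GET", "ec2.amazonaws.com", "/", params))
}
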
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign_test.go b/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign_test.go
-deleted file mode 100644
-index 86d203e..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/goamz/ec2/sign_test.go
-+++ /dev/null
-@@ -1,68 +0,0 @@
--package ec2_test
--
--import (
--	"github.com/mitchellh/goamz/aws"
--	"github.com/mitchellh/goamz/ec2"
--	. "github.com/motain/gocheck"
--)
--
--// EC2 ReST authentication docs: http://goo.gl/fQmAN
--
--var testAuth = aws.Auth{"user", "secret", ""}
--
--func (s *S) TestBasicSignature(c *C) {
--	params := map[string]string{}
--	ec2.Sign(testAuth, "GET", "/path", params, "localhost")
--	c.Assert(params["SignatureVersion"], Equals, "2")
--	c.Assert(params["SignatureMethod"], Equals, "HmacSHA256")
--	expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4="
--	c.Assert(params["Signature"], Equals, expected)
--}
--
--func (s *S) TestParamSignature(c *C) {
--	params := map[string]string{
--		"param1": "value1",
--		"param2": "value2",
--		"param3": "value3",
--	}
--	ec2.Sign(testAuth, "GET", "/path", params, "localhost")
--	expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU="
--	c.Assert(params["Signature"], Equals, expected)
--}
--
--func (s *S) TestManyParams(c *C) {
--	params := map[string]string{
--		"param1":  "value10",
--		"param2":  "value2",
--		"param3":  "value3",
--		"param4":  "value4",
--		"param5":  "value5",
--		"param6":  "value6",
--		"param7":  "value7",
--		"param8":  "value8",
--		"param9":  "value9",
--		"param10": "value1",
--	}
--	ec2.Sign(testAuth, "GET", "/path", params, "localhost")
--	expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg="
--	c.Assert(params["Signature"], Equals, expected)
--}
--
--func (s *S) TestEscaping(c *C) {
--	params := map[string]string{"Nonce": "+ +"}
--	ec2.Sign(testAuth, "GET", "/path", params, "localhost")
--	c.Assert(params["Nonce"], Equals, "+ +")
--	expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA="
--	c.Assert(params["Signature"], Equals, expected)
--}
--
--func (s *S) TestSignatureExample1(c *C) {
--	params := map[string]string{
--		"Timestamp": "2009-02-01T12:53:20+00:00",
--		"Version":   "2007-11-07",
--		"Action":    "ListDomains",
--	}
--	ec2.Sign(aws.Auth{"access", "secret", ""}, "GET", "/", params, "sdb.amazonaws.com")
--	expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ="
--	c.Assert(params["Signature"], Equals, expected)
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE
-deleted file mode 100644
-index f9c841a..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/LICENSE
-+++ /dev/null
-@@ -1,21 +0,0 @@
--The MIT License (MIT)
--
--Copyright (c) 2013 Mitchell Hashimoto
--
--Permission is hereby granted, free of charge, to any person obtaining a copy
--of this software and associated documentation files (the "Software"), to deal
--in the Software without restriction, including without limitation the rights
--to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
--copies of the Software, and to permit persons to whom the Software is
--furnished to do so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in
--all copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
--THE SOFTWARE.
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md
-deleted file mode 100644
-index 659d688..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/README.md
-+++ /dev/null
-@@ -1,46 +0,0 @@
--# mapstructure
--
--mapstructure is a Go library for decoding generic map values to structures
--and vice versa, while providing helpful error handling.
--
--This library is most useful when decoding values from some data stream (JSON,
--Gob, etc.) where you don't _quite_ know the structure of the underlying data
--until you read a part of it. You can therefore read a `map[string]interface{}`
--and use this library to decode it into the proper underlying native Go
--structure.
--
--## Installation
--
--Standard `go get`:
--
--```
--$ go get github.com/mitchellh/mapstructure
--```
--
--## Usage & Example
--
--For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
--
--The `Decode` function has examples associated with it there.
--
--## But Why?!
--
--Go offers fantastic standard libraries for decoding formats such as JSON.
--The standard method is to have a struct pre-created, and populate that struct
--from the bytes of the encoded format. This is great, but the problem is if
--you have configuration or an encoding that changes slightly depending on
--specific fields. For example, consider this JSON:
--
--```json
--{
--  "type": "person",
--  "name": "Mitchell"
--}
--```
--
--Perhaps we can't populate a specific structure without first reading
--the "type" field from the JSON. We could always do two passes over the
--decoding of the JSON (reading the "type" first, and the rest later).
--However, it is much simpler to just decode this into a `map[string]interface{}`
--structure, read the "type" key, then use something like this library
--to decode it into the proper structure.
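
The removed README describes the intended workflow: decode the raw JSON into a map[string]interface{}, inspect the "type" key, then decode the map into the matching struct. A minimal usage sketch against the Decode function defined in the removed mapstructure.go (the Person struct below is illustrative, not from the original):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	var raw map[string]interface{}
	_ = json.Unmarshal([]byte(`{"type": "person", "name": "Mitchell"}`), &raw)

	// Dispatch on the "type" key first, then decode into the matching struct.
	if raw["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // "Mitchell"
	}
}
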
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go
-deleted file mode 100644
-index 087a392..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks.go
-+++ /dev/null
-@@ -1,84 +0,0 @@
--package mapstructure
--
--import (
--	"reflect"
--	"strconv"
--	"strings"
--)
--
--// ComposeDecodeHookFunc creates a single DecodeHookFunc that
--// automatically composes multiple DecodeHookFuncs.
--//
--// The composed funcs are called in order, with the result of the
--// previous transformation.
--func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
--	return func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		var err error
--		for _, f1 := range fs {
--			data, err = f1(f, t, data)
--			if err != nil {
--				return nil, err
--			}
--
--			// Modify the from kind to be correct with the new data
--			f = getKind(reflect.ValueOf(data))
--		}
--
--		return data, nil
--	}
--}
--
--// StringToSliceHookFunc returns a DecodeHookFunc that converts
--// string to []string by splitting on the given sep.
--func StringToSliceHookFunc(sep string) DecodeHookFunc {
--	return func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		if f != reflect.String || t != reflect.Slice {
--			return data, nil
--		}
--
--		raw := data.(string)
--		if raw == "" {
--			return []string{}, nil
--		}
--
--		return strings.Split(raw, sep), nil
--	}
--}
--
--func WeaklyTypedHook(
--	f reflect.Kind,
--	t reflect.Kind,
--	data interface{}) (interface{}, error) {
--	dataVal := reflect.ValueOf(data)
--	switch t {
--	case reflect.String:
--		switch f {
--		case reflect.Bool:
--			if dataVal.Bool() {
--				return "1", nil
--			} else {
--				return "0", nil
--			}
--		case reflect.Float32:
--			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
--		case reflect.Int:
--			return strconv.FormatInt(dataVal.Int(), 10), nil
--		case reflect.Slice:
--			dataType := dataVal.Type()
--			elemKind := dataType.Elem().Kind()
--			if elemKind == reflect.Uint8 {
--				return string(dataVal.Interface().([]uint8)), nil
--			}
--		case reflect.Uint:
--			return strconv.FormatUint(dataVal.Uint(), 10), nil
--		}
--	}
--
--	return data, nil
--}
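
The hook functions above are meant to be plugged into DecoderConfig.DecodeHook so values are pre-processed before decoding. A short sketch wiring StringToSliceHookFunc into a decoder, using the DecoderConfig/NewDecoder API from the removed mapstructure.go (the Config struct below is illustrative):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Tags []string `mapstructure:"tags"`
}

func main() {
	var out Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Split comma-separated strings into []string before decoding.
		DecodeHook: mapstructure.StringToSliceHookFunc(","),
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(map[string]interface{}{"tags": "a,b,c"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Tags) // [a b c]
}
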
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
-deleted file mode 100644
-index b417dee..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/decode_hooks_test.go
-+++ /dev/null
-@@ -1,191 +0,0 @@
--package mapstructure
--
--import (
--	"errors"
--	"reflect"
--	"testing"
--)
--
--func TestComposeDecodeHookFunc(t *testing.T) {
--	f1 := func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		return data.(string) + "foo", nil
--	}
--
--	f2 := func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		return data.(string) + "bar", nil
--	}
--
--	f := ComposeDecodeHookFunc(f1, f2)
--
--	result, err := f(reflect.String, reflect.Slice, "")
--	if err != nil {
--		t.Fatalf("bad: %s", err)
--	}
--	if result.(string) != "foobar" {
--		t.Fatalf("bad: %#v", result)
--	}
--}
--
--func TestComposeDecodeHookFunc_err(t *testing.T) {
--	f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
--		return nil, errors.New("foo")
--	}
--
--	f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
--		panic("NOPE")
--	}
--
--	f := ComposeDecodeHookFunc(f1, f2)
--
--	_, err := f(reflect.String, reflect.Slice, 42)
--	if err.Error() != "foo" {
--		t.Fatalf("bad: %s", err)
--	}
--}
--
--func TestComposeDecodeHookFunc_kinds(t *testing.T) {
--	var f2From reflect.Kind
--
--	f1 := func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		return int(42), nil
--	}
--
--	f2 := func(
--		f reflect.Kind,
--		t reflect.Kind,
--		data interface{}) (interface{}, error) {
--		f2From = f
--		return data, nil
--	}
--
--	f := ComposeDecodeHookFunc(f1, f2)
--
--	_, err := f(reflect.String, reflect.Slice, "")
--	if err != nil {
--		t.Fatalf("bad: %s", err)
--	}
--	if f2From != reflect.Int {
--		t.Fatalf("bad: %#v", f2From)
--	}
--}
--
--func TestStringToSliceHookFunc(t *testing.T) {
--	f := StringToSliceHookFunc(",")
--
--	cases := []struct {
--		f, t   reflect.Kind
--		data   interface{}
--		result interface{}
--		err    bool
--	}{
--		{reflect.Slice, reflect.Slice, 42, 42, false},
--		{reflect.String, reflect.String, 42, 42, false},
--		{
--			reflect.String,
--			reflect.Slice,
--			"foo,bar,baz",
--			[]string{"foo", "bar", "baz"},
--			false,
--		},
--		{
--			reflect.String,
--			reflect.Slice,
--			"",
--			[]string{},
--			false,
--		},
--	}
--
--	for i, tc := range cases {
--		actual, err := f(tc.f, tc.t, tc.data)
--		if tc.err != (err != nil) {
--			t.Fatalf("case %d: expected err %#v", i, tc.err)
--		}
--		if !reflect.DeepEqual(actual, tc.result) {
--			t.Fatalf(
--				"case %d: expected %#v, got %#v",
--				i, tc.result, actual)
--		}
--	}
--}
--
--func TestWeaklyTypedHook(t *testing.T) {
--	var f DecodeHookFunc = WeaklyTypedHook
--
--	cases := []struct {
--		f, t   reflect.Kind
--		data   interface{}
--		result interface{}
--		err    bool
--	}{
--		// TO STRING
--		{
--			reflect.Bool,
--			reflect.String,
--			false,
--			"0",
--			false,
--		},
--
--		{
--			reflect.Bool,
--			reflect.String,
--			true,
--			"1",
--			false,
--		},
--
--		{
--			reflect.Float32,
--			reflect.String,
--			float32(7),
--			"7",
--			false,
--		},
--
--		{
--			reflect.Int,
--			reflect.String,
--			int(7),
--			"7",
--			false,
--		},
--
--		{
--			reflect.Slice,
--			reflect.String,
--			[]uint8("foo"),
--			"foo",
--			false,
--		},
--
--		{
--			reflect.Uint,
--			reflect.String,
--			uint(7),
--			"7",
--			false,
--		},
--	}
--
--	for i, tc := range cases {
--		actual, err := f(tc.f, tc.t, tc.data)
--		if tc.err != (err != nil) {
--			t.Fatalf("case %d: expected err %#v", i, tc.err)
--		}
--		if !reflect.DeepEqual(actual, tc.result) {
--			t.Fatalf(
--				"case %d: expected %#v, got %#v",
--				i, tc.result, actual)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go
-deleted file mode 100644
-index 3460799..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/error.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--package mapstructure
--
--import (
--	"fmt"
--	"strings"
--)
--
--// Error implements the error interface and can represent multiple
--// errors that occur in the course of a single decode.
--type Error struct {
--	Errors []string
--}
--
--func (e *Error) Error() string {
--	points := make([]string, len(e.Errors))
--	for i, err := range e.Errors {
--		points[i] = fmt.Sprintf("* %s", err)
--	}
--
--	return fmt.Sprintf(
--		"%d error(s) decoding:\n\n%s",
--		len(e.Errors), strings.Join(points, "\n"))
--}
--
--func appendErrors(errors []string, err error) []string {
--	switch e := err.(type) {
--	case *Error:
--		return append(errors, e.Errors...)
--	default:
--		return append(errors, e.Error())
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go
-deleted file mode 100644
-index 381ba5d..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure.go
-+++ /dev/null
-@@ -1,704 +0,0 @@
--// The mapstructure package exposes functionality to convert an
--// arbitrary map[string]interface{} into a native Go structure.
--//
--// The Go structure can be arbitrarily complex, containing slices,
--// other structs, etc. and the decoder will properly decode nested
--// maps and so on into the proper structures in the native Go struct.
--// See the examples to see what the decoder is capable of.
--package mapstructure
--
--import (
--	"errors"
--	"fmt"
--	"reflect"
--	"sort"
--	"strconv"
--	"strings"
--)
--
--// DecodeHookFunc is the callback function that can be used for
--// data transformations. See "DecodeHook" in the DecoderConfig
--// struct.
--type DecodeHookFunc func(
--	from reflect.Kind,
--	to reflect.Kind,
--	data interface{}) (interface{}, error)
--
--// DecoderConfig is the configuration that is used to create a new decoder
--// and allows customization of various aspects of decoding.
--type DecoderConfig struct {
--	// DecodeHook, if set, will be called before any decoding and any
--	// type conversion (if WeaklyTypedInput is on). This lets you modify
--	// the values before they're set down onto the resulting struct.
--	//
--	// If an error is returned, the entire decode will fail with that
--	// error.
--	DecodeHook DecodeHookFunc
--
--	// If ErrorUnused is true, then it is an error for there to exist
--	// keys in the original map that were unused in the decoding process
--	// (extra keys).
--	ErrorUnused bool
--
--	// If WeaklyTypedInput is true, the decoder will make the following
--	// "weak" conversions:
--	//
--	//   - bools to string (true = "1", false = "0")
--	//   - numbers to string (base 10)
--	//   - bools to int/uint (true = 1, false = 0)
--	//   - strings to int/uint (base implied by prefix)
--	//   - int to bool (true if value != 0)
--	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
--	//     FALSE, false, False. Anything else is an error)
--	//   - empty array = empty map and vice versa
--	//
--	WeaklyTypedInput bool
--
--	// Metadata is the struct that will contain extra metadata about
--	// the decoding. If this is nil, then no metadata will be tracked.
--	Metadata *Metadata
--
--	// Result is a pointer to the struct that will contain the decoded
--	// value.
--	Result interface{}
--
--	// The tag name that mapstructure reads for field names. This
--	// defaults to "mapstructure"
--	TagName string
--}
--
--// A Decoder takes a raw interface value and turns it into structured
--// data, keeping track of rich error information along the way in case
--// anything goes wrong. Unlike the basic top-level Decode method, you can
--// more finely control how the Decoder behaves using the DecoderConfig
--// structure. The top-level Decode method is just a convenience that sets
--// up the most basic Decoder.
--type Decoder struct {
--	config *DecoderConfig
--}
--
--// Metadata contains information about decoding a structure that
--// is tedious or difficult to get otherwise.
--type Metadata struct {
--	// Keys are the keys of the structure which were successfully decoded
--	Keys []string
--
--	// Unused is a slice of keys that were found in the raw value but
--	// weren't decoded since there was no matching field in the result interface
--	Unused []string
--}
--
--// Decode takes a map and uses reflection to convert it into the
--// given Go native structure. val must be a pointer to a struct.
--func Decode(m interface{}, rawVal interface{}) error {
--	config := &DecoderConfig{
--		Metadata: nil,
--		Result:   rawVal,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		return err
--	}
--
--	return decoder.Decode(m)
--}
--
--// WeakDecode is the same as Decode but is shorthand to enable
--// WeaklyTypedInput. See DecoderConfig for more info.
--func WeakDecode(input, output interface{}) error {
--	config := &DecoderConfig{
--		Metadata:         nil,
--		Result:           output,
--		WeaklyTypedInput: true,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		return err
--	}
--
--	return decoder.Decode(input)
--}
--
--// NewDecoder returns a new decoder for the given configuration. Once
--// a decoder has been returned, the same configuration must not be used
--// again.
--func NewDecoder(config *DecoderConfig) (*Decoder, error) {
--	val := reflect.ValueOf(config.Result)
--	if val.Kind() != reflect.Ptr {
--		return nil, errors.New("result must be a pointer")
--	}
--
--	val = val.Elem()
--	if !val.CanAddr() {
--		return nil, errors.New("result must be addressable (a pointer)")
--	}
--
--	if config.Metadata != nil {
--		if config.Metadata.Keys == nil {
--			config.Metadata.Keys = make([]string, 0)
--		}
--
--		if config.Metadata.Unused == nil {
--			config.Metadata.Unused = make([]string, 0)
--		}
--	}
--
--	if config.TagName == "" {
--		config.TagName = "mapstructure"
--	}
--
--	result := &Decoder{
--		config: config,
--	}
--
--	return result, nil
--}
--
--// Decode decodes the given raw interface to the target pointer specified
--// by the configuration.
--func (d *Decoder) Decode(raw interface{}) error {
--	return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
--}
--
--// Decodes an unknown data type into a specific reflection value.
--func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
--	if data == nil {
--		// If the data is nil, then we don't set anything.
--		return nil
--	}
--
--	dataVal := reflect.ValueOf(data)
--	if !dataVal.IsValid() {
--		// If the data value is invalid, then we just set the value
--		// to be the zero value.
--		val.Set(reflect.Zero(val.Type()))
--		return nil
--	}
--
--	if d.config.DecodeHook != nil {
--		// We have a DecodeHook, so let's pre-process the data.
--		var err error
--		data, err = d.config.DecodeHook(getKind(dataVal), getKind(val), data)
--		if err != nil {
--			return err
--		}
--	}
--
--	var err error
--	dataKind := getKind(val)
--	switch dataKind {
--	case reflect.Bool:
--		err = d.decodeBool(name, data, val)
--	case reflect.Interface:
--		err = d.decodeBasic(name, data, val)
--	case reflect.String:
--		err = d.decodeString(name, data, val)
--	case reflect.Int:
--		err = d.decodeInt(name, data, val)
--	case reflect.Uint:
--		err = d.decodeUint(name, data, val)
--	case reflect.Float32:
--		err = d.decodeFloat(name, data, val)
--	case reflect.Struct:
--		err = d.decodeStruct(name, data, val)
--	case reflect.Map:
--		err = d.decodeMap(name, data, val)
--	case reflect.Ptr:
--		err = d.decodePtr(name, data, val)
--	case reflect.Slice:
--		err = d.decodeSlice(name, data, val)
--	default:
--		// If we reached this point then we weren't able to decode it
--		return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
--	}
--
--	// If we reached here, then we successfully decoded SOMETHING, so
--	// mark the key as used if we're tracking metadata.
--	if d.config.Metadata != nil && name != "" {
--		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
--	}
--
--	return err
--}
--
--// This decodes a basic type (bool, int, string, etc.) and sets the
--// value to "data" of that type.
--func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataValType := dataVal.Type()
--	if !dataValType.AssignableTo(val.Type()) {
--		return fmt.Errorf(
--			"'%s' expected type '%s', got '%s'",
--			name, val.Type(), dataValType)
--	}
--
--	val.Set(dataVal)
--	return nil
--}
--
--func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataKind := getKind(dataVal)
--
--	converted := true
--	switch {
--	case dataKind == reflect.String:
--		val.SetString(dataVal.String())
--	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
--		if dataVal.Bool() {
--			val.SetString("1")
--		} else {
--			val.SetString("0")
--		}
--	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
--		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
--	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
--		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
--	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
--		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
--	case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
--		dataType := dataVal.Type()
--		elemKind := dataType.Elem().Kind()
--		switch {
--		case elemKind == reflect.Uint8:
--			val.SetString(string(dataVal.Interface().([]uint8)))
--		default:
--			converted = false
--		}
--	default:
--		converted = false
--	}
--
--	if !converted {
--		return fmt.Errorf(
--			"'%s' expected type '%s', got unconvertible type '%s'",
--			name, val.Type(), dataVal.Type())
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataKind := getKind(dataVal)
--
--	switch {
--	case dataKind == reflect.Int:
--		val.SetInt(dataVal.Int())
--	case dataKind == reflect.Uint:
--		val.SetInt(int64(dataVal.Uint()))
--	case dataKind == reflect.Float32:
--		val.SetInt(int64(dataVal.Float()))
--	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
--		if dataVal.Bool() {
--			val.SetInt(1)
--		} else {
--			val.SetInt(0)
--		}
--	case dataKind == reflect.String && d.config.WeaklyTypedInput:
--		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
--		if err == nil {
--			val.SetInt(i)
--		} else {
--			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
--		}
--	default:
--		return fmt.Errorf(
--			"'%s' expected type '%s', got unconvertible type '%s'",
--			name, val.Type(), dataVal.Type())
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataKind := getKind(dataVal)
--
--	switch {
--	case dataKind == reflect.Int:
--		val.SetUint(uint64(dataVal.Int()))
--	case dataKind == reflect.Uint:
--		val.SetUint(dataVal.Uint())
--	case dataKind == reflect.Float32:
--		val.SetUint(uint64(dataVal.Float()))
--	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
--		if dataVal.Bool() {
--			val.SetUint(1)
--		} else {
--			val.SetUint(0)
--		}
--	case dataKind == reflect.String && d.config.WeaklyTypedInput:
--		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
--		if err == nil {
--			val.SetUint(i)
--		} else {
--			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
--		}
--	default:
--		return fmt.Errorf(
--			"'%s' expected type '%s', got unconvertible type '%s'",
--			name, val.Type(), dataVal.Type())
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataKind := getKind(dataVal)
--
--	switch {
--	case dataKind == reflect.Bool:
--		val.SetBool(dataVal.Bool())
--	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
--		val.SetBool(dataVal.Int() != 0)
--	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
--		val.SetBool(dataVal.Uint() != 0)
--	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
--		val.SetBool(dataVal.Float() != 0)
--	case dataKind == reflect.String && d.config.WeaklyTypedInput:
--		b, err := strconv.ParseBool(dataVal.String())
--		if err == nil {
--			val.SetBool(b)
--		} else if dataVal.String() == "" {
--			val.SetBool(false)
--		} else {
--			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
--		}
--	default:
--		return fmt.Errorf(
--			"'%s' expected type '%s', got unconvertible type '%s'",
--			name, val.Type(), dataVal.Type())
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.ValueOf(data)
--	dataKind := getKind(dataVal)
--
--	switch {
--	case dataKind == reflect.Int:
--		val.SetFloat(float64(dataVal.Int()))
--	case dataKind == reflect.Uint:
--		val.SetFloat(float64(dataVal.Uint()))
--	case dataKind == reflect.Float32:
--		val.SetFloat(float64(dataVal.Float()))
--	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
--		if dataVal.Bool() {
--			val.SetFloat(1)
--		} else {
--			val.SetFloat(0)
--		}
--	case dataKind == reflect.String && d.config.WeaklyTypedInput:
--		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
--		if err == nil {
--			val.SetFloat(f)
--		} else {
--			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
--		}
--	default:
--		return fmt.Errorf(
--			"'%s' expected type '%s', got unconvertible type '%s'",
--			name, val.Type(), dataVal.Type())
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
--	valType := val.Type()
--	valKeyType := valType.Key()
--	valElemType := valType.Elem()
--
--	// Make a new map to hold our result
--	mapType := reflect.MapOf(valKeyType, valElemType)
--	valMap := reflect.MakeMap(mapType)
--
--	// Check input type
--	dataVal := reflect.Indirect(reflect.ValueOf(data))
--	if dataVal.Kind() != reflect.Map {
--		// Accept empty array/slice instead of an empty map in weakly typed mode
--		if d.config.WeaklyTypedInput &&
--			(dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) &&
--			dataVal.Len() == 0 {
--			val.Set(valMap)
--			return nil
--		} else {
--			return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
--		}
--	}
--
--	// Accumulate errors
--	errors := make([]string, 0)
--
--	for _, k := range dataVal.MapKeys() {
--		fieldName := fmt.Sprintf("%s[%s]", name, k)
--
--		// First decode the key into the proper type
--		currentKey := reflect.Indirect(reflect.New(valKeyType))
--		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
--			errors = appendErrors(errors, err)
--			continue
--		}
--
--		// Next decode the data into the proper type
--		v := dataVal.MapIndex(k).Interface()
--		currentVal := reflect.Indirect(reflect.New(valElemType))
--		if err := d.decode(fieldName, v, currentVal); err != nil {
--			errors = appendErrors(errors, err)
--			continue
--		}
--
--		valMap.SetMapIndex(currentKey, currentVal)
--	}
--
--	// Set the built up map to the value
--	val.Set(valMap)
--
--	// If we had errors, return those
--	if len(errors) > 0 {
--		return &Error{errors}
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
--	// Create an element of the concrete (non pointer) type and decode
--	// into that. Then set the value of the pointer to this type.
--	valType := val.Type()
--	valElemType := valType.Elem()
--	realVal := reflect.New(valElemType)
--	if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
--		return err
--	}
--
--	val.Set(realVal)
--	return nil
--}
--
--func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.Indirect(reflect.ValueOf(data))
--	dataValKind := dataVal.Kind()
--	valType := val.Type()
--	valElemType := valType.Elem()
--	sliceType := reflect.SliceOf(valElemType)
--
--	// Check input type
--	if dataValKind != reflect.Array && dataValKind != reflect.Slice {
--		// Accept empty map instead of array/slice in weakly typed mode
--		if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
--			val.Set(reflect.MakeSlice(sliceType, 0, 0))
--			return nil
--		} else {
--			return fmt.Errorf(
--				"'%s': source data must be an array or slice, got %s", name, dataValKind)
--		}
--	}
--
--	// Make a new slice to hold our result, same size as the original data.
--	valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
--
--	// Accumulate any errors
--	errors := make([]string, 0)
--
--	for i := 0; i < dataVal.Len(); i++ {
--		currentData := dataVal.Index(i).Interface()
--		currentField := valSlice.Index(i)
--
--		fieldName := fmt.Sprintf("%s[%d]", name, i)
--		if err := d.decode(fieldName, currentData, currentField); err != nil {
--			errors = appendErrors(errors, err)
--		}
--	}
--
--	// Finally, set the value to the slice we built up
--	val.Set(valSlice)
--
--	// If there were errors, we return those
--	if len(errors) > 0 {
--		return &Error{errors}
--	}
--
--	return nil
--}
--
--func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
--	dataVal := reflect.Indirect(reflect.ValueOf(data))
--	dataValKind := dataVal.Kind()
--	if dataValKind != reflect.Map {
--		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
--	}
--
--	dataValType := dataVal.Type()
--	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
--		return fmt.Errorf(
--			"'%s' needs a map with string keys, has '%s' keys",
--			name, dataValType.Key().Kind())
--	}
--
--	dataValKeys := make(map[reflect.Value]struct{})
--	dataValKeysUnused := make(map[interface{}]struct{})
--	for _, dataValKey := range dataVal.MapKeys() {
--		dataValKeys[dataValKey] = struct{}{}
--		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
--	}
--
--	errors := make([]string, 0)
--
--	// This slice will keep track of all the structs we'll be decoding.
--	// There can be more than one struct if there are embedded structs
--	// that are squashed.
--	structs := make([]reflect.Value, 1, 5)
--	structs[0] = val
--
--	// Compile the list of all the fields that we're going to be decoding
--	// from all the structs.
--	fields := make(map[*reflect.StructField]reflect.Value)
--	for len(structs) > 0 {
--		structVal := structs[0]
--		structs = structs[1:]
--
--		structType := structVal.Type()
--		for i := 0; i < structType.NumField(); i++ {
--			fieldType := structType.Field(i)
--
--			if fieldType.Anonymous {
--				fieldKind := fieldType.Type.Kind()
--				if fieldKind != reflect.Struct {
--					errors = appendErrors(errors,
--						fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
--					continue
--				}
--
--				// We have an embedded field. We "squash" the fields down
--				// if specified in the tag.
--				squash := false
--				tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
--				for _, tag := range tagParts[1:] {
--					if tag == "squash" {
--						squash = true
--						break
--					}
--				}
--
--				if squash {
--					structs = append(structs, val.FieldByName(fieldType.Name))
--					continue
--				}
--			}
--
--			// Normal struct field, store it away
--			fields[&fieldType] = structVal.Field(i)
--		}
--	}
--
--	for fieldType, field := range fields {
--		fieldName := fieldType.Name
--
--		tagValue := fieldType.Tag.Get(d.config.TagName)
--		tagValue = strings.SplitN(tagValue, ",", 2)[0]
--		if tagValue != "" {
--			fieldName = tagValue
--		}
--
--		rawMapKey := reflect.ValueOf(fieldName)
--		rawMapVal := dataVal.MapIndex(rawMapKey)
--		if !rawMapVal.IsValid() {
--			// Do a slower search by iterating over each key and
--			// doing case-insensitive search.
--			for dataValKey, _ := range dataValKeys {
--				mK, ok := dataValKey.Interface().(string)
--				if !ok {
--					// Not a string key
--					continue
--				}
--
--				if strings.EqualFold(mK, fieldName) {
--					rawMapKey = dataValKey
--					rawMapVal = dataVal.MapIndex(dataValKey)
--					break
--				}
--			}
--
--			if !rawMapVal.IsValid() {
--				// There was no matching key in the map for the value in
--				// the struct. Just ignore.
--				continue
--			}
--		}
--
--		// Delete the key we're using from the unused map so we stop tracking
--		delete(dataValKeysUnused, rawMapKey.Interface())
--
--		if !field.IsValid() {
--			// This should never happen
--			panic("field is not valid")
--		}
--
--		// If we can't set the field, then it is unexported or something,
--		// and we just continue onwards.
--		if !field.CanSet() {
--			continue
--		}
--
--		// If the name is empty string, then we're at the root, and we
--		// don't dot-join the fields.
--		if name != "" {
--			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
--		}
--
--		if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
--			errors = appendErrors(errors, err)
--		}
--	}
--
--	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
--		keys := make([]string, 0, len(dataValKeysUnused))
--		for rawKey, _ := range dataValKeysUnused {
--			keys = append(keys, rawKey.(string))
--		}
--		sort.Strings(keys)
--
--		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
--		errors = appendErrors(errors, err)
--	}
--
--	if len(errors) > 0 {
--		return &Error{errors}
--	}
--
--	// Add the unused keys to the list of unused keys if we're tracking metadata
--	if d.config.Metadata != nil {
--		for rawKey, _ := range dataValKeysUnused {
--			key := rawKey.(string)
--			if name != "" {
--				key = fmt.Sprintf("%s.%s", name, key)
--			}
--
--			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
--		}
--	}
--
--	return nil
--}
--
--func getKind(val reflect.Value) reflect.Kind {
--	kind := val.Kind()
--
--	switch {
--	case kind >= reflect.Int && kind <= reflect.Int64:
--		return reflect.Int
--	case kind >= reflect.Uint && kind <= reflect.Uint64:
--		return reflect.Uint
--	case kind >= reflect.Float32 && kind <= reflect.Float64:
--		return reflect.Float32
--	default:
--		return kind
--	}
--}
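For orientation, the following is a minimal, self-contained sketch of how the embedded-struct ",squash" handling in decodeStruct above looks from the caller's side. It assumes the mapstructure package as vendored in this tree; the Base and Server types (and their field names) are purely illustrative and not part of the library.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Base and Server are illustrative types for this sketch only.
type Base struct {
	Vstring string
}

type Server struct {
	// The ",squash" tag tells decodeStruct to lift the embedded fields
	// into the parent, so "vstring" is read from the top-level map.
	Base    `mapstructure:",squash"`
	Vunique string
}

func main() {
	input := map[string]interface{}{
		"vstring": "foo",
		"vunique": "bar",
	}

	var out Server
	if err := mapstructure.Decode(input, &out); err != nil {
		panic(err)
	}

	fmt.Println(out.Vstring, out.Vunique) // foo bar
}
```

Without the squash tag, the decoder would instead expect a nested "Base" map, as the embedded-struct tests later in this patch show.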
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
-deleted file mode 100644
-index b50ac36..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
-+++ /dev/null
-@@ -1,243 +0,0 @@
--package mapstructure
--
--import (
--	"testing"
--)
--
--func Benchmark_Decode(b *testing.B) {
--	type Person struct {
--		Name   string
--		Age    int
--		Emails []string
--		Extra  map[string]string
--	}
--
--	input := map[string]interface{}{
--		"name":   "Mitchell",
--		"age":    91,
--		"emails": []string{"one", "two", "three"},
--		"extra": map[string]string{
--			"twitter": "mitchellh",
--		},
--	}
--
--	var result Person
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeBasic(b *testing.B) {
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"vint":    42,
--		"Vuint":   42,
--		"vbool":   true,
--		"Vfloat":  42.42,
--		"vsilent": true,
--		"vdata":   42,
--	}
--
--	var result Basic
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeEmbedded(b *testing.B) {
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"Basic": map[string]interface{}{
--			"vstring": "innerfoo",
--		},
--		"vunique": "bar",
--	}
--
--	var result Embedded
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeTypeConversion(b *testing.B) {
--	input := map[string]interface{}{
--		"IntToFloat":    42,
--		"IntToUint":     42,
--		"IntToBool":     1,
--		"IntToString":   42,
--		"UintToInt":     42,
--		"UintToFloat":   42,
--		"UintToBool":    42,
--		"UintToString":  42,
--		"BoolToInt":     true,
--		"BoolToUint":    true,
--		"BoolToFloat":   true,
--		"BoolToString":  true,
--		"FloatToInt":    42.42,
--		"FloatToUint":   42.42,
--		"FloatToBool":   42.42,
--		"FloatToString": 42.42,
--		"StringToInt":   "42",
--		"StringToUint":  "42",
--		"StringToBool":  "1",
--		"StringToFloat": "42.42",
--		"SliceToMap":    []interface{}{},
--		"MapToSlice":    map[string]interface{}{},
--	}
--
--	var resultStrict TypeConversionResult
--	for i := 0; i < b.N; i++ {
--		Decode(input, &resultStrict)
--	}
--}
--
--func Benchmark_DecodeMap(b *testing.B) {
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vother": map[interface{}]interface{}{
--			"foo": "foo",
--			"bar": "bar",
--		},
--	}
--
--	var result Map
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeMapOfStruct(b *testing.B) {
--	input := map[string]interface{}{
--		"value": map[string]interface{}{
--			"foo": map[string]string{"vstring": "one"},
--			"bar": map[string]string{"vstring": "two"},
--		},
--	}
--
--	var result MapOfStruct
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeSlice(b *testing.B) {
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": []string{"foo", "bar", "baz"},
--	}
--
--	var result Slice
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeSliceOfStruct(b *testing.B) {
--	input := map[string]interface{}{
--		"value": []map[string]interface{}{
--			{"vstring": "one"},
--			{"vstring": "two"},
--		},
--	}
--
--	var result SliceOfStruct
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
--
--func Benchmark_DecodeWeaklyTypedInput(b *testing.B) {
--	type Person struct {
--		Name   string
--		Age    int
--		Emails []string
--	}
--
--	// This input can come from anywhere, but typically comes from
--	// something like decoding JSON, generated by a weakly typed language
--	// such as PHP.
--	input := map[string]interface{}{
--		"name":   123,                      // number => string
--		"age":    "42",                     // string => number
--		"emails": map[string]interface{}{}, // empty map => empty array
--	}
--
--	var result Person
--	config := &DecoderConfig{
--		WeaklyTypedInput: true,
--		Result:           &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		panic(err)
--	}
--
--	for i := 0; i < b.N; i++ {
--		decoder.Decode(input)
--	}
--}
--
--func Benchmark_DecodeMetadata(b *testing.B) {
--	type Person struct {
--		Name string
--		Age  int
--	}
--
--	input := map[string]interface{}{
--		"name":  "Mitchell",
--		"age":   91,
--		"email": "foo@bar.com",
--	}
--
--	var md Metadata
--	var result Person
--	config := &DecoderConfig{
--		Metadata: &md,
--		Result:   &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		panic(err)
--	}
--
--	for i := 0; i < b.N; i++ {
--		decoder.Decode(input)
--	}
--}
--
--func Benchmark_DecodeMetadataEmbedded(b *testing.B) {
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"vunique": "bar",
--	}
--
--	var md Metadata
--	var result EmbeddedSquash
--	config := &DecoderConfig{
--		Metadata: &md,
--		Result:   &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		b.Fatalf("err: %s", err)
--	}
--
--	for i := 0; i < b.N; i++ {
--		decoder.Decode(input)
--	}
--}
--
--func Benchmark_DecodeTagged(b *testing.B) {
--	input := map[string]interface{}{
--		"foo": "bar",
--		"bar": "value",
--	}
--
--	var result Tagged
--	for i := 0; i < b.N; i++ {
--		Decode(input, &result)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
-deleted file mode 100644
-index 7054f1a..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
-+++ /dev/null
-@@ -1,47 +0,0 @@
--package mapstructure
--
--import "testing"
--
--// GH-1
--func TestDecode_NilValue(t *testing.T) {
--	input := map[string]interface{}{
--		"vfoo":   nil,
--		"vother": nil,
--	}
--
--	var result Map
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("should not error: %s", err)
--	}
--
--	if result.Vfoo != "" {
--		t.Fatalf("value should be default: %s", result.Vfoo)
--	}
--
--	if result.Vother != nil {
--		t.Fatalf("Vother should be nil: %s", result.Vother)
--	}
--}
--
--// GH-10
--func TestDecode_mapInterfaceInterface(t *testing.T) {
--	input := map[interface{}]interface{}{
--		"vfoo":   nil,
--		"vother": nil,
--	}
--
--	var result Map
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("should not error: %s", err)
--	}
--
--	if result.Vfoo != "" {
--		t.Fatalf("value should be default: %s", result.Vfoo)
--	}
--
--	if result.Vother != nil {
--		t.Fatalf("Vother should be nil: %s", result.Vother)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
-deleted file mode 100644
-index aa393cc..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
-+++ /dev/null
-@@ -1,169 +0,0 @@
--package mapstructure
--
--import (
--	"fmt"
--)
--
--func ExampleDecode() {
--	type Person struct {
--		Name   string
--		Age    int
--		Emails []string
--		Extra  map[string]string
--	}
--
--	// This input can come from anywhere, but typically comes from
--	// something like decoding JSON where we're not quite sure of the
--	// struct initially.
--	input := map[string]interface{}{
--		"name":   "Mitchell",
--		"age":    91,
--		"emails": []string{"one", "two", "three"},
--		"extra": map[string]string{
--			"twitter": "mitchellh",
--		},
--	}
--
--	var result Person
--	err := Decode(input, &result)
--	if err != nil {
--		panic(err)
--	}
--
--	fmt.Printf("%#v", result)
--	// Output:
--	// mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}}
--}
--
--func ExampleDecode_errors() {
--	type Person struct {
--		Name   string
--		Age    int
--		Emails []string
--		Extra  map[string]string
--	}
--
--	// This input can come from anywhere, but typically comes from
--	// something like decoding JSON where we're not quite sure of the
--	// struct initially.
--	input := map[string]interface{}{
--		"name":   123,
--		"age":    "bad value",
--		"emails": []int{1, 2, 3},
--	}
--
--	var result Person
--	err := Decode(input, &result)
--	if err == nil {
--		panic("should have an error")
--	}
--
--	fmt.Println(err.Error())
--	// Output:
--	// 5 error(s) decoding:
--	//
--	// * 'Name' expected type 'string', got unconvertible type 'int'
--	// * 'Age' expected type 'int', got unconvertible type 'string'
--	// * 'Emails[0]' expected type 'string', got unconvertible type 'int'
--	// * 'Emails[1]' expected type 'string', got unconvertible type 'int'
--	// * 'Emails[2]' expected type 'string', got unconvertible type 'int'
--}
--
--func ExampleDecode_metadata() {
--	type Person struct {
--		Name string
--		Age  int
--	}
--
--	// This input can come from anywhere, but typically comes from
--	// something like decoding JSON where we're not quite sure of the
--	// struct initially.
--	input := map[string]interface{}{
--		"name":  "Mitchell",
--		"age":   91,
--		"email": "foo@bar.com",
--	}
--
--	// For metadata, we make a more advanced DecoderConfig so we can
--	// more finely configure the decoder that is used. In this case, we
--	// just tell the decoder we want to track metadata.
--	var md Metadata
--	var result Person
--	config := &DecoderConfig{
--		Metadata: &md,
--		Result:   &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		panic(err)
--	}
--
--	if err := decoder.Decode(input); err != nil {
--		panic(err)
--	}
--
--	fmt.Printf("Unused keys: %#v", md.Unused)
--	// Output:
--	// Unused keys: []string{"email"}
--}
--
--func ExampleDecode_weaklyTypedInput() {
--	type Person struct {
--		Name   string
--		Age    int
--		Emails []string
--	}
--
--	// This input can come from anywhere, but typically comes from
--	// something like decoding JSON, generated by a weakly typed language
--	// such as PHP.
--	input := map[string]interface{}{
--		"name":   123,                      // number => string
--		"age":    "42",                     // string => number
--		"emails": map[string]interface{}{}, // empty map => empty array
--	}
--
--	var result Person
--	config := &DecoderConfig{
--		WeaklyTypedInput: true,
--		Result:           &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		panic(err)
--	}
--
--	err = decoder.Decode(input)
--	if err != nil {
--		panic(err)
--	}
--
--	fmt.Printf("%#v", result)
--	// Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}}
--}
--
--func ExampleDecode_tags() {
--	// Note that the mapstructure tags defined in the struct type
--	// can indicate which fields the values are mapped to.
--	type Person struct {
--		Name string `mapstructure:"person_name"`
--		Age  int    `mapstructure:"person_age"`
--	}
--
--	input := map[string]interface{}{
--		"person_name": "Mitchell",
--		"person_age":  91,
--	}
--
--	var result Person
--	err := Decode(input, &result)
--	if err != nil {
--		panic(err)
--	}
--
--	fmt.Printf("%#v", result)
--	// Output:
--	// mapstructure.Person{Name:"Mitchell", Age:91}
--}
-diff --git a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go b/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
-deleted file mode 100644
-index 23029c7..0000000
---- a/Godeps/_workspace/src/github.com/mitchellh/mapstructure/mapstructure_test.go
-+++ /dev/null
-@@ -1,828 +0,0 @@
--package mapstructure
--
--import (
--	"reflect"
--	"sort"
--	"testing"
--)
--
--type Basic struct {
--	Vstring string
--	Vint    int
--	Vuint   uint
--	Vbool   bool
--	Vfloat  float64
--	Vextra  string
--	vsilent bool
--	Vdata   interface{}
--}
--
--type Embedded struct {
--	Basic
--	Vunique string
--}
--
--type EmbeddedPointer struct {
--	*Basic
--	Vunique string
--}
--
--type EmbeddedSquash struct {
--	Basic   `mapstructure:",squash"`
--	Vunique string
--}
--
--type Map struct {
--	Vfoo   string
--	Vother map[string]string
--}
--
--type MapOfStruct struct {
--	Value map[string]Basic
--}
--
--type Nested struct {
--	Vfoo string
--	Vbar Basic
--}
--
--type NestedPointer struct {
--	Vfoo string
--	Vbar *Basic
--}
--
--type Slice struct {
--	Vfoo string
--	Vbar []string
--}
--
--type SliceOfStruct struct {
--	Value []Basic
--}
--
--type Tagged struct {
--	Extra string `mapstructure:"bar,what,what"`
--	Value string `mapstructure:"foo"`
--}
--
--type TypeConversionResult struct {
--	IntToFloat         float32
--	IntToUint          uint
--	IntToBool          bool
--	IntToString        string
--	UintToInt          int
--	UintToFloat        float32
--	UintToBool         bool
--	UintToString       string
--	BoolToInt          int
--	BoolToUint         uint
--	BoolToFloat        float32
--	BoolToString       string
--	FloatToInt         int
--	FloatToUint        uint
--	FloatToBool        bool
--	FloatToString      string
--	SliceUint8ToString string
--	StringToInt        int
--	StringToUint       uint
--	StringToBool       bool
--	StringToFloat      float32
--	SliceToMap         map[string]interface{}
--	MapToSlice         []interface{}
--}
--
--func TestBasicTypes(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"vint":    42,
--		"Vuint":   42,
--		"vbool":   true,
--		"Vfloat":  42.42,
--		"vsilent": true,
--		"vdata":   42,
--	}
--
--	var result Basic
--	err := Decode(input, &result)
--	if err != nil {
--		t.Errorf("got an err: %s", err.Error())
--		t.FailNow()
--	}
--
--	if result.Vstring != "foo" {
--		t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
--	}
--
--	if result.Vint != 42 {
--		t.Errorf("vint value should be 42: %#v", result.Vint)
--	}
--
--	if result.Vuint != 42 {
--		t.Errorf("vuint value should be 42: %#v", result.Vuint)
--	}
--
--	if result.Vbool != true {
--		t.Errorf("vbool value should be true: %#v", result.Vbool)
--	}
--
--	if result.Vfloat != 42.42 {
--		t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat)
--	}
--
--	if result.Vextra != "" {
--		t.Errorf("vextra value should be empty: %#v", result.Vextra)
--	}
--
--	if result.vsilent != false {
--		t.Error("vsilent should not be set, it is unexported")
--	}
--
--	if result.Vdata != 42 {
--		t.Error("vdata should be valid")
--	}
--}
--
--func TestBasic_IntWithFloat(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vint": float64(42),
--	}
--
--	var result Basic
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err)
--	}
--}
--
--func TestDecode_Embedded(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"Basic": map[string]interface{}{
--			"vstring": "innerfoo",
--		},
--		"vunique": "bar",
--	}
--
--	var result Embedded
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err.Error())
--	}
--
--	if result.Vstring != "innerfoo" {
--		t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring)
--	}
--
--	if result.Vunique != "bar" {
--		t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
--	}
--}
--
--func TestDecode_EmbeddedPointer(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"Basic": map[string]interface{}{
--			"vstring": "innerfoo",
--		},
--		"vunique": "bar",
--	}
--
--	var result EmbeddedPointer
--	err := Decode(input, &result)
--	if err == nil {
--		t.Fatal("should get error")
--	}
--}
--
--func TestDecode_EmbeddedSquash(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"vunique": "bar",
--	}
--
--	var result EmbeddedSquash
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err.Error())
--	}
--
--	if result.Vstring != "foo" {
--		t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
--	}
--
--	if result.Vunique != "bar" {
--		t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
--	}
--}
--
--func TestDecode_DecodeHook(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vint": "WHAT",
--	}
--
--	decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) {
--		if from == reflect.String && to != reflect.String {
--			return 5, nil
--		}
--
--		return v, nil
--	}
--
--	var result Basic
--	config := &DecoderConfig{
--		DecodeHook: decodeHook,
--		Result:     &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	err = decoder.Decode(input)
--	if err != nil {
--		t.Fatalf("got an err: %s", err)
--	}
--
--	if result.Vint != 5 {
--		t.Errorf("vint should be 5: %#v", result.Vint)
--	}
--}
--
--func TestDecode_Nil(t *testing.T) {
--	t.Parallel()
--
--	var input interface{} = nil
--	result := Basic{
--		Vstring: "foo",
--	}
--
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	if result.Vstring != "foo" {
--		t.Fatalf("bad: %#v", result.Vstring)
--	}
--}
--
--func TestDecode_NonStruct(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"foo": "bar",
--		"bar": "baz",
--	}
--
--	var result map[string]string
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	if result["foo"] != "bar" {
--		t.Fatal("foo is not bar")
--	}
--}
--
--func TestDecode_TypeConversion(t *testing.T) {
--	input := map[string]interface{}{
--		"IntToFloat":         42,
--		"IntToUint":          42,
--		"IntToBool":          1,
--		"IntToString":        42,
--		"UintToInt":          42,
--		"UintToFloat":        42,
--		"UintToBool":         42,
--		"UintToString":       42,
--		"BoolToInt":          true,
--		"BoolToUint":         true,
--		"BoolToFloat":        true,
--		"BoolToString":       true,
--		"FloatToInt":         42.42,
--		"FloatToUint":        42.42,
--		"FloatToBool":        42.42,
--		"FloatToString":      42.42,
--		"SliceUint8ToString": []uint8("foo"),
--		"StringToInt":        "42",
--		"StringToUint":       "42",
--		"StringToBool":       "1",
--		"StringToFloat":      "42.42",
--		"SliceToMap":         []interface{}{},
--		"MapToSlice":         map[string]interface{}{},
--	}
--
--	expectedResultStrict := TypeConversionResult{
--		IntToFloat:  42.0,
--		IntToUint:   42,
--		UintToInt:   42,
--		UintToFloat: 42,
--		BoolToInt:   0,
--		BoolToUint:  0,
--		BoolToFloat: 0,
--		FloatToInt:  42,
--		FloatToUint: 42,
--	}
--
--	expectedResultWeak := TypeConversionResult{
--		IntToFloat:         42.0,
--		IntToUint:          42,
--		IntToBool:          true,
--		IntToString:        "42",
--		UintToInt:          42,
--		UintToFloat:        42,
--		UintToBool:         true,
--		UintToString:       "42",
--		BoolToInt:          1,
--		BoolToUint:         1,
--		BoolToFloat:        1,
--		BoolToString:       "1",
--		FloatToInt:         42,
--		FloatToUint:        42,
--		FloatToBool:        true,
--		FloatToString:      "42.42",
--		SliceUint8ToString: "foo",
--		StringToInt:        42,
--		StringToUint:       42,
--		StringToBool:       true,
--		StringToFloat:      42.42,
--		SliceToMap:         map[string]interface{}{},
--		MapToSlice:         []interface{}{},
--	}
--
--	// Test strict type conversion
--	var resultStrict TypeConversionResult
--	err := Decode(input, &resultStrict)
--	if err == nil {
--		t.Errorf("should return an error")
--	}
--	if !reflect.DeepEqual(resultStrict, expectedResultStrict) {
--		t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict)
--	}
--
--	// Test weak type conversion
--	var decoder *Decoder
--	var resultWeak TypeConversionResult
--
--	config := &DecoderConfig{
--		WeaklyTypedInput: true,
--		Result:           &resultWeak,
--	}
--
--	decoder, err = NewDecoder(config)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	err = decoder.Decode(input)
--	if err != nil {
--		t.Fatalf("got an err: %s", err)
--	}
--
--	if !reflect.DeepEqual(resultWeak, expectedResultWeak) {
--		t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak)
--	}
--}
--
--func TestDecoder_ErrorUnused(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "hello",
--		"foo":     "bar",
--	}
--
--	var result Basic
--	config := &DecoderConfig{
--		ErrorUnused: true,
--		Result:      &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	err = decoder.Decode(input)
--	if err == nil {
--		t.Fatal("expected error")
--	}
--}
--
--func TestMap(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vother": map[interface{}]interface{}{
--			"foo": "foo",
--			"bar": "bar",
--		},
--	}
--
--	var result Map
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an error: %s", err)
--	}
--
--	if result.Vfoo != "foo" {
--		t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
--	}
--
--	if result.Vother == nil {
--		t.Fatal("vother should not be nil")
--	}
--
--	if len(result.Vother) != 2 {
--		t.Error("vother should have two items")
--	}
--
--	if result.Vother["foo"] != "foo" {
--		t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"])
--	}
--
--	if result.Vother["bar"] != "bar" {
--		t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"])
--	}
--}
--
--func TestMapOfStruct(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"value": map[string]interface{}{
--			"foo": map[string]string{"vstring": "one"},
--			"bar": map[string]string{"vstring": "two"},
--		},
--	}
--
--	var result MapOfStruct
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err)
--	}
--
--	if result.Value == nil {
--		t.Fatal("value should not be nil")
--	}
--
--	if len(result.Value) != 2 {
--		t.Error("value should have two items")
--	}
--
--	if result.Value["foo"].Vstring != "one" {
--		t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring)
--	}
--
--	if result.Value["bar"].Vstring != "two" {
--		t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring)
--	}
--}
--
--func TestNestedType(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": map[string]interface{}{
--			"vstring": "foo",
--			"vint":    42,
--			"vbool":   true,
--		},
--	}
--
--	var result Nested
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err.Error())
--	}
--
--	if result.Vfoo != "foo" {
--		t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
--	}
--
--	if result.Vbar.Vstring != "foo" {
--		t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
--	}
--
--	if result.Vbar.Vint != 42 {
--		t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
--	}
--
--	if result.Vbar.Vbool != true {
--		t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
--	}
--
--	if result.Vbar.Vextra != "" {
--		t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
--	}
--}
--
--func TestNestedTypePointer(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": &map[string]interface{}{
--			"vstring": "foo",
--			"vint":    42,
--			"vbool":   true,
--		},
--	}
--
--	var result NestedPointer
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got an err: %s", err.Error())
--	}
--
--	if result.Vfoo != "foo" {
--		t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
--	}
--
--	if result.Vbar.Vstring != "foo" {
--		t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
--	}
--
--	if result.Vbar.Vint != 42 {
--		t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
--	}
--
--	if result.Vbar.Vbool != true {
--		t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
--	}
--
--	if result.Vbar.Vextra != "" {
--		t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
--	}
--}
--
--func TestSlice(t *testing.T) {
--	t.Parallel()
--
--	inputStringSlice := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": []string{"foo", "bar", "baz"},
--	}
--
--	inputStringSlicePointer := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": &[]string{"foo", "bar", "baz"},
--	}
--
--	outputStringSlice := &Slice{
--		"foo",
--		[]string{"foo", "bar", "baz"},
--	}
--
--	testSliceInput(t, inputStringSlice, outputStringSlice)
--	testSliceInput(t, inputStringSlicePointer, outputStringSlice)
--}
--
--func TestInvalidSlice(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": 42,
--	}
--
--	result := Slice{}
--	err := Decode(input, &result)
--	if err == nil {
--		t.Errorf("expected failure")
--	}
--}
--
--func TestSliceOfStruct(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"value": []map[string]interface{}{
--			{"vstring": "one"},
--			{"vstring": "two"},
--		},
--	}
--
--	var result SliceOfStruct
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got unexpected error: %s", err)
--	}
--
--	if len(result.Value) != 2 {
--		t.Fatalf("expected two values, got %d", len(result.Value))
--	}
--
--	if result.Value[0].Vstring != "one" {
--		t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring)
--	}
--
--	if result.Value[1].Vstring != "two" {
--		t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring)
--	}
--}
--
--func TestInvalidType(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": 42,
--	}
--
--	var result Basic
--	err := Decode(input, &result)
--	if err == nil {
--		t.Fatal("error should exist")
--	}
--
--	derr, ok := err.(*Error)
--	if !ok {
--		t.Fatalf("error should be kind of Error, instead: %#v", err)
--	}
--
--	if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" {
--		t.Errorf("got unexpected error: %s", err)
--	}
--}
--
--func TestMetadata(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vfoo": "foo",
--		"vbar": map[string]interface{}{
--			"vstring": "foo",
--			"Vuint":   42,
--			"foo":     "bar",
--		},
--		"bar": "nil",
--	}
--
--	var md Metadata
--	var result Nested
--	config := &DecoderConfig{
--		Metadata: &md,
--		Result:   &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	err = decoder.Decode(input)
--	if err != nil {
--		t.Fatalf("err: %s", err.Error())
--	}
--
--	expectedKeys := []string{"Vfoo", "Vbar.Vstring", "Vbar.Vuint", "Vbar"}
--	if !reflect.DeepEqual(md.Keys, expectedKeys) {
--		t.Fatalf("bad keys: %#v", md.Keys)
--	}
--
--	expectedUnused := []string{"Vbar.foo", "bar"}
--	if !reflect.DeepEqual(md.Unused, expectedUnused) {
--		t.Fatalf("bad unused: %#v", md.Unused)
--	}
--}
--
--func TestMetadata_Embedded(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"vstring": "foo",
--		"vunique": "bar",
--	}
--
--	var md Metadata
--	var result EmbeddedSquash
--	config := &DecoderConfig{
--		Metadata: &md,
--		Result:   &result,
--	}
--
--	decoder, err := NewDecoder(config)
--	if err != nil {
--		t.Fatalf("err: %s", err)
--	}
--
--	err = decoder.Decode(input)
--	if err != nil {
--		t.Fatalf("err: %s", err.Error())
--	}
--
--	expectedKeys := []string{"Vstring", "Vunique"}
--
--	sort.Strings(md.Keys)
--	if !reflect.DeepEqual(md.Keys, expectedKeys) {
--		t.Fatalf("bad keys: %#v", md.Keys)
--	}
--
--	expectedUnused := []string{}
--	if !reflect.DeepEqual(md.Unused, expectedUnused) {
--		t.Fatalf("bad unused: %#v", md.Unused)
--	}
--}
--
--func TestNonPtrValue(t *testing.T) {
--	t.Parallel()
--
--	err := Decode(map[string]interface{}{}, Basic{})
--	if err == nil {
--		t.Fatal("error should exist")
--	}
--
--	if err.Error() != "result must be a pointer" {
--		t.Errorf("got unexpected error: %s", err)
--	}
--}
--
--func TestTagged(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"foo": "bar",
--		"bar": "value",
--	}
--
--	var result Tagged
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("unexpected error: %s", err)
--	}
--
--	if result.Value != "bar" {
--		t.Errorf("value should be 'bar', got: %#v", result.Value)
--	}
--
--	if result.Extra != "value" {
--		t.Errorf("extra should be 'value', got: %#v", result.Extra)
--	}
--}
--
--func TestWeakDecode(t *testing.T) {
--	t.Parallel()
--
--	input := map[string]interface{}{
--		"foo": "4",
--		"bar": "value",
--	}
--
--	var result struct {
--		Foo int
--		Bar string
--	}
--
--	if err := WeakDecode(input, &result); err != nil {
--		t.Fatalf("err: %s", err)
--	}
--	if result.Foo != 4 {
--		t.Fatalf("bad: %#v", result)
--	}
--	if result.Bar != "value" {
--		t.Fatalf("bad: %#v", result)
--	}
--}
--
--func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) {
--	var result Slice
--	err := Decode(input, &result)
--	if err != nil {
--		t.Fatalf("got error: %s", err)
--	}
--
--	if result.Vfoo != expected.Vfoo {
--		t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo)
--	}
--
--	if result.Vbar == nil {
--		t.Fatalf("Vbar a slice, got '%#v'", result.Vbar)
--	}
--
--	if len(result.Vbar) != len(expected.Vbar) {
--		t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar))
--	}
--
--	for i, v := range result.Vbar {
--		if v != expected.Vbar[i] {
--			t.Errorf(
--				"Vbar[%d] should be '%#v', got '%#v'",
--				i, expected.Vbar[i], v)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/racker/perigee/.gitignore b/Godeps/_workspace/src/github.com/racker/perigee/.gitignore
-deleted file mode 100644
-index 49ca32a..0000000
---- a/Godeps/_workspace/src/github.com/racker/perigee/.gitignore
-+++ /dev/null
-@@ -1,2 +0,0 @@
--bin/*
--pkg/*
-diff --git a/Godeps/_workspace/src/github.com/racker/perigee/LICENSE b/Godeps/_workspace/src/github.com/racker/perigee/LICENSE
-deleted file mode 100644
-index d645695..0000000
---- a/Godeps/_workspace/src/github.com/racker/perigee/LICENSE
-+++ /dev/null
-@@ -1,202 +0,0 @@
--
--                                 Apache License
--                           Version 2.0, January 2004
--                        http://www.apache.org/licenses/
--
--   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
--
--   1. Definitions.
--
--      "License" shall mean the terms and conditions for use, reproduction,
--      and distribution as defined by Sections 1 through 9 of this document.
--
--      "Licensor" shall mean the copyright owner or entity authorized by
--      the copyright owner that is granting the License.
--
--      "Legal Entity" shall mean the union of the acting entity and all
--      other entities that control, are controlled by, or are under common
--      control with that entity. For the purposes of this definition,
--      "control" means (i) the power, direct or indirect, to cause the
--      direction or management of such entity, whether by contract or
--      otherwise, or (ii) ownership of fifty percent (50%) or more of the
--      outstanding shares, or (iii) beneficial ownership of such entity.
--
--      "You" (or "Your") shall mean an individual or Legal Entity
--      exercising permissions granted by this License.
--
--      "Source" form shall mean the preferred form for making modifications,
--      including but not limited to software source code, documentation
--      source, and configuration files.
--
--      "Object" form shall mean any form resulting from mechanical
--      transformation or translation of a Source form, including but
--      not limited to compiled object code, generated documentation,
--      and conversions to other media types.
--
--      "Work" shall mean the work of authorship, whether in Source or
--      Object form, made available under the License, as indicated by a
--      copyright notice that is included in or attached to the work
--      (an example is provided in the Appendix below).
--
--      "Derivative Works" shall mean any work, whether in Source or Object
--      form, that is based on (or derived from) the Work and for which the
--      editorial revisions, annotations, elaborations, or other modifications
--      represent, as a whole, an original work of authorship. For the purposes
--      of this License, Derivative Works shall not include works that remain
--      separable from, or merely link (or bind by name) to the interfaces of,
--      the Work and Derivative Works thereof.
--
--      "Contribution" shall mean any work of authorship, including
--      the original version of the Work and any modifications or additions
--      to that Work or Derivative Works thereof, that is intentionally
--      submitted to Licensor for inclusion in the Work by the copyright owner
--      or by an individual or Legal Entity authorized to submit on behalf of
--      the copyright owner. For the purposes of this definition, "submitted"
--      means any form of electronic, verbal, or written communication sent
--      to the Licensor or its representatives, including but not limited to
--      communication on electronic mailing lists, source code control systems,
--      and issue tracking systems that are managed by, or on behalf of, the
--      Licensor for the purpose of discussing and improving the Work, but
--      excluding communication that is conspicuously marked or otherwise
--      designated in writing by the copyright owner as "Not a Contribution."
--
--      "Contributor" shall mean Licensor and any individual or Legal Entity
--      on behalf of whom a Contribution has been received by Licensor and
--      subsequently incorporated within the Work.
--
--   2. Grant of Copyright License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      copyright license to reproduce, prepare Derivative Works of,
--      publicly display, publicly perform, sublicense, and distribute the
--      Work and such Derivative Works in Source or Object form.
--
--   3. Grant of Patent License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      (except as stated in this section) patent license to make, have made,
--      use, offer to sell, sell, import, and otherwise transfer the Work,
--      where such license applies only to those patent claims licensable
--      by such Contributor that are necessarily infringed by their
--      Contribution(s) alone or by combination of their Contribution(s)
--      with the Work to which such Contribution(s) was submitted. If You
--      institute patent litigation against any entity (including a
--      cross-claim or counterclaim in a lawsuit) alleging that the Work
--      or a Contribution incorporated within the Work constitutes direct
--      or contributory patent infringement, then any patent licenses
--      granted to You under this License for that Work shall terminate
--      as of the date such litigation is filed.
--
--   4. Redistribution. You may reproduce and distribute copies of the
--      Work or Derivative Works thereof in any medium, with or without
--      modifications, and in Source or Object form, provided that You
--      meet the following conditions:
--
--      (a) You must give any other recipients of the Work or
--          Derivative Works a copy of this License; and
--
--      (b) You must cause any modified files to carry prominent notices
--          stating that You changed the files; and
--
--      (c) You must retain, in the Source form of any Derivative Works
--          that You distribute, all copyright, patent, trademark, and
--          attribution notices from the Source form of the Work,
--          excluding those notices that do not pertain to any part of
--          the Derivative Works; and
--
--      (d) If the Work includes a "NOTICE" text file as part of its
--          distribution, then any Derivative Works that You distribute must
--          include a readable copy of the attribution notices contained
--          within such NOTICE file, excluding those notices that do not
--          pertain to any part of the Derivative Works, in at least one
--          of the following places: within a NOTICE text file distributed
--          as part of the Derivative Works; within the Source form or
--          documentation, if provided along with the Derivative Works; or,
--          within a display generated by the Derivative Works, if and
--          wherever such third-party notices normally appear. The contents
--          of the NOTICE file are for informational purposes only and
--          do not modify the License. You may add Your own attribution
--          notices within Derivative Works that You distribute, alongside
--          or as an addendum to the NOTICE text from the Work, provided
--          that such additional attribution notices cannot be construed
--          as modifying the License.
--
--      You may add Your own copyright statement to Your modifications and
--      may provide additional or different license terms and conditions
--      for use, reproduction, or distribution of Your modifications, or
--      for any such Derivative Works as a whole, provided Your use,
--      reproduction, and distribution of the Work otherwise complies with
--      the conditions stated in this License.
--
--   5. Submission of Contributions. Unless You explicitly state otherwise,
--      any Contribution intentionally submitted for inclusion in the Work
--      by You to the Licensor shall be under the terms and conditions of
--      this License, without any additional terms or conditions.
--      Notwithstanding the above, nothing herein shall supersede or modify
--      the terms of any separate license agreement you may have executed
--      with Licensor regarding such Contributions.
--
--   6. Trademarks. This License does not grant permission to use the trade
--      names, trademarks, service marks, or product names of the Licensor,
--      except as required for reasonable and customary use in describing the
--      origin of the Work and reproducing the content of the NOTICE file.
--
--   7. Disclaimer of Warranty. Unless required by applicable law or
--      agreed to in writing, Licensor provides the Work (and each
--      Contributor provides its Contributions) on an "AS IS" BASIS,
--      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
--      implied, including, without limitation, any warranties or conditions
--      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
--      PARTICULAR PURPOSE. You are solely responsible for determining the
--      appropriateness of using or redistributing the Work and assume any
--      risks associated with Your exercise of permissions under this License.
--
--   8. Limitation of Liability. In no event and under no legal theory,
--      whether in tort (including negligence), contract, or otherwise,
--      unless required by applicable law (such as deliberate and grossly
--      negligent acts) or agreed to in writing, shall any Contributor be
--      liable to You for damages, including any direct, indirect, special,
--      incidental, or consequential damages of any character arising as a
--      result of this License or out of the use or inability to use the
--      Work (including but not limited to damages for loss of goodwill,
--      work stoppage, computer failure or malfunction, or any and all
--      other commercial damages or losses), even if such Contributor
--      has been advised of the possibility of such damages.
--
--   9. Accepting Warranty or Additional Liability. While redistributing
--      the Work or Derivative Works thereof, You may choose to offer,
--      and charge a fee for, acceptance of support, warranty, indemnity,
--      or other liability obligations and/or rights consistent with this
--      License. However, in accepting such obligations, You may act only
--      on Your own behalf and on Your sole responsibility, not on behalf
--      of any other Contributor, and only if You agree to indemnify,
--      defend, and hold each Contributor harmless for any liability
--      incurred by, or claims asserted against, such Contributor by reason
--      of your accepting any such warranty or additional liability.
--
--   END OF TERMS AND CONDITIONS
--
--   APPENDIX: How to apply the Apache License to your work.
--
--      To apply the Apache License to your work, attach the following
--      boilerplate notice, with the fields enclosed by brackets "[]"
--      replaced with your own identifying information. (Don't include
--      the brackets!)  The text should be enclosed in the appropriate
--      comment syntax for the file format. We also recommend that a
--      file or class name and description of purpose be included on the
--      same "printed page" as the copyright notice for easier
--      identification within third-party archives.
--
--   Copyright [yyyy] [name of copyright owner]
--
--   Licensed under the Apache License, Version 2.0 (the "License");
--   you may not use this file except in compliance with the License.
--   You may obtain a copy of the License at
--
--       http://www.apache.org/licenses/LICENSE-2.0
--
--   Unless required by applicable law or agreed to in writing, software
--   distributed under the License is distributed on an "AS IS" BASIS,
--   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--   See the License for the specific language governing permissions and
--   limitations under the License.
-diff --git a/Godeps/_workspace/src/github.com/racker/perigee/README.md b/Godeps/_workspace/src/github.com/racker/perigee/README.md
-deleted file mode 100644
-index 81cbf4a..0000000
---- a/Godeps/_workspace/src/github.com/racker/perigee/README.md
-+++ /dev/null
-@@ -1,120 +0,0 @@
--# perigee
--
--Perigee provides a REST client that, while it should be generic enough to use with most any RESTful API, is nonetheless optimized to the needs of the OpenStack APIs.
--Perigee grew out of the need to refactor out common API access code from the [gorax](http://github.com/racker/gorax) project.
--
--Several things influenced the name of the project.
--Numerous elements of the OpenStack ecosystem are named after astronomical artifacts.
--Additionally, perigee occurs when two orbiting bodies are closest to each other.
--Perigee seemed appropriate for something aiming to bring OpenStack and other RESTful services closer to the end-user.
--
--**This library is still in the very early stages of development. Unless you want to contribute, it probably isn't what you want**
--
--## Installation and Testing
--
--To install:
--
--```bash
--go get github.com/racker/perigee
--```
--
--To run unit tests:
--
--```bash
--go test github.com/racker/perigee
--```
--
--## Contributing
--
--The following guidelines are preliminary, as this project is just starting out.
--However, this should serve as a working first-draft.
--
--### Branching
--
--The master branch must always be a valid build.
--The `go get` command will not work otherwise.
--Therefore, development must occur on a different branch.
--
--When creating a feature branch, do so off the master branch:
--
--```bash
--git checkout master
--git pull
--git checkout -b featureBranch
--git checkout -b featureBranch-wip   # optional
--```
--
--Perform all your editing and testing in the WIP-branch.
--Feel free to make as many commits as you see fit.
--You may even open "WIP" pull requests from your feature branch to seek feedback.
--WIP pull requests will **never** be merged, however.
--
--To get code merged, you'll need to "squash" your changes into one or more clean commits in the feature branch.
--These steps should be followed:
--
--```bash
--git checkout featureBranch
--git merge --squash featureBranch-wip
--git commit -a
--git push origin featureBranch
--```
--
--You may now open a nice, clean, self-contained pull request from featureBranch to master.
--
--The `git commit -a` command above will open a text editor so that
--you may provide a comprehensive description of the changes.
--
--In general, when submitting a pull request against master,
--be sure to answer the following questions:
--
--- What is the problem?
--- Why is it a problem?
--- What is your solution?
--- How does your solution work?  (Recommended for non-trivial changes.)
--- Why should we use your solution over someone else's?  (Recommended especially if multiple solutions are being discussed.)
--
--Remember that monster-sized pull requests are a bear to code-review,
--so having helpful commit logs is an absolute must to review changes as quickly as possible.
--
--Finally, (s)he who breaks master is ultimately responsible for fixing master.
--
--### Source Representation
--
--The Go community firmly believes in a consistent representation for all Go source code.
--We do too.
--Make sure all source code is passed through "go fmt" *before* you create your pull request.
--
--Please note, however, that we fully acknowledge and recognize that we no longer rely upon punch-cards for representing source files.
--Therefore, no 80-column limit exists.
--However, if a line exceeds 132 columns, you may want to consider splitting the line.
--
--### Unit and Integration Tests
--
--Pull requests that include non-trivial code changes without accompanying unit tests will be flatly rejected.
--While we have no way of enforcing this practice,
--you can ensure your code is thoroughly tested by always [writing tests first by intention.](http://en.wikipedia.org/wiki/Test-driven_development)
--
--When creating a pull request, if even one test fails, the PR will be rejected.
--Make sure all unit tests pass.
--Make sure all integration tests pass.
--
--### Documentation
--
--Private functions and methods which are obvious to anyone unfamiliar with gorax needn't be accompanied by documentation.
--However, this is a code-smell; if submitting a PR, expect to justify your decision.
--
--Public functions, regardless of how obvious, **must** have accompanying godoc-style documentation.
--This is not to suggest you should provide a tome for each function, however.
--Sometimes a link to more information is more appropriate, provided the link is stable, reliable, and pertinent.
--
--Changing documentation often results in bizarre diffs in pull requests, due to text often spanning multiple lines.
--To work around this, put [one logical thought or sentence on a single line.](http://rhodesmill.org/brandon/2012/one-sentence-per-line/)
--While this looks weird in a plain-text editor,
--remember that both godoc and HTML viewers will reflow text.
--The source code and its comments should be easy to edit with minimal diff pollution.
--Let software dedicated to presenting the documentation to human readers deal with its presentation.
--
--## Examples
--
--t.b.d.
--
-diff --git a/Godeps/_workspace/src/github.com/racker/perigee/api.go b/Godeps/_workspace/src/github.com/racker/perigee/api.go
-deleted file mode 100644
-index 0fcbadb..0000000
---- a/Godeps/_workspace/src/github.com/racker/perigee/api.go
-+++ /dev/null
-@@ -1,269 +0,0 @@
--// vim: ts=8 sw=8 noet ai
--
--package perigee
--
--import (
--	"encoding/json"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"log"
--	"net/http"
--	"strings"
--)
--
--// The UnexpectedResponseCodeError structure represents a mismatch in understanding between server and client in terms of response codes.
--// Most often, this is due to an actual error condition (e.g., getting a 404 for a resource when you expect a 200).
--// However, it needn't always be the case (e.g., getting a 204 (No Content) response back when a 200 is expected).
--type UnexpectedResponseCodeError struct {
--	Url      string
--	Expected []int
--	Actual   int
--	Body     []byte
--}
--
--func (err *UnexpectedResponseCodeError) Error() string {
--	return fmt.Sprintf("Expected HTTP response code %d when accessing URL(%s); got %d instead with the following body:\n%s", err.Expected, err.Url, err.Actual, string(err.Body))
--}
--
--// Request issues an HTTP request, marshaling parameters, and unmarshaling results, as configured in the provided Options parameter.
--// The Response structure returned, if any, will include accumulated results recovered from the HTTP server.
--// See the Response structure for more details.
--func Request(method string, url string, opts Options) (*Response, error) {
--	var body io.Reader
--	var response Response
--
--	client := opts.CustomClient
--	if client == nil {
--		client = new(http.Client)
--	}
--
--	contentType := opts.ContentType
--
--	body = nil
--	if opts.ReqBody != nil {
--		if contentType == "" {
--			contentType = "application/json"
--		}
--
--		if contentType == "application/json" {
--			bodyText, err := json.Marshal(opts.ReqBody)
--			if err != nil {
--				return nil, err
--			}
--			body = strings.NewReader(string(bodyText))
--			if opts.DumpReqJson {
--				log.Printf("Making request:\n%#v\n", string(bodyText))
--			}
--		} else {
--			// assume opts.ReqBody implements the correct interface
--			body = opts.ReqBody.(io.Reader)
--		}
--	}
--
--	req, err := http.NewRequest(method, url, body)
--	if err != nil {
--		return nil, err
--	}
--
--	if contentType != "" {
--		req.Header.Add("Content-Type", contentType)
--	}
--
--	if opts.ContentLength > 0 {
--		req.ContentLength = opts.ContentLength
--		req.Header.Add("Content-Length", fmt.Sprintf("%d", opts.ContentLength))
--	}
--
--	if opts.MoreHeaders != nil {
--		for k, v := range opts.MoreHeaders {
--			req.Header.Add(k, v)
--		}
--	}
--
--	if accept := req.Header.Get("Accept"); accept == "" {
--		accept = opts.Accept
--		if accept == "" {
--			accept = "application/json"
--		}
--		req.Header.Add("Accept", accept)
--	}
--
--	if opts.SetHeaders != nil {
--		err = opts.SetHeaders(req)
--		if err != nil {
--			return &response, err
--		}
--	}
--
--	httpResponse, err := client.Do(req)
--	if httpResponse != nil {
--		response.HttpResponse = *httpResponse
--		response.StatusCode = httpResponse.StatusCode
--	}
--
--	if err != nil {
--		return &response, err
--	}
--	// This if-statement is legacy code, preserved for backward compatibility.
--	if opts.StatusCode != nil {
--		*opts.StatusCode = httpResponse.StatusCode
--	}
--
--	acceptableResponseCodes := opts.OkCodes
--	if len(acceptableResponseCodes) != 0 {
--		if not_in(httpResponse.StatusCode, acceptableResponseCodes) {
--			b, _ := ioutil.ReadAll(httpResponse.Body)
--			httpResponse.Body.Close()
--			return &response, &UnexpectedResponseCodeError{
--				Url:      url,
--				Expected: acceptableResponseCodes,
--				Actual:   httpResponse.StatusCode,
--				Body:     b,
--			}
--		}
--	}
--	if opts.Results != nil {
--		defer httpResponse.Body.Close()
--		jsonResult, err := ioutil.ReadAll(httpResponse.Body)
--		response.JsonResult = jsonResult
--		if err != nil {
--			return &response, err
--		}
--
--		err = json.Unmarshal(jsonResult, opts.Results)
--		// This if-statement is legacy code, preserved for backward compatibility.
--		if opts.ResponseJson != nil {
--			*opts.ResponseJson = jsonResult
--		}
--	}
--	return &response, err
--}
--
--// not_in returns true if, and only if, the provided needle is _not_
--// in the given set of integers.
--func not_in(needle int, haystack []int) bool {
--	for _, straw := range haystack {
--		if needle == straw {
--			return false
--		}
--	}
--	return true
--}
--
--// Post makes a POST request against a server using the provided HTTP client.
--// The url must be a fully-formed URL string.
--// DEPRECATED.  Use Request() instead.
--func Post(url string, opts Options) error {
--	r, err := Request("POST", url, opts)
--	if opts.Response != nil {
--		*opts.Response = r
--	}
--	return err
--}
--
--// Get makes a GET request against a server using the provided HTTP client.
--// The url must be a fully-formed URL string.
--// DEPRECATED.  Use Request() instead.
--func Get(url string, opts Options) error {
--	r, err := Request("GET", url, opts)
--	if opts.Response != nil {
--		*opts.Response = r
--	}
--	return err
--}
--
--// Delete makes a DELETE request against a server using the provided HTTP client.
--// The url must be a fully-formed URL string.
--// DEPRECATED.  Use Request() instead.
--func Delete(url string, opts Options) error {
--	r, err := Request("DELETE", url, opts)
--	if opts.Response != nil {
--		*opts.Response = r
--	}
--	return err
--}
--
--// Put makes a PUT request against a server using the provided HTTP client.
--// The url must be a fully-formed URL string.
--// DEPRECATED.  Use Request() instead.
--func Put(url string, opts Options) error {
--	r, err := Request("PUT", url, opts)
--	if opts.Response != nil {
--		*opts.Response = r
--	}
--	return err
--}
--
--// Options describes a set of optional parameters to the various request calls.
--//
--// The custom client can be used for a variety of purposes beyond selecting encrypted versus unencrypted channels.
--// Transports can be defined to provide augmented logging, header manipulation, etc.
--//
--// If the ReqBody field is provided, it will be embedded as a JSON object.
--// Otherwise, provide nil.
--//
--// If JSON output is to be expected from the response,
--// provide either a pointer to the container structure in Results,
--// or a pointer to a nil-initialized pointer variable.
--// The latter method will cause the unmarshaller to allocate the container type for you.
--// If no response is expected, provide a nil Results value.
--//
--// The MoreHeaders map, if non-nil, provides a set of headers to add to those
--// already present in the request.  At present, only Accept and Content-Type are set
--// by default.
--//
--// OkCodes provides a set of acceptable, positive responses.
--//
--// If provided, StatusCode specifies a pointer to an integer, which will receive the
--// returned HTTP status code, successful or not.  DEPRECATED; use the Response.StatusCode field instead for new software.
--//
--// ResponseJson, if specified, provides a means for returning the raw JSON.  This is
--// most useful for diagnostics.  DEPRECATED; use the Response.JsonResult field instead for new software.
--//
--// DumpReqJson, if set to true, will cause the request to be dumped to stdout for debugging purposes.
--// This attribute may be removed at any time in the future; DO NOT use this attribute in production software.
--//
--// Response, if set, provides a way to communicate the complete set of HTTP response, raw JSON, status code, and
--// other useful attributes back to the caller.  Note that the Request() method returns a Response structure as part
--// of its public interface; you don't need to set the Response field here to use this structure.  The Response field
--// exists primarily for legacy or deprecated functions.
--//
--// SetHeaders allows the caller to provide code to set any custom headers programmatically.  Typically, this
--// facility can invoke, e.g., SetBasicAuth() on the request to easily set up authentication.
--// Any error generated will terminate the request and will propagate back to the caller.
--type Options struct {
--	CustomClient  *http.Client
--	ReqBody       interface{}
--	Results       interface{}
--	MoreHeaders   map[string]string
--	OkCodes       []int
--	StatusCode    *int    `DEPRECATED`
--	DumpReqJson   bool    `UNSUPPORTED`
--	ResponseJson  *[]byte `DEPRECATED`
--	Response      **Response
--	ContentType   string `json:"Content-Type,omitempty"`
--	ContentLength int64  `json:"Content-Length,omitempty"`
--	Accept        string `json:"Accept,omitempty"`
--	SetHeaders    func(r *http.Request) error
--}
--
--// Response contains return values from the various request calls.
--//
--// HttpResponse will return the http response from the request call.
--// Note: HttpResponse.Body is always closed and will not be available from this return value.
--//
--// StatusCode specifies the returned HTTP status code, successful or not.
--//
--// If Results is specified in the Options:
--// - JsonResult will contain the raw return from the request call.
--//   This is most useful for diagnostics.
--// - Results will contain the unmarshalled JSON, either in the value passed in
--//   or in a container the unmarshaller allocates for you.
--
--type Response struct {
--	HttpResponse http.Response
--	JsonResult   []byte
--	Results      interface{}
--	StatusCode   int
--}
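To make the Options and Response fields documented above concrete, here is a minimal usage sketch; the test server, JSON payload, header name, and Account type are illustrative assumptions rather than anything defined by perigee.

```go
// Sketch: Results unmarshalling, MoreHeaders, OkCodes, and the returned Response.
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	"github.com/racker/perigee"
)

// Account is a made-up container for the JSON result (assumption).
type Account struct {
	Name string `json:"name"`
}

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"name": "example"}`)
	}))
	defer ts.Close()

	var account Account
	resp, err := perigee.Request("GET", ts.URL, perigee.Options{
		Results:     &account,                                  // JSON body is unmarshalled into this struct
		MoreHeaders: map[string]string{"X-Trace-Id": "abc123"}, // merged into the outgoing request
		OkCodes:     []int{200},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The raw JSON and status code are also available on the returned Response.
	fmt.Println(resp.StatusCode, string(resp.JsonResult), account.Name)
}
```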
-diff --git a/Godeps/_workspace/src/github.com/racker/perigee/api_test.go b/Godeps/_workspace/src/github.com/racker/perigee/api_test.go
-deleted file mode 100644
-index da943b2..0000000
---- a/Godeps/_workspace/src/github.com/racker/perigee/api_test.go
-+++ /dev/null
-@@ -1,226 +0,0 @@
--package perigee
--
--import (
--	"bytes"
--	"fmt"
--	"net/http"
--	"net/http/httptest"
--	"strings"
--	"testing"
--)
--
--func TestNormal(t *testing.T) {
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.Write([]byte("testing"))
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	response, err := Request("GET", ts.URL, Options{})
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--	if response.StatusCode != 200 {
--		t.Fatalf("response code %d is not 200", response.StatusCode)
--	}
--}
--
--func TestOKCodes(t *testing.T) {
--	expectCode := 201
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.WriteHeader(expectCode)
--			w.Write([]byte("testing"))
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	options := Options{
--		OkCodes: []int{expectCode},
--	}
--	results, err := Request("GET", ts.URL, options)
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--	if results.StatusCode != expectCode {
--		t.Fatalf("response code %d is not %d", results.StatusCode, expectCode)
--	}
--}
--
--func TestLocation(t *testing.T) {
--	newLocation := "http://www.example.com"
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.Header().Set("Location", newLocation)
--			w.Write([]byte("testing"))
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	response, err := Request("GET", ts.URL, Options{})
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--
--	location, err := response.HttpResponse.Location()
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--
--	if location.String() != newLocation {
--		t.Fatalf("location returned \"%s\" is not \"%s\"", location.String(), newLocation)
--	}
--}
--
--func TestHeaders(t *testing.T) {
--	newLocation := "http://www.example.com"
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.Header().Set("Location", newLocation)
--			w.Write([]byte("testing"))
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	response, err := Request("GET", ts.URL, Options{})
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--
--	location := response.HttpResponse.Header.Get("Location")
--	if location == "" {
--		t.Fatalf("Location should not empty")
--	}
--
--	if location != newLocation {
--		t.Fatalf("location returned \"%s\" is not \"%s\"", location, newLocation)
--	}
--}
--
--func TestCustomHeaders(t *testing.T) {
--	var contentType, accept, contentLength string
--
--	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		m := map[string][]string(r.Header)
--		contentType = m["Content-Type"][0]
--		accept = m["Accept"][0]
--		contentLength = m["Content-Length"][0]
--	})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	_, err := Request("GET", ts.URL, Options{
--		ContentLength: 5,
--		ContentType:   "x-application/vb",
--		Accept:        "x-application/c",
--		ReqBody:       strings.NewReader("Hello"),
--	})
--	if err != nil {
--		t.Fatalf(err.Error())
--	}
--
--	if contentType != "x-application/vb" {
--		t.Fatalf("I expected x-application/vb; got ", contentType)
--	}
--
--	if contentLength != "5" {
--		t.Fatalf("I expected 5 byte content length; got ", contentLength)
--	}
--
--	if accept != "x-application/c" {
--		t.Fatalf("I expected x-application/c; got ", accept)
--	}
--}
--
--func TestJson(t *testing.T) {
--	newLocation := "http://www.example.com"
--	jsonBytes := []byte(`{"foo": {"bar": "baz"}}`)
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.Header().Set("Location", newLocation)
--			w.Write(jsonBytes)
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	type Data struct {
--		Foo struct {
--			Bar string `json:"bar"`
--		} `json:"foo"`
--	}
--	var data Data
--
--	response, err := Request("GET", ts.URL, Options{Results: &data})
--	if err != nil {
--		t.Fatalf("should not have error: %s", err)
--	}
--
--	if bytes.Compare(jsonBytes, response.JsonResult) != 0 {
--		t.Fatalf("json returned \"%s\" is not \"%s\"", response.JsonResult, jsonBytes)
--	}
--
--	if data.Foo.Bar != "baz" {
--		t.Fatalf("Results returned %v", data)
--	}
--}
--
--func TestSetHeaders(t *testing.T) {
--	var wasCalled bool
--	handler := http.HandlerFunc(
--		func(w http.ResponseWriter, r *http.Request) {
--			w.Write([]byte("Hi"))
--		})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	_, err := Request("GET", ts.URL, Options{
--		SetHeaders: func(r *http.Request) error {
--			wasCalled = true
--			return nil
--		},
--	})
--
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if !wasCalled {
--		t.Fatal("I expected header setter callback to be called, but it wasn't")
--	}
--
--	myError := fmt.Errorf("boo")
--
--	_, err = Request("GET", ts.URL, Options{
--		SetHeaders: func(r *http.Request) error {
--			return myError
--		},
--	})
--
--	if err != myError {
--		t.Fatal("I expected errors to propegate back to the caller.")
--	}
--}
--
--func TestBodilessMethodsAreSentWithoutContentHeaders(t *testing.T) {
--	var h map[string][]string
--
--	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
--		h = r.Header
--	})
--	ts := httptest.NewServer(handler)
--	defer ts.Close()
--
--	_, err := Request("GET", ts.URL, Options{})
--	if err != nil {
--		t.Fatalf(err.Error())
--	}
--
--	if len(h["Content-Type"]) != 0 {
--		t.Fatalf("I expected nothing for Content-Type but got ", h["Content-Type"])
--	}
--
--	if len(h["Content-Length"]) != 0 {
--		t.Fatalf("I expected nothing for Content-Length but got ", h["Content-Length"])
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml b/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml
-deleted file mode 100644
-index cf4f8ca..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml
-+++ /dev/null
-@@ -1,14 +0,0 @@
--language: go
--install:
--  - go get -v -tags 'fixtures acceptance' ./...
--go:
--  - 1.1
--  - 1.2
--  - tip
--script: script/cibuild
--after_success:
--  - go get code.google.com/p/go.tools/cmd/cover
--  - go get github.com/axw/gocov/gocov
--  - go get github.com/mattn/goveralls
--  - export PATH=$PATH:$HOME/gopath/bin/
--  - goveralls 2k7PTU3xa474Hymwgdj6XjqenNfGTNkO8
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md
-deleted file mode 100644
-index 4f596a1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md
-+++ /dev/null
-@@ -1,275 +0,0 @@
--# Contributing to gophercloud
--
--- [Getting started](#getting-started)
--- [Tests](#tests)
--- [Style guide](#basic-style-guide)
--- [5 ways to get involved](#5-ways-to-get-involved)
--
--## Setting up your git workspace
--
--As a contributor, you will need to set up your workspace in a slightly different
--way than just downloading it. Here are the basic installation instructions:
--
--1. Configure your `$GOPATH` and run `go get` as described in the main
--[README](/#how-to-install).
--
--2. Move into the directory that houses your local repository:
--
--   ```bash
--   cd ${GOPATH}/src/github.com/rackspace/gophercloud
--   ```
--
--3. Fork the `rackspace/gophercloud` repository and update your remote refs. You
--will need to rename the `origin` remote branch to `upstream`, and add your
--fork as `origin` instead:
--
--   ```bash
--   git remote rename origin upstream
--   git remote add origin git@github.com:<my_username>/gophercloud
--   ```
--
--4. Checkout the latest development branch ([click here](/branches) to see all
--the branches):
--
--   ```bash
--   git checkout release/v1.0.1
--   ```
--
--5. If you're working on something (discussed more in detail below), you will
--need to checkout a new feature branch:
--
--   ```bash
--   git checkout -b my-new-feature
--   ```
--
--Another thing to bear in mind is that you will need to add a few extra
--environment variables for acceptance tests - this is documented in our
--[acceptance tests readme](/acceptance).
--
--## Tests
--
--When working on a new or existing feature, testing will be the backbone of your
--work since it helps uncover and prevent regressions in the codebase. There are
--two types of test we use in gophercloud: unit tests and acceptance tests, which
--are both described below.
--
--### Unit tests
--
--Unit tests are the fine-grained tests that establish and ensure the behaviour
--of individual units of functionality. We usually test on an
--operation-by-operation basis (an operation typically being an API action) with
--the use of mocking to set up explicit expectations. Each operation will set up
--its HTTP response expectation, and then test how the system responds when fed
--this controlled, pre-determined input.
--
--To make life easier, we've introduced a bunch of test helpers to simplify the
--process of testing expectations with assertions:
--
--```go
--import (
--  "testing"
--
--  "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestSomething(t *testing.T) {
--  result, err := Operation()
--
--  testhelper.AssertEquals(t, "foo", result.Bar)
--  testhelper.AssertNoErr(t, err)
--}
--
--func TestSomethingElse(t *testing.T) {
--  testhelper.CheckEquals(t, "expected", "actual")
--}
--```
--
--`AssertEquals` and `AssertNoErr` will throw a fatal error if a value does not
--match an expected value or if an error has been declared, respectively. You can
--also use `CheckEquals` and `CheckNoErr` for the same purpose; the only difference
--being that `t.Errorf` is raised rather than `t.Fatalf`.
--
--Here is a truncated example of mocked HTTP responses:
--
--```go
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestGet(t *testing.T) {
--	// Setup the HTTP request multiplexer and server
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		// Test we're using the correct HTTP method
--		th.TestMethod(t, r, "GET")
--
--		// Test we're setting the auth token
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		// Set the appropriate headers for our mocked response
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		// Set the HTTP body
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "name": "private-network",
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "shared": true,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--			`)
--	})
--
--	// Call our API operation
--	network, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--
--	// Assert no errors and equality
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, network.Status, "ACTIVE")
--}
--```
--
--### Acceptance tests
--
--As we've already mentioned, unit tests have a very narrow and confined focus -
--they test small units of behaviour. Acceptance tests on the other hand have a
--far larger scope: they are fully functional tests that test the entire API of a
--service in one fell swoop. They don't care about unit isolation or mocking
--expectations, they instead do a full run-through and consequently test how the
--entire system _integrates_ together. When an API satisfies expectations, it
--proves by default that the requirements for a contract have been met.
--
--Please be aware that acceptance tests will hit a live API - and may incur
--service charges from your provider. Although most tests handle their own
--teardown procedures, it is always worth manually checking that resources are
--deleted after the test suite finishes.
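As a rough illustration of the pattern described above (and mirrored by the blockstorage acceptance tests elsewhere in this patch), an acceptance test is typically guarded by a build tag and tears down its own resources with defer; the test name below is hypothetical, and `newClient` is assumed to be provided by the surrounding acceptance package.

```go
// +build acceptance

// Hypothetical acceptance test: guarded by a build tag, creating a real
// resource, and always cleaning it up with defer.
package v1

import (
	"testing"

	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
	th "github.com/rackspace/gophercloud/testhelper"
)

func TestVolumeLifecycle(t *testing.T) {
	// newClient is assumed to come from the acceptance package (assumption).
	client, err := newClient()
	th.AssertNoErr(t, err)

	v, err := volumes.Create(client, &volumes.CreateOpts{
		Name: "gophercloud-test-volume",
		Size: 1,
	}).Extract()
	th.AssertNoErr(t, err)

	// Tear the volume down even if a later assertion fails.
	defer func() {
		th.AssertNoErr(t, volumes.Delete(client, v.ID).ExtractErr())
	}()

	th.AssertNoErr(t, volumes.WaitForStatus(client, v.ID, "available", 120))
}
```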
--
--### Running tests
--
--To run all tests:
--
--```bash
--go test ./...
--```
--
--To run all tests with verbose output:
--
--```bash
--go test -v ./...
--```
--
--To run tests that match certain build tags:
--
--```bash
--go test -tags "foo bar" ./...
--```
--
--To run tests for a particular sub-package:
--
--```bash
--cd ./path/to/package && go test .
--```
--
--## Basic style guide
--
--We follow the standard formatting recommendations and language idioms set out
--in the [Effective Go](https://golang.org/doc/effective_go.html) guide. It's
--definitely worth reading - but the relevant sections are
--[formatting](https://golang.org/doc/effective_go.html#formatting)
--and [names](https://golang.org/doc/effective_go.html#names).
--
--## 5 ways to get involved
--
--There are five main ways you can get involved in our open-source project, and
--each is described briefly below. Once you've made up your mind and decided on
--your fix, you will need to follow the same basic steps that all submissions are
--required to adhere to:
--
--1. [fork](https://help.github.com/articles/fork-a-repo/) the `rackspace/gophercloud` repository
--2. checkout a [new branch](https://github.com/Kunena/Kunena-Forum/wiki/Create-a-new-branch-with-git-and-manage-branches)
--3. submit your branch as a [pull request](https://help.github.com/articles/creating-a-pull-request/)
--
--### 1. Providing feedback
--
--One of the easiest ways to get readily involved in our project is to let us know
--about your experiences using our SDK. Feedback like this is incredibly useful
--to us, because it allows us to refine and change features based on what our
--users want and expect of us. There are a bunch of ways to get in contact! You
--can [ping us](mailto:sdk-support at rackspace.com) via e-mail, talk to us on irc
--(#rackspace-dev on freenode), [tweet us](https://twitter.com/rackspace), or
--submit an issue on our [bug tracker](/issues). Things you might like to tell us
--are:
--
--* how easy was it to start using our SDK?
--* did it meet your expectations? If not, why not?
--* did our documentation help or hinder you?
--* what could we improve in general?
--
--### 2. Fixing bugs
--
--If you want to start fixing open bugs, we'd really appreciate that! Bug fixing
--is central to any project. The best way to get started is by heading to our
--[bug tracker](https://github.com/rackspace/gophercloud/issues) and finding open
--bugs that you think nobody is working on. It might be useful to comment on the
--thread to see the current state of the issue and if anybody has made any
--breakthroughs on it so far.
--
--### 3. Improving documentation
--
--We have three forms of documentation:
--
--* short README documents that briefly introduce a topic
--* reference documentation on [godoc.org](http://godoc.org) that is automatically
--generated from source code comments
--* user documentation on our [homepage](http://gophercloud.io) that includes
--getting started guides, installation guides and code samples
--
--If you feel that a certain section could be improved - whether it's to clarify
--ambiguity, correct a technical mistake, or to fix a grammatical error - please
--feel entitled to do so! We welcome doc pull requests with the same childlike
--enthusiasm as any other contribution!
--
--### 4. Optimizing existing features
--
--If you would like to improve or optimize an existing feature, please be aware
--that we adhere to [semantic versioning](http://semver.org) - which means that
--we cannot introduce breaking changes to the API without a major version change
--(v1.x -> v2.x). Making that leap is a big step, so we encourage contributors to
--refactor rather than rewrite. Running tests will prevent regression and avoid
--the possibility of breaking somebody's current implementation.
--
--Another tip is to keep the focus of your work as small as possible - try not to
--introduce a change that affects lots and lots of files because it introduces
--added risk and increases the cognitive load on the reviewers checking your
--work. Change-sets which are easily understood and will not negatively impact
--users are more likely to be integrated quickly.
--
--Lastly, if you're seeking to optimize a particular operation, you should try to
--demonstrate a negative performance impact - perhaps using go's inbuilt
--[benchmark capabilities](http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go).
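As a hedged sketch of that suggestion, a standard `testing.B` benchmark is usually enough to demonstrate the impact; `buildServerURL` below is a placeholder for whatever operation you are measuring, not a gophercloud function.

```go
// Placeholder benchmark: only the testing.B harness is the point here.
package gophercloud

import "testing"

// buildServerURL is an illustrative stand-in for the code being optimized.
func buildServerURL(id string) string {
	return "https://compute.example.com/v2/servers/" + id
}

func BenchmarkBuildServerURL(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = buildServerURL("d32019d3-bc6e-4319-9c1d-6722fc136a22")
	}
}
```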
--
--### 5. Working on a new feature
--
--If you've found something we've left out, definitely feel free to start work on
--introducing that feature. It's always useful to open an issue or submit a pull
--request early on to indicate your intent to a core contributor - this enables
--quick/early feedback and can help steer you in the right direction by avoiding
--known issues. It might also help you avoid losing time implementing something
--that might not ever work. One tip is to prefix your Pull Request issue title
--with [wip] - then people know it's a work in progress.
--
--You must ensure that all of your work is well tested - both in terms of unit
--and acceptance tests. Untested code will not be merged because it introduces
--too much of a risk to end-users.
--
--Happy hacking!
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTORS.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTORS.md
-deleted file mode 100644
-index eb97094..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTORS.md
-+++ /dev/null
-@@ -1,12 +0,0 @@
--Contributors
--============
--
--| Name | Email |
--| ---- | ----- |
--| Samuel A. Falvo II | <sam.falvo at rackspace.com>
--| Glen Campbell | <glen.campbell at rackspace.com>
--| Jesse Noller | <jesse.noller at rackspace.com>
--| Jon Perritt | <jon.perritt at rackspace.com>
--| Ash Wilson | <ash.wilson at rackspace.com>
--| Jamie Hannaford | <jamie.hannaford at rackspace.com>
--| Don Schenck | <don.schenck at rackspace.com>
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/LICENSE b/Godeps/_workspace/src/github.com/rackspace/gophercloud/LICENSE
-deleted file mode 100644
-index fbbbc9e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/LICENSE
-+++ /dev/null
-@@ -1,191 +0,0 @@
--Copyright 2012-2013 Rackspace, Inc.
--
--Licensed under the Apache License, Version 2.0 (the "License"); you may not use
--this file except in compliance with the License.  You may obtain a copy of the
--License at
--
--  http://www.apache.org/licenses/LICENSE-2.0
--
--Unless required by applicable law or agreed to in writing, software distributed
--under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
--CONDITIONS OF ANY KIND, either express or implied.  See the License for the
--specific language governing permissions and limitations under the License.                                
--
--------
-- 
--				Apache License
--                           Version 2.0, January 2004
--                        http://www.apache.org/licenses/
--
--   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
--
--   1. Definitions.
--
--      "License" shall mean the terms and conditions for use, reproduction,
--      and distribution as defined by Sections 1 through 9 of this document.
--
--      "Licensor" shall mean the copyright owner or entity authorized by
--      the copyright owner that is granting the License.
--
--      "Legal Entity" shall mean the union of the acting entity and all
--      other entities that control, are controlled by, or are under common
--      control with that entity. For the purposes of this definition,
--      "control" means (i) the power, direct or indirect, to cause the
--      direction or management of such entity, whether by contract or
--      otherwise, or (ii) ownership of fifty percent (50%) or more of the
--      outstanding shares, or (iii) beneficial ownership of such entity.
--
--      "You" (or "Your") shall mean an individual or Legal Entity
--      exercising permissions granted by this License.
--
--      "Source" form shall mean the preferred form for making modifications,
--      including but not limited to software source code, documentation
--      source, and configuration files.
--
--      "Object" form shall mean any form resulting from mechanical
--      transformation or translation of a Source form, including but
--      not limited to compiled object code, generated documentation,
--      and conversions to other media types.
--
--      "Work" shall mean the work of authorship, whether in Source or
--      Object form, made available under the License, as indicated by a
--      copyright notice that is included in or attached to the work
--      (an example is provided in the Appendix below).
--
--      "Derivative Works" shall mean any work, whether in Source or Object
--      form, that is based on (or derived from) the Work and for which the
--      editorial revisions, annotations, elaborations, or other modifications
--      represent, as a whole, an original work of authorship. For the purposes
--      of this License, Derivative Works shall not include works that remain
--      separable from, or merely link (or bind by name) to the interfaces of,
--      the Work and Derivative Works thereof.
--
--      "Contribution" shall mean any work of authorship, including
--      the original version of the Work and any modifications or additions
--      to that Work or Derivative Works thereof, that is intentionally
--      submitted to Licensor for inclusion in the Work by the copyright owner
--      or by an individual or Legal Entity authorized to submit on behalf of
--      the copyright owner. For the purposes of this definition, "submitted"
--      means any form of electronic, verbal, or written communication sent
--      to the Licensor or its representatives, including but not limited to
--      communication on electronic mailing lists, source code control systems,
--      and issue tracking systems that are managed by, or on behalf of, the
--      Licensor for the purpose of discussing and improving the Work, but
--      excluding communication that is conspicuously marked or otherwise
--      designated in writing by the copyright owner as "Not a Contribution."
--
--      "Contributor" shall mean Licensor and any individual or Legal Entity
--      on behalf of whom a Contribution has been received by Licensor and
--      subsequently incorporated within the Work.
--
--   2. Grant of Copyright License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      copyright license to reproduce, prepare Derivative Works of,
--      publicly display, publicly perform, sublicense, and distribute the
--      Work and such Derivative Works in Source or Object form.
--
--   3. Grant of Patent License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      (except as stated in this section) patent license to make, have made,
--      use, offer to sell, sell, import, and otherwise transfer the Work,
--      where such license applies only to those patent claims licensable
--      by such Contributor that are necessarily infringed by their
--      Contribution(s) alone or by combination of their Contribution(s)
--      with the Work to which such Contribution(s) was submitted. If You
--      institute patent litigation against any entity (including a
--      cross-claim or counterclaim in a lawsuit) alleging that the Work
--      or a Contribution incorporated within the Work constitutes direct
--      or contributory patent infringement, then any patent licenses
--      granted to You under this License for that Work shall terminate
--      as of the date such litigation is filed.
--
--   4. Redistribution. You may reproduce and distribute copies of the
--      Work or Derivative Works thereof in any medium, with or without
--      modifications, and in Source or Object form, provided that You
--      meet the following conditions:
--
--      (a) You must give any other recipients of the Work or
--          Derivative Works a copy of this License; and
--
--      (b) You must cause any modified files to carry prominent notices
--          stating that You changed the files; and
--
--      (c) You must retain, in the Source form of any Derivative Works
--          that You distribute, all copyright, patent, trademark, and
--          attribution notices from the Source form of the Work,
--          excluding those notices that do not pertain to any part of
--          the Derivative Works; and
--
--      (d) If the Work includes a "NOTICE" text file as part of its
--          distribution, then any Derivative Works that You distribute must
--          include a readable copy of the attribution notices contained
--          within such NOTICE file, excluding those notices that do not
--          pertain to any part of the Derivative Works, in at least one
--          of the following places: within a NOTICE text file distributed
--          as part of the Derivative Works; within the Source form or
--          documentation, if provided along with the Derivative Works; or,
--          within a display generated by the Derivative Works, if and
--          wherever such third-party notices normally appear. The contents
--          of the NOTICE file are for informational purposes only and
--          do not modify the License. You may add Your own attribution
--          notices within Derivative Works that You distribute, alongside
--          or as an addendum to the NOTICE text from the Work, provided
--          that such additional attribution notices cannot be construed
--          as modifying the License.
--
--      You may add Your own copyright statement to Your modifications and
--      may provide additional or different license terms and conditions
--      for use, reproduction, or distribution of Your modifications, or
--      for any such Derivative Works as a whole, provided Your use,
--      reproduction, and distribution of the Work otherwise complies with
--      the conditions stated in this License.
--
--   5. Submission of Contributions. Unless You explicitly state otherwise,
--      any Contribution intentionally submitted for inclusion in the Work
--      by You to the Licensor shall be under the terms and conditions of
--      this License, without any additional terms or conditions.
--      Notwithstanding the above, nothing herein shall supersede or modify
--      the terms of any separate license agreement you may have executed
--      with Licensor regarding such Contributions.
--
--   6. Trademarks. This License does not grant permission to use the trade
--      names, trademarks, service marks, or product names of the Licensor,
--      except as required for reasonable and customary use in describing the
--      origin of the Work and reproducing the content of the NOTICE file.
--
--   7. Disclaimer of Warranty. Unless required by applicable law or
--      agreed to in writing, Licensor provides the Work (and each
--      Contributor provides its Contributions) on an "AS IS" BASIS,
--      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
--      implied, including, without limitation, any warranties or conditions
--      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
--      PARTICULAR PURPOSE. You are solely responsible for determining the
--      appropriateness of using or redistributing the Work and assume any
--      risks associated with Your exercise of permissions under this License.
--
--   8. Limitation of Liability. In no event and under no legal theory,
--      whether in tort (including negligence), contract, or otherwise,
--      unless required by applicable law (such as deliberate and grossly
--      negligent acts) or agreed to in writing, shall any Contributor be
--      liable to You for damages, including any direct, indirect, special,
--      incidental, or consequential damages of any character arising as a
--      result of this License or out of the use or inability to use the
--      Work (including but not limited to damages for loss of goodwill,
--      work stoppage, computer failure or malfunction, or any and all
--      other commercial damages or losses), even if such Contributor
--      has been advised of the possibility of such damages.
--
--   9. Accepting Warranty or Additional Liability. While redistributing
--      the Work or Derivative Works thereof, You may choose to offer,
--      and charge a fee for, acceptance of support, warranty, indemnity,
--      or other liability obligations and/or rights consistent with this
--      License. However, in accepting such obligations, You may act only
--      on Your own behalf and on Your sole responsibility, not on behalf
--      of any other Contributor, and only if You agree to indemnify,
--      defend, and hold each Contributor harmless for any liability
--      incurred by, or claims asserted against, such Contributor by reason
--      of your accepting any such warranty or additional liability.
--
--   END OF TERMS AND CONDITIONS
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md
-deleted file mode 100644
-index 9f7552b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md
-+++ /dev/null
-@@ -1,161 +0,0 @@
--# Gophercloud: the OpenStack SDK for Go
--[![Build Status](https://travis-ci.org/rackspace/gophercloud.svg?branch=master)](https://travis-ci.org/rackspace/gophercloud)
--
--Gophercloud is a flexible SDK that allows you to consume and work with OpenStack
--clouds in a simple and idiomatic way using golang. Many services are supported,
--including Compute, Block Storage, Object Storage, Networking, and Identity.
--Each service API is backed with getting started guides, code samples, reference
--documentation, unit tests and acceptance tests.
--
--## Useful links
--
--* [Gophercloud homepage](http://gophercloud.io)
--* [Reference documentation](http://godoc.org/github.com/rackspace/gophercloud)
--* [Getting started guides](http://gophercloud.io/docs)
--* [Effective Go](https://golang.org/doc/effective_go.html)
--
--## How to install
--
--Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH)
--is pointing to an appropriate directory where you want to install Gophercloud:
--
--```bash
--mkdir $HOME/go
--export GOPATH=$HOME/go
--```
--
--To protect yourself against changes in your dependencies, we highly recommend choosing a
--[dependency management solution](https://code.google.com/p/go-wiki/wiki/PackageManagementTools) for
--your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install
--Gophercloud as a dependency like so:
--
--```bash
--go get github.com/rackspace/gophercloud
--
--# Edit your code to import relevant packages from "github.com/rackspace/gophercloud"
--
--godep save ./...
--```
--
--This will install all the source files you need into a `Godeps/_workspace` directory, which is
--referenceable from your own source files when you use the `godep go` command.
--
--## Getting started
--
--### Credentials
--
--Because you'll be hitting an API, you will need to retrieve your OpenStack
--credentials and either store them as environment variables or in your local Go
--files. The first method is recommended because it decouples credential
--information from source code, allowing you to push the latter to your version
--control system without any security risk.
--
--You will need to retrieve the following:
--
--* username
--* password
--* tenant name or tenant ID
--* a valid Keystone identity URL
--
--For users that have the OpenStack dashboard installed, there's a shortcut. If
--you visit the `project/access_and_security` path in Horizon and click on the
--"Download OpenStack RC File" button at the top right hand corner, you will
--download a bash file that exports all of your access details to environment
--variables. To execute the file, run `source admin-openrc.sh` and you will be
--prompted for your password.
--
--### Authentication
--
--Once you have access to your credentials, you can begin plugging them into
--Gophercloud. The next step is authentication, and this is handled by a base
--"Provider" struct. To get one, you can either pass in your credentials
--explicitly, or tell Gophercloud to use environment variables:
--
--```go
--import (
--  "github.com/rackspace/gophercloud"
--  "github.com/rackspace/gophercloud/openstack"
--  "github.com/rackspace/gophercloud/openstack/utils"
--)
--
--// Option 1: Pass in the values yourself
--opts := gophercloud.AuthOptions{
--  IdentityEndpoint: "https://my-openstack.com:5000/v2.0",
--  Username: "{username}",
--  Password: "{password}",
--  TenantID: "{tenant_id}",
--}
--
--// Option 2: Use a utility function to retrieve all your environment variables
--opts, err := openstack.AuthOptionsFromEnv()
--```
--
--Once you have the `opts` variable, you can pass it in and get back a
--`ProviderClient` struct:
--
--```go
--provider, err := openstack.AuthenticatedClient(opts)
--```
--
--The `ProviderClient` is the top-level client that all of your OpenStack services
--derive from. The provider contains all of the authentication details that allow
--your Go code to access the API - such as the base URL and token ID.
--
--### Provision a server
--
--Once we have a base Provider, we inject it as a dependency into each OpenStack
--service. In order to work with the Compute API, we need a Compute service
--client; which can be created like so:
--
--```go
--client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
--  Region: os.Getenv("OS_REGION_NAME"),
--})
--```
--
--We then use this `client` for any Compute API operation we want. In our case,
--we want to provision a new server - so we invoke the `Create` method and pass
--in the flavor ID (hardware specification) and image ID (operating system) we're
--interested in:
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--server, err := servers.Create(client, servers.CreateOpts{
--  Name:      "My new server!",
--  FlavorRef: "flavor_id",
--  ImageRef:  "image_id",
--}).Extract()
--```
--
--If you are unsure about what images and flavors are, you can read our [Compute
--Getting Started guide](http://gophercloud.io/docs/compute). The above code
--sample creates a new server with the parameters, and embodies the new resource
--in the `server` variable (a
--[`servers.Server`](http://godoc.org/github.com/rackspace/gophercloud) struct).
--
--### Next steps
--
--Cool! You've handled authentication, got your `ProviderClient` and provisioned
--a new server. You're now ready to use more OpenStack services.
--
--* [Getting started with Compute](http://gophercloud.io/docs/compute)
--* [Getting started with Object Storage](http://gophercloud.io/docs/object-storage)
--* [Getting started with Networking](http://gophercloud.io/docs/networking)
--* [Getting started with Block Storage](http://gophercloud.io/docs/block-storage)
--* [Getting started with Identity](http://gophercloud.io/docs/identity)
--
--## Contributing
--
--Engaging the community and lowering barriers for contributors is something we
--care a lot about. For this reason, we've taken the time to write a [contributing
--guide](./CONTRIBUTING.md) for folks interested in getting involved in our project.
--If you're not sure how you can get involved, feel free to submit an issue or
--[e-mail us](mailto:sdk-support at rackspace.com) privately. You don't need to be a
--Go expert - all members of the community are welcome!
--
--## Help and feedback
--
--If you're struggling with something or have spotted a potential bug, feel free
--to submit an issue to our [bug tracker](/issues) or e-mail us directly at
--[sdk-support at rackspace.com](mailto:sdk-support at rackspace.com).
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md
-deleted file mode 100644
-index da3758b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md
-+++ /dev/null
-@@ -1,338 +0,0 @@
--# Upgrading to v1.0.0
--
--With the arrival of this new major version increment, the unfortunate news is
--that breaking changes have been introduced to existing services. The API
--has been completely rewritten from the ground up to make the library more
--extensible, maintainable and easy-to-use.
--
--Below we've compiled upgrade instructions for the various services that
--existed before. If you have a specific issue that is not addressed below,
--please [submit an issue](/issues/new) or
--[e-mail our support team](mailto:sdk-support at rackspace.com).
--
--* [Authentication](#authentication)
--* [Servers](#servers)
--  * [List servers](#list-servers)
--  * [Get server details](#get-server-details)
--  * [Create server](#create-server)
--  * [Resize server](#resize-server)
--  * [Reboot server](#reboot-server)
--  * [Update server](#update-server)
--  * [Rebuild server](#rebuild-server)
--  * [Change admin password](#change-admin-password)
--  * [Delete server](#delete-server)
--  * [Rescue server](#rescue-server)
--* [Images and flavors](#images-and-flavors)
--  * [List images](#list-images)
--  * [List flavors](#list-flavors)
--  * [Create/delete image](#createdelete-image)
--* [Other](#other)
--  * [List keypairs](#list-keypairs)
--  * [Create/delete keypair](#createdelete-keypair)
--  * [List IP addresses](#list-ip-addresses)
--
--# Authentication
--
--One of the major differences that this release introduces is the level of
--sub-packaging to differentiate between services and providers. You now have
--the option of authenticating with OpenStack and other providers (like Rackspace).
--
--To authenticate with a vanilla OpenStack installation, you can either specify
--your credentials like this:
--
--```go
--import (
--  "github.com/rackspace/gophercloud"
--  "github.com/rackspace/gophercloud/openstack"
--)
--
--opts := gophercloud.AuthOptions{
--  IdentityEndpoint: "https://my-openstack.com:5000/v2.0",
--  Username: "{username}",
--  Password: "{password}",
--  TenantID: "{tenant_id}",
--}
--```
--
--Or have them pulled in through environment variables, like this:
--
--```go
--opts, err := openstack.AuthOptionsFromEnv()
--```
--
--Once you have your `AuthOptions` struct, you pass it in to get back a `Provider`,
--like so:
--
--```go
--provider, err := openstack.AuthenticatedClient(opts)
--```
--
--This provider is the top-level structure that all services are created from.
--
--# Servers
--
--Before you can interact with the Compute API, you need to retrieve a
--`gophercloud.ServiceClient`. To do this:
--
--```go
--// Define your region, etc.
--opts := gophercloud.EndpointOpts{Region: "RegionOne"}
--
--client, err := openstack.NewComputeV2(provider, opts)
--```
--
--## List servers
--
--All operations that involve API collections (servers, flavors, images) now use
--the `pagination.Pager` interface. This interface represents paginated entities
--that can be iterated over.
--
--Once you have a Pager, you can then pass a callback function into its `EachPage`
--method, and this will allow you to traverse over the collection and execute
--arbitrary functionality. So, an example with list servers:
--
--```go
--import (
--  "fmt"
--  "github.com/rackspace/gophercloud/pagination"
--  "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// We have the option of filtering the server list. If we want the full
--// collection, leave it as an empty struct or nil
--opts := servers.ListOpts{Name: "server_1"}
--
--// Retrieve a pager (i.e. a paginated collection)
--pager := servers.List(client, opts)
--
--// Define an anonymous function to be executed on each page's iteration
--err := pager.EachPage(func(page pagination.Page) (bool, error) {
--  serverList, err := servers.ExtractServers(page)
--
--  // `s' will be a servers.Server struct
--  for _, s := range serverList {
--    fmt.Printf("We have a server. ID=%s, Name=%s", s.ID, s.Name)
--  }
--})
--```
--
--## Get server details
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--// Get the HTTP result
--response := servers.Get(client, "server_id")
--
--// Extract a Server struct from the response
--server, err := response.Extract()
--```
--
--## Create server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--// Define our options
--opts := servers.CreateOpts{
--  Name: "new_server",
--  FlavorRef: "flavorID",
--  ImageRef: "imageID",
--}
--
--// Get our response
--response := servers.Create(client, opts)
--
--// Extract
--server, err := response.Extract()
--```
--
--## Change admin password
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--result := servers.ChangeAdminPassword(client, "server_id", "newPassword_&123")
--```
--
--## Resize server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--result := servers.Resize(client, "server_id", "new_flavor_id")
--```
--
--## Reboot server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--// You have a choice of two reboot methods: servers.SoftReboot or servers.HardReboot
--result := servers.Reboot(client, "server_id", servers.SoftReboot)
--```
--
--## Update server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--opts := servers.UpdateOpts{Name: "new_name"}
--
--server, err := servers.Update(client, "server_id", opts).Extract()
--```
--
--## Rebuild server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--// You have the option of specifying additional options
--opts := servers.RebuildOpts{
--  Name:      "new_name",
--  AdminPass: "admin_password",
--  ImageID:   "image_id",
--  Metadata:  map[string]string{"owner": "me"},
--}
--
--result := servers.Rebuild(client, "server_id", opts)
--
--// You can extract a servers.Server struct from the HTTP response
--server, err := result.Extract()
--```
--
--## Delete server
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--response := servers.Delete(client, "server_id")
--```
--
--## Rescue server
--
--The server rescue extension for Compute is not currently supported.
--
--# Images and flavors
--
--## List images
--
--As with listing servers (see above), you first retrieve a Pager, and then pass
--in a callback over each page:
--
--```go
--import (
--  "github.com/rackspace/gophercloud/pagination"
--  "github.com/rackspace/gophercloud/openstack/compute/v2/images"
--)
--
--// We have the option of filtering the image list. If we want the full
--// collection, leave it as an empty struct
--opts := images.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", Name: "Ubuntu 12.04"}
--
--// Retrieve a pager (i.e. a paginated collection)
--pager := images.List(client, opts)
--
--// Define an anonymous function to be executed on each page's iteration
--err := pager.EachPage(func(page pagination.Page) (bool, error) {
--  imageList, err := images.ExtractImages(page)
--
--  for _, i := range imageList {
--    // "i" will be a images.Image
--  }
--})
--```
--
--## List flavors
--
--```go
--import (
--  "github.com/rackspace/gophercloud/pagination"
--  "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
--)
--
--// We have the option of filtering the flavor list. If we want the full
--// collection, leave it as an empty struct
--opts := flavors.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", MinRAM: 4}
--
--// Retrieve a pager (i.e. a paginated collection)
--pager := flavors.List(client, opts)
--
--// Define an anonymous function to be executed on each page's iteration
--err := pager.EachPage(func(page pagination.Page) (bool, error) {
--  flavorList, err := flavors.ExtractFlavors(page)
--
--  for _, f := range flavorList {
--    // "f" will be a flavors.Flavor
--  }
--})
--```
--
--## Create/delete image
--
--Image management has been shifted to Glance, but unfortunately this service is
--not supported as of yet. You can, however, list Compute images like so:
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/images"
--
--// Retrieve a pager (i.e. a paginated collection)
--pager := images.List(client, opts)
--
--// Define an anonymous function to be executed on each page's iteration
--err := pager.EachPage(func(page pagination.Page) (bool, error) {
--  imageList, err := images.ExtractImages(page)
--
--  for _, i := range imageList {
--    // "i" will be a images.Image
--  }
--})
--```
--
--# Other
--
--## List keypairs
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--
--// Retrieve a pager (i.e. a paginated collection)
--pager := keypairs.List(client, opts)
--
--// Define an anonymous function to be executed on each page's iteration
--err := pager.EachPage(func(page pagination.Page) (bool, error) {
--  keyList, err := keypairs.ExtractKeyPairs(page)
--
--  for _, k := range keyList {
--    // "k" will be a keypairs.KeyPair
--  }
--})
--```
--
--## Create/delete keypairs
--
--To create a new keypair, you need to specify its name and, optionally, a
--pregenerated OpenSSH-formatted public key.
--
--```go
--import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--
--opts := keypairs.CreateOpts{
--  Name: "new_key",
--  PublicKey: "...",
--}
--
--response := keypairs.Create(client, opts)
--
--key, err := response.Extract()
--```
--
--To delete an existing keypair:
--
--```go
--response := keypairs.Delete(client, "keypair_id")
--```
--
--## List IP addresses
--
--This operation is not currently supported.
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/README.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/README.md
-deleted file mode 100644
-index 3199837..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/README.md
-+++ /dev/null
-@@ -1,57 +0,0 @@
--# Gophercloud Acceptance tests
--
--The purpose of these acceptance tests is to validate that SDK features meet
--the requirements of a contract - to consumers, other parts of the library, and
--to a remote API.
--
--> **Note:** Because every test will be run against a real API endpoint, you
--> may incur bandwidth and service charges for all the resource usage. These
--> tests *should* remove their remote products automatically. However, there may
--> be certain cases where this does not happen; always double-check to make sure
--> you have no stragglers left behind.
--
--### Step 1. Set environment variables
--
--A lot of tests rely on environment variables for configuration - so you will need
--to set them before running the suite. If you're testing against pure OpenStack APIs,
--you can download a file that contains all of these variables for you: just visit
--the `project/access_and_security` page in your control panel and click the "Download
--OpenStack RC File" button at the top right. For all other providers, you will need
--to set them manually.
--
--#### Authentication
--
--|Name|Description|
--|---|---|
--|`OS_USERNAME`|Your API username|
--|`OS_PASSWORD`|Your API password|
--|`OS_AUTH_URL`|The identity URL you need to authenticate|
--|`OS_TENANT_NAME`|Your API tenant name|
--|`OS_TENANT_ID`|Your API tenant ID|
--|`RS_USERNAME`|Your Rackspace username|
--|`RS_API_KEY`|Your Rackspace API key|
--
--#### General
--
--|Name|Description|
--|---|---|
--|`OS_REGION_NAME`|The region you want your resources to reside in|
--|`RS_REGION`|Rackspace region you want your resource to reside in|
--
--#### Compute
--
--|Name|Description|
--|---|---|
--|`OS_IMAGE_ID`|The ID of the image you want your server to be based on|
--|`OS_FLAVOR_ID`|The ID of the flavor you want your server to be based on|
--|`OS_FLAVOR_ID_RESIZE`|The ID of the flavor you want your server to be resized to|
--|`RS_IMAGE_ID`|The ID of the image you want servers to be created with|
--|`RS_FLAVOR_ID`|The ID of the flavor you want your server to be created with|
--
--### Step 2. Run the test suite
--
--From the root directory, run:
--
--```
--./script/acceptancetest
--```
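One hypothetical way a test can consume the variables listed above is to read them with `os.Getenv` and skip when they are missing; the `requireEnv` helper and the package name below are illustrative assumptions, not part of gophercloud.

```go
// Hypothetical helper: read required environment variables and skip the test
// when any of them is unset. Not part of gophercloud.
package acceptance

import (
	"os"
	"testing"
)

func requireEnv(t *testing.T, names ...string) map[string]string {
	vals := make(map[string]string, len(names))
	for _, n := range names {
		v := os.Getenv(n)
		if v == "" {
			t.Skipf("skipping: environment variable %s is not set", n)
		}
		vals[n] = v
	}
	return vals
}

func TestComputeEnvPresent(t *testing.T) {
	env := requireEnv(t, "OS_AUTH_URL", "OS_USERNAME", "OS_PASSWORD", "OS_IMAGE_ID", "OS_FLAVOR_ID")
	_ = env // these values would then be fed into gophercloud's auth options
}
```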
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go
-deleted file mode 100644
-index 9132ee5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go
-+++ /dev/null
-@@ -1,70 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
--	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestSnapshots(t *testing.T) {
--
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--	
--	v, err := volumes.Create(client, &volumes.CreateOpts{
--		Name: "gophercloud-test-volume",
--		Size: 1,
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	err = volumes.WaitForStatus(client, v.ID, "available", 120)
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created volume: %v\n", v)
--
--	ss, err := snapshots.Create(client, &snapshots.CreateOpts{
--		Name:     "gophercloud-test-snapshot",
--		VolumeID: v.ID,
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	err = snapshots.WaitForStatus(client, ss.ID, "available", 120)
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created snapshot: %+v\n", ss)
--
--	err = snapshots.Delete(client, ss.ID).ExtractErr()
--	th.AssertNoErr(t, err)
--
--	err = gophercloud.WaitFor(120, func() (bool, error) {
--		_, err := snapshots.Get(client, ss.ID).Extract()
--		if err != nil {
--			return true, nil
--		}
--
--		return false, nil
--	})
--	th.AssertNoErr(t, err)
--
--	t.Log("Deleted snapshot\n")
--
--	err = volumes.Delete(client, v.ID).ExtractErr()
--	th.AssertNoErr(t, err)
--
--	err = gophercloud.WaitFor(120, func() (bool, error) {
--		_, err := volumes.Get(client, v.ID).Extract()
--		if err != nil {
--			return true, nil
--		}
--
--		return false, nil
--	})
--	th.AssertNoErr(t, err)
--
--	t.Log("Deleted volume\n")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go
-deleted file mode 100644
-index 99da39a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go
-+++ /dev/null
-@@ -1,63 +0,0 @@
--// +build acceptance blockstorage
--
--package v1
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func newClient() (*gophercloud.ServiceClient, error) {
--	ao, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--
--	client, err := openstack.AuthenticatedClient(ao)
--	if err != nil {
--		return nil, err
--	}
--
--	return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--}
--
--func TestVolumes(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	cv, err := volumes.Create(client, &volumes.CreateOpts{
--		Size: 1,
--		Name: "gophercloud-test-volume",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	defer func() {
--		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
--		th.AssertNoErr(t, err)
--		err = volumes.Delete(client, cv.ID).ExtractErr()
--		th.AssertNoErr(t, err)
--	}()
--
--	_, err = volumes.Update(client, cv.ID, &volumes.UpdateOpts{
--		Name: "gophercloud-updated-volume",
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	v, err := volumes.Get(client, cv.ID).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Got volume: %+v\n", v)
--
--	if v.Name != "gophercloud-updated-volume" {
--		t.Errorf("Unable to update volume: Expected name: gophercloud-updated-volume\nActual name: %s", v.Name)
--	}
--
--	err = volumes.List(client, &volumes.ListOpts{Name: "gophercloud-updated-volume"}).EachPage(func(page pagination.Page) (bool, error) {
--		vols, err := volumes.ExtractVolumes(page)
--		th.CheckEquals(t, 1, len(vols))
--		return true, err
--	})
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go
-deleted file mode 100644
-index 5adcd81..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go
-+++ /dev/null
-@@ -1,49 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"testing"
--	"time"
--
--	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestVolumeTypes(t *testing.T) {
--	client, err := newClient(t)
--	th.AssertNoErr(t, err)
--
--	vt, err := volumetypes.Create(client, &volumetypes.CreateOpts{
--		ExtraSpecs: map[string]interface{}{
--			"capabilities": "gpu",
--			"priority":     3,
--		},
--		Name: "gophercloud-test-volumeType",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	defer func() {
--		time.Sleep(10000 * time.Millisecond)
--		err = volumetypes.Delete(client, vt.ID).ExtractErr()
--		if err != nil {
--			t.Error(err)
--			return
--		}
--	}()
--	t.Logf("Created volume type: %+v\n", vt)
--
--	vt, err = volumetypes.Get(client, vt.ID).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Got volume type: %+v\n", vt)
--
--	err = volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		volTypes, err := volumetypes.ExtractVolumeTypes(page)
--		if len(volTypes) != 1 {
--			t.Errorf("Expected 1 volume type, got %d", len(volTypes))
--		}
--		t.Logf("Listing volume types: %+v\n", volTypes)
--		return true, err
--	})
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go
-deleted file mode 100644
-index 6e88819..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/client_test.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--// +build acceptance
--
--package openstack
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--)
--
--func TestAuthenticatedClient(t *testing.T) {
--	// Obtain credentials from the environment.
--	ao, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		t.Fatalf("Unable to acquire credentials: %v", err)
--	}
--
--	client, err := openstack.AuthenticatedClient(ao)
--	if err != nil {
--		t.Fatalf("Unable to authenticate: %v", err)
--	}
--
--	if client.TokenID == "" {
--		t.Errorf("No token ID assigned to the client")
--	}
--
--	t.Logf("Client successfully acquired a token: %v", client.TokenID)
--
--	// Find the storage service in the service catalog.
--	storage, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--	if err != nil {
--		t.Errorf("Unable to locate a storage service: %v", err)
--	} else {
--		t.Logf("Located a storage service at endpoint: [%s]", storage.Endpoint)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go
-deleted file mode 100644
-index d08abe6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go
-+++ /dev/null
-@@ -1,50 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/smashwilson/gophercloud/acceptance/tools"
--)
--
--func TestBootFromVolume(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	if testing.Short() {
--		t.Skip("Skipping test that requires server creation in short mode.")
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	name := tools.RandomString("Gophercloud-", 8)
--	t.Logf("Creating server [%s].", name)
--
--	bd := []bootfromvolume.BlockDevice{
--		bootfromvolume.BlockDevice{
--			UUID:       choices.ImageID,
--			SourceType: bootfromvolume.Image,
--			VolumeSize: 10,
--		},
--	}
--
--	serverCreateOpts := servers.CreateOpts{
--		Name:      name,
--		FlavorRef: "3",
--	}
--	server, err := bootfromvolume.Create(client, bootfromvolume.CreateOptsExt{
--		serverCreateOpts,
--		bd,
--	}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created server: %+v\n", server)
--	//defer deleteServer(t, client, server)
--	t.Logf("Deleting server [%s]...", name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go
-deleted file mode 100644
-index 46eb9ff..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"fmt"
--	"os"
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/openstack"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--func newClient() (*gophercloud.ServiceClient, error) {
--	ao, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--
--	client, err := openstack.AuthenticatedClient(ao)
--	if err != nil {
--		return nil, err
--	}
--
--	return openstack.NewComputeV2(client, gophercloud.EndpointOpts{
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--}
--
--func waitForStatus(client *gophercloud.ServiceClient, server *servers.Server, status string) error {
--	return tools.WaitFor(func() (bool, error) {
--		latest, err := servers.Get(client, server.ID).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		if latest.Status == status {
--			// Success!
--			return true, nil
--		}
--
--		return false, nil
--	})
--}
--
--// ComputeChoices contains image and flavor selections for use by the acceptance tests.
--type ComputeChoices struct {
--	// ImageID contains the ID of a valid image.
--	ImageID string
--
--	// FlavorID contains the ID of a valid flavor.
--	FlavorID string
--
--	// FlavorIDResize contains the ID of a different flavor available on the same OpenStack installation, that is distinct
--	// from FlavorID.
--	FlavorIDResize string
--}
--
--// ComputeChoicesFromEnv populates a ComputeChoices struct from environment variables.
--// If any required state is missing, an `error` will be returned that enumerates the missing properties.
--func ComputeChoicesFromEnv() (*ComputeChoices, error) {
--	imageID := os.Getenv("OS_IMAGE_ID")
--	flavorID := os.Getenv("OS_FLAVOR_ID")
--	flavorIDResize := os.Getenv("OS_FLAVOR_ID_RESIZE")
--
--	missing := make([]string, 0, 3)
--	if imageID == "" {
--		missing = append(missing, "OS_IMAGE_ID")
--	}
--	if flavorID == "" {
--		missing = append(missing, "OS_FLAVOR_ID")
--	}
--	if flavorIDResize == "" {
--		missing = append(missing, "OS_FLAVOR_ID_RESIZE")
--	}
--
--	notDistinct := ""
--	if flavorID == flavorIDResize {
--		notDistinct = "OS_FLAVOR_ID and OS_FLAVOR_ID_RESIZE must be distinct."
--	}
--
--	if len(missing) > 0 || notDistinct != "" {
--		text := "You're missing some important setup:\n"
--		if len(missing) > 0 {
--			text += " * These environment variables must be provided: " + strings.Join(missing, ", ") + "\n"
--		}
--		if notDistinct != "" {
--			text += " * " + notDistinct + "\n"
--		}
--
--		return nil, fmt.Errorf(text)
--	}
--
--	return &ComputeChoices{ImageID: imageID, FlavorID: flavorID, FlavorIDResize: flavorIDResize}, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go
-deleted file mode 100644
-index 1356ffa..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/extension_test.go
-+++ /dev/null
-@@ -1,47 +0,0 @@
--// +build acceptance compute extensions
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestListExtensions(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	err = extensions.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		exts, err := extensions.ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--
--		for i, ext := range exts {
--			t.Logf("[%02d]    name=[%s]\n", i, ext.Name)
--			t.Logf("       alias=[%s]\n", ext.Alias)
--			t.Logf(" description=[%s]\n", ext.Description)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--}
--
--func TestGetExtension(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	ext, err := extensions.Get(client, "os-admin-actions").Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Extension details:")
--	t.Logf("        name=[%s]\n", ext.Name)
--	t.Logf("   namespace=[%s]\n", ext.Namespace)
--	t.Logf("       alias=[%s]\n", ext.Alias)
--	t.Logf(" description=[%s]\n", ext.Description)
--	t.Logf("     updated=[%s]\n", ext.Updated)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go
-deleted file mode 100644
-index 9f51b12..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/flavors_test.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--// +build acceptance compute flavors
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func TestListFlavors(t *testing.T) {
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	t.Logf("ID\tRegion\tName\tStatus\tCreated")
--
--	pager := flavors.ListDetail(client, nil)
--	count, pages := 0, 0
--	pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("---")
--		pages++
--		flavors, err := flavors.ExtractFlavors(page)
--		if err != nil {
--			return false, err
--		}
--
--		for _, f := range flavors {
--			t.Logf("%s\t%s\t%d\t%d\t%d", f.ID, f.Name, f.RAM, f.Disk, f.VCPUs)
--		}
--
--		return true, nil
--	})
--
--	t.Logf("--------\n%d flavors listed on %d pages.", count, pages)
--}
--
--func TestGetFlavor(t *testing.T) {
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	flavor, err := flavors.Get(client, choices.FlavorID).Extract()
--	if err != nil {
--		t.Fatalf("Unable to get flavor information: %v", err)
--	}
--
--	t.Logf("Flavor: %#v", flavor)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go
-deleted file mode 100644
-index ceab22f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/images_test.go
-+++ /dev/null
-@@ -1,37 +0,0 @@
--// +build acceptance compute images
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func TestListImages(t *testing.T) {
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute: client: %v", err)
--	}
--
--	t.Logf("ID\tRegion\tName\tStatus\tCreated")
--
--	pager := images.ListDetail(client, nil)
--	count, pages := 0, 0
--	pager.EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--		images, err := images.ExtractImages(page)
--		if err != nil {
--			return false, err
--		}
--
--		for _, i := range images {
--			t.Logf("%s\t%s\t%s\t%s", i.ID, i.Name, i.Status, i.Created)
--		}
--
--		return true, nil
--	})
--
--	t.Logf("--------\n%d images listed on %d pages.", count, pages)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go
-deleted file mode 100644
-index bb158c3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/pkg.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// The v2 package contains acceptance tests for the OpenStack Compute V2 service.
--
--package v2
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go
-deleted file mode 100644
-index e223c18..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go
-+++ /dev/null
-@@ -1,393 +0,0 @@
--// +build acceptance compute servers
--
--package v2
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/openstack"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func TestListServers(t *testing.T) {
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	t.Logf("ID\tRegion\tName\tStatus\tIPv4\tIPv6")
--
--	pager := servers.List(client, servers.ListOpts{})
--	count, pages := 0, 0
--	pager.EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--		t.Logf("---")
--
--		servers, err := servers.ExtractServers(page)
--		if err != nil {
--			return false, err
--		}
--
--		for _, s := range servers {
--			t.Logf("%s\t%s\t%s\t%s\t%s\t\n", s.ID, s.Name, s.Status, s.AccessIPv4, s.AccessIPv6)
--			count++
--		}
--
--		return true, nil
--	})
--
--	t.Logf("--------\n%d servers listed on %d pages.\n", count, pages)
--}
--
--func networkingClient() (*gophercloud.ServiceClient, error) {
--	opts, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--
--	provider, err := openstack.AuthenticatedClient(opts)
--	if err != nil {
--		return nil, err
--	}
--
--	return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
--		Name:   "neutron",
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--}
--
--func createServer(t *testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices) (*servers.Server, error) {
--	if testing.Short() {
--		t.Skip("Skipping test that requires server creation in short mode.")
--	}
--
--	var network networks.Network
--
--	networkingClient, err := networkingClient()
--	if err != nil {
--		t.Fatalf("Unable to create a networking client: %v", err)
--	}
--
--	pager := networks.List(networkingClient, networks.ListOpts{Name: "public", Limit: 1})
--	pager.EachPage(func(page pagination.Page) (bool, error) {
--		networks, err := networks.ExtractNetworks(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		if len(networks) == 0 {
--			t.Fatalf("No networks to attach to server")
--			return false, err
--		}
--
--		network = networks[0]
--
--		return false, nil
--	})
--
--	name := tools.RandomString("ACPTTEST", 16)
--	t.Logf("Attempting to create server: %s\n", name)
--
--	server, err := servers.Create(client, servers.CreateOpts{
--		Name:      name,
--		FlavorRef: choices.FlavorID,
--		ImageRef:  choices.ImageID,
--		Networks: []servers.Network{
--			servers.Network{UUID: network.ID},
--		},
--	}).Extract()
--	if err != nil {
--		t.Fatalf("Unable to create server: %v", err)
--	}
--
--	return server, err
--}
--
--func TestCreateDestroyServer(t *testing.T) {
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatalf("Unable to create server: %v", err)
--	}
--	defer func() {
--		servers.Delete(client, server.ID)
--		t.Logf("Server deleted.")
--	}()
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatalf("Unable to wait for server: %v", err)
--	}
--}
--
--func TestUpdateServer(t *testing.T) {
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--
--	alternateName := tools.RandomString("ACPTTEST", 16)
--	for alternateName == server.Name {
--		alternateName = tools.RandomString("ACPTTEST", 16)
--	}
--
--	t.Logf("Attempting to rename the server to %s.", alternateName)
--
--	updated, err := servers.Update(client, server.ID, servers.UpdateOpts{Name: alternateName}).Extract()
--	if err != nil {
--		t.Fatalf("Unable to rename server: %v", err)
--	}
--
--	if updated.ID != server.ID {
--		t.Errorf("Updated server ID [%s] didn't match original server ID [%s]!", updated.ID, server.ID)
--	}
--
--	err = tools.WaitFor(func() (bool, error) {
--		latest, err := servers.Get(client, updated.ID).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		return latest.Name == alternateName, nil
--	})
--}
--
--func TestActionChangeAdminPassword(t *testing.T) {
--	t.Parallel()
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--
--	randomPassword := tools.MakeNewPassword(server.AdminPass)
--	res := servers.ChangeAdminPassword(client, server.ID, randomPassword)
--	if res.Err != nil {
--		t.Fatal(res.Err)
--	}
--
--	if err = waitForStatus(client, server, "PASSWORD"); err != nil {
--		t.Fatal(err)
--	}
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestActionReboot(t *testing.T) {
--	t.Parallel()
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--
--	res := servers.Reboot(client, server.ID, "aldhjflaskhjf")
--	if res.Err == nil {
--		t.Fatal("Expected the SDK to provide an ArgumentError here")
--	}
--
--	t.Logf("Attempting reboot of server %s", server.ID)
--	res = servers.Reboot(client, server.ID, servers.OSReboot)
--	if res.Err != nil {
--		t.Fatalf("Unable to reboot server: %v", err)
--	}
--
--	if err = waitForStatus(client, server, "REBOOT"); err != nil {
--		t.Fatal(err)
--	}
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestActionRebuild(t *testing.T) {
--	t.Parallel()
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--
--	t.Logf("Attempting to rebuild server %s", server.ID)
--
--	rebuildOpts := servers.RebuildOpts{
--		Name:      tools.RandomString("ACPTTEST", 16),
--		AdminPass: tools.MakeNewPassword(server.AdminPass),
--		ImageID:   choices.ImageID,
--	}
--
--	rebuilt, err := servers.Rebuild(client, server.ID, rebuildOpts).Extract()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if rebuilt.ID != server.ID {
--		t.Errorf("Expected rebuilt server ID of [%s]; got [%s]", server.ID, rebuilt.ID)
--	}
--
--	if err = waitForStatus(client, rebuilt, "REBUILD"); err != nil {
--		t.Fatal(err)
--	}
--
--	if err = waitForStatus(client, rebuilt, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func resizeServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server, choices *ComputeChoices) {
--	if err := waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--
--	t.Logf("Attempting to resize server [%s]", server.ID)
--
--	opts := &servers.ResizeOpts{
--		FlavorRef: choices.FlavorIDResize,
--	}
--	if res := servers.Resize(client, server.ID, opts); res.Err != nil {
--		t.Fatal(res.Err)
--	}
--
--	if err := waitForStatus(client, server, "VERIFY_RESIZE"); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestActionResizeConfirm(t *testing.T) {
--	t.Parallel()
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--	resizeServer(t, client, server, choices)
--
--	t.Logf("Attempting to confirm resize for server %s", server.ID)
--
--	if res := servers.ConfirmResize(client, server.ID); res.Err != nil {
--		t.Fatal(res.Err)
--	}
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--}
--
--func TestActionResizeRevert(t *testing.T) {
--	t.Parallel()
--
--	choices, err := ComputeChoicesFromEnv()
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	client, err := newClient()
--	if err != nil {
--		t.Fatalf("Unable to create a compute client: %v", err)
--	}
--
--	server, err := createServer(t, client, choices)
--	if err != nil {
--		t.Fatal(err)
--	}
--	defer servers.Delete(client, server.ID)
--	resizeServer(t, client, server, choices)
--
--	t.Logf("Attempting to revert resize for server %s", server.ID)
--
--	if res := servers.RevertResize(client, server.ID); res.Err != nil {
--		t.Fatal(res.Err)
--	}
--
--	if err = waitForStatus(client, server, "ACTIVE"); err != nil {
--		t.Fatal(err)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go
-deleted file mode 100644
-index 2b4e062..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/extension_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	extensions2 "github.com/rackspace/gophercloud/openstack/identity/v2/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestEnumerateExtensions(t *testing.T) {
--	service := authenticatedClient(t)
--
--	t.Logf("Extensions available on this identity endpoint:")
--	count := 0
--	err := extensions2.List(service).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		extensions, err := extensions2.ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--
--		for i, ext := range extensions {
--			t.Logf("[%02d] name=[%s] namespace=[%s]", i, ext.Name, ext.Namespace)
--			t.Logf("     alias=[%s] updated=[%s]", ext.Alias, ext.Updated)
--			t.Logf("     description=[%s]", ext.Description)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--}
--
--func TestGetExtension(t *testing.T) {
--	service := authenticatedClient(t)
--
--	ext, err := extensions2.Get(service, "OS-KSCRUD").Extract()
--	th.AssertNoErr(t, err)
--
--	th.CheckEquals(t, "OpenStack Keystone User CRUD", ext.Name)
--	th.CheckEquals(t, "http://docs.openstack.org/identity/api/ext/OS-KSCRUD/v1.0", ext.Namespace)
--	th.CheckEquals(t, "OS-KSCRUD", ext.Alias)
--	th.CheckEquals(t, "OpenStack extensions to Keystone v2.0 API enabling User Operations.", ext.Description)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go
-deleted file mode 100644
-index feae233..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/identity_test.go
-+++ /dev/null
-@@ -1,47 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func v2AuthOptions(t *testing.T) gophercloud.AuthOptions {
--	// Obtain credentials from the environment.
--	ao, err := openstack.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	// Trim out unused fields. Prefer authentication by API key to password.
--	ao.UserID, ao.DomainID, ao.DomainName = "", "", ""
--	if ao.APIKey != "" {
--		ao.Password = ""
--	}
--
--	return ao
--}
--
--func createClient(t *testing.T, auth bool) *gophercloud.ServiceClient {
--	ao := v2AuthOptions(t)
--
--	provider, err := openstack.NewClient(ao.IdentityEndpoint)
--	th.AssertNoErr(t, err)
--
--	if auth {
--		err = openstack.AuthenticateV2(provider, ao)
--		th.AssertNoErr(t, err)
--	}
--
--	return openstack.NewIdentityV2(provider)
--}
--
--func unauthenticatedClient(t *testing.T) *gophercloud.ServiceClient {
--	return createClient(t, false)
--}
--
--func authenticatedClient(t *testing.T) *gophercloud.ServiceClient {
--	return createClient(t, true)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go
-deleted file mode 100644
-index 5ec3cc8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package v2
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go
-deleted file mode 100644
-index 2054598..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/tenant_test.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	tenants2 "github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestEnumerateTenants(t *testing.T) {
--	service := authenticatedClient(t)
--
--	t.Logf("Tenants to which your current token grants access:")
--	count := 0
--	err := tenants2.List(service, nil).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		tenants, err := tenants2.ExtractTenants(page)
--		th.AssertNoErr(t, err)
--		for i, tenant := range tenants {
--			t.Logf("[%02d] name=[%s] id=[%s] description=[%s] enabled=[%v]",
--				i, tenant.Name, tenant.ID, tenant.Description, tenant.Enabled)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go
-deleted file mode 100644
-index 0632a48..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v2/token_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAuthenticate(t *testing.T) {
--	ao := v2AuthOptions(t)
--	service := unauthenticatedClient(t)
--
--	// Authenticated!
--	result := tokens2.Create(service, tokens2.WrapOptions(ao))
--
--	// Extract and print the token.
--	token, err := result.ExtractToken()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Acquired token: [%s]", token.ID)
--	t.Logf("The token will expire at: [%s]", token.ExpiresAt.String())
--	t.Logf("The token is valid for tenant: [%#v]", token.Tenant)
--
--	// Extract and print the service catalog.
--	catalog, err := result.ExtractServiceCatalog()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Acquired service catalog listing [%d] services", len(catalog.Entries))
--	for i, entry := range catalog.Entries {
--		t.Logf("[%02d]: name=[%s], type=[%s]", i, entry.Name, entry.Type)
--		for _, endpoint := range entry.Endpoints {
--			t.Logf("      - region=[%s] publicURL=[%s]", endpoint.Region, endpoint.PublicURL)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go
-deleted file mode 100644
-index ea893c2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go
-+++ /dev/null
-@@ -1,111 +0,0 @@
--// +build acceptance
--
--package v3
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	endpoints3 "github.com/rackspace/gophercloud/openstack/identity/v3/endpoints"
--	services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func TestListEndpoints(t *testing.T) {
--	// Create a service client.
--	serviceClient := createAuthenticatedClient(t)
--	if serviceClient == nil {
--		return
--	}
--
--	// Use the service to list all available endpoints.
--	pager := endpoints3.List(serviceClient, endpoints3.ListOpts{})
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		endpoints, err := endpoints3.ExtractEndpoints(page)
--		if err != nil {
--			t.Fatalf("Error extracting endpoings: %v", err)
--		}
--
--		for _, endpoint := range endpoints {
--			t.Logf("Endpoint: %8s %10s %9s %s",
--				endpoint.ID,
--				endpoint.Availability,
--				endpoint.Name,
--				endpoint.URL)
--		}
--
--		return true, nil
--	})
--	if err != nil {
--		t.Errorf("Unexpected error while iterating endpoint pages: %v", err)
--	}
--}
--
--func TestNavigateCatalog(t *testing.T) {
--	// Create a service client.
--	client := createAuthenticatedClient(t)
--	if client == nil {
--		return
--	}
--
--	var compute *services3.Service
--	var endpoint *endpoints3.Endpoint
--
--	// Discover the service we're interested in.
--	servicePager := services3.List(client, services3.ListOpts{ServiceType: "compute"})
--	err := servicePager.EachPage(func(page pagination.Page) (bool, error) {
--		part, err := services3.ExtractServices(page)
--		if err != nil {
--			return false, err
--		}
--		if compute != nil {
--			t.Fatalf("Expected one service, got more than one page")
--			return false, nil
--		}
--		if len(part) != 1 {
--			t.Fatalf("Expected one service, got %d", len(part))
--			return false, nil
--		}
--
--		compute = &part[0]
--		return true, nil
--	})
--	if err != nil {
--		t.Fatalf("Unexpected error iterating pages: %v", err)
--	}
--
--	if compute == nil {
--		t.Fatalf("No compute service found.")
--	}
--
--	// Enumerate the endpoints available for this service.
--	computePager := endpoints3.List(client, endpoints3.ListOpts{
--		Availability: gophercloud.AvailabilityPublic,
--		ServiceID:    compute.ID,
--	})
--	err = computePager.EachPage(func(page pagination.Page) (bool, error) {
--		part, err := endpoints3.ExtractEndpoints(page)
--		if err != nil {
--			return false, err
--		}
--		if endpoint != nil {
--			t.Fatalf("Expected one endpoint, got more than one page")
--			return false, nil
--		}
--		if len(part) != 1 {
--			t.Fatalf("Expected one endpoint, got %d", len(part))
--			return false, nil
--		}
--
--		endpoint = &part[0]
--		return true, nil
--	})
--
--	if endpoint == nil {
--		t.Fatalf("No endpoint found.")
--	}
--
--	t.Logf("Success. The compute endpoint is at %s.", endpoint.URL)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go
-deleted file mode 100644
-index ce64345..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/identity_test.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--// +build acceptance
--
--package v3
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func createAuthenticatedClient(t *testing.T) *gophercloud.ServiceClient {
--	// Obtain credentials from the environment.
--	ao, err := openstack.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	// Trim out unused fields.
--	ao.Username, ao.TenantID, ao.TenantName = "", "", ""
--
--	if ao.UserID == "" {
--		t.Logf("Skipping identity v3 tests because no OS_USERID is present.")
--		return nil
--	}
--
--	// Create a client and manually authenticate against v3.
--	providerClient, err := openstack.NewClient(ao.IdentityEndpoint)
--	if err != nil {
--		t.Fatalf("Unable to instantiate client: %v", err)
--	}
--
--	err = openstack.AuthenticateV3(providerClient, ao)
--	if err != nil {
--		t.Fatalf("Unable to authenticate against identity v3: %v", err)
--	}
--
--	// Create a service client.
--	return openstack.NewIdentityV3(providerClient)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go
-deleted file mode 100644
-index eac3ae9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package v3
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go
-deleted file mode 100644
-index 082bd11..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/service_test.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--// +build acceptance
--
--package v3
--
--import (
--	"testing"
--
--	services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func TestListServices(t *testing.T) {
--	// Create a service client.
--	serviceClient := createAuthenticatedClient(t)
--	if serviceClient == nil {
--		return
--	}
--
--	// Use the client to list all available services.
--	pager := services3.List(serviceClient, services3.ListOpts{})
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		parts, err := services3.ExtractServices(page)
--		if err != nil {
--			return false, err
--		}
--
--		t.Logf("--- Page ---")
--		for _, service := range parts {
--			t.Logf("Service: %32s %15s %10s %s", service.ID, service.Type, service.Name, *service.Description)
--		}
--		return true, nil
--	})
--	if err != nil {
--		t.Errorf("Unexpected error traversing pages: %v", err)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go
-deleted file mode 100644
-index 4342ade..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/identity/v3/token_test.go
-+++ /dev/null
-@@ -1,42 +0,0 @@
--// +build acceptance
--
--package v3
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack"
--	tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
--)
--
--func TestGetToken(t *testing.T) {
--	// Obtain credentials from the environment.
--	ao, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		t.Fatalf("Unable to acquire credentials: %v", err)
--	}
--
--	// Trim out unused fields. Skip if we don't have a UserID.
--	ao.Username, ao.TenantID, ao.TenantName = "", "", ""
--	if ao.UserID == "" {
--		t.Logf("Skipping identity v3 tests because no OS_USERID is present.")
--		return
--	}
--
--	// Create an unauthenticated client.
--	provider, err := openstack.NewClient(ao.IdentityEndpoint)
--	if err != nil {
--		t.Fatalf("Unable to instantiate client: %v", err)
--	}
--
--	// Create a service client.
--	service := openstack.NewIdentityV3(provider)
--
--	// Use the service to create a token.
--	token, err := tokens3.Create(service, ao, nil).Extract()
--	if err != nil {
--		t.Fatalf("Unable to get token: %v", err)
--	}
--
--	t.Logf("Acquired token: %s", token.ID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go
-deleted file mode 100644
-index 99e1d01..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--// +build acceptance networking
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/apiversions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestListAPIVersions(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	pager := apiversions.ListVersions(Client)
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		versions, err := apiversions.ExtractAPIVersions(page)
--		th.AssertNoErr(t, err)
--
--		for _, v := range versions {
--			t.Logf("API Version: ID [%s] Status [%s]", v.ID, v.Status)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
--
--func TestListAPIResources(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	pager := apiversions.ListVersionResources(Client, "v2.0")
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		vrs, err := apiversions.ExtractVersionResources(page)
--		th.AssertNoErr(t, err)
--
--		for _, vr := range vrs {
--			t.Logf("Network: Name [%s] Collection [%s]", vr.Name, vr.Collection)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go
-deleted file mode 100644
-index 1efac2c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/common.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package v2
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--var Client *gophercloud.ServiceClient
--
--func NewClient() (*gophercloud.ServiceClient, error) {
--	opts, err := openstack.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--
--	provider, err := openstack.AuthenticatedClient(opts)
--	if err != nil {
--		return nil, err
--	}
--
--	return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
--		Name:   "neutron",
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--}
--
--func Setup(t *testing.T) {
--	client, err := NewClient()
--	th.AssertNoErr(t, err)
--	Client = client
--}
--
--func Teardown() {
--	Client = nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go
-deleted file mode 100644
-index edcbba4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extension_test.go
-+++ /dev/null
-@@ -1,45 +0,0 @@
--// +build acceptance networking
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestListExts(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	pager := extensions.List(Client)
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		exts, err := extensions.ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--
--		for _, ext := range exts {
--			t.Logf("Extension: Name [%s] Description [%s]", ext.Name, ext.Description)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
--
--func TestGetExt(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	ext, err := extensions.Get(Client, "service-type").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, ext.Updated, "2013-01-20T00:00:00-00:00")
--	th.AssertEquals(t, ext.Name, "Neutron Service Type Management")
--	th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/neutron/service-type/api/v1.0")
--	th.AssertEquals(t, ext.Alias, "service-type")
--	th.AssertEquals(t, ext.Description, "API for retrieving service providers for Neutron advanced services")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go
-deleted file mode 100644
-index 63e0be3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/layer3_test.go
-+++ /dev/null
-@@ -1,300 +0,0 @@
--// +build acceptance networking layer3ext
--
--package extensions
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/ports"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const (
--	cidr1 = "10.0.0.1/24"
--	cidr2 = "20.0.0.1/24"
--)
--
--func TestAll(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	testRouter(t)
--	testFloatingIP(t)
--}
--
--func testRouter(t *testing.T) {
--	// Setup: Create network
--	networkID := createNetwork(t)
--
--	// Create router
--	routerID := createRouter(t, networkID)
--
--	// Lists routers
--	listRouters(t)
--
--	// Update router
--	updateRouter(t, routerID)
--
--	// Get router
--	getRouter(t, routerID)
--
--	// Create new subnet. Note: this subnet will be deleted when networkID is deleted
--	subnetID := createSubnet(t, networkID, cidr2)
--
--	// Add interface
--	addInterface(t, routerID, subnetID)
--
--	// Remove interface
--	removeInterface(t, routerID, subnetID)
--
--	// Delete router
--	deleteRouter(t, routerID)
--
--	// Cleanup
--	deleteNetwork(t, networkID)
--}
--
--func testFloatingIP(t *testing.T) {
--	// Setup external network
--	extNetworkID := createNetwork(t)
--
--	// Setup internal network, subnet and port
--	intNetworkID, subnetID, portID := createInternalTopology(t)
--
--	// Now the important part: we need to allow the external network to talk to
--	// the internal subnet. For this we need a router that has an interface to
--	// the internal subnet.
--	routerID := bridgeIntSubnetWithExtNetwork(t, extNetworkID, subnetID)
--
--	// Create floating IP
--	ipID := createFloatingIP(t, extNetworkID, portID)
--
--	// Get floating IP
--	getFloatingIP(t, ipID)
--
--	// Update floating IP
--	updateFloatingIP(t, ipID, portID)
--
--	// Delete floating IP
--	deleteFloatingIP(t, ipID)
--
--	// Remove the internal subnet interface
--	removeInterface(t, routerID, subnetID)
--
--	// Delete router and external network
--	deleteRouter(t, routerID)
--	deleteNetwork(t, extNetworkID)
--
--	// Delete internal port and network
--	deletePort(t, portID)
--	deleteNetwork(t, intNetworkID)
--}
--
--func createNetwork(t *testing.T) string {
--	t.Logf("Creating a network")
--
--	asu := true
--	opts := external.CreateOpts{
--		Parent:   networks.CreateOpts{Name: "sample_network", AdminStateUp: &asu},
--		External: true,
--	}
--	n, err := networks.Create(base.Client, opts).Extract()
--
--	th.AssertNoErr(t, err)
--
--	if n.ID == "" {
--		t.Fatalf("No ID returned when creating a network")
--	}
--
--	createSubnet(t, n.ID, cidr1)
--
--	t.Logf("Network created: ID [%s]", n.ID)
--
--	return n.ID
--}
--
--func deleteNetwork(t *testing.T, networkID string) {
--	t.Logf("Deleting network %s", networkID)
--	networks.Delete(base.Client, networkID)
--}
--
--func deletePort(t *testing.T, portID string) {
--	t.Logf("Deleting port %s", portID)
--	ports.Delete(base.Client, portID)
--}
--
--func createInternalTopology(t *testing.T) (string, string, string) {
--	t.Logf("Creating an internal network (for port)")
--	opts := networks.CreateOpts{Name: "internal_network"}
--	n, err := networks.Create(base.Client, opts).Extract()
--	th.AssertNoErr(t, err)
--
--	// A subnet is also needed
--	subnetID := createSubnet(t, n.ID, cidr2)
--
--	t.Logf("Creating an internal port on network %s", n.ID)
--	p, err := ports.Create(base.Client, ports.CreateOpts{
--		NetworkID: n.ID,
--		Name:      "fixed_internal_port",
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	return n.ID, subnetID, p.ID
--}
--
--func bridgeIntSubnetWithExtNetwork(t *testing.T, networkID, subnetID string) string {
--	// Create router with external gateway info
--	routerID := createRouter(t, networkID)
--
--	// Add interface for internal subnet
--	addInterface(t, routerID, subnetID)
--
--	return routerID
--}
--
--func createSubnet(t *testing.T, networkID, cidr string) string {
--	t.Logf("Creating a subnet for network %s", networkID)
--
--	iFalse := false
--	s, err := subnets.Create(base.Client, subnets.CreateOpts{
--		NetworkID:  networkID,
--		CIDR:       cidr,
--		IPVersion:  subnets.IPv4,
--		Name:       "my_subnet",
--		EnableDHCP: &iFalse,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Subnet created: ID [%s]", s.ID)
--
--	return s.ID
--}
--
--func createRouter(t *testing.T, networkID string) string {
--	t.Logf("Creating a router for network %s", networkID)
--
--	asu := false
--	gwi := routers.GatewayInfo{NetworkID: networkID}
--	r, err := routers.Create(base.Client, routers.CreateOpts{
--		Name:         "foo_router",
--		AdminStateUp: &asu,
--		GatewayInfo:  &gwi,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	if r.ID == "" {
--		t.Fatalf("No ID returned when creating a router")
--	}
--
--	t.Logf("Router created: ID [%s]", r.ID)
--
--	return r.ID
--}
--
--func listRouters(t *testing.T) {
--	pager := routers.List(base.Client, routers.ListOpts{})
--
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		routerList, err := routers.ExtractRouters(page)
--		th.AssertNoErr(t, err)
--
--		for _, r := range routerList {
--			t.Logf("Listing router: ID [%s] Name [%s] Status [%s] GatewayInfo [%#v]",
--				r.ID, r.Name, r.Status, r.GatewayInfo)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func updateRouter(t *testing.T, routerID string) {
--	_, err := routers.Update(base.Client, routerID, routers.UpdateOpts{
--		Name: "another_name",
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--}
--
--func getRouter(t *testing.T, routerID string) {
--	r, err := routers.Get(base.Client, routerID).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting router: ID [%s] Name [%s] Status [%s]", r.ID, r.Name, r.Status)
--}
--
--func addInterface(t *testing.T, routerID, subnetID string) {
--	ir, err := routers.AddInterface(base.Client, routerID, routers.InterfaceOpts{SubnetID: subnetID}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Interface added to router %s: SubnetID [%s] PortID [%s]", routerID, ir.SubnetID, ir.PortID)
--}
--
--func removeInterface(t *testing.T, routerID, subnetID string) {
--	ir, err := routers.RemoveInterface(base.Client, routerID, routers.InterfaceOpts{SubnetID: subnetID}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Interface %s removed from %s", ir.ID, routerID)
--}
--
--func deleteRouter(t *testing.T, routerID string) {
--	t.Logf("Deleting router %s", routerID)
--
--	res := routers.Delete(base.Client, routerID)
--
--	th.AssertNoErr(t, res.Err)
--}
--
--func createFloatingIP(t *testing.T, networkID, portID string) string {
--	t.Logf("Creating floating IP on network [%s] with port [%s]", networkID, portID)
--
--	opts := floatingips.CreateOpts{
--		FloatingNetworkID: networkID,
--		PortID:            portID,
--	}
--
--	ip, err := floatingips.Create(base.Client, opts).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Floating IP created: ID [%s] Status [%s] Fixed (internal) IP: [%s] Floating (external) IP: [%s]",
--		ip.ID, ip.Status, ip.FixedIP, ip.FloatingIP)
--
--	return ip.ID
--}
--
--func getFloatingIP(t *testing.T, ipID string) {
--	ip, err := floatingips.Get(base.Client, ipID).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting floating IP: ID [%s] Status [%s]", ip.ID, ip.Status)
--}
--
--func updateFloatingIP(t *testing.T, ipID, portID string) {
--	t.Logf("Disassociate all ports from IP %s", ipID)
--	_, err := floatingips.Update(base.Client, ipID, floatingips.UpdateOpts{PortID: ""}).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Re-associate the port %s", portID)
--	_, err = floatingips.Update(base.Client, ipID, floatingips.UpdateOpts{PortID: portID}).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func deleteFloatingIP(t *testing.T, ipID string) {
--	t.Logf("Deleting IP %s", ipID)
--	res := floatingips.Delete(base.Client, ipID)
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go
-deleted file mode 100644
-index 27dfe5f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/common.go
-+++ /dev/null
-@@ -1,78 +0,0 @@
--package lbaas
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func SetupTopology(t *testing.T) (string, string) {
--	// create network
--	n, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network"}).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created network %s", n.ID)
--
--	// create subnet
--	s, err := subnets.Create(base.Client, subnets.CreateOpts{
--		NetworkID: n.ID,
--		CIDR:      "192.168.199.0/24",
--		IPVersion: subnets.IPv4,
--		Name:      "tmp_subnet",
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created subnet %s", s.ID)
--
--	return n.ID, s.ID
--}
--
--func DeleteTopology(t *testing.T, networkID string) {
--	res := networks.Delete(base.Client, networkID)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted network %s", networkID)
--}
--
--func CreatePool(t *testing.T, subnetID string) string {
--	p, err := pools.Create(base.Client, pools.CreateOpts{
--		LBMethod: pools.LBMethodRoundRobin,
--		Protocol: "HTTP",
--		Name:     "tmp_pool",
--		SubnetID: subnetID,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created pool %s", p.ID)
--
--	return p.ID
--}
--
--func DeletePool(t *testing.T, poolID string) {
--	res := pools.Delete(base.Client, poolID)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted pool %s", poolID)
--}
--
--func CreateMonitor(t *testing.T) string {
--	m, err := monitors.Create(base.Client, monitors.CreateOpts{
--		Delay:         10,
--		Timeout:       10,
--		MaxRetries:    3,
--		Type:          monitors.TypeHTTP,
--		ExpectedCodes: "200",
--		URLPath:       "/login",
--		HTTPMethod:    "GET",
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created monitor ID [%s]", m.ID)
--
--	return m.ID
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go
-deleted file mode 100644
-index 9b60582..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/member_test.go
-+++ /dev/null
-@@ -1,95 +0,0 @@
--// +build acceptance networking lbaas lbaasmember
--
--package lbaas
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestMembers(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// setup
--	networkID, subnetID := SetupTopology(t)
--	poolID := CreatePool(t, subnetID)
--
--	// create member
--	memberID := createMember(t, poolID)
--
--	// list members
--	listMembers(t)
--
--	// update member
--	updateMember(t, memberID)
--
--	// get member
--	getMember(t, memberID)
--
--	// delete member
--	deleteMember(t, memberID)
--
--	// teardown
--	DeletePool(t, poolID)
--	DeleteTopology(t, networkID)
--}
--
--func createMember(t *testing.T, poolID string) string {
--	m, err := members.Create(base.Client, members.CreateOpts{
--		Address:      "192.168.199.1",
--		ProtocolPort: 8080,
--		PoolID:       poolID,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created member: ID [%s] Status [%s] Weight [%d] Address [%s] Port [%d]",
--		m.ID, m.Status, m.Weight, m.Address, m.ProtocolPort)
--
--	return m.ID
--}
--
--func listMembers(t *testing.T) {
--	err := members.List(base.Client, members.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		memberList, err := members.ExtractMembers(page)
--		if err != nil {
--			t.Errorf("Failed to extract members: %v", err)
--			return false, err
--		}
--
--		for _, m := range memberList {
--			t.Logf("Listing member: ID [%s] Status [%s]", m.ID, m.Status)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func updateMember(t *testing.T, memberID string) {
--	m, err := members.Update(base.Client, memberID, members.UpdateOpts{AdminStateUp: true}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Updated member ID [%s]", m.ID)
--}
--
--func getMember(t *testing.T, memberID string) {
--	m, err := members.Get(base.Client, memberID).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting member ID [%s]", m.ID)
--}
--
--func deleteMember(t *testing.T, memberID string) {
--	res := members.Delete(base.Client, memberID)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted member %s", memberID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go
-deleted file mode 100644
-index 9056fff..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitor_test.go
-+++ /dev/null
-@@ -1,77 +0,0 @@
--// +build acceptance networking lbaas lbaasmonitor
--
--package lbaas
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestMonitors(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// create monitor
--	monitorID := CreateMonitor(t)
--
--	// list monitors
--	listMonitors(t)
--
--	// update monitor
--	updateMonitor(t, monitorID)
--
--	// get monitor
--	getMonitor(t, monitorID)
--
--	// delete monitor
--	deleteMonitor(t, monitorID)
--}
--
--func listMonitors(t *testing.T) {
--	err := monitors.List(base.Client, monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		monitorList, err := monitors.ExtractMonitors(page)
--		if err != nil {
--			t.Errorf("Failed to extract monitors: %v", err)
--			return false, err
--		}
--
--		for _, m := range monitorList {
--			t.Logf("Listing monitor: ID [%s] Type [%s] Delay [%ds] Timeout [%d] Retries [%d] Status [%s]",
--				m.ID, m.Type, m.Delay, m.Timeout, m.MaxRetries, m.Status)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func updateMonitor(t *testing.T, monitorID string) {
--	opts := monitors.UpdateOpts{Delay: 10, Timeout: 10, MaxRetries: 3}
--	m, err := monitors.Update(base.Client, monitorID, opts).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Updated monitor ID [%s]", m.ID)
--}
--
--func getMonitor(t *testing.T, monitorID string) {
--	m, err := monitors.Get(base.Client, monitorID).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting monitor ID [%s]: URL path [%s] HTTP Method [%s] Accepted codes [%s]",
--		m.ID, m.URLPath, m.HTTPMethod, m.ExpectedCodes)
--}
--
--func deleteMonitor(t *testing.T, monitorID string) {
--	res := monitors.Delete(base.Client, monitorID)
--
--	th.AssertNoErr(t, res.Err)
--
--	t.Logf("Deleted monitor %s", monitorID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go
-deleted file mode 100644
-index f5a7df7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package lbaas
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
-deleted file mode 100644
-index 8194064..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
-+++ /dev/null
-@@ -1,98 +0,0 @@
--// +build acceptance networking lbaas lbaaspool
--
--package lbaas
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestPools(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// setup
--	networkID, subnetID := SetupTopology(t)
--
--	// create pool
--	poolID := CreatePool(t, subnetID)
--
--	// list pools
--	listPools(t)
--
--	// update pool
--	updatePool(t, poolID)
--
--	// get pool
--	getPool(t, poolID)
--
--	// create monitor
--	monitorID := CreateMonitor(t)
--
--	// associate health monitor
--	associateMonitor(t, poolID, monitorID)
--
--	// disassociate health monitor
--	disassociateMonitor(t, poolID, monitorID)
--
--	// delete pool
--	DeletePool(t, poolID)
--
--	// teardown
--	DeleteTopology(t, networkID)
--}
--
--func listPools(t *testing.T) {
--	err := pools.List(base.Client, pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		poolList, err := pools.ExtractPools(page)
--		if err != nil {
--			t.Errorf("Failed to extract pools: %v", err)
--			return false, err
--		}
--
--		for _, p := range poolList {
--			t.Logf("Listing pool: ID [%s] Name [%s] Status [%s] LB algorithm [%s]", p.ID, p.Name, p.Status, p.LBMethod)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func updatePool(t *testing.T, poolID string) {
--	opts := pools.UpdateOpts{Name: "SuperPool", LBMethod: pools.LBMethodLeastConnections}
--	p, err := pools.Update(base.Client, poolID, opts).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Updated pool ID [%s]", p.ID)
--}
--
--func getPool(t *testing.T, poolID string) {
--	p, err := pools.Get(base.Client, poolID).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting pool ID [%s]", p.ID)
--}
--
--func associateMonitor(t *testing.T, poolID, monitorID string) {
--	res := pools.AssociateMonitor(base.Client, poolID, monitorID)
--
--	th.AssertNoErr(t, res.Err)
--
--	t.Logf("Associated pool %s with monitor %s", poolID, monitorID)
--}
--
--func disassociateMonitor(t *testing.T, poolID, monitorID string) {
--	res := pools.DisassociateMonitor(base.Client, poolID, monitorID)
--
--	th.AssertNoErr(t, res.Err)
--
--	t.Logf("Disassociated pool %s from monitor %s", poolID, monitorID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go
-deleted file mode 100644
-index c8dff2d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vip_test.go
-+++ /dev/null
-@@ -1,101 +0,0 @@
--// +build acceptance networking lbaas lbaasvip
--
--package lbaas
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestVIPs(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// setup
--	networkID, subnetID := SetupTopology(t)
--	poolID := CreatePool(t, subnetID)
--
--	// create VIP
--	VIPID := createVIP(t, subnetID, poolID)
--
--	// list VIPs
--	listVIPs(t)
--
--	// update VIP
--	updateVIP(t, VIPID)
--
--	// get VIP
--	getVIP(t, VIPID)
--
--	// delete VIP
--	deleteVIP(t, VIPID)
--
--	// teardown
--	DeletePool(t, poolID)
--	DeleteTopology(t, networkID)
--}
--
--func createVIP(t *testing.T, subnetID, poolID string) string {
--	p, err := vips.Create(base.Client, vips.CreateOpts{
--		Protocol:     "HTTP",
--		Name:         "New_VIP",
--		AdminStateUp: vips.Up,
--		SubnetID:     subnetID,
--		PoolID:       poolID,
--		ProtocolPort: 80,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created VIP %s", p.ID)
--
--	return p.ID
--}
--
--func listVIPs(t *testing.T) {
--	err := vips.List(base.Client, vips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		vipList, err := vips.ExtractVIPs(page)
--		if err != nil {
--			t.Errorf("Failed to extract VIPs: %v", err)
--			return false, err
--		}
--
--		for _, vip := range vipList {
--			t.Logf("Listing VIP: ID [%s] Name [%s] Address [%s] Port [%d] Connection Limit [%d]",
--				vip.ID, vip.Name, vip.Address, vip.ProtocolPort, vip.ConnLimit)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func updateVIP(t *testing.T, VIPID string) {
--	i1000 := 1000
--	_, err := vips.Update(base.Client, VIPID, vips.UpdateOpts{ConnLimit: &i1000}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Updated VIP ID [%s]", VIPID)
--}
--
--func getVIP(t *testing.T, VIPID string) {
--	vip, err := vips.Get(base.Client, VIPID).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Getting VIP ID [%s]: Status [%s]", vip.ID, vip.Status)
--}
--
--func deleteVIP(t *testing.T, VIPID string) {
--	res := vips.Delete(base.Client, VIPID)
--
--	th.AssertNoErr(t, res.Err)
--
--	t.Logf("Deleted VIP %s", VIPID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
-deleted file mode 100644
-index aeec0fa..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go
-deleted file mode 100644
-index f10c9d9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go
-+++ /dev/null
-@@ -1,68 +0,0 @@
--// +build acceptance networking
--
--package extensions
--
--import (
--	"strconv"
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestNetworkCRUDOperations(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// Create a network
--	n, err := networks.Create(base.Client, networks.CreateOpts{Name: "sample_network", AdminStateUp: networks.Up}).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.Name, "sample_network")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	networkID := n.ID
--
--	// List networks
--	pager := networks.List(base.Client, networks.ListOpts{Limit: 2})
--	err = pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		networkList, err := networks.ExtractNetworks(page)
--		th.AssertNoErr(t, err)
--
--		for _, n := range networkList {
--			t.Logf("Network: ID [%s] Name [%s] Status [%s] Is shared? [%s]",
--				n.ID, n.Name, n.Status, strconv.FormatBool(n.Shared))
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--
--	// Get a network
--	if networkID == "" {
--		t.Fatalf("In order to retrieve a network, the NetworkID must be set")
--	}
--	n, err = networks.Get(base.Client, networkID).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertDeepEquals(t, n.Subnets, []string{})
--	th.AssertEquals(t, n.Name, "sample_network")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.Shared, false)
--	th.AssertEquals(t, n.ID, networkID)
--
--	// Update network
--	n, err = networks.Update(base.Client, networkID, networks.UpdateOpts{Name: "new_network_name"}).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.Name, "new_network_name")
--
--	// Delete network
--	res := networks.Delete(base.Client, networkID)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestCreateMultipleNetworks(t *testing.T) {
--	//networks.CreateMany()
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go
-deleted file mode 100644
-index 7d75292..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go
-+++ /dev/null
-@@ -1,171 +0,0 @@
--// +build acceptance networking security
--
--package extensions
--
--import (
--	"testing"
--
--	base "github.com/rackspace/gophercloud/acceptance/openstack/networking/v2"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/ports"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestSecurityGroups(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// create security group
--	groupID := createSecGroup(t)
--
--	// delete security group
--	defer deleteSecGroup(t, groupID)
--
--	// list security group
--	listSecGroups(t)
--
--	// get security group
--	getSecGroup(t, groupID)
--
--	// create port with security group
--	networkID, portID := createPort(t, groupID)
--
--	// teardown
--	defer deleteNetwork(t, networkID)
--
--	// delete port
--	defer deletePort(t, portID)
--}
--
--func TestSecurityGroupRules(t *testing.T) {
--	base.Setup(t)
--	defer base.Teardown()
--
--	// create security group
--	groupID := createSecGroup(t)
--
--	defer deleteSecGroup(t, groupID)
--
--	// create security group rule
--	ruleID := createSecRule(t, groupID)
--
--	// delete security group rule
--	defer deleteSecRule(t, ruleID)
--
--	// list security group rule
--	listSecRules(t)
--
--	// get security group rule
--	getSecRule(t, ruleID)
--}
--
--func createSecGroup(t *testing.T) string {
--	sg, err := groups.Create(base.Client, groups.CreateOpts{
--		Name:        "new-webservers",
--		Description: "security group for webservers",
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created security group %s", sg.ID)
--
--	return sg.ID
--}
--
--func listSecGroups(t *testing.T) {
--	err := groups.List(base.Client, groups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		list, err := groups.ExtractGroups(page)
--		if err != nil {
--			t.Errorf("Failed to extract secgroups: %v", err)
--			return false, err
--		}
--
--		for _, sg := range list {
--			t.Logf("Listing security group: ID [%s] Name [%s]", sg.ID, sg.Name)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func getSecGroup(t *testing.T, id string) {
--	sg, err := groups.Get(base.Client, id).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Getting security group: ID [%s] Name [%s] Description [%s]", sg.ID, sg.Name, sg.Description)
--}
--
--func createPort(t *testing.T, groupID string) (string, string) {
--	n, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network"}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created network %s", n.ID)
--
--	opts := ports.CreateOpts{
--		NetworkID:      n.ID,
--		Name:           "my_port",
--		SecurityGroups: []string{groupID},
--	}
--	p, err := ports.Create(base.Client, opts).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created port %s with security group %s", p.ID, groupID)
--
--	return n.ID, p.ID
--}
--
--func deleteSecGroup(t *testing.T, groupID string) {
--	res := groups.Delete(base.Client, groupID)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted security group %s", groupID)
--}
--
--func createSecRule(t *testing.T, groupID string) string {
--	r, err := rules.Create(base.Client, rules.CreateOpts{
--		Direction:    "ingress",
--		PortRangeMin: 80,
--		EtherType:    "IPv4",
--		PortRangeMax: 80,
--		Protocol:     "tcp",
--		SecGroupID:   groupID,
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--
--	t.Logf("Created security group rule %s", r.ID)
--
--	return r.ID
--}
--
--func listSecRules(t *testing.T) {
--	err := rules.List(base.Client, rules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		list, err := rules.ExtractRules(page)
--		if err != nil {
--			t.Errorf("Failed to extract sec rules: %v", err)
--			return false, err
--		}
--
--		for _, r := range list {
--			t.Logf("Listing security rule: ID [%s]", r.ID)
--		}
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--}
--
--func getSecRule(t *testing.T, id string) {
--	r, err := rules.Get(base.Client, id).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Getting security rule: ID [%s] Direction [%s] EtherType [%s] Protocol [%s]",
--		r.ID, r.Direction, r.EtherType, r.Protocol)
--}
--
--func deleteSecRule(t *testing.T, id string) {
--	res := rules.Delete(base.Client, id)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted security rule %s", id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go
-deleted file mode 100644
-index be8a3a1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/network_test.go
-+++ /dev/null
-@@ -1,68 +0,0 @@
--// +build acceptance networking
--
--package v2
--
--import (
--	"strconv"
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestNetworkCRUDOperations(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	// Create a network
--	n, err := networks.Create(Client, networks.CreateOpts{Name: "sample_network", AdminStateUp: networks.Up}).Extract()
--	th.AssertNoErr(t, err)
--	defer networks.Delete(Client, n.ID)
--	th.AssertEquals(t, n.Name, "sample_network")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	networkID := n.ID
--
--	// List networks
--	pager := networks.List(Client, networks.ListOpts{Limit: 2})
--	err = pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		networkList, err := networks.ExtractNetworks(page)
--		th.AssertNoErr(t, err)
--
--		for _, n := range networkList {
--			t.Logf("Network: ID [%s] Name [%s] Status [%s] Is shared? [%s]",
--				n.ID, n.Name, n.Status, strconv.FormatBool(n.Shared))
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--
--	// Get a network
--	if networkID == "" {
--		t.Fatalf("In order to retrieve a network, the NetworkID must be set")
--	}
--	n, err = networks.Get(Client, networkID).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertDeepEquals(t, n.Subnets, []string{})
--	th.AssertEquals(t, n.Name, "sample_network")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.Shared, false)
--	th.AssertEquals(t, n.ID, networkID)
--
--	// Update network
--	n, err = networks.Update(Client, networkID, networks.UpdateOpts{Name: "new_network_name"}).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.Name, "new_network_name")
--
--	// Delete network
--	res := networks.Delete(Client, networkID)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestCreateMultipleNetworks(t *testing.T) {
--	//networks.CreateMany()
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go
-deleted file mode 100644
-index 5ec3cc8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package v2
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go
-deleted file mode 100644
-index 7f22dbd..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/port_test.go
-+++ /dev/null
-@@ -1,117 +0,0 @@
--// +build acceptance networking
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/ports"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestPortCRUD(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	// Setup network
--	t.Log("Setting up network")
--	networkID, err := createNetwork()
--	th.AssertNoErr(t, err)
--	defer networks.Delete(Client, networkID)
--
--	// Setup subnet
--	t.Logf("Setting up subnet on network %s", networkID)
--	subnetID, err := createSubnet(networkID)
--	th.AssertNoErr(t, err)
--	defer subnets.Delete(Client, subnetID)
--
--	// Create port
--	t.Logf("Create port based on subnet %s", subnetID)
--	portID := createPort(t, networkID, subnetID)
--
--	// List ports
--	t.Logf("Listing all ports")
--	listPorts(t)
--
--	// Get port
--	if portID == "" {
--		t.Fatalf("In order to retrieve a port, the portID must be set")
--	}
--	p, err := ports.Get(Client, portID).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, p.ID, portID)
--
--	// Update port
--	p, err = ports.Update(Client, portID, ports.UpdateOpts{Name: "new_port_name"}).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, p.Name, "new_port_name")
--
--	// Delete port
--	res := ports.Delete(Client, portID)
--	th.AssertNoErr(t, res.Err)
--}
--
--func createPort(t *testing.T, networkID, subnetID string) string {
--	enable := false
--	opts := ports.CreateOpts{
--		NetworkID:    networkID,
--		Name:         "my_port",
--		AdminStateUp: &enable,
--		FixedIPs:     []ports.IP{ports.IP{SubnetID: subnetID}},
--	}
--	p, err := ports.Create(Client, opts).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, p.NetworkID, networkID)
--	th.AssertEquals(t, p.Name, "my_port")
--	th.AssertEquals(t, p.AdminStateUp, false)
--
--	return p.ID
--}
--
--func listPorts(t *testing.T) {
--	count := 0
--	pager := ports.List(Client, ports.ListOpts{})
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		t.Logf("--- Page ---")
--
--		portList, err := ports.ExtractPorts(page)
--		th.AssertNoErr(t, err)
--
--		for _, p := range portList {
--			t.Logf("Port: ID [%s] Name [%s] Status [%s] MAC addr [%s] Fixed IPs [%#v] Security groups [%#v]",
--				p.ID, p.Name, p.Status, p.MACAddress, p.FixedIPs, p.SecurityGroups)
--		}
--
--		return true, nil
--	})
--
--	th.CheckNoErr(t, err)
--
--	if count == 0 {
--		t.Logf("No pages were iterated over when listing ports")
--	}
--}
--
--func createNetwork() (string, error) {
--	res, err := networks.Create(Client, networks.CreateOpts{Name: "tmp_network", AdminStateUp: networks.Up}).Extract()
--	return res.ID, err
--}
--
--func createSubnet(networkID string) (string, error) {
--	s, err := subnets.Create(Client, subnets.CreateOpts{
--		NetworkID:  networkID,
--		CIDR:       "192.168.199.0/24",
--		IPVersion:  subnets.IPv4,
--		Name:       "my_subnet",
--		EnableDHCP: subnets.Down,
--	}).Extract()
--	return s.ID, err
--}
--
--func TestPortBatchCreate(t *testing.T) {
--	// todo
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go
-deleted file mode 100644
-index 097a303..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/networking/v2/subnet_test.go
-+++ /dev/null
-@@ -1,86 +0,0 @@
--// +build acceptance networking
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	pager := subnets.List(Client, subnets.ListOpts{Limit: 2})
--	err := pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		subnetList, err := subnets.ExtractSubnets(page)
--		th.AssertNoErr(t, err)
--
--		for _, s := range subnetList {
--			t.Logf("Subnet: ID [%s] Name [%s] IP Version [%d] CIDR [%s] GatewayIP [%s]",
--				s.ID, s.Name, s.IPVersion, s.CIDR, s.GatewayIP)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
--
--func TestCRUD(t *testing.T) {
--	Setup(t)
--	defer Teardown()
--
--	// Setup network
--	t.Log("Setting up network")
--	n, err := networks.Create(Client, networks.CreateOpts{Name: "tmp_network", AdminStateUp: networks.Up}).Extract()
--	th.AssertNoErr(t, err)
--	networkID := n.ID
--	defer networks.Delete(Client, networkID)
--
--	// Create subnet
--	t.Log("Create subnet")
--	enable := false
--	opts := subnets.CreateOpts{
--		NetworkID:  networkID,
--		CIDR:       "192.168.199.0/24",
--		IPVersion:  subnets.IPv4,
--		Name:       "my_subnet",
--		EnableDHCP: &enable,
--	}
--	s, err := subnets.Create(Client, opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, s.NetworkID, networkID)
--	th.AssertEquals(t, s.CIDR, "192.168.199.0/24")
--	th.AssertEquals(t, s.IPVersion, 4)
--	th.AssertEquals(t, s.Name, "my_subnet")
--	th.AssertEquals(t, s.EnableDHCP, false)
--	subnetID := s.ID
--
--	// Get subnet
--	t.Log("Getting subnet")
--	s, err = subnets.Get(Client, subnetID).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, s.ID, subnetID)
--
--	// Update subnet
--	t.Log("Update subnet")
--	s, err = subnets.Update(Client, subnetID, subnets.UpdateOpts{Name: "new_subnet_name"}).Extract()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, s.Name, "new_subnet_name")
--
--	// Delete subnet
--	t.Log("Delete subnet")
--	res := subnets.Delete(Client, subnetID)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestBatchCreate(t *testing.T) {
--	// todo
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go
-deleted file mode 100644
-index f7c01a7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"strings"
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAccounts(t *testing.T) {
--	// Create a provider client for making the HTTP requests.
--	// See common.go in this directory for more information.
--	client := newClient(t)
--
--	// Update an account's metadata.
--	updateres := accounts.Update(client, accounts.UpdateOpts{Metadata: metadata})
--	th.AssertNoErr(t, updateres.Err)
--
--	// Defer the deletion of the metadata set above.
--	defer func() {
--		tempMap := make(map[string]string)
--		for k := range metadata {
--			tempMap[k] = ""
--		}
--		updateres = accounts.Update(client, accounts.UpdateOpts{Metadata: tempMap})
--		th.AssertNoErr(t, updateres.Err)
--	}()
--
--	// Retrieve account metadata.
--	getres := accounts.Get(client, nil)
--	th.AssertNoErr(t, getres.Err)
--	// Extract the custom metadata from the 'Get' response.
--	am, err := getres.ExtractMetadata()
--	th.AssertNoErr(t, err)
--	for k := range metadata {
--		if am[k] != metadata[strings.Title(k)] {
--			t.Errorf("Expected custom metadata with key: %s", k)
--			return
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go
-deleted file mode 100644
-index 1eac681..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/common.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--var metadata = map[string]string{"gopher": "cloud"}
--
--func newClient(t *testing.T) *gophercloud.ServiceClient {
--	ao, err := openstack.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	client, err := openstack.AuthenticatedClient(ao)
--	th.AssertNoErr(t, err)
--
--	c, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{
--		Region: os.Getenv("OS_REGION_NAME"),
--	})
--	th.AssertNoErr(t, err)
--	return c
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go
-deleted file mode 100644
-index d6832f1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"strings"
--	"testing"
--
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--// numContainers is the number of containers to create for testing.
--var numContainers = 2
--
--func TestContainers(t *testing.T) {
--	// Create a new client to execute the HTTP requests. See common.go for newClient body.
--	client := newClient(t)
--
--	// Create a slice of random container names.
--	cNames := make([]string, numContainers)
--	for i := 0; i < numContainers; i++ {
--		cNames[i] = tools.RandomString("gophercloud-test-container-", 8)
--	}
--
--	// Create numContainers containers.
--	for i := 0; i < len(cNames); i++ {
--		res := containers.Create(client, cNames[i], nil)
--		th.AssertNoErr(t, res.Err)
--	}
--	// Delete the numContainers containers after function completion.
--	defer func() {
--		for i := 0; i < len(cNames); i++ {
--			res := containers.Delete(client, cNames[i])
--			th.AssertNoErr(t, res.Err)
--		}
--	}()
--
--	// List the full info for the numContainers containers that were just created.
--	// To restrict the listing to just those containers, the 'prefix' parameter is used.
--	err := containers.List(client, &containers.ListOpts{Full: true, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) {
--		containerList, err := containers.ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		for _, n := range containerList {
--			t.Logf("Container: Name [%s] Count [%d] Bytes [%d]",
--				n.Name, n.Count, n.Bytes)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--
--	// List only the names of the numContainers containers that were created.
--	err = containers.List(client, &containers.ListOpts{Full: false, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) {
--		containerList, err := containers.ExtractNames(page)
--		th.AssertNoErr(t, err)
--		for _, n := range containerList {
--			t.Logf("Container: Name [%s]", n)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--
--	// Update one of the numContainer container metadata.
--	updateres := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: metadata})
--	th.AssertNoErr(t, updateres.Err)
--	// After the tests are done, delete the metadata that was set.
--	defer func() {
--		tempMap := make(map[string]string)
--		for k := range metadata {
--			tempMap[k] = ""
--		}
--		res := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: tempMap})
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	// Retrieve a container's metadata.
--	cm, err := containers.Get(client, cNames[0]).ExtractMetadata()
--	th.AssertNoErr(t, err)
--	for k := range metadata {
--		if cm[k] != metadata[strings.Title(k)] {
--			t.Errorf("Expected custom metadata with key: %s", k)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go
-deleted file mode 100644
-index a8de338..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go
-+++ /dev/null
-@@ -1,119 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"bytes"
--	"strings"
--	"testing"
--
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--// numObjects is the number of objects to create for testing.
--var numObjects = 2
--
--func TestObjects(t *testing.T) {
--	// Create a provider client for executing the HTTP request.
--	// See common.go for more information.
--	client := newClient(t)
--
--	// Make a slice of length numObjects to hold the random object names.
--	oNames := make([]string, numObjects)
--	for i := 0; i < len(oNames); i++ {
--		oNames[i] = tools.RandomString("test-object-", 8)
--	}
--
--	// Create a container to hold the test objects.
--	cName := tools.RandomString("test-container-", 8)
--	header, err := containers.Create(client, cName, nil).ExtractHeader()
--	th.AssertNoErr(t, err)
--	t.Logf("Create object headers: %+v\n", header)
--
--	// Defer deletion of the container until after testing.
--	defer func() {
--		res := containers.Delete(client, cName)
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	// Create a slice of buffers to hold the test object content.
--	oContents := make([]*bytes.Buffer, numObjects)
--	for i := 0; i < numObjects; i++ {
--		oContents[i] = bytes.NewBuffer([]byte(tools.RandomString("", 10)))
--		res := objects.Create(client, cName, oNames[i], oContents[i], nil)
--		th.AssertNoErr(t, res.Err)
--	}
--	// Delete the objects after testing.
--	defer func() {
--		for i := 0; i < numObjects; i++ {
--			res := objects.Delete(client, cName, oNames[i], nil)
--			th.AssertNoErr(t, res.Err)
--		}
--	}()
--
--	ons := make([]string, 0, len(oNames))
--	err = objects.List(client, cName, &objects.ListOpts{Full: false, Prefix: "test-object-"}).EachPage(func(page pagination.Page) (bool, error) {
--		names, err := objects.ExtractNames(page)
--		th.AssertNoErr(t, err)
--		ons = append(ons, names...)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, len(ons), len(oNames))
--
--	ois := make([]objects.Object, 0, len(oNames))
--	err = objects.List(client, cName, &objects.ListOpts{Full: true, Prefix: "test-object-"}).EachPage(func(page pagination.Page) (bool, error) {
--		info, err := objects.ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		ois = append(ois, info...)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, len(ois), len(oNames))
--
--	// Copy the contents of one object to another.
--	copyres := objects.Copy(client, cName, oNames[0], &objects.CopyOpts{Destination: cName + "/" + oNames[1]})
--	th.AssertNoErr(t, copyres.Err)
--
--	// Download one of the objects that was created above.
--	o1Content, err := objects.Download(client, cName, oNames[0], nil).ExtractContent()
--	th.AssertNoErr(t, err)
--
--	// Download the other object that was created above.
--	o2Content, err := objects.Download(client, cName, oNames[1], nil).ExtractContent()
--	th.AssertNoErr(t, err)
--
--	// Compare the two objects' contents to test that the copy worked.
--	th.AssertEquals(t, string(o2Content), string(o1Content))
--
--	// Update an object's metadata.
--	updateres := objects.Update(client, cName, oNames[0], &objects.UpdateOpts{Metadata: metadata})
--	th.AssertNoErr(t, updateres.Err)
--
--	// Delete the object's metadata after testing.
--	defer func() {
--		tempMap := make(map[string]string)
--		for k := range metadata {
--			tempMap[k] = ""
--		}
--		res := objects.Update(client, cName, oNames[0], &objects.UpdateOpts{Metadata: tempMap})
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	// Retrieve an object's metadata.
--	om, err := objects.Get(client, cName, oNames[0], nil).ExtractMetadata()
--	th.AssertNoErr(t, err)
--	for k := range metadata {
--		if om[k] != metadata[strings.Title(k)] {
--			t.Errorf("Expected custom metadata with key: %s", k)
--			return
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go
-deleted file mode 100644
-index 3a8ecdb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/pkg.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--// +build acceptance
--
--package openstack
--
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go
-deleted file mode 100644
-index e9fdd99..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/common.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--// +build acceptance
--
--package v1
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/rackspace"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func newClient() (*gophercloud.ServiceClient, error) {
--	opts, err := rackspace.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--	opts = tools.OnlyRS(opts)
--	region := os.Getenv("RS_REGION")
--
--	provider, err := rackspace.AuthenticatedClient(opts)
--	if err != nil {
--		return nil, err
--	}
--
--	return rackspace.NewBlockStorageV1(provider, gophercloud.EndpointOpts{
--		Region: region,
--	})
--}
--
--func setup(t *testing.T) *gophercloud.ServiceClient {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	return client
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go
-deleted file mode 100644
-index 25b2cfe..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/snapshot_test.go
-+++ /dev/null
-@@ -1,82 +0,0 @@
--// +build acceptance blockstorage snapshots
--
--package v1
--
--import (
--	"testing"
--	"time"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestSnapshots(t *testing.T) {
--	client := setup(t)
--	volID := testVolumeCreate(t, client)
--
--	t.Log("Creating snapshots")
--	s := testSnapshotCreate(t, client, volID)
--	id := s.ID
--
--	t.Log("Listing snapshots")
--	testSnapshotList(t, client)
--
--	t.Logf("Getting snapshot %s", id)
--	testSnapshotGet(t, client, id)
--
--	t.Logf("Updating snapshot %s", id)
--	testSnapshotUpdate(t, client, id)
--
--	t.Logf("Deleting snapshot %s", id)
--	testSnapshotDelete(t, client, id)
--	s.WaitUntilDeleted(client, -1)
--
--	t.Logf("Deleting volume %s", volID)
--	testVolumeDelete(t, client, volID)
--}
--
--func testSnapshotCreate(t *testing.T, client *gophercloud.ServiceClient, volID string) *snapshots.Snapshot {
--	opts := snapshots.CreateOpts{VolumeID: volID, Name: "snapshot-001"}
--	s, err := snapshots.Create(client, opts).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created snapshot %s", s.ID)
--
--	t.Logf("Waiting for new snapshot to become available...")
--	start := time.Now().Second()
--	s.WaitUntilComplete(client, -1)
--	t.Logf("Snapshot completed after %ds", time.Now().Second()-start)
--
--	return s
--}
--
--func testSnapshotList(t *testing.T, client *gophercloud.ServiceClient) {
--	snapshots.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		sList, err := snapshots.ExtractSnapshots(page)
--		th.AssertNoErr(t, err)
--
--		for _, s := range sList {
--			t.Logf("Snapshot: ID [%s] Name [%s] Volume ID [%s] Progress [%s] Created [%s]",
--				s.ID, s.Name, s.VolumeID, s.Progress, s.CreatedAt)
--		}
--
--		return true, nil
--	})
--}
--
--func testSnapshotGet(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	_, err := snapshots.Get(client, id).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func testSnapshotUpdate(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	_, err := snapshots.Update(client, id, snapshots.UpdateOpts{Name: "new_name"}).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func testSnapshotDelete(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	res := snapshots.Delete(client, id)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted snapshot %s", id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go
-deleted file mode 100644
-index f86f9ad..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_test.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--// +build acceptance blockstorage volumes
--
--package v1
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestVolumes(t *testing.T) {
--	client := setup(t)
--
--	t.Logf("Listing volumes")
--	testVolumeList(t, client)
--
--	t.Logf("Creating volume")
--	volumeID := testVolumeCreate(t, client)
--
--	t.Logf("Getting volume %s", volumeID)
--	testVolumeGet(t, client, volumeID)
--
--	t.Logf("Updating volume %s", volumeID)
--	testVolumeUpdate(t, client, volumeID)
--
--	t.Logf("Deleting volume %s", volumeID)
--	testVolumeDelete(t, client, volumeID)
--}
--
--func testVolumeList(t *testing.T, client *gophercloud.ServiceClient) {
--	volumes.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		vList, err := volumes.ExtractVolumes(page)
--		th.AssertNoErr(t, err)
--
--		for _, v := range vList {
--			t.Logf("Volume: ID [%s] Name [%s] Type [%s] Created [%s]", v.ID, v.Name,
--				v.VolumeType, v.CreatedAt)
--		}
--
--		return true, nil
--	})
--}
--
--func testVolumeCreate(t *testing.T, client *gophercloud.ServiceClient) string {
--	vol, err := volumes.Create(client, os.CreateOpts{Size: 75}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created volume: ID [%s] Size [%d]", vol.ID, vol.Size)
--	return vol.ID
--}
--
--func testVolumeGet(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	vol, err := volumes.Get(client, id).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Retrieved volume: ID [%s] Size [%d]", vol.ID, vol.Size)
--}
--
--func testVolumeUpdate(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	vol, err := volumes.Update(client, id, volumes.UpdateOpts{Name: "new_name"}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Updated volume: ID [%s] Name [%s]", vol.ID, vol.Name)
--}
--
--func testVolumeDelete(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	res := volumes.Delete(client, id)
--	th.AssertNoErr(t, res.Err)
--	t.Logf("Deleted volume %s", id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go
-deleted file mode 100644
-index 716f2b9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/blockstorage/v1/volume_type_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--// +build acceptance blockstorage volumetypes
--
--package v1
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAll(t *testing.T) {
--	client := setup(t)
--
--	t.Logf("Listing volume types")
--	id := testList(t, client)
--
--	t.Logf("Getting volume type %s", id)
--	testGet(t, client, id)
--}
--
--func testList(t *testing.T, client *gophercloud.ServiceClient) string {
--	var lastID string
--
--	volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		typeList, err := volumetypes.ExtractVolumeTypes(page)
--		th.AssertNoErr(t, err)
--
--		for _, vt := range typeList {
--			t.Logf("Volume type: ID [%s] Name [%s]", vt.ID, vt.Name)
--			lastID = vt.ID
--		}
--
--		return true, nil
--	})
--
--	return lastID
--}
--
--func testGet(t *testing.T, client *gophercloud.ServiceClient, id string) {
--	vt, err := volumetypes.Get(client, id).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Volume: ID [%s] Name [%s]", vt.ID, vt.Name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go
-deleted file mode 100644
-index 61214c0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/client_test.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--// +build acceptance
--
--package rackspace
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/rackspace"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAuthenticatedClient(t *testing.T) {
--	// Obtain credentials from the environment.
--	ao, err := rackspace.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	client, err := rackspace.AuthenticatedClient(tools.OnlyRS(ao))
--	if err != nil {
--		t.Fatalf("Unable to authenticate: %v", err)
--	}
--
--	if client.TokenID == "" {
--		t.Errorf("No token ID assigned to the client")
--	}
--
--	t.Logf("Client successfully acquired a token: %v", client.TokenID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go
-deleted file mode 100644
-index 010bf42..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/bootfromvolume_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/smashwilson/gophercloud/acceptance/tools"
--)
--
--func TestBootFromVolume(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	if testing.Short() {
--		t.Skip("Skipping test that requires server creation in short mode.")
--	}
--
--	options, err := optionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	name := tools.RandomString("Gophercloud-", 8)
--	t.Logf("Creating server [%s].", name)
--
--	bd := []osBFV.BlockDevice{
--		osBFV.BlockDevice{
--			UUID:       options.imageID,
--			SourceType: osBFV.Image,
--			VolumeSize: 10,
--		},
--	}
--
--	server, err := bootfromvolume.Create(client, servers.CreateOpts{
--		Name:        name,
--		FlavorRef:   "performance1-1",
--		BlockDevice: bd,
--	}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created server: %+v\n", server)
--	//defer deleteServer(t, client, server)
--	t.Logf("Deleting server [%s]...", name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go
-deleted file mode 100644
-index 3ca6dc9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/compute_test.go
-+++ /dev/null
-@@ -1,60 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"errors"
--	"os"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/rackspace"
--)
--
--func newClient() (*gophercloud.ServiceClient, error) {
--	// Obtain credentials from the environment.
--	options, err := rackspace.AuthOptionsFromEnv()
--	if err != nil {
--		return nil, err
--	}
--	options = tools.OnlyRS(options)
--	region := os.Getenv("RS_REGION")
--
--	if options.Username == "" {
--		return nil, errors.New("Please provide a Rackspace username as RS_USERNAME.")
--	}
--	if options.APIKey == "" {
--		return nil, errors.New("Please provide a Rackspace API key as RS_API_KEY.")
--	}
--	if region == "" {
--		return nil, errors.New("Please provide a Rackspace region as RS_REGION.")
--	}
--
--	client, err := rackspace.AuthenticatedClient(options)
--	if err != nil {
--		return nil, err
--	}
--
--	return rackspace.NewComputeV2(client, gophercloud.EndpointOpts{
--		Region: region,
--	})
--}
--
--type serverOpts struct {
--	imageID  string
--	flavorID string
--}
--
--func optionsFromEnv() (*serverOpts, error) {
--	options := &serverOpts{
--		imageID:  os.Getenv("RS_IMAGE_ID"),
--		flavorID: os.Getenv("RS_FLAVOR_ID"),
--	}
--	if options.imageID == "" {
--		return nil, errors.New("Please provide a valid Rackspace image ID as RS_IMAGE_ID")
--	}
--	if options.flavorID == "" {
--		return nil, errors.New("Please provide a valid Rackspace flavor ID as RS_FLAVOR_ID")
--	}
--	return options, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go
-deleted file mode 100644
-index 4618ecc..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/flavors_test.go
-+++ /dev/null
-@@ -1,61 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/flavors"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestListFlavors(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	count := 0
--	err = flavors.ListDetail(client, nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		t.Logf("-- Page %0d --", count)
--
--		fs, err := flavors.ExtractFlavors(page)
--		th.AssertNoErr(t, err)
--
--		for i, flavor := range fs {
--			t.Logf("[%02d]      id=[%s]", i, flavor.ID)
--			t.Logf("        name=[%s]", flavor.Name)
--			t.Logf("        disk=[%d]", flavor.Disk)
--			t.Logf("         RAM=[%d]", flavor.RAM)
--			t.Logf(" rxtx_factor=[%f]", flavor.RxTxFactor)
--			t.Logf("        swap=[%d]", flavor.Swap)
--			t.Logf("       VCPUs=[%d]", flavor.VCPUs)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No flavors listed!")
--	}
--}
--
--func TestGetFlavor(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	options, err := optionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	flavor, err := flavors.Get(client, options.flavorID).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Requested flavor:")
--	t.Logf("          id=[%s]", flavor.ID)
--	t.Logf("        name=[%s]", flavor.Name)
--	t.Logf("        disk=[%d]", flavor.Disk)
--	t.Logf("         RAM=[%d]", flavor.RAM)
--	t.Logf(" rxtx_factor=[%f]", flavor.RxTxFactor)
--	t.Logf("        swap=[%d]", flavor.Swap)
--	t.Logf("       VCPUs=[%d]", flavor.VCPUs)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go
-deleted file mode 100644
-index 5e36c2e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/images_test.go
-+++ /dev/null
-@@ -1,63 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/images"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestListImages(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	count := 0
--	err = images.ListDetail(client, nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		t.Logf("-- Page %02d --", count)
--
--		is, err := images.ExtractImages(page)
--		th.AssertNoErr(t, err)
--
--		for i, image := range is {
--			t.Logf("[%02d]   id=[%s]", i, image.ID)
--			t.Logf("     name=[%s]", image.Name)
--			t.Logf("  created=[%s]", image.Created)
--			t.Logf("  updated=[%s]", image.Updated)
--			t.Logf(" min disk=[%d]", image.MinDisk)
--			t.Logf("  min RAM=[%d]", image.MinRAM)
--			t.Logf(" progress=[%d]", image.Progress)
--			t.Logf("   status=[%s]", image.Status)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count < 1 {
--		t.Errorf("Expected at least one page of images.")
--	}
--}
--
--func TestGetImage(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	options, err := optionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	image, err := images.Get(client, options.imageID).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Requested image:")
--	t.Logf("       id=[%s]", image.ID)
--	t.Logf("     name=[%s]", image.Name)
--	t.Logf("  created=[%s]", image.Created)
--	t.Logf("  updated=[%s]", image.Updated)
--	t.Logf(" min disk=[%d]", image.MinDisk)
--	t.Logf("  min RAM=[%d]", image.MinRAM)
--	t.Logf(" progress=[%d]", image.Progress)
--	t.Logf("   status=[%s]", image.Status)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go
-deleted file mode 100644
-index 9bd6eb4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/keypairs_test.go
-+++ /dev/null
-@@ -1,87 +0,0 @@
--// +build acceptance rackspace
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func deleteKeyPair(t *testing.T, client *gophercloud.ServiceClient, name string) {
--	err := keypairs.Delete(client, name).ExtractErr()
--	th.AssertNoErr(t, err)
--	t.Logf("Successfully deleted key [%s].", name)
--}
--
--func TestCreateKeyPair(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	name := tools.RandomString("createdkey-", 8)
--	k, err := keypairs.Create(client, os.CreateOpts{Name: name}).Extract()
--	th.AssertNoErr(t, err)
--	defer deleteKeyPair(t, client, name)
--
--	t.Logf("Created a new keypair:")
--	t.Logf("        name=[%s]", k.Name)
--	t.Logf(" fingerprint=[%s]", k.Fingerprint)
--	t.Logf("   publickey=[%s]", tools.Elide(k.PublicKey))
--	t.Logf("  privatekey=[%s]", tools.Elide(k.PrivateKey))
--	t.Logf("      userid=[%s]", k.UserID)
--}
--
--func TestImportKeyPair(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	name := tools.RandomString("importedkey-", 8)
--	pubkey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlIQ3r+zd97kb9Hzmujd3V6pbO53eb3Go4q2E8iqVGWQfZTrFdL9KACJnqJIm9HmncfRkUTxE37hqeGCCv8uD+ZPmPiZG2E60OX1mGDjbbzAyReRwYWXgXHopggZTLak5k4mwZYaxwaufbVBDRn847e01lZnaXaszEToLM37NLw+uz29sl3TwYy2R0RGHPwPc160aWmdLjSyd1Nd4c9pvvOP/EoEuBjIC6NJJwg2Rvg9sjjx9jYj0QUgc8CqKLN25oMZ69kNJzlFylKRUoeeVr89txlR59yehJWk6Uw6lYFTdJmcmQOFVAJ12RMmS1hLWCM8UzAgtw+EDa0eqBxBDl smash at winter"
--
--	k, err := keypairs.Create(client, os.CreateOpts{
--		Name:      name,
--		PublicKey: pubkey,
--	}).Extract()
--	th.AssertNoErr(t, err)
--	defer deleteKeyPair(t, client, name)
--
--	th.CheckEquals(t, pubkey, k.PublicKey)
--	th.CheckEquals(t, "", k.PrivateKey)
--
--	t.Logf("Imported an existing keypair:")
--	t.Logf("        name=[%s]", k.Name)
--	t.Logf(" fingerprint=[%s]", k.Fingerprint)
--	t.Logf("   publickey=[%s]", tools.Elide(k.PublicKey))
--	t.Logf("  privatekey=[%s]", tools.Elide(k.PrivateKey))
--	t.Logf("      userid=[%s]", k.UserID)
--}
--
--func TestListKeyPairs(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	count := 0
--	err = keypairs.List(client).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		t.Logf("--- %02d ---", count)
--
--		ks, err := keypairs.ExtractKeyPairs(page)
--		th.AssertNoErr(t, err)
--
--		for i, keypair := range ks {
--			t.Logf("[%02d]    name=[%s]", i, keypair.Name)
--			t.Logf(" fingerprint=[%s]", keypair.Fingerprint)
--			t.Logf("   publickey=[%s]", tools.Elide(keypair.PublicKey))
--			t.Logf("  privatekey=[%s]", tools.Elide(keypair.PrivateKey))
--			t.Logf("      userid=[%s]", keypair.UserID)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go
-deleted file mode 100644
-index e8fc4d3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/networks_test.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--// +build acceptance rackspace
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/networks"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestNetworks(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	// Create a network
--	n, err := networks.Create(client, networks.CreateOpts{Label: "sample_network", CIDR: "172.20.0.0/24"}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created network: %+v\n", n)
--	defer networks.Delete(client, n.ID)
--	th.AssertEquals(t, n.Label, "sample_network")
--	th.AssertEquals(t, n.CIDR, "172.20.0.0/24")
--	networkID := n.ID
--
--	// List networks
--	pager := networks.List(client)
--	err = pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		networkList, err := networks.ExtractNetworks(page)
--		th.AssertNoErr(t, err)
--
--		for _, n := range networkList {
--			t.Logf("Network: ID [%s] Label [%s] CIDR [%s]",
--				n.ID, n.Label, n.CIDR)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--
--	// Get a network
--	if networkID == "" {
--		t.Fatalf("In order to retrieve a network, the NetworkID must be set")
--	}
--	n, err = networks.Get(client, networkID).Extract()
--	t.Logf("Retrieved Network: %+v\n", n)
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, n.CIDR, "172.20.0.0/24")
--	th.AssertEquals(t, n.Label, "sample_network")
--	th.AssertEquals(t, n.ID, networkID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go
-deleted file mode 100644
-index 5ec3cc8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package v2
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go
-deleted file mode 100644
-index 511f0a9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/servers_test.go
-+++ /dev/null
-@@ -1,199 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
--	oskey "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func createServerKeyPair(t *testing.T, client *gophercloud.ServiceClient) *oskey.KeyPair {
--	name := tools.RandomString("importedkey-", 8)
--	pubkey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlIQ3r+zd97kb9Hzmujd3V6pbO53eb3Go4q2E8iqVGWQfZTrFdL9KACJnqJIm9HmncfRkUTxE37hqeGCCv8uD+ZPmPiZG2E60OX1mGDjbbzAyReRwYWXgXHopggZTLak5k4mwZYaxwaufbVBDRn847e01lZnaXaszEToLM37NLw+uz29sl3TwYy2R0RGHPwPc160aWmdLjSyd1Nd4c9pvvOP/EoEuBjIC6NJJwg2Rvg9sjjx9jYj0QUgc8CqKLN25oMZ69kNJzlFylKRUoeeVr89txlR59yehJWk6Uw6lYFTdJmcmQOFVAJ12RMmS1hLWCM8UzAgtw+EDa0eqBxBDl smash at winter"
--
--	k, err := keypairs.Create(client, oskey.CreateOpts{
--		Name:      name,
--		PublicKey: pubkey,
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	return k
--}
--
--func createServer(t *testing.T, client *gophercloud.ServiceClient, keyName string) *os.Server {
--	if testing.Short() {
--		t.Skip("Skipping test that requires server creation in short mode.")
--	}
--
--	options, err := optionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	name := tools.RandomString("Gophercloud-", 8)
--
--	opts := &servers.CreateOpts{
--		Name:       name,
--		ImageRef:   options.imageID,
--		FlavorRef:  options.flavorID,
--		DiskConfig: diskconfig.Manual,
--	}
--
--	if keyName != "" {
--		opts.KeyPair = keyName
--	}
--
--	t.Logf("Creating server [%s].", name)
--	s, err := servers.Create(client, opts).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Waiting for server to become ACTIVE.")
--
--	err = servers.WaitForStatus(client, s.ID, "ACTIVE", 300)
--	th.AssertNoErr(t, err)
--	t.Logf("Server created successfully.")
--
--	return s
--}
--
--func logServer(t *testing.T, server *os.Server, index int) {
--	if index == -1 {
--		t.Logf("             id=[%s]", server.ID)
--	} else {
--		t.Logf("[%02d]             id=[%s]", index, server.ID)
--	}
--	t.Logf("           name=[%s]", server.Name)
--	t.Logf("      tenant ID=[%s]", server.TenantID)
--	t.Logf("        user ID=[%s]", server.UserID)
--	t.Logf("        updated=[%s]", server.Updated)
--	t.Logf("        created=[%s]", server.Created)
--	t.Logf("        host ID=[%s]", server.HostID)
--	t.Logf("    access IPv4=[%s]", server.AccessIPv4)
--	t.Logf("    access IPv6=[%s]", server.AccessIPv6)
--	t.Logf("          image=[%v]", server.Image)
--	t.Logf("         flavor=[%v]", server.Flavor)
--	t.Logf("      addresses=[%v]", server.Addresses)
--	t.Logf("       metadata=[%v]", server.Metadata)
--	t.Logf("          links=[%v]", server.Links)
--	t.Logf("        keyname=[%s]", server.KeyName)
--	t.Logf(" admin password=[%s]", server.AdminPass)
--	t.Logf("         status=[%s]", server.Status)
--	t.Logf("       progress=[%d]", server.Progress)
--}
--
--func getServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
--	t.Logf("> servers.Get")
--
--	details, err := servers.Get(client, server.ID).Extract()
--	th.AssertNoErr(t, err)
--	logServer(t, details, -1)
--}
--
--func listServers(t *testing.T, client *gophercloud.ServiceClient) {
--	t.Logf("> servers.List")
--
--	count := 0
--	err := servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		t.Logf("--- Page %02d ---", count)
--
--		s, err := servers.ExtractServers(page)
--		th.AssertNoErr(t, err)
--		for index, server := range s {
--			logServer(t, &server, index)
--		}
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--}
--
--func changeAdminPassword(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
--	t.Logf("> servers.ChangeAdminPassword")
--
--	original := server.AdminPass
--
--	t.Logf("Changing server password.")
--	err := servers.ChangeAdminPassword(client, server.ID, tools.MakeNewPassword(original)).Extract()
--	th.AssertNoErr(t, err)
--
--	err = servers.WaitForStatus(client, server.ID, "ACTIVE", 300)
--	th.AssertNoErr(t, err)
--	t.Logf("Password changed successfully.")
--}
--
--func rebootServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
--	t.Logf("> servers.Reboot")
--
--	err := servers.Reboot(client, server.ID, os.HardReboot).Extract()
--	th.AssertNoErr(t, err)
--
--	err = servers.WaitForStatus(client, server.ID, "ACTIVE", 300)
--	th.AssertNoErr(t, err)
--
--	t.Logf("Server successfully rebooted.")
--}
--
--func rebuildServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
--	t.Logf("> servers.Rebuild")
--
--	options, err := optionsFromEnv()
--	th.AssertNoErr(t, err)
--
--	opts := servers.RebuildOpts{
--		Name:       tools.RandomString("RenamedGopher", 16),
--		AdminPass:  tools.MakeNewPassword(server.AdminPass),
--		ImageID:    options.imageID,
--		DiskConfig: diskconfig.Manual,
--	}
--	after, err := servers.Rebuild(client, server.ID, opts).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, after.ID, server.ID)
--
--	err = servers.WaitForStatus(client, after.ID, "ACTIVE", 300)
--	th.AssertNoErr(t, err)
--
--	t.Logf("Server successfully rebuilt.")
--	logServer(t, after, -1)
--}
--
--func deleteServer(t *testing.T, client *gophercloud.ServiceClient, server *os.Server) {
--	t.Logf("> servers.Delete")
--
--	res := servers.Delete(client, server.ID)
--	th.AssertNoErr(t, res.Err)
--
--	t.Logf("Server deleted successfully.")
--}
--
--func deleteServerKeyPair(t *testing.T, client *gophercloud.ServiceClient, k *oskey.KeyPair) {
--	t.Logf("> keypairs.Delete")
--
--	err := keypairs.Delete(client, k.Name).ExtractErr()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Keypair deleted successfully.")
--}
--
--func TestServerOperations(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	kp := createServerKeyPair(t, client)
--	defer deleteServerKeyPair(t, client, kp)
--
--	server := createServer(t, client, kp.Name)
--	defer deleteServer(t, client, server)
--
--	getServer(t, client, server)
--	listServers(t, client)
--	changeAdminPassword(t, client, server)
--	rebootServer(t, client, server)
--	rebuildServer(t, client, server)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go
-deleted file mode 100644
-index 39475e1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/compute/v2/virtualinterfaces_test.go
-+++ /dev/null
-@@ -1,53 +0,0 @@
--// +build acceptance rackspace
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/networks"
--	"github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestVirtualInterfaces(t *testing.T) {
--	client, err := newClient()
--	th.AssertNoErr(t, err)
--
--	// Create a server
--	server := createServer(t, client, "")
--	t.Logf("Created Server: %v\n", server)
--	defer deleteServer(t, client, server)
--	serverID := server.ID
--
--	// Create a network
--	n, err := networks.Create(client, networks.CreateOpts{Label: "sample_network", CIDR: "172.20.0.0/24"}).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created Network: %v\n", n)
--	defer networks.Delete(client, n.ID)
--	networkID := n.ID
--
--	// Create a virtual interface
--	vi, err := virtualinterfaces.Create(client, serverID, networkID).Extract()
--	th.AssertNoErr(t, err)
--	t.Logf("Created virtual interface: %+v\n", vi)
--	defer virtualinterfaces.Delete(client, serverID, vi.ID)
--
--	// List virtual interfaces
--	pager := virtualinterfaces.List(client, serverID)
--	err = pager.EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page ---")
--
--		virtualinterfacesList, err := virtualinterfaces.ExtractVirtualInterfaces(page)
--		th.AssertNoErr(t, err)
--
--		for _, vi := range virtualinterfacesList {
--			t.Logf("Virtual Interface: ID [%s] MAC Address [%s] IP Addresses [%v]",
--				vi.ID, vi.MACAddress, vi.IPAddresses)
--		}
--
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go
-deleted file mode 100644
-index a50e015..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/extension_test.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	extensions2 "github.com/rackspace/gophercloud/rackspace/identity/v2/extensions"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestExtensions(t *testing.T) {
--	service := authenticatedClient(t)
--
--	t.Logf("Extensions available on this identity endpoint:")
--	count := 0
--	var chosen string
--	err := extensions2.List(service).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		extensions, err := extensions2.ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--
--		for i, ext := range extensions {
--			if chosen == "" {
--				chosen = ext.Alias
--			}
--
--			t.Logf("[%02d] name=[%s] namespace=[%s]", i, ext.Name, ext.Namespace)
--			t.Logf("     alias=[%s] updated=[%s]", ext.Alias, ext.Updated)
--			t.Logf("     description=[%s]", ext.Description)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--
--	if chosen == "" {
--		t.Logf("No extensions found.")
--		return
--	}
--
--	ext, err := extensions2.Get(service, chosen).Extract()
--	th.AssertNoErr(t, err)
--
--	t.Logf("Detail for extension [%s]:", chosen)
--	t.Logf("        name=[%s]", ext.Name)
--	t.Logf("   namespace=[%s]", ext.Namespace)
--	t.Logf("       alias=[%s]", ext.Alias)
--	t.Logf("     updated=[%s]", ext.Updated)
--	t.Logf(" description=[%s]", ext.Description)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go
-deleted file mode 100644
-index 1182982..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/identity_test.go
-+++ /dev/null
-@@ -1,50 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/rackspace"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions {
--	// Obtain credentials from the environment.
--	options, err := rackspace.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--	options = tools.OnlyRS(options)
--
--	if options.Username == "" {
--		t.Fatal("Please provide a Rackspace username as RS_USERNAME.")
--	}
--	if options.APIKey == "" {
--		t.Fatal("Please provide a Rackspace API key as RS_API_KEY.")
--	}
--
--	return options
--}
--
--func createClient(t *testing.T, auth bool) *gophercloud.ServiceClient {
--	ao := rackspaceAuthOptions(t)
--
--	provider, err := rackspace.NewClient(ao.IdentityEndpoint)
--	th.AssertNoErr(t, err)
--
--	if auth {
--		err = rackspace.Authenticate(provider, ao)
--		th.AssertNoErr(t, err)
--	}
--
--	return rackspace.NewIdentityV2(provider)
--}
--
--func unauthenticatedClient(t *testing.T) *gophercloud.ServiceClient {
--	return createClient(t, false)
--}
--
--func authenticatedClient(t *testing.T) *gophercloud.ServiceClient {
--	return createClient(t, true)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go
-deleted file mode 100644
-index 6081a49..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/identity/v2/tenant_test.go
-+++ /dev/null
-@@ -1,37 +0,0 @@
--// +build acceptance
--
--package v2
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	rstenants "github.com/rackspace/gophercloud/rackspace/identity/v2/tenants"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestTenants(t *testing.T) {
--	service := authenticatedClient(t)
--
--	t.Logf("Tenants available to the currently issued token:")
--	count := 0
--	err := rstenants.List(service, nil).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		tenants, err := rstenants.ExtractTenants(page)
--		th.AssertNoErr(t, err)
--
--		for i, tenant := range tenants {
--			t.Logf("[%02d]      id=[%s]", i, tenant.ID)
--			t.Logf("        name=[%s] enabled=[%v]", tenant.Name, tenant.Enabled)
--			t.Logf(" description=[%s]", tenant.Description)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No tenants listed for your current token.")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go
-deleted file mode 100644
-index 145e4e0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/accounts_test.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--// +build acceptance rackspace
--
--package v1
--
--import (
--	"testing"
--
--	raxAccounts "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAccounts(t *testing.T) {
--	c, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	updateres := raxAccounts.Update(c, raxAccounts.UpdateOpts{Metadata: map[string]string{"white": "mountains"}})
--	th.AssertNoErr(t, updateres.Err)
--	t.Logf("Headers from Update Account request: %+v\n", updateres.Header)
--	defer func() {
--		updateres = raxAccounts.Update(c, raxAccounts.UpdateOpts{Metadata: map[string]string{"white": ""}})
--		th.AssertNoErr(t, updateres.Err)
--		metadata, err := raxAccounts.Get(c).ExtractMetadata()
--		th.AssertNoErr(t, err)
--		t.Logf("Metadata from Get Account request (after update reverted): %+v\n", metadata)
--		th.CheckEquals(t, metadata["White"], "")
--	}()
--
--	metadata, err := raxAccounts.Get(c).ExtractMetadata()
--	th.AssertNoErr(t, err)
--	t.Logf("Metadata from Get Account request (after update): %+v\n", metadata)
--
--	th.CheckEquals(t, metadata["White"], "mountains")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go
-deleted file mode 100644
-index 79013a5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/bulk_test.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestBulk(t *testing.T) {
--	c, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	var options bulk.DeleteOpts
--	options = append(options, "container/object1")
--	res := bulk.Delete(c, options)
--	th.AssertNoErr(t, res.Err)
--	body, err := res.ExtractBody()
--	th.AssertNoErr(t, err)
--	t.Logf("Response body from Bulk Delete Request: %+v\n", body)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go
-deleted file mode 100644
-index e1bf38b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdncontainers_test.go
-+++ /dev/null
-@@ -1,61 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"testing"
--
--	osContainers "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--	raxCDNContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers"
--	raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCDNContainers(t *testing.T) {
--	raxClient, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	createres := raxContainers.Create(raxClient, "gophercloud-test", nil)
--	th.AssertNoErr(t, createres.Err)
--	t.Logf("Headers from Create Container request: %+v\n", createres.Header)
--	defer func() {
--		res := raxContainers.Delete(raxClient, "gophercloud-test")
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	raxCDNClient, err := createClient(t, true)
--	th.AssertNoErr(t, err)
--
--	r := raxCDNContainers.Enable(raxCDNClient, "gophercloud-test", raxCDNContainers.EnableOpts{CDNEnabled: true, TTL: 900})
--	th.AssertNoErr(t, r.Err)
--	t.Logf("Headers from Enable CDN Container request: %+v\n", r.Header)
--
--	t.Logf("Container Names available to the currently issued token:")
--	count := 0
--	err = raxCDNContainers.List(raxCDNClient, &osContainers.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		names, err := raxCDNContainers.ExtractNames(page)
--		th.AssertNoErr(t, err)
--
--		for i, name := range names {
--			t.Logf("[%02d] %s", i, name)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No CDN containers listed for your current token.")
--	}
--
--	updateres := raxCDNContainers.Update(raxCDNClient, "gophercloud-test", raxCDNContainers.UpdateOpts{CDNEnabled: false})
--	th.AssertNoErr(t, updateres.Err)
--	t.Logf("Headers from Update CDN Container request: %+v\n", updateres.Header)
--
--	metadata, err := raxCDNContainers.Get(raxCDNClient, "gophercloud-test").ExtractMetadata()
--	th.AssertNoErr(t, err)
--	t.Logf("Metadata from Get CDN Container request (after update): %+v\n", metadata)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go
-deleted file mode 100644
-index 6e477ae..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/cdnobjects_test.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"bytes"
--	"testing"
--
--	raxCDNContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers"
--	raxCDNObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects"
--	raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers"
--	raxObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCDNObjects(t *testing.T) {
--	raxClient, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	createContResult := raxContainers.Create(raxClient, "gophercloud-test", nil)
--	th.AssertNoErr(t, createContResult.Err)
--	t.Logf("Headers from Create Container request: %+v\n", createContResult.Header)
--	defer func() {
--		deleteResult := raxContainers.Delete(raxClient, "gophercloud-test")
--		th.AssertNoErr(t, deleteResult.Err)
--	}()
--
--	header, err := raxObjects.Create(raxClient, "gophercloud-test", "test-object", bytes.NewBufferString("gophercloud cdn test"), nil).ExtractHeader()
--	th.AssertNoErr(t, err)
--	t.Logf("Headers from Create Object request: %+v\n", header)
--	defer func() {
--		deleteResult := raxObjects.Delete(raxClient, "gophercloud-test", "test-object", nil)
--		th.AssertNoErr(t, deleteResult.Err)
--	}()
--
--	raxCDNClient, err := createClient(t, true)
--	th.AssertNoErr(t, err)
--
--	enableResult := raxCDNContainers.Enable(raxCDNClient, "gophercloud-test", raxCDNContainers.EnableOpts{CDNEnabled: true, TTL: 900})
--	th.AssertNoErr(t, enableResult.Err)
--	t.Logf("Headers from Enable CDN Container request: %+v\n", enableResult.Header)
--
--	deleteResult := raxCDNObjects.Delete(raxCDNClient, "gophercloud-test", "test-object", nil)
--	th.AssertNoErr(t, deleteResult.Err)
--	t.Logf("Error from Delete CDN Object request: %+v\n", deleteResult.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go
-deleted file mode 100644
-index 1ae0727..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/common.go
-+++ /dev/null
-@@ -1,54 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"os"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/acceptance/tools"
--	"github.com/rackspace/gophercloud/rackspace"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func rackspaceAuthOptions(t *testing.T) gophercloud.AuthOptions {
--	// Obtain credentials from the environment.
--	options, err := rackspace.AuthOptionsFromEnv()
--	th.AssertNoErr(t, err)
--	options = tools.OnlyRS(options)
--
--	if options.Username == "" {
--		t.Fatal("Please provide a Rackspace username as RS_USERNAME.")
--	}
--	if options.APIKey == "" {
--		t.Fatal("Please provide a Rackspace API key as RS_API_KEY.")
--	}
--
--	return options
--}
--
--func createClient(t *testing.T, cdn bool) (*gophercloud.ServiceClient, error) {
--	region := os.Getenv("RS_REGION")
--	if region == "" {
--		t.Fatal("Please provide a Rackspace region as RS_REGION")
--	}
--
--	ao := rackspaceAuthOptions(t)
--
--	provider, err := rackspace.NewClient(ao.IdentityEndpoint)
--	th.AssertNoErr(t, err)
--
--	err = rackspace.Authenticate(provider, ao)
--	th.AssertNoErr(t, err)
--
--	if cdn {
--		return rackspace.NewObjectCDNV1(provider, gophercloud.EndpointOpts{
--			Region: region,
--		})
--	}
--
--	return rackspace.NewObjectStorageV1(provider, gophercloud.EndpointOpts{
--		Region: region,
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go
-deleted file mode 100644
-index a7339cf..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/containers_test.go
-+++ /dev/null
-@@ -1,85 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"testing"
--
--	osContainers "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--	raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestContainers(t *testing.T) {
--	c, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	t.Logf("Containers Info available to the currently issued token:")
--	count := 0
--	err = raxContainers.List(c, &osContainers.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		containers, err := raxContainers.ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		for i, container := range containers {
--			t.Logf("[%02d]      name=[%s]", i, container.Name)
--			t.Logf("            count=[%d]", container.Count)
--			t.Logf("            bytes=[%d]", container.Bytes)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No containers listed for your current token.")
--	}
--
--	t.Logf("Container Names available to the currently issued token:")
--	count = 0
--	err = raxContainers.List(c, &osContainers.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		names, err := raxContainers.ExtractNames(page)
--		th.AssertNoErr(t, err)
--
--		for i, name := range names {
--			t.Logf("[%02d] %s", i, name)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No containers listed for your current token.")
--	}
--
--	createres := raxContainers.Create(c, "gophercloud-test", nil)
--	th.AssertNoErr(t, createres.Err)
--	defer func() {
--		res := raxContainers.Delete(c, "gophercloud-test")
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	updateres := raxContainers.Update(c, "gophercloud-test", raxContainers.UpdateOpts{Metadata: map[string]string{"white": "mountains"}})
--	th.AssertNoErr(t, updateres.Err)
--	t.Logf("Headers from Update Container request: %+v\n", updateres.Header)
--	defer func() {
--		res := raxContainers.Update(c, "gophercloud-test", raxContainers.UpdateOpts{Metadata: map[string]string{"white": ""}})
--		th.AssertNoErr(t, res.Err)
--		metadata, err := raxContainers.Get(c, "gophercloud-test").ExtractMetadata()
--		th.AssertNoErr(t, err)
--		t.Logf("Metadata from Get Container request (after update reverted): %+v\n", metadata)
--		th.CheckEquals(t, metadata["White"], "")
--	}()
--
--	getres := raxContainers.Get(c, "gophercloud-test")
--	t.Logf("Headers from Get Container request (after update): %+v\n", getres.Header)
--	metadata, err := getres.ExtractMetadata()
--	th.AssertNoErr(t, err)
--	t.Logf("Metadata from Get Container request (after update): %+v\n", metadata)
--	th.CheckEquals(t, metadata["White"], "mountains")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go
-deleted file mode 100644
-index 462f284..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/objectstorage/v1/objects_test.go
-+++ /dev/null
-@@ -1,112 +0,0 @@
--// +build acceptance rackspace objectstorage v1
--
--package v1
--
--import (
--	"bytes"
--	"testing"
--
--	osObjects "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--	"github.com/rackspace/gophercloud/pagination"
--	raxContainers "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers"
--	raxObjects "github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestObjects(t *testing.T) {
--	c, err := createClient(t, false)
--	th.AssertNoErr(t, err)
--
--	res := raxContainers.Create(c, "gophercloud-test", nil)
--	th.AssertNoErr(t, res.Err)
--
--	defer func() {
--		res := raxContainers.Delete(c, "gophercloud-test")
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	content := bytes.NewBufferString("Lewis Carroll")
--	options := &osObjects.CreateOpts{ContentType: "text/plain"}
--	createres := raxObjects.Create(c, "gophercloud-test", "o1", content, options)
--	th.AssertNoErr(t, createres.Err)
--	defer func() {
--		res := raxObjects.Delete(c, "gophercloud-test", "o1", nil)
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	t.Logf("Objects Info available to the currently issued token:")
--	count := 0
--	err = raxObjects.List(c, "gophercloud-test", &osObjects.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		objects, err := raxObjects.ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		for i, object := range objects {
--			t.Logf("[%02d]      name=[%s]", i, object.Name)
--			t.Logf("            content-type=[%s]", object.ContentType)
--			t.Logf("            bytes=[%d]", object.Bytes)
--			t.Logf("            last-modified=[%s]", object.LastModified)
--			t.Logf("            hash=[%s]", object.Hash)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No objects listed for your current token.")
--	}
--	t.Logf("Container Names available to the currently issued token:")
--	count = 0
--	err = raxObjects.List(c, "gophercloud-test", &osObjects.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
--		t.Logf("--- Page %02d ---", count)
--
--		names, err := raxObjects.ExtractNames(page)
--		th.AssertNoErr(t, err)
--
--		for i, name := range names {
--			t.Logf("[%02d] %s", i, name)
--		}
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	if count == 0 {
--		t.Errorf("No objects listed for your current token.")
--	}
--
--	copyres := raxObjects.Copy(c, "gophercloud-test", "o1", &raxObjects.CopyOpts{Destination: "gophercloud-test/o2"})
--	th.AssertNoErr(t, copyres.Err)
--	defer func() {
--		res := raxObjects.Delete(c, "gophercloud-test", "o2", nil)
--		th.AssertNoErr(t, res.Err)
--	}()
--
--	o1Content, err := raxObjects.Download(c, "gophercloud-test", "o1", nil).ExtractContent()
--	th.AssertNoErr(t, err)
--	o2Content, err := raxObjects.Download(c, "gophercloud-test", "o2", nil).ExtractContent()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, string(o2Content), string(o1Content))
--
--	updateres := raxObjects.Update(c, "gophercloud-test", "o2", osObjects.UpdateOpts{Metadata: map[string]string{"white": "mountains"}})
--	th.AssertNoErr(t, updateres.Err)
--	t.Logf("Headers from Update Object request: %+v\n", updateres.Header)
--	defer func() {
--		res := raxObjects.Update(c, "gophercloud-test", "o2", osObjects.UpdateOpts{Metadata: map[string]string{"white": ""}})
--		th.AssertNoErr(t, res.Err)
--		metadata, err := raxObjects.Get(c, "gophercloud-test", "o2", nil).ExtractMetadata()
--		th.AssertNoErr(t, err)
--		t.Logf("Metadata from Get Object request (after update reverted): %+v\n", metadata)
--		th.CheckEquals(t, metadata["White"], "")
--	}()
--
--	getres := raxObjects.Get(c, "gophercloud-test", "o2", nil)
--	th.AssertNoErr(t, getres.Err)
--	t.Logf("Headers from Get Object request (after update): %+v\n", getres.Header)
--	metadata, err := getres.ExtractMetadata()
--	th.AssertNoErr(t, err)
--	t.Logf("Metadata from Get Object request (after update): %+v\n", metadata)
--	th.CheckEquals(t, metadata["White"], "mountains")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go
-deleted file mode 100644
-index 5d17b32..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package rackspace
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/pkg.go
-deleted file mode 100644
-index f7eca12..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/pkg.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package tools
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/tools.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/tools.go
-deleted file mode 100644
-index 61b1d7a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/tools/tools.go
-+++ /dev/null
-@@ -1,82 +0,0 @@
--// +build acceptance
--
--package tools
--
--import (
--	"crypto/rand"
--	"errors"
--	"os"
--	"time"
--
--	"github.com/rackspace/gophercloud"
--)
--
--// ErrTimeout is returned if WaitFor's predicate is not satisfied within 300 seconds.
--var ErrTimeout = errors.New("Timed out")
--
--// OnlyRS overrides the default Gophercloud behavior of using OS_-prefixed environment variables
--// if RS_ variables aren't present. Otherwise, they'll stomp over each other here in the acceptance
--// tests, where you need to have both defined.
--func OnlyRS(original gophercloud.AuthOptions) gophercloud.AuthOptions {
--	if os.Getenv("RS_AUTH_URL") == "" {
--		original.IdentityEndpoint = ""
--	}
--	if os.Getenv("RS_USERNAME") == "" {
--		original.Username = ""
--	}
--	if os.Getenv("RS_PASSWORD") == "" {
--		original.Password = ""
--	}
--	if os.Getenv("RS_API_KEY") == "" {
--		original.APIKey = ""
--	}
--	return original
--}
--
--// WaitFor polls a predicate function once per second to wait for a certain state to arrive.
--func WaitFor(predicate func() (bool, error)) error {
--	for i := 0; i < 300; i++ {
--		time.Sleep(1 * time.Second)
--
--		satisfied, err := predicate()
--		if err != nil {
--			return err
--		}
--		if satisfied {
--			return nil
--		}
--	}
--	return ErrTimeout
--}
--
--// MakeNewPassword generates a new string that's guaranteed to be different than the given one.
--func MakeNewPassword(oldPass string) string {
--	randomPassword := RandomString("", 16)
--	for randomPassword == oldPass {
--		randomPassword = RandomString("", 16)
--	}
--	return randomPassword
--}
--
--// RandomString returns the given prefix followed by n characters of random content.
--// All generated characters are drawn from the ASCII alphanumeric set.
--// (Implementation from Even Shaw's contribution on
--// http://stackoverflow.com/questions/12771930/what-is-the-fastest-way-to-generate-a-long-random-string-in-go).
--func RandomString(prefix string, n int) string {
--	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
--	var bytes = make([]byte, n)
--	rand.Read(bytes)
--	for i, b := range bytes {
--		bytes[i] = alphanum[b%byte(len(alphanum))]
--	}
--	return prefix + string(bytes)
--}
--
--// Elide returns the first bit of its input string with a suffix of "..." if it's longer than
--// a comfortable 40 characters.
--func Elide(value string) string {
--	if len(value) > 40 {
--		return value[0:37] + "..."
--	}
--	return value
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_options.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_options.go
-deleted file mode 100644
-index bc0ef65..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_options.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package gophercloud
--
--// AuthOptions allows anyone calling Authenticate to supply the required access
--// credentials. Its fields are the union of those recognized by each identity
--// implementation and provider.
--type AuthOptions struct {
--	// IdentityEndpoint specifies the HTTP endpoint that is required to work with
--	// the Identity API of the appropriate version. Required by the identity
--	// services, but often populated by a provider Client.
--	IdentityEndpoint string
--
--	// Username is required if using Identity V2 API. Consult with your provider's
--	// control panel to discover your account's username. In Identity V3, either
--	// UserID or a combination of Username and DomainID or DomainName.
--	Username, UserID string
--
--	// Exactly one of Password or ApiKey is required for the Identity V2 and V3
--	// APIs. Consult with your provider's control panel to discover your account's
--	// preferred method of authentication.
--	Password, APIKey string
--
--	// At most one of DomainID and DomainName must be provided if using Username
--	// with Identity V3. Otherwise, either are optional.
--	DomainID, DomainName string
--
--	// The TenantID and TenantName fields are optional for the Identity V2 API.
--	// Some providers allow you to specify a TenantName instead of the TenantId.
--	// Some require both.  Your provider's authentication policies will determine
--	// how these fields influence authentication.
--	TenantID, TenantName string
--
--	// AllowReauth should be set to true if you grant permission for Gophercloud to
--	// cache your credentials in memory, and to allow Gophercloud to attempt to
--	// re-authenticate automatically if/when your token expires.  If you set it to
--	// false, it will not cache these settings, but re-authentication will not be
--	// possible.  This setting defaults to false.
--	AllowReauth bool
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_results.go
-deleted file mode 100644
-index 1a1faa5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/auth_results.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--package gophercloud
--
--import "time"
--
--// AuthResults encapsulates the raw results from an authentication request. As OpenStack allows
--// extensions to influence the structure returned in ways that Gophercloud cannot predict at
--// compile-time, you should use type-safe accessors to work with the data represented by this type,
--// such as ServiceCatalog and TokenID.
--type AuthResults interface {
--	// TokenID returns the token's ID value from the authentication response.
--	TokenID() (string, error)
--
--	// ExpiresAt retrieves the token's expiration time.
--	ExpiresAt() (time.Time, error)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search.go
-deleted file mode 100644
-index b6f6b48..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search.go
-+++ /dev/null
-@@ -1,65 +0,0 @@
--package gophercloud
--
--import "errors"
--
--var (
--	// ErrServiceNotFound is returned when no service matches the EndpointOpts.
--	ErrServiceNotFound = errors.New("No suitable service could be found in the service catalog.")
--
--	// ErrEndpointNotFound is returned when no available endpoints match the provided EndpointOpts.
--	ErrEndpointNotFound = errors.New("No suitable endpoint could be found in the service catalog.")
--)
--
--// Availability indicates whether a specific service endpoint is accessible.
--// Identity v2 lists these as different kinds of URLs ("adminURL",
--// "internalURL", and "publicURL"), while v3 lists them as "Interfaces".
--type Availability string
--
--const (
--	// AvailabilityAdmin makes an endpoint only available to administrators.
--	AvailabilityAdmin Availability = "admin"
--
--	// AvailabilityPublic makes an endpoint available to everyone.
--	AvailabilityPublic Availability = "public"
--
--	// AvailabilityInternal makes an endpoint only available within the cluster.
--	AvailabilityInternal Availability = "internal"
--)
--
--// EndpointOpts contains options for finding an endpoint for an Openstack client.
--type EndpointOpts struct {
--	// Type is the service type for the client (e.g., "compute", "object-store").
--	// Required.
--	Type string
--
--	// Name is the service name for the client (e.g., "nova") as it appears in
--	// the service catalog. Services can have the same Type but a different Name,
--	// which is why both Type and Name are sometimes needed. Optional.
--	Name string
--
--	// Region is the geographic region in which the service resides. Required only
--	// for services that span multiple regions.
--	Region string
--
--	// Availability is the visibility of the endpoint to be returned. Valid types
--	// are: AvailabilityPublic, AvailabilityInternal, or AvailabilityAdmin.
--	// Availability is not required, and defaults to AvailabilityPublic.
--	// Not all providers or services offer all Availability options.
--	Availability Availability
--}
--
--// EndpointLocator is a function that describes how to locate a single endpoint
--// from a service catalog for a specific ProviderClient. It should be set
--// during ProviderClient authentication and used to discover related ServiceClients.
--type EndpointLocator func(EndpointOpts) (string, error)
--
--// ApplyDefaults sets EndpointOpts fields if not already set. Currently,
--// EndpointOpts.Availability defaults to the public endpoint.
--func (eo *EndpointOpts) ApplyDefaults(t string) {
--	if eo.Type == "" {
--		eo.Type = t
--	}
--	if eo.Availability == "" {
--		eo.Availability = AvailabilityPublic
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search_test.go
-deleted file mode 100644
-index 3457453..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/endpoint_search_test.go
-+++ /dev/null
-@@ -1,19 +0,0 @@
--package gophercloud
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestApplyDefaultsToEndpointOpts(t *testing.T) {
--	eo := EndpointOpts{Availability: AvailabilityPublic}
--	eo.ApplyDefaults("compute")
--	expected := EndpointOpts{Availability: AvailabilityPublic, Type: "compute"}
--	th.CheckDeepEquals(t, expected, eo)
--
--	eo = EndpointOpts{Type: "compute"}
--	eo.ApplyDefaults("object-store")
--	expected = EndpointOpts{Availability: AvailabilityPublic, Type: "compute"}
--	th.CheckDeepEquals(t, expected, eo)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/auth_env.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/auth_env.go
-deleted file mode 100644
-index a4402b6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/auth_env.go
-+++ /dev/null
-@@ -1,58 +0,0 @@
--package openstack
--
--import (
--	"fmt"
--	"os"
--
--	"github.com/rackspace/gophercloud"
--)
--
--var nilOptions = gophercloud.AuthOptions{}
--
--// ErrNoAuthURL, ErrNoUsername, and ErrNoPassword indicate that the required OS_AUTH_URL, OS_USERNAME, or OS_PASSWORD
--// environment variable, respectively, remains undefined. See the AuthOptionsFromEnv() function for more details.
--var (
--	ErrNoAuthURL  = fmt.Errorf("Environment variable OS_AUTH_URL needs to be set.")
--	ErrNoUsername = fmt.Errorf("Environment variable OS_USERNAME needs to be set.")
--	ErrNoPassword = fmt.Errorf("Environment variable OS_PASSWORD needs to be set.")
--)
--
--// AuthOptionsFromEnv fills out a gophercloud.AuthOptions structure with the settings found in the various OpenStack
--// OS_* environment variables.  The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME,
--// OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME.  Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must
--// have settings, or an error will result.  OS_TENANT_ID and OS_TENANT_NAME are optional.
--func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
--	authURL := os.Getenv("OS_AUTH_URL")
--	username := os.Getenv("OS_USERNAME")
--	userID := os.Getenv("OS_USERID")
--	password := os.Getenv("OS_PASSWORD")
--	tenantID := os.Getenv("OS_TENANT_ID")
--	tenantName := os.Getenv("OS_TENANT_NAME")
--	domainID := os.Getenv("OS_DOMAIN_ID")
--	domainName := os.Getenv("OS_DOMAIN_NAME")
--
--	if authURL == "" {
--		return nilOptions, ErrNoAuthURL
--	}
--
--	if username == "" && userID == "" {
--		return nilOptions, ErrNoUsername
--	}
--
--	if password == "" {
--		return nilOptions, ErrNoPassword
--	}
--
--	ao := gophercloud.AuthOptions{
--		IdentityEndpoint: authURL,
--		UserID:           userID,
--		Username:         username,
--		Password:         password,
--		TenantID:         tenantID,
--		TenantName:       tenantName,
--		DomainID:         domainID,
--		DomainName:       domainName,
--	}
--
--	return ao, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go
-deleted file mode 100644
-index e3af39f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package apiversions provides information and interaction with the different
--// API versions for the OpenStack Block Storage service, code-named Cinder.
--package apiversions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go
-deleted file mode 100644
-index 016bf37..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--package apiversions
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// List lists all the Cinder API versions available to end-users.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page {
--		return APIVersionPage{pagination.SinglePageBase(r)}
--	})
--}
--
--// Get will retrieve the volume type with the provided ID. To extract the volume
--// type from the result, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, v string) GetResult {
--	var res GetResult
--	_, err := perigee.Request("GET", getURL(client, v), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--		Results:     &res.Body,
--	})
--	res.Err = err
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go
-deleted file mode 100644
-index 56b5e4f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests_test.go
-+++ /dev/null
-@@ -1,145 +0,0 @@
--package apiversions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListVersions(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `{
--			"versions": [
--				{
--					"status": "CURRENT",
--					"updated": "2012-01-04T11:33:21Z",
--					"id": "v1.0",
--					"links": [
--						{
--							"href": "http://23.253.228.211:8776/v1/",
--							"rel": "self"
--						}
--					]
--			    },
--				{
--					"status": "CURRENT",
--					"updated": "2012-11-21T11:33:21Z",
--					"id": "v2.0",
--					"links": [
--						{
--							"href": "http://23.253.228.211:8776/v2/",
--							"rel": "self"
--						}
--					]
--				}
--			]
--		}`)
--	})
--
--	count := 0
--
--	List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractAPIVersions(page)
--		if err != nil {
--			t.Errorf("Failed to extract API versions: %v", err)
--			return false, err
--		}
--
--		expected := []APIVersion{
--			APIVersion{
--				ID:      "v1.0",
--				Status:  "CURRENT",
--				Updated: "2012-01-04T11:33:21Z",
--			},
--			APIVersion{
--				ID:      "v2.0",
--				Status:  "CURRENT",
--				Updated: "2012-11-21T11:33:21Z",
--			},
--		}
--
--		th.AssertDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestAPIInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v1/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `{
--			"version": {
--				"status": "CURRENT",
--				"updated": "2012-01-04T11:33:21Z",
--				"media-types": [
--					{
--						"base": "application/xml",
--						"type": "application/vnd.openstack.volume+xml;version=1"
--					},
--					{
--						"base": "application/json",
--						"type": "application/vnd.openstack.volume+json;version=1"
--					}
--				],
--				"id": "v1.0",
--				"links": [
--					{
--						"href": "http://23.253.228.211:8776/v1/",
--						"rel": "self"
--					},
--					{
--						"href": "http://jorgew.github.com/block-storage-api/content/os-block-storage-1.0.pdf",
--						"type": "application/pdf",
--						"rel": "describedby"
--					},
--					{
--						"href": "http://docs.rackspacecloud.com/servers/api/v1.1/application.wadl",
--						"type": "application/vnd.sun.wadl+xml",
--						"rel": "describedby"
--					}
--				]
--			}
--		}`)
--	})
--
--	actual, err := Get(client.ServiceClient(), "v1").Extract()
--	if err != nil {
--		t.Errorf("Failed to extract version: %v", err)
--	}
--
--	expected := APIVersion{
--		ID:      "v1.0",
--		Status:  "CURRENT",
--		Updated: "2012-01-04T11:33:21Z",
--	}
--
--	th.AssertEquals(t, actual.ID, expected.ID)
--	th.AssertEquals(t, actual.Status, expected.Status)
--	th.AssertEquals(t, actual.Updated, expected.Updated)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go
-deleted file mode 100644
-index 7b0df11..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/results.go
-+++ /dev/null
-@@ -1,58 +0,0 @@
--package apiversions
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// APIVersion represents an API version for Cinder.
--type APIVersion struct {
--	ID      string `json:"id" mapstructure:"id"`           // unique identifier
--	Status  string `json:"status" mapstructure:"status"`   // current status
--	Updated string `json:"updated" mapstructure:"updated"` // date last updated
--}
--
--// APIVersionPage is the page returned by a pager when traversing over a
--// collection of API versions.
--type APIVersionPage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty checks whether an APIVersionPage struct is empty.
--func (r APIVersionPage) IsEmpty() (bool, error) {
--	is, err := ExtractAPIVersions(r)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractAPIVersions takes a collection page, extracts all of the elements,
--// and returns them a slice of APIVersion structs. It is effectively a cast.
--func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) {
--	var resp struct {
--		Versions []APIVersion `mapstructure:"versions"`
--	}
--
--	err := mapstructure.Decode(page.(APIVersionPage).Body, &resp)
--
--	return resp.Versions, err
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts an API version resource.
--func (r GetResult) Extract() (*APIVersion, error) {
--	var resp struct {
--		Version *APIVersion `mapstructure:"version"`
--	}
--
--	err := mapstructure.Decode(r.Body, &resp)
--
--	return resp.Version, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go
-deleted file mode 100644
-index 56f8260..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--package apiversions
--
--import (
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--)
--
--func getURL(c *gophercloud.ServiceClient, version string) string {
--	return c.ServiceURL(strings.TrimRight(version, "/") + "/")
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go
-deleted file mode 100644
-index 37e9142..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package apiversions
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "v1")
--	expected := endpoint + "v1/"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go
-deleted file mode 100644
-index 198f830..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/doc.go
-+++ /dev/null
-@@ -1,5 +0,0 @@
--// Package snapshots provides information and interaction with snapshots in the
--// OpenStack Block Storage service. A snapshot is a point-in-time copy of the
--// data contained in an external storage volume, and can be controlled
--// programmatically.
--package snapshots
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go
-deleted file mode 100644
-index d1461fb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/fixtures.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--package snapshots
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func MockListResponse(t *testing.T) {
--	th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--    {
--      "snapshots": [
--        {
--          "id": "289da7f8-6440-407c-9fb4-7db01ec49164",
--          "display_name": "snapshot-001"
--        },
--        {
--          "id": "96c3bda7-c82a-4f50-be73-ca7621794835",
--          "display_name": "snapshot-002"
--        }
--      ]
--    }
--    `)
--	})
--}
--
--func MockGetResponse(t *testing.T) {
--	th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--{
--    "snapshot": {
--        "display_name": "snapshot-001",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--      `)
--	})
--}
--
--func MockCreateResponse(t *testing.T) {
--	th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "snapshot": {
--        "volume_id": "1234",
--        "display_name": "snapshot-001"
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "snapshot": {
--        "volume_id": "1234",
--        "display_name": "snapshot-001",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--    `)
--	})
--}
--
--func MockUpdateMetadataResponse(t *testing.T) {
--	th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestJSONRequest(t, r, `
--    {
--      "metadata": {
--        "key": "v1"
--      }
--    }
--    `)
--
--		fmt.Fprintf(w, `
--      {
--        "metadata": {
--          "key": "v1"
--        }
--      }
--    `)
--	})
--}
--
--func MockDeleteResponse(t *testing.T) {
--	th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go
-deleted file mode 100644
-index 443f696..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go
-+++ /dev/null
-@@ -1,188 +0,0 @@
--package snapshots
--
--import (
--	"fmt"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToSnapshotCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts contains options for creating a Snapshot. This object is passed to
--// the snapshots.Create function. For more information about these parameters,
--// see the Snapshot object.
--type CreateOpts struct {
--	// OPTIONAL
--	Description string
--	// OPTIONAL
--	Force bool
--	// OPTIONAL
--	Metadata map[string]interface{}
--	// OPTIONAL
--	Name string
--	// REQUIRED
--	VolumeID string
--}
--
--// ToSnapshotCreateMap assembles a request body based on the contents of a
--// CreateOpts.
--func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) {
--	s := make(map[string]interface{})
--
--	if opts.VolumeID == "" {
--		return nil, fmt.Errorf("Required CreateOpts field 'VolumeID' not set.")
--	}
--	s["volume_id"] = opts.VolumeID
--
--	if opts.Description != "" {
--		s["display_description"] = opts.Description
--	}
--	if opts.Force == true {
--		s["force"] = opts.Force
--	}
--	if opts.Metadata != nil {
--		s["metadata"] = opts.Metadata
--	}
--	if opts.Name != "" {
--		s["display_name"] = opts.Name
--	}
--
--	return map[string]interface{}{"snapshot": s}, nil
--}
--
--// Create will create a new Snapshot based on the values in CreateOpts. To
--// extract the Snapshot object from the response, call the Extract method on the
--// CreateResult.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToSnapshotCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200, 201},
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--	})
--	return res
--}
--
--// Delete will delete the existing Snapshot with the provided ID.
--func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202, 204},
--	})
--	return res
--}
--
--// Get retrieves the Snapshot with the provided ID. To extract the Snapshot
--// object from the response, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(client, id), perigee.Options{
--		Results:     &res.Body,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// ListOptsBuilder allows extensions to add additional parameters to the List
--// request.
--type ListOptsBuilder interface {
--	ToSnapshotListQuery() (string, error)
--}
--
--// ListOpts hold options for listing Snapshots. It is passed to the
--// snapshots.List function.
--type ListOpts struct {
--	Name     string `q:"display_name"`
--	Status   string `q:"status"`
--	VolumeID string `q:"volume_id"`
--}
--
--// ToSnapshotListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToSnapshotListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List returns Snapshots optionally limited by the conditions provided in
--// ListOpts.
--func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(client)
--	if opts != nil {
--		query, err := opts.ToSnapshotListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return ListResult{pagination.SinglePageBase(r)}
--	}
--	return pagination.NewPager(client, url, createPage)
--}
--
--// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
--// the Update request.
--type UpdateMetadataOptsBuilder interface {
--	ToSnapshotUpdateMetadataMap() (map[string]interface{}, error)
--}
--
--// UpdateMetadataOpts contain options for updating an existing Snapshot. This
--// object is passed to the snapshots.Update function. For more information
--// about the parameters, see the Snapshot object.
--type UpdateMetadataOpts struct {
--	Metadata map[string]interface{}
--}
--
--// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of
--// an UpdateMetadataOpts.
--func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) {
--	v := make(map[string]interface{})
--
--	if opts.Metadata != nil {
--		v["metadata"] = opts.Metadata
--	}
--
--	return v, nil
--}
--
--// UpdateMetadata will update the Snapshot with provided information. To
--// extract the updated Snapshot from the response, call the ExtractMetadata
--// method on the UpdateMetadataResult.
--func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) UpdateMetadataResult {
--	var res UpdateMetadataResult
--
--	reqBody, err := opts.ToSnapshotUpdateMetadataMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("PUT", updateMetadataURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--	})
--	return res
--}
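Before its removal, the snapshots request layer above was driven through CreateOpts plus the result types' Extract methods. A minimal usage sketch follows; it is not part of the patch, the client value is assumed to be an authenticated Block Storage v1 *gophercloud.ServiceClient, and the volume ID and display name are placeholders.

    package example

    import (
            "fmt"

            "github.com/rackspace/gophercloud"
            "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
    )

    // snapshotVolume creates a snapshot of the given volume and prints its ID.
    // VolumeID is the only required CreateOpts field.
    func snapshotVolume(client *gophercloud.ServiceClient, volumeID string) error {
            snap, err := snapshots.Create(client, snapshots.CreateOpts{
                    VolumeID: volumeID,
                    Name:     "nightly-backup",
            }).Extract()
            if err != nil {
                    return err
            }
            fmt.Println("created snapshot", snap.ID)
            return nil
    }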
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go
-deleted file mode 100644
-index d0f9e88..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests_test.go
-+++ /dev/null
-@@ -1,104 +0,0 @@
--package snapshots
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockListResponse(t)
--
--	count := 0
--
--	List(client.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractSnapshots(page)
--		if err != nil {
--			t.Errorf("Failed to extract snapshots: %v", err)
--			return false, err
--		}
--
--		expected := []Snapshot{
--			Snapshot{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "snapshot-001",
--			},
--			Snapshot{
--				ID:   "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name: "snapshot-002",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockGetResponse(t)
--
--	v, err := Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, v.Name, "snapshot-001")
--	th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockCreateResponse(t)
--
--	options := CreateOpts{VolumeID: "1234", Name: "snapshot-001"}
--	n, err := Create(client.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.VolumeID, "1234")
--	th.AssertEquals(t, n.Name, "snapshot-001")
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestUpdateMetadata(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockUpdateMetadataResponse(t)
--
--	expected := map[string]interface{}{"key": "v1"}
--
--	options := &UpdateMetadataOpts{
--		Metadata: map[string]interface{}{
--			"key": "v1",
--		},
--	}
--
--	actual, err := UpdateMetadata(client.ServiceClient(), "123", options).ExtractMetadata()
--
--	th.AssertNoErr(t, err)
--	th.AssertDeepEquals(t, actual, expected)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockDeleteResponse(t)
--
--	res := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go
-deleted file mode 100644
-index e595798..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/results.go
-+++ /dev/null
-@@ -1,123 +0,0 @@
--package snapshots
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Snapshot contains all the information associated with an OpenStack Snapshot.
--type Snapshot struct {
--	// Current status of the Snapshot.
--	Status string `mapstructure:"status"`
--
--	// Display name.
--	Name string `mapstructure:"display_name"`
--
--	// Instances onto which the Snapshot is attached.
--	Attachments []string `mapstructure:"attachments"`
--
--	// Logical group.
--	AvailabilityZone string `mapstructure:"availability_zone"`
--
--	// Is the Snapshot bootable?
--	Bootable string `mapstructure:"bootable"`
--
--	// Date created.
--	CreatedAt string `mapstructure:"created_at"`
--
--	// Display description.
--	Description string `mapstructure:"display_discription"`
--
--	// See VolumeType object for more information.
--	VolumeType string `mapstructure:"volume_type"`
--
--	// ID of the Snapshot from which this Snapshot was created.
--	SnapshotID string `mapstructure:"snapshot_id"`
--
--	// ID of the Volume from which this Snapshot was created.
--	VolumeID string `mapstructure:"volume_id"`
--
--	// User-defined key-value pairs.
--	Metadata map[string]string `mapstructure:"metadata"`
--
--	// Unique identifier.
--	ID string `mapstructure:"id"`
--
--	// Size of the Snapshot, in GB.
--	Size int `mapstructure:"size"`
--}
--
--// CreateResult contains the response body and error from a Create request.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult contains the response body and error from a Get request.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult contains the response body and error from a Delete request.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// ListResult is a pagination.Pager that is returned from a call to the List function.
--type ListResult struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty returns true if a ListResult contains no Snapshots.
--func (r ListResult) IsEmpty() (bool, error) {
--	volumes, err := ExtractSnapshots(r)
--	if err != nil {
--		return true, err
--	}
--	return len(volumes) == 0, nil
--}
--
--// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call.
--func ExtractSnapshots(page pagination.Page) ([]Snapshot, error) {
--	var response struct {
--		Snapshots []Snapshot `json:"snapshots"`
--	}
--
--	err := mapstructure.Decode(page.(ListResult).Body, &response)
--	return response.Snapshots, err
--}
--
--// UpdateMetadataResult contains the response body and error from an UpdateMetadata request.
--type UpdateMetadataResult struct {
--	commonResult
--}
--
--// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata.
--func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	m := r.Body.(map[string]interface{})["metadata"]
--	return m.(map[string]interface{}), nil
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract will get the Snapshot object out of the commonResult object.
--func (r commonResult) Extract() (*Snapshot, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Snapshot *Snapshot `json:"snapshot"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Snapshot, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go
-deleted file mode 100644
-index 4d635e8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--package snapshots
--
--import "github.com/rackspace/gophercloud"
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("snapshots")
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("snapshots", id)
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return deleteURL(c, id)
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return createURL(c)
--}
--
--func metadataURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("snapshots", id, "metadata")
--}
--
--func updateMetadataURL(c *gophercloud.ServiceClient, id string) string {
--	return metadataURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go
-deleted file mode 100644
-index feacf7f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/urls_test.go
-+++ /dev/null
-@@ -1,50 +0,0 @@
--package snapshots
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "snapshots"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "snapshots/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "snapshots/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "snapshots"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestMetadataURL(t *testing.T) {
--	actual := metadataURL(endpointClient(), "foo")
--	expected := endpoint + "snapshots/foo/metadata"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestUpdateMetadataURL(t *testing.T) {
--	actual := updateMetadataURL(endpointClient(), "foo")
--	expected := endpoint + "snapshots/foo/metadata"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go
-deleted file mode 100644
-index 64cdc60..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util.go
-+++ /dev/null
-@@ -1,22 +0,0 @@
--package snapshots
--
--import (
--	"github.com/rackspace/gophercloud"
--)
--
--// WaitForStatus will continually poll the resource, checking for a particular
--// status. It will do this for the amount of seconds defined.
--func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
--	return gophercloud.WaitFor(secs, func() (bool, error) {
--		current, err := Get(c, id).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		if current.Status == status {
--			return true, nil
--		}
--
--		return false, nil
--	})
--}
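WaitForStatus above simply wraps gophercloud.WaitFor around repeated Get calls. A sketch of how it was typically combined with a create call (again assuming an authenticated client; the 60-second bound and the snapshot ID are placeholders):

    package example

    import (
            "github.com/rackspace/gophercloud"
            "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
    )

    // waitAvailable blocks for up to 60 seconds until the snapshot with the
    // given ID reports the "available" status, or returns an error.
    func waitAvailable(client *gophercloud.ServiceClient, snapshotID string) error {
            return snapshots.WaitForStatus(client, snapshotID, "available", 60)
    }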
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util_test.go
-deleted file mode 100644
-index a4c4c82..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/util_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package snapshots
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--	"time"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestWaitForStatus(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/snapshots/1234", func(w http.ResponseWriter, r *http.Request) {
--		time.Sleep(2 * time.Second)
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--		{
--			"snapshot": {
--				"display_name": "snapshot-001",
--				"id": "1234",
--				"status":"available"
--			}
--		}`)
--	})
--
--	err := WaitForStatus(client.ServiceClient(), "1234", "available", 0)
--	if err == nil {
--		t.Errorf("Expected error: 'Time Out in WaitFor'")
--	}
--
--	err = WaitForStatus(client.ServiceClient(), "1234", "available", 3)
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go
-deleted file mode 100644
-index 307b8b1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go
-+++ /dev/null
-@@ -1,5 +0,0 @@
--// Package volumes provides information and interaction with volumes in the
--// OpenStack Block Storage service. A volume is a detachable block storage
--// device, akin to a USB hard drive. It can only be attached to one instance at
--// a time.
--package volumes
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/fixtures.go
-deleted file mode 100644
-index a01ad05..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/fixtures.go
-+++ /dev/null
-@@ -1,105 +0,0 @@
--package volumes
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func MockListResponse(t *testing.T) {
--	th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--  {
--    "volumes": [
--      {
--        "id": "289da7f8-6440-407c-9fb4-7db01ec49164",
--        "display_name": "vol-001"
--      },
--      {
--        "id": "96c3bda7-c82a-4f50-be73-ca7621794835",
--        "display_name": "vol-002"
--      }
--    ]
--  }
--  `)
--	})
--}
--
--func MockGetResponse(t *testing.T) {
--	th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--{
--    "volume": {
--        "display_name": "vol-001",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--      `)
--	})
--}
--
--func MockCreateResponse(t *testing.T) {
--	th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "volume": {
--        "size": 75
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "volume": {
--        "size": 4,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--    `)
--	})
--}
--
--func MockDeleteResponse(t *testing.T) {
--	th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--func MockUpdateResponse(t *testing.T) {
--	th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--    {
--      "volume": {
--        "display_name": "vol-002",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--        }
--    }
--    `)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go
-deleted file mode 100644
-index f4332de..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go
-+++ /dev/null
-@@ -1,217 +0,0 @@
--package volumes
--
--import (
--	"fmt"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToVolumeCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts contains options for creating a Volume. This object is passed to
--// the volumes.Create function. For more information about these parameters,
--// see the Volume object.
--type CreateOpts struct {
--	// OPTIONAL
--	Availability string
--	// OPTIONAL
--	Description string
--	// OPTIONAL
--	Metadata map[string]string
--	// OPTIONAL
--	Name string
--	// REQUIRED
--	Size int
--	// OPTIONAL
--	SnapshotID, SourceVolID, ImageID string
--	// OPTIONAL
--	VolumeType string
--}
--
--// ToVolumeCreateMap assembles a request body based on the contents of a
--// CreateOpts.
--func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {
--	v := make(map[string]interface{})
--
--	if opts.Size == 0 {
--		return nil, fmt.Errorf("Required CreateOpts field 'Size' not set.")
--	}
--	v["size"] = opts.Size
--
--	if opts.Availability != "" {
--		v["availability_zone"] = opts.Availability
--	}
--	if opts.Description != "" {
--		v["display_description"] = opts.Description
--	}
--	if opts.ImageID != "" {
--		v["imageRef"] = opts.ImageID
--	}
--	if opts.Metadata != nil {
--		v["metadata"] = opts.Metadata
--	}
--	if opts.Name != "" {
--		v["display_name"] = opts.Name
--	}
--	if opts.SourceVolID != "" {
--		v["source_volid"] = opts.SourceVolID
--	}
--	if opts.SnapshotID != "" {
--		v["snapshot_id"] = opts.SnapshotID
--	}
--	if opts.VolumeType != "" {
--		v["volume_type"] = opts.VolumeType
--	}
--
--	return map[string]interface{}{"volume": v}, nil
--}
--
--// Create will create a new Volume based on the values in CreateOpts. To extract
--// the Volume object from the response, call the Extract method on the
--// CreateResult.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToVolumeCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201},
--	})
--	return res
--}
--
--// Delete will delete the existing Volume with the provided ID.
--func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202, 204},
--	})
--	return res
--}
--
--// Get retrieves the Volume with the provided ID. To extract the Volume object
--// from the response, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(client, id), perigee.Options{
--		Results:     &res.Body,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// ListOptsBuilder allows extensions to add additional parameters to the List
--// request.
--type ListOptsBuilder interface {
--	ToVolumeListQuery() (string, error)
--}
--
--// ListOpts holds options for listing Volumes. It is passed to the volumes.List
--// function.
--type ListOpts struct {
--	// admin-only option. Set it to true to see all tenant volumes.
--	AllTenants bool `q:"all_tenants"`
--	// List only volumes that contain Metadata.
--	Metadata map[string]string `q:"metadata"`
--	// List only volumes that have Name as the display name.
--	Name string `q:"name"`
--	// List only volumes that have a status of Status.
--	Status string `q:"status"`
--}
--
--// ToVolumeListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToVolumeListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List returns Volumes optionally limited by the conditions provided in ListOpts.
--func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(client)
--	if opts != nil {
--		query, err := opts.ToVolumeListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return ListResult{pagination.SinglePageBase(r)}
--	}
--	return pagination.NewPager(client, url, createPage)
--}
--
--// UpdateOptsBuilder allows extensions to add additional parameters to the
--// Update request.
--type UpdateOptsBuilder interface {
--	ToVolumeUpdateMap() (map[string]interface{}, error)
--}
--
--// UpdateOpts contain options for updating an existing Volume. This object is passed
--// to the volumes.Update function. For more information about the parameters, see
--// the Volume object.
--type UpdateOpts struct {
--	// OPTIONAL
--	Name string
--	// OPTIONAL
--	Description string
--	// OPTIONAL
--	Metadata map[string]string
--}
--
--// ToVolumeUpdateMap assembles a request body based on the contents of an
--// UpdateOpts.
--func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
--	v := make(map[string]interface{})
--
--	if opts.Description != "" {
--		v["display_description"] = opts.Description
--	}
--	if opts.Metadata != nil {
--		v["metadata"] = opts.Metadata
--	}
--	if opts.Name != "" {
--		v["display_name"] = opts.Name
--	}
--
--	return map[string]interface{}{"volume": v}, nil
--}
--
--// Update will update the Volume with provided information. To extract the updated
--// Volume from the response, call the Extract method on the UpdateResult.
--func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--
--	reqBody, err := opts.ToVolumeUpdateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("PUT", updateURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--	})
--	return res
--}
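The volumes request layer mirrors the snapshots one: a CreateOpts struct is turned into a request body and the response is pulled out with Extract. An illustrative sketch, not part of the patch, assuming an authenticated Block Storage v1 client and a made-up helper name:

    package example

    import (
            "fmt"

            "github.com/rackspace/gophercloud"
            "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
    )

    // createVolume provisions a 75 GB volume; Size is the only required field.
    func createVolume(client *gophercloud.ServiceClient) error {
            vol, err := volumes.Create(client, volumes.CreateOpts{
                    Size: 75,
                    Name: "scratch-volume",
            }).Extract()
            if err != nil {
                    return err
            }
            fmt.Println("created volume", vol.ID)
            return nil
    }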
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go
-deleted file mode 100644
-index 067f89b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests_test.go
-+++ /dev/null
-@@ -1,95 +0,0 @@
--package volumes
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockListResponse(t)
--
--	count := 0
--
--	List(client.ServiceClient(), &ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVolumes(page)
--		if err != nil {
--			t.Errorf("Failed to extract volumes: %v", err)
--			return false, err
--		}
--
--		expected := []Volume{
--			Volume{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "vol-001",
--			},
--			Volume{
--				ID:   "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name: "vol-002",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockGetResponse(t)
--
--	v, err := Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, v.Name, "vol-001")
--	th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockCreateResponse(t)
--
--	options := &CreateOpts{Size: 75}
--	n, err := Create(client.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Size, 4)
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockDeleteResponse(t)
--
--	res := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockUpdateResponse(t)
--
--	options := UpdateOpts{Name: "vol-002"}
--	v, err := Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, "vol-002", v.Name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go
-deleted file mode 100644
-index c6ddbb5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go
-+++ /dev/null
-@@ -1,113 +0,0 @@
--package volumes
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Volume contains all the information associated with an OpenStack Volume.
--type Volume struct {
--	// Current status of the volume.
--	Status string `mapstructure:"status"`
--
--	// Human-readable display name for the volume.
--	Name string `mapstructure:"display_name"`
--
--	// Instances onto which the volume is attached.
--	Attachments []string `mapstructure:"attachments"`
--
--	// This parameter is no longer used.
--	AvailabilityZone string `mapstructure:"availability_zone"`
--
--	// Indicates whether this is a bootable volume.
--	Bootable string `mapstructure:"bootable"`
--
--	// The date when this volume was created.
--	CreatedAt string `mapstructure:"created_at"`
--
--	// Human-readable description for the volume.
--	Description string `mapstructure:"display_discription"`
--
--	// The type of volume to create, either SATA or SSD.
--	VolumeType string `mapstructure:"volume_type"`
--
--	// The ID of the snapshot from which the volume was created
--	SnapshotID string `mapstructure:"snapshot_id"`
--
--	// The ID of another block storage volume from which the current volume was created
--	SourceVolID string `mapstructure:"source_volid"`
--
--	// Arbitrary key-value pairs defined by the user.
--	Metadata map[string]string `mapstructure:"metadata"`
--
--	// Unique identifier for the volume.
--	ID string `mapstructure:"id"`
--
--	// Size of the volume in GB.
--	Size int `mapstructure:"size"`
--}
--
--// CreateResult contains the response body and error from a Create request.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult contains the response body and error from a Get request.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult contains the response body and error from a Delete request.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// ListResult is a pagination.Pager that is returned from a call to the List function.
--type ListResult struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty returns true if a ListResult contains no Volumes.
--func (r ListResult) IsEmpty() (bool, error) {
--	volumes, err := ExtractVolumes(r)
--	if err != nil {
--		return true, err
--	}
--	return len(volumes) == 0, nil
--}
--
--// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.
--func ExtractVolumes(page pagination.Page) ([]Volume, error) {
--	var response struct {
--		Volumes []Volume `json:"volumes"`
--	}
--
--	err := mapstructure.Decode(page.(ListResult).Body, &response)
--	return response.Volumes, err
--}
--
--// UpdateResult contains the response body and error from an Update request.
--type UpdateResult struct {
--	commonResult
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract will get the Volume object out of the commonResult object.
--func (r commonResult) Extract() (*Volume, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Volume *Volume `json:"volume"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Volume, err
--}
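ExtractVolumes above is the hook used when paging through a volumes.List call, as the package's own tests show. A rough sketch of that pattern (assumptions as before: an authenticated client, a hypothetical helper name, and a status filter chosen purely for illustration):

    package example

    import (
            "fmt"

            "github.com/rackspace/gophercloud"
            "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
            "github.com/rackspace/gophercloud/pagination"
    )

    // listAvailableVolumes walks every page of volumes in the "available"
    // state and prints a one-line summary of each.
    func listAvailableVolumes(client *gophercloud.ServiceClient) error {
            return volumes.List(client, &volumes.ListOpts{Status: "available"}).EachPage(
                    func(page pagination.Page) (bool, error) {
                            vols, err := volumes.ExtractVolumes(page)
                            if err != nil {
                                    return false, err
                            }
                            for _, v := range vols {
                                    fmt.Println(v.ID, v.Name, v.Size)
                            }
                            return true, nil
                    })
    }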
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go
-deleted file mode 100644
-index 29629a1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package volumes
--
--import "github.com/rackspace/gophercloud"
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("volumes")
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return createURL(c)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("volumes", id)
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return deleteURL(c, id)
--}
--
--func updateURL(c *gophercloud.ServiceClient, id string) string {
--	return deleteURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go
-deleted file mode 100644
-index a95270e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls_test.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--package volumes
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "volumes"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "volumes"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "volumes/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "volumes/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "volumes/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go
-deleted file mode 100644
-index 1dda695..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go
-+++ /dev/null
-@@ -1,22 +0,0 @@
--package volumes
--
--import (
--	"github.com/rackspace/gophercloud"
--)
--
--// WaitForStatus will continually poll the resource, checking for a particular
--// status. It will do this for the amount of seconds defined.
--func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
--	return gophercloud.WaitFor(secs, func() (bool, error) {
--		current, err := Get(c, id).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		if current.Status == status {
--			return true, nil
--		}
--
--		return false, nil
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util_test.go
-deleted file mode 100644
-index 24ef3b6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package volumes
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--	"time"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestWaitForStatus(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/volumes/1234", func(w http.ResponseWriter, r *http.Request) {
--		time.Sleep(3 * time.Second)
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--		{
--			"volume": {
--				"display_name": "vol-001",
--				"id": "1234",
--				"status":"available"
--			}
--		}`)
--	})
--
--	err := WaitForStatus(client.ServiceClient(), "1234", "available", 0)
--	if err == nil {
--		t.Errorf("Expected error: 'Time Out in WaitFor'")
--	}
--
--	err = WaitForStatus(client.ServiceClient(), "1234", "available", 6)
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go
-deleted file mode 100644
-index 793084f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go
-+++ /dev/null
-@@ -1,9 +0,0 @@
--// Package volumetypes provides information and interaction with volume types
--// in the OpenStack Block Storage service. A volume type indicates the type of
--// a block storage volume, such as SATA, SCSI, SSD, etc. These can be
--// customized or defined by the OpenStack admin.
--//
--// You can also define extra_specs associated with your volume types. For
--// instance, you could have a VolumeType=SATA, with extra_specs (RPM=10000,
--// RAID-Level=5). Extra_specs are defined and customized by the admin.
--package volumetypes
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go
-deleted file mode 100644
-index e3326ea..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/fixtures.go
-+++ /dev/null
-@@ -1,60 +0,0 @@
--package volumetypes
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func MockListResponse(t *testing.T) {
--	th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--    {
--      "volume_types": [
--        {
--          "id": "289da7f8-6440-407c-9fb4-7db01ec49164",
--          "name": "vol-type-001",
--          "extra_specs": {
--            "capabilities": "gpu"
--            }
--        },
--        {
--          "id": "96c3bda7-c82a-4f50-be73-ca7621794835",
--          "name": "vol-type-002",
--          "extra_specs": {}
--        }
--      ]
--    }
--    `)
--	})
--}
--
--func MockGetResponse(t *testing.T) {
--	th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--{
--    "volume_type": {
--        "name": "vol-type-001",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--    "extra_specs": {
--      "serverNumber": "2"
--    }
--    }
--}
--      `)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go
-deleted file mode 100644
-index 87e20f6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go
-+++ /dev/null
-@@ -1,87 +0,0 @@
--package volumetypes
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToVolumeTypeCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts are options for creating a volume type.
--type CreateOpts struct {
--	// OPTIONAL. See VolumeType.
--	ExtraSpecs map[string]interface{}
--	// OPTIONAL. See VolumeType.
--	Name string
--}
--
--// ToVolumeTypeCreateMap casts a CreateOpts struct to a map.
--func (opts CreateOpts) ToVolumeTypeCreateMap() (map[string]interface{}, error) {
--	vt := make(map[string]interface{})
--
--	if opts.ExtraSpecs != nil {
--		vt["extra_specs"] = opts.ExtraSpecs
--	}
--	if opts.Name != "" {
--		vt["name"] = opts.Name
--	}
--
--	return map[string]interface{}{"volume_type": vt}, nil
--}
--
--// Create will create a new volume type. To extract the created volume type object,
--// call the Extract method on the CreateResult.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToVolumeTypeCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200, 201},
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--	})
--	return res
--}
--
--// Delete will delete the volume type with the provided ID.
--func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--	return res
--}
--
--// Get will retrieve the volume type with the provided ID. To extract the volume
--// type from the result, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, err := perigee.Request("GET", getURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--		Results:     &res.Body,
--	})
--	res.Err = err
--	return res
--}
--
--// List returns all volume types.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return ListResult{pagination.SinglePageBase(r)}
--	}
--
--	return pagination.NewPager(client, listURL(client), createPage)
--}
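As with volumes, volume types are read back through a single-page pager and the ExtractVolumeTypes helper in results.go. A short sketch under the same assumptions (authenticated client, invented helper name), not part of the patch:

    package example

    import (
            "fmt"

            "github.com/rackspace/gophercloud"
            "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes"
            "github.com/rackspace/gophercloud/pagination"
    )

    // listVolumeTypes prints every volume type known to the endpoint, together
    // with its admin-defined extra_specs.
    func listVolumeTypes(client *gophercloud.ServiceClient) error {
            return volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
                    types, err := volumetypes.ExtractVolumeTypes(page)
                    if err != nil {
                            return false, err
                    }
                    for _, vt := range types {
                            fmt.Println(vt.ID, vt.Name, vt.ExtraSpecs)
                    }
                    return true, nil
            })
    }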
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go
-deleted file mode 100644
-index 8d40bfe..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests_test.go
-+++ /dev/null
-@@ -1,118 +0,0 @@
--package volumetypes
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockListResponse(t)
--
--	count := 0
--
--	List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVolumeTypes(page)
--		if err != nil {
--			t.Errorf("Failed to extract volume types: %v", err)
--			return false, err
--		}
--
--		expected := []VolumeType{
--			VolumeType{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "vol-type-001",
--				ExtraSpecs: map[string]interface{}{
--					"capabilities": "gpu",
--				},
--			},
--			VolumeType{
--				ID:         "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name:       "vol-type-002",
--				ExtraSpecs: map[string]interface{}{},
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	MockGetResponse(t)
--
--	vt, err := Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertDeepEquals(t, vt.ExtraSpecs, map[string]interface{}{"serverNumber": "2"})
--	th.AssertEquals(t, vt.Name, "vol-type-001")
--	th.AssertEquals(t, vt.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "volume_type": {
--        "name": "vol-type-001"
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "volume_type": {
--        "name": "vol-type-001",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--		`)
--	})
--
--	options := &CreateOpts{Name: "vol-type-001"}
--	n, err := Create(client.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Name, "vol-type-001")
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		w.WriteHeader(http.StatusAccepted)
--	})
--
--	err := Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractErr()
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go
-deleted file mode 100644
-index c049a04..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/results.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package volumetypes
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// VolumeType contains all information associated with an OpenStack Volume Type.
--type VolumeType struct {
--	ExtraSpecs map[string]interface{} `json:"extra_specs" mapstructure:"extra_specs"` // user-defined metadata
--	ID         string                 `json:"id" mapstructure:"id"`                   // unique identifier
--	Name       string                 `json:"name" mapstructure:"name"`               // display name
--}
--
--// CreateResult contains the response body and error from a Create request.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult contains the response body and error from a Get request.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult contains the response error from a Delete request.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// ListResult is a pagination.Pager that is returned from a call to the List function.
--type ListResult struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty returns true if a ListResult contains no Volume Types.
--func (r ListResult) IsEmpty() (bool, error) {
--	volumeTypes, err := ExtractVolumeTypes(r)
--	if err != nil {
--		return true, err
--	}
--	return len(volumeTypes) == 0, nil
--}
--
--// ExtractVolumeTypes extracts and returns Volume Types.
--func ExtractVolumeTypes(page pagination.Page) ([]VolumeType, error) {
--	var response struct {
--		VolumeTypes []VolumeType `mapstructure:"volume_types"`
--	}
--
--	err := mapstructure.Decode(page.(ListResult).Body, &response)
--	return response.VolumeTypes, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract will get the Volume Type object out of the commonResult object.
--func (r commonResult) Extract() (*VolumeType, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		VolumeType *VolumeType `json:"volume_type" mapstructure:"volume_type"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.VolumeType, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go
-deleted file mode 100644
-index cf8367b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go
-+++ /dev/null
-@@ -1,19 +0,0 @@
--package volumetypes
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("types")
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return listURL(c)
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("types", id)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return getURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go
-deleted file mode 100644
-index 44016e2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/urls_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package volumetypes
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "types"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "types"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "types/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "types/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go
-deleted file mode 100644
-index 99b3d46..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go
-+++ /dev/null
-@@ -1,205 +0,0 @@
--package openstack
--
--import (
--	"fmt"
--	"net/url"
--
--	"github.com/rackspace/gophercloud"
--	tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--	tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
--	"github.com/rackspace/gophercloud/openstack/utils"
--)
--
--const (
--	v20 = "v2.0"
--	v30 = "v3.0"
--)
--
--// NewClient prepares an unauthenticated ProviderClient instance.
--// Most users will probably prefer using the AuthenticatedClient function instead.
--// This is useful if you wish to explicitly control the version of the identity service used for authentication,
--// for example.
--func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
--	u, err := url.Parse(endpoint)
--	if err != nil {
--		return nil, err
--	}
--	hadPath := u.Path != ""
--	u.Path, u.RawQuery, u.Fragment = "", "", ""
--	base := u.String()
--
--	endpoint = gophercloud.NormalizeURL(endpoint)
--	base = gophercloud.NormalizeURL(base)
--
--	if hadPath {
--		return &gophercloud.ProviderClient{
--			IdentityBase:     base,
--			IdentityEndpoint: endpoint,
--		}, nil
--	}
--
--	return &gophercloud.ProviderClient{
--		IdentityBase:     base,
--		IdentityEndpoint: "",
--	}, nil
--}
--
--// AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint specified by options, acquires a token, and
--// returns a Client instance that's ready to operate.
--// It first queries the root identity endpoint to determine which versions of the identity service are supported, then chooses
--// the most recent identity service available to proceed.
--func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
--	client, err := NewClient(options.IdentityEndpoint)
--	if err != nil {
--		return nil, err
--	}
--
--	err = Authenticate(client, options)
--	if err != nil {
--		return nil, err
--	}
--	return client, nil
--}
--
--// Authenticate or re-authenticate against the most recent identity service supported at the provided endpoint.
--func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
--	versions := []*utils.Version{
--		&utils.Version{ID: v20, Priority: 20, Suffix: "/v2.0/"},
--		&utils.Version{ID: v30, Priority: 30, Suffix: "/v3/"},
--	}
--
--	chosen, endpoint, err := utils.ChooseVersion(client.IdentityBase, client.IdentityEndpoint, versions)
--	if err != nil {
--		return err
--	}
--
--	switch chosen.ID {
--	case v20:
--		return v2auth(client, endpoint, options)
--	case v30:
--		return v3auth(client, endpoint, options)
--	default:
--		// The switch statement must be out of date from the versions list.
--		return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
--	}
--}
--
--// AuthenticateV2 explicitly authenticates against the identity v2 endpoint.
--func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
--	return v2auth(client, "", options)
--}
--
--func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error {
--	v2Client := NewIdentityV2(client)
--	if endpoint != "" {
--		v2Client.Endpoint = endpoint
--	}
--
--	result := tokens2.Create(v2Client, tokens2.AuthOptions{AuthOptions: options})
--
--	token, err := result.ExtractToken()
--	if err != nil {
--		return err
--	}
--
--	catalog, err := result.ExtractServiceCatalog()
--	if err != nil {
--		return err
--	}
--
--	client.TokenID = token.ID
--	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
--		return V2EndpointURL(catalog, opts)
--	}
--
--	return nil
--}
--
--// AuthenticateV3 explicitly authenticates against the identity v3 service.
--func AuthenticateV3(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
--	return v3auth(client, "", options)
--}
--
--func v3auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error {
--	// Override the generated service endpoint with the one returned by the version endpoint.
--	v3Client := NewIdentityV3(client)
--	if endpoint != "" {
--		v3Client.Endpoint = endpoint
--	}
--
--	token, err := tokens3.Create(v3Client, options, nil).Extract()
--	if err != nil {
--		return err
--	}
--	client.TokenID = token.ID
--
--	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
--		return V3EndpointURL(v3Client, opts)
--	}
--
--	return nil
--}
--
--// NewIdentityV2 creates a ServiceClient that may be used to interact with the v2 identity service.
--func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient {
--	v2Endpoint := client.IdentityBase + "v2.0/"
--
--	return &gophercloud.ServiceClient{
--		ProviderClient: client,
--		Endpoint:       v2Endpoint,
--	}
--}
--
--// NewIdentityV3 creates a ServiceClient that may be used to access the v3 identity service.
--func NewIdentityV3(client *gophercloud.ProviderClient) *gophercloud.ServiceClient {
--	v3Endpoint := client.IdentityBase + "v3/"
--
--	return &gophercloud.ServiceClient{
--		ProviderClient: client,
--		Endpoint:       v3Endpoint,
--	}
--}
--
--// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 object storage package.
--func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("object-store")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--	return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil
--}
--
--// NewComputeV2 creates a ServiceClient that may be used with the v2 compute package.
--func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("compute")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--	return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil
--}
--
--// NewNetworkV2 creates a ServiceClient that may be used with the v2 network package.
--func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("network")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--	return &gophercloud.ServiceClient{
--		ProviderClient: client,
--		Endpoint:       url,
--		ResourceBase:   url + "v2.0/",
--	}, nil
--}
--
--// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 block storage service.
--func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("volume")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--	return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil
--}
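
For orientation, a hedged sketch of how the client constructors above were typically consumed: authenticate once to obtain a ProviderClient, then derive per-service clients from it. The credentials and region are placeholders, and the Region field on EndpointOpts is assumed from the wider gophercloud API rather than shown in this hunk.

    package main

    import (
    	"fmt"

    	"github.com/rackspace/gophercloud"
    	"github.com/rackspace/gophercloud/openstack"
    )

    func main() {
    	opts := gophercloud.AuthOptions{
    		IdentityEndpoint: "http://keystone.example.com:5000/", // placeholder
    		Username:         "demo",                              // placeholder
    		Password:         "secret",                            // placeholder
    	}

    	// Picks the newest supported identity version (v2.0 or v3) and authenticates.
    	provider, err := openstack.AuthenticatedClient(opts)
    	if err != nil {
    		panic(err)
    	}

    	// Derive a compute client from the provider's service catalog.
    	compute, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{Region: "RegionOne"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("compute endpoint:", compute.Endpoint)
    }

The same pattern applies to NewNetworkV2, NewBlockStorageV1 and NewObjectStorageV1 above: each simply resolves an endpoint through the EndpointLocator and wraps it in a ServiceClient.
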
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client_test.go
-deleted file mode 100644
-index 257260c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client_test.go
-+++ /dev/null
-@@ -1,161 +0,0 @@
--package openstack
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAuthenticatedClientV3(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	const ID = "0123456789"
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintf(w, `
--			{
--				"versions": {
--					"values": [
--						{
--							"status": "stable",
--							"id": "v3.0",
--							"links": [
--								{ "href": "%s", "rel": "self" }
--							]
--						},
--						{
--							"status": "stable",
--							"id": "v2.0",
--							"links": [
--								{ "href": "%s", "rel": "self" }
--							]
--						}
--					]
--				}
--			}
--		`, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/")
--	})
--
--	th.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("X-Subject-Token", ID)
--
--		w.WriteHeader(http.StatusCreated)
--		fmt.Fprintf(w, `{ "token": { "expires_at": "2013-02-02T18:30:59.000000Z" } }`)
--	})
--
--	options := gophercloud.AuthOptions{
--		UserID:           "me",
--		Password:         "secret",
--		IdentityEndpoint: th.Endpoint(),
--	}
--	client, err := AuthenticatedClient(options)
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, ID, client.TokenID)
--}
--
--func TestAuthenticatedClientV2(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintf(w, `
--			{
--				"versions": {
--					"values": [
--						{
--							"status": "experimental",
--							"id": "v3.0",
--							"links": [
--								{ "href": "%s", "rel": "self" }
--							]
--						},
--						{
--							"status": "stable",
--							"id": "v2.0",
--							"links": [
--								{ "href": "%s", "rel": "self" }
--							]
--						}
--					]
--				}
--			}
--		`, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/")
--	})
--
--	th.Mux.HandleFunc("/v2.0/tokens", func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintf(w, `
--			{
--				"access": {
--					"token": {
--						"id": "01234567890",
--						"expires": "2014-10-01T10:00:00.000000Z"
--					},
--					"serviceCatalog": [
--						{
--							"name": "Cloud Servers",
--							"type": "compute",
--							"endpoints": [
--								{
--									"tenantId": "t1000",
--									"publicURL": "https://compute.north.host.com/v1/t1000",
--									"internalURL": "https://compute.north.internal/v1/t1000",
--									"region": "North",
--									"versionId": "1",
--									"versionInfo": "https://compute.north.host.com/v1/",
--									"versionList": "https://compute.north.host.com/"
--								},
--								{
--									"tenantId": "t1000",
--									"publicURL": "https://compute.north.host.com/v1.1/t1000",
--									"internalURL": "https://compute.north.internal/v1.1/t1000",
--									"region": "North",
--									"versionId": "1.1",
--									"versionInfo": "https://compute.north.host.com/v1.1/",
--									"versionList": "https://compute.north.host.com/"
--								}
--							],
--							"endpoints_links": []
--						},
--						{
--							"name": "Cloud Files",
--							"type": "object-store",
--							"endpoints": [
--								{
--									"tenantId": "t1000",
--									"publicURL": "https://storage.north.host.com/v1/t1000",
--									"internalURL": "https://storage.north.internal/v1/t1000",
--									"region": "North",
--									"versionId": "1",
--									"versionInfo": "https://storage.north.host.com/v1/",
--									"versionList": "https://storage.north.host.com/"
--								},
--								{
--									"tenantId": "t1000",
--									"publicURL": "https://storage.south.host.com/v1/t1000",
--									"internalURL": "https://storage.south.internal/v1/t1000",
--									"region": "South",
--									"versionId": "1",
--									"versionInfo": "https://storage.south.host.com/v1/",
--									"versionList": "https://storage.south.host.com/"
--								}
--							]
--						}
--					]
--				}
--			}
--		`)
--	})
--
--	options := gophercloud.AuthOptions{
--		Username:         "me",
--		Password:         "secret",
--		IdentityEndpoint: th.Endpoint(),
--	}
--	client, err := AuthenticatedClient(options)
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, "01234567890", client.TokenID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/README.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/README.md
-deleted file mode 100644
-index 7b55795..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/README.md
-+++ /dev/null
-@@ -1,3 +0,0 @@
--# Common Resources
--
--This directory is for resources that are shared by multiple services.
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go
-deleted file mode 100644
-index 4a168f4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/doc.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--// Package extensions provides information and interaction with the different extensions available
--// for an OpenStack service.
--//
--// The purpose of OpenStack API extensions is to:
--//
--// - Introduce new features in the API without requiring a version change.
--// - Introduce vendor-specific niche functionality.
--// - Act as a proving ground for experimental functionalities that might be included in a future
--//   version of the API.
--//
--// Extensions usually have tags that prevent conflicts with other extensions that define attributes
--// or resources with the same names, and with core resources and attributes.
--// Because an extension might not be supported by all plug-ins, its availability varies with deployments
--// and the specific plug-in.
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go
-deleted file mode 100644
-index aeec0fa..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/errors.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go
-deleted file mode 100644
-index 0ed7de9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/fixtures.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--// +build fixtures
--
--package extensions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ListOutput provides a single page of Extension results.
--const ListOutput = `
--{
--	"extensions": [
--		{
--			"updated": "2013-01-20T00:00:00-00:00",
--			"name": "Neutron Service Type Management",
--			"links": [],
--			"namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--			"alias": "service-type",
--			"description": "API for retrieving service providers for Neutron advanced services"
--		}
--	]
--}`
--
--// GetOutput provides a single Extension result.
--const GetOutput = `
--{
--	"extension": {
--		"updated": "2013-02-03T10:00:00-00:00",
--		"name": "agent",
--		"links": [],
--		"namespace": "http://docs.openstack.org/ext/agent/api/v2.0",
--		"alias": "agent",
--		"description": "The agent management extension."
--	}
--}
--`
--
--// ListedExtension is the Extension that should be parsed from ListOutput.
--var ListedExtension = Extension{
--	Updated:     "2013-01-20T00:00:00-00:00",
--	Name:        "Neutron Service Type Management",
--	Links:       []interface{}{},
--	Namespace:   "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--	Alias:       "service-type",
--	Description: "API for retrieving service providers for Neutron advanced services",
--}
--
--// ExpectedExtensions is a slice containing the Extension that should be parsed from ListOutput.
--var ExpectedExtensions = []Extension{ListedExtension}
--
--// SingleExtension is the Extension that should be parsed from GetOutput.
--var SingleExtension = &Extension{
--	Updated:     "2013-02-03T10:00:00-00:00",
--	Name:        "agent",
--	Links:       []interface{}{},
--	Namespace:   "http://docs.openstack.org/ext/agent/api/v2.0",
--	Alias:       "agent",
--	Description: "The agent management extension.",
--}
--
--// HandleListExtensionsSuccessfully creates an HTTP handler at `/extensions` on the test handler
--// mux that responds with a list containing a single extension.
--func HandleListExtensionsSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--
--		fmt.Fprintf(w, ListOutput)
--	})
--}
--
--// HandleGetExtensionSuccessfully creates an HTTP handler at `/extensions/agent` that responds with
--// a JSON payload corresponding to SingleExtension.
--func HandleGetExtensionSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, GetOutput)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go
-deleted file mode 100644
-index 3ca6e12..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package extensions
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Get retrieves information for a specific extension using its alias.
--func Get(c *gophercloud.ServiceClient, alias string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", ExtensionURL(c, alias), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// List returns a Pager which allows you to iterate over the full collection of extensions.
--// It does not accept query parameters.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return pagination.NewPager(c, ListExtensionURL(c), func(r pagination.PageResult) pagination.Page {
--		return ExtensionPage{pagination.SinglePageBase(r)}
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go
-deleted file mode 100644
-index 6550283..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package extensions
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListExtensionsSuccessfully(t)
--
--	count := 0
--
--	List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--		th.AssertDeepEquals(t, ExpectedExtensions, actual)
--
--		return true, nil
--	})
--
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleGetExtensionSuccessfully(t)
--
--	actual, err := Get(client.ServiceClient(), "agent").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, SingleExtension, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go
-deleted file mode 100644
-index 777d083..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/results.go
-+++ /dev/null
-@@ -1,65 +0,0 @@
--package extensions
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// GetResult temporarily stores the result of a Get call.
--// Use its Extract() method to interpret it as an Extension.
--type GetResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets a GetResult as an Extension.
--func (r GetResult) Extract() (*Extension, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Extension *Extension `json:"extension"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Extension, err
--}
--
--// Extension is a struct that represents an OpenStack extension.
--type Extension struct {
--	Updated     string        `json:"updated" mapstructure:"updated"`
--	Name        string        `json:"name" mapstructure:"name"`
--	Links       []interface{} `json:"links" mapstructure:"links"`
--	Namespace   string        `json:"namespace" mapstructure:"namespace"`
--	Alias       string        `json:"alias" mapstructure:"alias"`
--	Description string        `json:"description" mapstructure:"description"`
--}
--
--// ExtensionPage is the page returned by a pager when traversing over a collection of extensions.
--type ExtensionPage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty checks whether an ExtensionPage struct is empty.
--func (r ExtensionPage) IsEmpty() (bool, error) {
--	is, err := ExtractExtensions(r)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the
--// elements into a slice of Extension structs.
--// In other words, a generic collection is mapped into a relevant slice.
--func ExtractExtensions(page pagination.Page) ([]Extension, error) {
--	var resp struct {
--		Extensions []Extension `mapstructure:"extensions"`
--	}
--
--	err := mapstructure.Decode(page.(ExtensionPage).Body, &resp)
--
--	return resp.Extensions, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go
-deleted file mode 100644
-index 6460c66..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package extensions
--
--import "github.com/rackspace/gophercloud"
--
--// ExtensionURL generates the URL for an extension resource by name.
--func ExtensionURL(c *gophercloud.ServiceClient, name string) string {
--	return c.ServiceURL("extensions", name)
--}
--
--// ListExtensionURL generates the URL for the extensions resource collection.
--func ListExtensionURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("extensions")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go
-deleted file mode 100644
-index 3223b1c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package extensions
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestExtensionURL(t *testing.T) {
--	actual := ExtensionURL(endpointClient(), "agent")
--	expected := endpoint + "extensions/agent"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListExtensionURL(t *testing.T) {
--	actual := ListExtensionURL(endpointClient())
--	expected := endpoint + "extensions"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go
-deleted file mode 100644
-index 5a976d1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go
-+++ /dev/null
-@@ -1,111 +0,0 @@
--package bootfromvolume
--
--import (
--	"errors"
--	"strconv"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--
--	"github.com/racker/perigee"
--)
--
--// SourceType represents the type of medium being used to create the volume.
--type SourceType string
--
--const (
--	Volume   SourceType = "volume"
--	Snapshot SourceType = "snapshot"
--	Image    SourceType = "image"
--)
--
--// BlockDevice is a structure with options for booting a server instance
--// from a volume. The volume may be created from an image, snapshot, or another
--// volume.
--type BlockDevice struct {
--	// BootIndex [optional] is the boot index. It defaults to 0.
--	BootIndex int `json:"boot_index"`
--
--	// DeleteOnTermination [optional] specifies whether or not to delete the attached volume
--	// when the server is deleted. Defaults to `false`.
--	DeleteOnTermination bool `json:"delete_on_termination"`
--
--	// DestinationType [optional] is the type that gets created. Possible values are "volume"
--	// and "local".
--	DestinationType string `json:"destination_type"`
--
--	// SourceType [required] must be one of: "volume", "snapshot", "image".
--	SourceType SourceType `json:"source_type"`
--
--	// UUID [required] is the unique identifier for the volume, snapshot, or image (see above)
--	UUID string `json:"uuid"`
--
--	// VolumeSize [optional] is the size of the volume to create (in gigabytes).
--	VolumeSize int `json:"volume_size"`
--}
--
--// CreateOptsExt is a structure that extends the server `CreateOpts` structure
--// by allowing for a block device mapping.
--type CreateOptsExt struct {
--	servers.CreateOptsBuilder
--	BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"`
--}
--
--// ToServerCreateMap adds the block device mapping option to the base server
--// creation options.
--func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
--	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
--	if err != nil {
--		return nil, err
--	}
--
--	if len(opts.BlockDevice) == 0 {
--		return nil, errors.New("Required fields UUID and SourceType not set.")
--	}
--
--	serverMap := base["server"].(map[string]interface{})
--
--	blockDevice := make([]map[string]interface{}, len(opts.BlockDevice))
--
--	for i, bd := range opts.BlockDevice {
--		if string(bd.SourceType) == "" {
--			return nil, errors.New("SourceType must be one of: volume, image, snapshot.")
--		}
--
--		blockDevice[i] = make(map[string]interface{})
--
--		blockDevice[i]["source_type"] = bd.SourceType
--		blockDevice[i]["boot_index"] = strconv.Itoa(bd.BootIndex)
--		blockDevice[i]["delete_on_termination"] = strconv.FormatBool(bd.DeleteOnTermination)
--		blockDevice[i]["volume_size"] = strconv.Itoa(bd.VolumeSize)
--		if bd.UUID != "" {
--			blockDevice[i]["uuid"] = bd.UUID
--		}
--		if bd.DestinationType != "" {
--			blockDevice[i]["destination_type"] = bd.DestinationType
--		}
--
--	}
--	serverMap["block_device_mapping_v2"] = blockDevice
--
--	return base, nil
--}
--
--// Create requests the creation of a server from the given block device mapping.
--func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) servers.CreateResult {
--	var res servers.CreateResult
--
--	reqBody, err := opts.ToServerCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 202},
--	})
--	return res
--}
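
A rough usage sketch for the boot-from-volume extension deleted above: wrap the plain server CreateOpts in CreateOptsExt with a block device mapping and pass it to this package's Create. The IDs are placeholders, `client` is assumed to be an authenticated compute ServiceClient, and the Extract call on the returned servers.CreateResult comes from the servers package, which is not part of this hunk.

    package main

    import (
    	"github.com/rackspace/gophercloud"
    	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
    	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
    )

    func bootFromImageBackedVolume(client *gophercloud.ServiceClient) (*servers.Server, error) {
    	base := servers.CreateOpts{
    		Name:      "bfv-example",    // placeholder
    		FlavorRef: "performance1-1", // placeholder flavor
    	}

    	opts := bootfromvolume.CreateOptsExt{
    		CreateOptsBuilder: base,
    		BlockDevice: []bootfromvolume.BlockDevice{{
    			UUID:                "11111111-2222-3333-4444-555555555555", // placeholder image ID
    			SourceType:          bootfromvolume.Image,
    			DestinationType:     "volume",
    			VolumeSize:          10,
    			DeleteOnTermination: true,
    		}},
    	}

    	// POSTs to os-volumes_boot (see createURL below) with the merged request body.
    	return bootfromvolume.Create(client, opts).Extract()
    }
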
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go
-deleted file mode 100644
-index 5bf9137..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests_test.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package bootfromvolume
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCreateOpts(t *testing.T) {
--	base := servers.CreateOpts{
--		Name:      "createdserver",
--		ImageRef:  "asdfasdfasdf",
--		FlavorRef: "performance1-1",
--	}
--
--	ext := CreateOptsExt{
--		CreateOptsBuilder: base,
--		BlockDevice: []BlockDevice{
--			BlockDevice{
--				UUID:            "123456",
--				SourceType:      Image,
--				DestinationType: "volume",
--				VolumeSize:      10,
--			},
--		},
--	}
--
--	expected := `
--    {
--      "server": {
--        "name": "createdserver",
--        "imageRef": "asdfasdfasdf",
--        "flavorRef": "performance1-1",
--        "block_device_mapping_v2":[
--          {
--            "uuid":"123456",
--            "source_type":"image",
--            "destination_type":"volume",
--            "boot_index": "0",
--            "delete_on_termination": "false",
--            "volume_size": "10"
--          }
--        ]
--      }
--    }
--  `
--	actual, err := ext.ToServerCreateMap()
--	th.AssertNoErr(t, err)
--	th.CheckJSONEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
-deleted file mode 100644
-index f60329f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
-+++ /dev/null
-@@ -1,10 +0,0 @@
--package bootfromvolume
--
--import (
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// CreateResult temporarily contains the response from a Create call.
--type CreateResult struct {
--	os.CreateResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
-deleted file mode 100644
-index 0cffe25..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--package bootfromvolume
--
--import "github.com/rackspace/gophercloud"
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("os-volumes_boot")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go
-deleted file mode 100644
-index 6ee6477..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls_test.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package bootfromvolume
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestCreateURL(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	c := client.ServiceClient()
--
--	th.CheckEquals(t, c.Endpoint+"os-volumes_boot", createURL(c))
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go
-deleted file mode 100644
-index 1007909..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package extensions
--
--import (
--	"github.com/rackspace/gophercloud"
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractExtensions interprets a Page as a slice of Extensions.
--func ExtractExtensions(page pagination.Page) ([]common.Extension, error) {
--	return common.ExtractExtensions(page)
--}
--
--// Get retrieves information for a specific extension using its alias.
--func Get(c *gophercloud.ServiceClient, alias string) common.GetResult {
--	return common.Get(c, alias)
--}
--
--// List returns a Pager which allows you to iterate over the full collection of extensions.
--// It does not accept query parameters.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return common.List(c)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go
-deleted file mode 100644
-index c3c525f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/delegate_test.go
-+++ /dev/null
-@@ -1,96 +0,0 @@
--package extensions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--
--		fmt.Fprintf(w, `
--{
--		"extensions": [
--				{
--						"updated": "2013-01-20T00:00:00-00:00",
--						"name": "Neutron Service Type Management",
--						"links": [],
--						"namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--						"alias": "service-type",
--						"description": "API for retrieving service providers for Neutron advanced services"
--				}
--		]
--}
--			`)
--	})
--
--	count := 0
--	List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--
--		expected := []common.Extension{
--			common.Extension{
--				Updated:     "2013-01-20T00:00:00-00:00",
--				Name:        "Neutron Service Type Management",
--				Links:       []interface{}{},
--				Namespace:   "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--				Alias:       "service-type",
--				Description: "API for retrieving service providers for Neutron advanced services",
--			},
--		}
--		th.AssertDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--		"extension": {
--				"updated": "2013-02-03T10:00:00-00:00",
--				"name": "agent",
--				"links": [],
--				"namespace": "http://docs.openstack.org/ext/agent/api/v2.0",
--				"alias": "agent",
--				"description": "The agent management extension."
--		}
--}
--		`)
--	})
--
--	ext, err := Get(client.ServiceClient(), "agent").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00")
--	th.AssertEquals(t, ext.Name, "agent")
--	th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0")
--	th.AssertEquals(t, ext.Alias, "agent")
--	th.AssertEquals(t, ext.Description, "The agent management extension.")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go
-deleted file mode 100644
-index 80785fa..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package diskconfig provides information and interaction with the Disk
--// Config extension that works with the OpenStack Compute service.
--package diskconfig
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go
-deleted file mode 100644
-index 7407e0d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--package diskconfig
--
--import (
--	"errors"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// DiskConfig represents one of the two possible settings for the DiskConfig option when creating,
--// rebuilding, or resizing servers: Auto or Manual.
--type DiskConfig string
--
--const (
--	// Auto builds a server with a single partition the size of the target flavor disk and
--	// automatically adjusts the filesystem to fit the entire partition. Auto may only be used with
--	// images and servers that use a single EXT3 partition.
--	Auto DiskConfig = "AUTO"
--
--	// Manual builds a server using whatever partition scheme and filesystem are present in the source
--	// image. If the target flavor disk is larger, the remaining space is left unpartitioned. This
--	// enables images to have non-EXT3 filesystems, multiple partitions, and so on, and enables you
--	// to manage the disk configuration. It also results in slightly shorter boot times.
--	Manual DiskConfig = "MANUAL"
--)
--
--// ErrInvalidDiskConfig is returned if an invalid string is specified for a DiskConfig option.
--var ErrInvalidDiskConfig = errors.New("DiskConfig must be either diskconfig.Auto or diskconfig.Manual.")
--
--// validate ensures that a DiskConfig contains an appropriate value.
--func (config DiskConfig) validate() error {
--	switch config {
--	case Auto, Manual:
--		return nil
--	default:
--		return ErrInvalidDiskConfig
--	}
--}
--
--// CreateOptsExt adds a DiskConfig option to the base CreateOpts.
--type CreateOptsExt struct {
--	servers.CreateOptsBuilder
--
--	// DiskConfig [optional] controls how the created server's disk is partitioned.
--	DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"`
--}
--
--// ToServerCreateMap adds the diskconfig option to the base server creation options.
--func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
--	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
--	if err != nil {
--		return nil, err
--	}
--
--	if string(opts.DiskConfig) == "" {
--		return base, nil
--	}
--
--	serverMap := base["server"].(map[string]interface{})
--	serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig)
--
--	return base, nil
--}
--
--// RebuildOptsExt adds a DiskConfig option to the base RebuildOpts.
--type RebuildOptsExt struct {
--	servers.RebuildOptsBuilder
--
--	// DiskConfig [optional] controls how the rebuilt server's disk is partitioned.
--	DiskConfig DiskConfig
--}
--
--// ToServerRebuildMap adds the diskconfig option to the base server rebuild options.
--func (opts RebuildOptsExt) ToServerRebuildMap() (map[string]interface{}, error) {
--	err := opts.DiskConfig.validate()
--	if err != nil {
--		return nil, err
--	}
--
--	base, err := opts.RebuildOptsBuilder.ToServerRebuildMap()
--	if err != nil {
--		return nil, err
--	}
--
--	serverMap := base["rebuild"].(map[string]interface{})
--	serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig)
--
--	return base, nil
--}
--
--// ResizeOptsExt adds a DiskConfig option to the base server resize options.
--type ResizeOptsExt struct {
--	servers.ResizeOptsBuilder
--
--	// DiskConfig [optional] controls how the resized server's disk is partitioned.
--	DiskConfig DiskConfig
--}
--
--// ToServerResizeMap adds the diskconfig option to the base server creation options.
--func (opts ResizeOptsExt) ToServerResizeMap() (map[string]interface{}, error) {
--	err := opts.DiskConfig.validate()
--	if err != nil {
--		return nil, err
--	}
--
--	base, err := opts.ResizeOptsBuilder.ToServerResizeMap()
--	if err != nil {
--		return nil, err
--	}
--
--	serverMap := base["resize"].(map[string]interface{})
--	serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig)
--
--	return base, nil
--}
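
Similarly, a brief sketch of how the disk-config wrapper above composes with ordinary server creation. servers.Create and the Extract on its result live in the servers package and are not shown in this hunk, so treat them as assumptions; the name and IDs are placeholders.

    package main

    import (
    	"github.com/rackspace/gophercloud"
    	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
    	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
    )

    func createWithManualDisk(client *gophercloud.ServiceClient) (*servers.Server, error) {
    	opts := diskconfig.CreateOptsExt{
    		CreateOptsBuilder: servers.CreateOpts{
    			Name:      "manual-disk-server",                   // placeholder
    			ImageRef:  "11111111-2222-3333-4444-555555555555", // placeholder
    			FlavorRef: "performance1-1",                       // placeholder
    		},
    		// Manual preserves the source image's partition layout (see the constants above).
    		DiskConfig: diskconfig.Manual,
    	}
    	return servers.Create(client, opts).Extract()
    }
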
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go
-deleted file mode 100644
-index e3c26d4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests_test.go
-+++ /dev/null
-@@ -1,87 +0,0 @@
--package diskconfig
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCreateOpts(t *testing.T) {
--	base := servers.CreateOpts{
--		Name:      "createdserver",
--		ImageRef:  "asdfasdfasdf",
--		FlavorRef: "performance1-1",
--	}
--
--	ext := CreateOptsExt{
--		CreateOptsBuilder: base,
--		DiskConfig:        Manual,
--	}
--
--	expected := `
--		{
--			"server": {
--				"name": "createdserver",
--				"imageRef": "asdfasdfasdf",
--				"flavorRef": "performance1-1",
--				"OS-DCF:diskConfig": "MANUAL"
--			}
--		}
--	`
--	actual, err := ext.ToServerCreateMap()
--	th.AssertNoErr(t, err)
--	th.CheckJSONEquals(t, expected, actual)
--}
--
--func TestRebuildOpts(t *testing.T) {
--	base := servers.RebuildOpts{
--		Name:      "rebuiltserver",
--		AdminPass: "swordfish",
--		ImageID:   "asdfasdfasdf",
--	}
--
--	ext := RebuildOptsExt{
--		RebuildOptsBuilder: base,
--		DiskConfig:         Auto,
--	}
--
--	actual, err := ext.ToServerRebuildMap()
--	th.AssertNoErr(t, err)
--
--	expected := `
--		{
--			"rebuild": {
--				"name": "rebuiltserver",
--				"imageRef": "asdfasdfasdf",
--				"adminPass": "swordfish",
--				"OS-DCF:diskConfig": "AUTO"
--			}
--		}
--	`
--	th.CheckJSONEquals(t, expected, actual)
--}
--
--func TestResizeOpts(t *testing.T) {
--	base := servers.ResizeOpts{
--		FlavorRef: "performance1-8",
--	}
--
--	ext := ResizeOptsExt{
--		ResizeOptsBuilder: base,
--		DiskConfig:        Auto,
--	}
--
--	actual, err := ext.ToServerResizeMap()
--	th.AssertNoErr(t, err)
--
--	expected := `
--		{
--			"resize": {
--				"flavorRef": "performance1-8",
--				"OS-DCF:diskConfig": "AUTO"
--			}
--		}
--	`
--	th.CheckJSONEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go
-deleted file mode 100644
-index 10ec2da..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go
-+++ /dev/null
-@@ -1,60 +0,0 @@
--package diskconfig
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--func commonExtract(result gophercloud.Result) (*DiskConfig, error) {
--	var resp struct {
--		Server struct {
--			DiskConfig string `mapstructure:"OS-DCF:diskConfig"`
--		} `mapstructure:"server"`
--	}
--
--	err := mapstructure.Decode(result.Body, &resp)
--	if err != nil {
--		return nil, err
--	}
--
--	config := DiskConfig(resp.Server.DiskConfig)
--	return &config, nil
--}
--
--// ExtractGet returns the disk configuration from a servers.Get call.
--func ExtractGet(result servers.GetResult) (*DiskConfig, error) {
--	return commonExtract(result.Result)
--}
--
--// ExtractUpdate returns the disk configuration from a servers.Update call.
--func ExtractUpdate(result servers.UpdateResult) (*DiskConfig, error) {
--	return commonExtract(result.Result)
--}
--
--// ExtractRebuild returns the disk configuration from a servers.Rebuild call.
--func ExtractRebuild(result servers.RebuildResult) (*DiskConfig, error) {
--	return commonExtract(result.Result)
--}
--
--// ExtractDiskConfig returns the DiskConfig setting for a specific server acquired from an
--// servers.ExtractServers call, while iterating through a Pager.
--func ExtractDiskConfig(page pagination.Page, index int) (*DiskConfig, error) {
--	casted := page.(servers.ServerPage).Body
--
--	type server struct {
--		DiskConfig string `mapstructure:"OS-DCF:diskConfig"`
--	}
--	var response struct {
--		Servers []server `mapstructure:"servers"`
--	}
--
--	err := mapstructure.Decode(casted, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	config := DiskConfig(response.Servers[index].DiskConfig)
--	return &config, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go
-deleted file mode 100644
-index dd8d2b7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results_test.go
-+++ /dev/null
-@@ -1,68 +0,0 @@
--package diskconfig
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestExtractGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	servers.HandleServerGetSuccessfully(t)
--
--	config, err := ExtractGet(servers.Get(client.ServiceClient(), "1234asdf"))
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, Manual, *config)
--}
--
--func TestExtractUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	servers.HandleServerUpdateSuccessfully(t)
--
--	r := servers.Update(client.ServiceClient(), "1234asdf", servers.UpdateOpts{
--		Name: "new-name",
--	})
--	config, err := ExtractUpdate(r)
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, Manual, *config)
--}
--
--func TestExtractRebuild(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	servers.HandleRebuildSuccessfully(t, servers.SingleServerBody)
--
--	r := servers.Rebuild(client.ServiceClient(), "1234asdf", servers.RebuildOpts{
--		Name:       "new-name",
--		AdminPass:  "swordfish",
--		ImageID:    "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--		AccessIPv4: "1.2.3.4",
--	})
--	config, err := ExtractRebuild(r)
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, Manual, *config)
--}
--
--func TestExtractList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	servers.HandleServerListSuccessfully(t)
--
--	pages := 0
--	err := servers.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--
--		config, err := ExtractDiskConfig(page, 0)
--		th.AssertNoErr(t, err)
--		th.CheckEquals(t, Manual, *config)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, pages, 1)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go
-deleted file mode 100644
-index 2b447da..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package extensions provides information and interaction with the
--// different extensions available for the OpenStack Compute service.
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
-deleted file mode 100644
-index 856f41b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package keypairs provides information and interaction with the Keypairs
--// extension for the OpenStack Compute service.
--package keypairs
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go
-deleted file mode 100644
-index d10af99..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/fixtures.go
-+++ /dev/null
-@@ -1,171 +0,0 @@
--// +build fixtures
--
--package keypairs
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ListOutput is a sample response to a List call.
--const ListOutput = `
--{
--	"keypairs": [
--		{
--			"keypair": {
--				"fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a",
--				"name": "firstkey",
--				"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n"
--			}
--		},
--		{
--			"keypair": {
--				"fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
--				"name": "secondkey",
--				"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n"
--			}
--		}
--	]
--}
--`
--
--// GetOutput is a sample response to a Get call.
--const GetOutput = `
--{
--	"keypair": {
--		"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n",
--		"name": "firstkey",
--		"fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a"
--	}
--}
--`
--
--// CreateOutput is a sample response to a Create call.
--const CreateOutput = `
--{
--	"keypair": {
--		"fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
--		"name": "createdkey",
--		"private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n",
--		"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n",
--		"user_id": "fake"
--	}
--}
--`
--
--// ImportOutput is a sample response to a Create call that provides its own public key.
--const ImportOutput = `
--{
--	"keypair": {
--		"fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c",
--		"name": "importedkey",
--		"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
--		"user_id": "fake"
--	}
--}
--`
--
--// FirstKeyPair is the first result in ListOutput.
--var FirstKeyPair = KeyPair{
--	Name:        "firstkey",
--	Fingerprint: "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a",
--	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n",
--}
--
--// SecondKeyPair is the second result in ListOutput.
--var SecondKeyPair = KeyPair{
--	Name:        "secondkey",
--	Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
--	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n",
--}
--
--// ExpectedKeyPairSlice is the slice of results that should be parsed from ListOutput, in the expected
--// order.
--var ExpectedKeyPairSlice = []KeyPair{FirstKeyPair, SecondKeyPair}
--
--// CreatedKeyPair is the parsed result from CreateOutput.
--var CreatedKeyPair = KeyPair{
--	Name:        "createdkey",
--	Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8",
--	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n",
--	PrivateKey:  "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n",
--	UserID:      "fake",
--}
--
--// ImportedKeyPair is the parsed result from ImportOutput.
--var ImportedKeyPair = KeyPair{
--	Name:        "importedkey",
--	Fingerprint: "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c",
--	PublicKey:   "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
--	UserID:      "fake",
--}
--
--// HandleListSuccessfully configures the test server to respond to a List request.
--func HandleListSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, ListOutput)
--	})
--}
--
--// HandleGetSuccessfully configures the test server to respond to a Get request for "firstkey".
--func HandleGetSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/os-keypairs/firstkey", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, GetOutput)
--	})
--}
--
--// HandleCreateSuccessfully configures the test server to respond to a Create request for a new
--// keypair called "createdkey".
--func HandleCreateSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "keypair": { "name": "createdkey" } }`)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, CreateOutput)
--	})
--}
--
--// HandleImportSuccessfully configures the test server to respond to an Import request for an
--// existing keypair called "importedkey".
--func HandleImportSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `
--			{
--				"keypair": {
--					"name": "importedkey",
--					"public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova"
--				}
--			}
--		`)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, ImportOutput)
--	})
--}
--
--// HandleDeleteSuccessfully configures the test server to respond to a Delete request for a
--// keypair called "deletedkey".
--func HandleDeleteSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/os-keypairs/deletedkey", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.WriteHeader(http.StatusAccepted)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
-deleted file mode 100644
-index 7d1a2ac..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go
-+++ /dev/null
-@@ -1,88 +0,0 @@
--package keypairs
--
--import (
--	"errors"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// List returns a Pager that allows you to iterate over a collection of KeyPairs.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
--		return KeyPairPage{pagination.SinglePageBase(r)}
--	})
--}
--
--// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the
--// CreateOpts struct in this package does.
--type CreateOptsBuilder interface {
--	ToKeyPairCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts specifies keypair creation or import parameters.
--type CreateOpts struct {
--	// Name [required] is a friendly name to refer to this KeyPair in other services.
--	Name string
--
--	// PublicKey [optional] is a pregenerated OpenSSH-formatted public key. If provided, this key
--	// will be imported and no new key will be created.
--	PublicKey string
--}
--
--// ToKeyPairCreateMap constructs a request body from CreateOpts.
--func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) {
--	if opts.Name == "" {
--		return nil, errors.New("Missing field required for keypair creation: Name")
--	}
--
--	keypair := make(map[string]interface{})
--	keypair["name"] = opts.Name
--	if opts.PublicKey != "" {
--		keypair["public_key"] = opts.PublicKey
--	}
--
--	return map[string]interface{}{"keypair": keypair}, nil
--}
--
--// Create requests the creation of a new keypair on the server, or the import of a pre-existing
--// keypair.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToKeyPairCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Get returns public data about a previously uploaded KeyPair.
--func Get(client *gophercloud.ServiceClient, name string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(client, name), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Delete requests the deletion of a previously stored KeyPair from the server.
--func Delete(client *gophercloud.ServiceClient, name string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(client, name), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--	return res
--}
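For reference, a minimal usage sketch of the keypairs package removed above, assuming an already-authenticated compute ServiceClient; the computeClient variable and the "demo" keypair name are illustrative, not part of the patch:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
	"github.com/rackspace/gophercloud/pagination"
)

// DemoKeyPairs lists, creates, fetches and deletes keypairs using the API
// shown in requests.go above. computeClient is an assumed,
// already-authenticated compute-service client.
func DemoKeyPairs(computeClient *gophercloud.ServiceClient) error {
	// List returns a Pager; ExtractKeyPairs converts each page into []KeyPair.
	err := keypairs.List(computeClient).EachPage(func(page pagination.Page) (bool, error) {
		kps, err := keypairs.ExtractKeyPairs(page)
		if err != nil {
			return false, err
		}
		for _, kp := range kps {
			fmt.Println(kp.Name, kp.Fingerprint)
		}
		return true, nil
	})
	if err != nil {
		return err
	}

	// Create with only a Name asks the server to generate the keypair; this
	// is the one response that carries the private key.
	created, err := keypairs.Create(computeClient, keypairs.CreateOpts{Name: "demo"}).Extract()
	if err != nil {
		return err
	}
	fmt.Println(created.Fingerprint)

	// Get and Delete address keypairs by name.
	if _, err := keypairs.Get(computeClient, "demo").Extract(); err != nil {
		return err
	}
	return keypairs.Delete(computeClient, "demo").ExtractErr()
}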
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go
-deleted file mode 100644
-index 67d1833..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests_test.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--package keypairs
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListSuccessfully(t)
--
--	count := 0
--	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractKeyPairs(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, ExpectedKeyPairSlice, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleCreateSuccessfully(t)
--
--	actual, err := Create(client.ServiceClient(), CreateOpts{
--		Name: "createdkey",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &CreatedKeyPair, actual)
--}
--
--func TestImport(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleImportSuccessfully(t)
--
--	actual, err := Create(client.ServiceClient(), CreateOpts{
--		Name:      "importedkey",
--		PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &ImportedKeyPair, actual)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleGetSuccessfully(t)
--
--	actual, err := Get(client.ServiceClient(), "firstkey").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &FirstKeyPair, actual)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleDeleteSuccessfully(t)
--
--	err := Delete(client.ServiceClient(), "deletedkey").ExtractErr()
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go
-deleted file mode 100644
-index f1a0d8e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/results.go
-+++ /dev/null
-@@ -1,94 +0,0 @@
--package keypairs
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// KeyPair is an SSH key known to the OpenStack cluster that is available to be injected into
--// servers.
--type KeyPair struct {
--	// Name is used to refer to this keypair from other services within this region.
--	Name string `mapstructure:"name"`
--
--	// Fingerprint is a short sequence of bytes that can be used to authenticate or validate a longer
--	// public key.
--	Fingerprint string `mapstructure:"fingerprint"`
--
--	// PublicKey is the public key from this pair, in OpenSSH format. "ssh-rsa AAAAB3Nz..."
--	PublicKey string `mapstructure:"public_key"`
--
--	// PrivateKey is the private key from this pair, in PEM format.
--	// "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." It is only present if this keypair was just
--	// returned from a Create call
--	PrivateKey string `mapstructure:"private_key"`
--
--	// UserID is the user who owns this keypair.
--	UserID string `mapstructure:"user_id"`
--}
--
--// KeyPairPage stores a single page of KeyPair results from a List call; keypair listings are not paginated.
--type KeyPairPage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty determines whether or not a KeyPairPage is empty.
--func (page KeyPairPage) IsEmpty() (bool, error) {
--	ks, err := ExtractKeyPairs(page)
--	return len(ks) == 0, err
--}
--
--// ExtractKeyPairs interprets a page of results as a slice of KeyPairs.
--func ExtractKeyPairs(page pagination.Page) ([]KeyPair, error) {
--	type pair struct {
--		KeyPair KeyPair `mapstructure:"keypair"`
--	}
--
--	var resp struct {
--		KeyPairs []pair `mapstructure:"keypairs"`
--	}
--
--	err := mapstructure.Decode(page.(KeyPairPage).Body, &resp)
--	results := make([]KeyPair, len(resp.KeyPairs))
--	for i, pair := range resp.KeyPairs {
--		results[i] = pair.KeyPair
--	}
--	return results, err
--}
--
--type keyPairResult struct {
--	gophercloud.Result
--}
--
--// Extract is a method that attempts to interpret any KeyPair resource response as a KeyPair struct.
--func (r keyPairResult) Extract() (*KeyPair, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		KeyPair *KeyPair `json:"keypair" mapstructure:"keypair"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--	return res.KeyPair, err
--}
--
--// CreateResult is the response from a Create operation. Call its Extract method to interpret it
--// as a KeyPair.
--type CreateResult struct {
--	keyPairResult
--}
--
--// GetResult is the response from a Get operation. Call its Extract method to interpret it
--// as a KeyPair.
--type GetResult struct {
--	keyPairResult
--}
--
--// DeleteResult is the response from a Delete operation. Call its Extract method to determine if
--// the call succeeded or failed.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
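The Extract helpers above lean entirely on mapstructure tags. A self-contained sketch of that decoding step, using shortened placeholder values for the fingerprint and key material:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// KeyPair mirrors the struct above; only the mapstructure tags matter here.
type KeyPair struct {
	Name        string `mapstructure:"name"`
	Fingerprint string `mapstructure:"fingerprint"`
	PublicKey   string `mapstructure:"public_key"`
	UserID      string `mapstructure:"user_id"`
}

func main() {
	// A response body as it looks after generic JSON unmarshalling
	// (values shortened for readability).
	body := map[string]interface{}{
		"keypair": map[string]interface{}{
			"name":        "firstkey",
			"fingerprint": "15:b0:f8:b3",
			"public_key":  "ssh-rsa AAAA... Generated by Nova",
			"user_id":     "fake",
		},
	}

	var res struct {
		KeyPair KeyPair `mapstructure:"keypair"`
	}
	if err := mapstructure.Decode(body, &res); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", res.KeyPair) // the decoded KeyPair struct
}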
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go
-deleted file mode 100644
-index 702f532..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go
-+++ /dev/null
-@@ -1,25 +0,0 @@
--package keypairs
--
--import "github.com/rackspace/gophercloud"
--
--const resourcePath = "os-keypairs"
--
--func resourceURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(resourcePath)
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return resourceURL(c)
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return resourceURL(c)
--}
--
--func getURL(c *gophercloud.ServiceClient, name string) string {
--	return c.ServiceURL(resourcePath, name)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, name string) string {
--	return getURL(c, name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go
-deleted file mode 100644
-index 60efd2a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/urls_test.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--package keypairs
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListURL(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	c := client.ServiceClient()
--
--	th.CheckEquals(t, c.Endpoint+"os-keypairs", listURL(c))
--}
--
--func TestCreateURL(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	c := client.ServiceClient()
--
--	th.CheckEquals(t, c.Endpoint+"os-keypairs", createURL(c))
--}
--
--func TestGetURL(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	c := client.ServiceClient()
--
--	th.CheckEquals(t, c.Endpoint+"os-keypairs/wat", getURL(c, "wat"))
--}
--
--func TestDeleteURL(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	c := client.ServiceClient()
--
--	th.CheckEquals(t, c.Endpoint+"os-keypairs/wat", deleteURL(c, "wat"))
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go
-deleted file mode 100644
-index 5822e1b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--// Package flavors provides information and interaction with the flavor API
--// resource in the OpenStack Compute service.
--//
--// A flavor is an available hardware configuration for a server. Each flavor
--// has a unique combination of disk space, memory capacity and priority for CPU
--// time.
--package flavors
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go
-deleted file mode 100644
-index 065a2ec..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package flavors
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToFlavorListQuery() (string, error)
--}
--
--// ListOpts helps control the results returned by the List() function.
--// For example, a flavor with a minDisk field of 10 will not be returned if you specify MinDisk set to 20.
--// Typically, software will use the last ID of the previous call to List to set the Marker for the current call.
--type ListOpts struct {
--
--	// ChangesSince, if provided, instructs List to return only those things which have changed since the timestamp provided.
--	ChangesSince string `q:"changes-since"`
--
--	// MinDisk and MinRAM, if provided, elide flavors which do not meet your criteria.
--	MinDisk int `q:"minDisk"`
--	MinRAM  int `q:"minRam"`
--
--	// Marker and Limit control paging.
--	// Marker instructs List where to start listing from.
--	Marker string `q:"marker"`
--
--	// Limit instructs List to refrain from sending excessively large lists of flavors.
--	Limit int `q:"limit"`
--}
--
--// ToFlavorListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToFlavorListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// ListDetail instructs OpenStack to provide a list of flavors.
--// You may provide criteria by which List curtails its results for easier processing.
--// See ListOpts for more details.
--func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(client)
--	if opts != nil {
--		query, err := opts.ToFlavorListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return FlavorPage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	return pagination.NewPager(client, url, createPage)
--}
--
--// Get instructs OpenStack to provide details on a single flavor, identified by its ID.
--// Use ExtractFlavor to convert its result into a Flavor.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var gr GetResult
--	gr.Err = perigee.Get(getURL(client, id), perigee.Options{
--		Results:     &gr.Body,
--		MoreHeaders: client.AuthenticatedHeaders(),
--	})
--	return gr
--}
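A minimal sketch of how the flavors calls removed above fit together, assuming an authenticated compute ServiceClient; the 4096 MB threshold and Limit are only examples:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
	"github.com/rackspace/gophercloud/pagination"
)

// ListBigFlavors pages through flavors with at least 4096 MB of RAM and
// prints them. client is an assumed, already-authenticated compute client.
func ListBigFlavors(client *gophercloud.ServiceClient) error {
	opts := flavors.ListOpts{MinRAM: 4096, Limit: 20}
	return flavors.ListDetail(client, opts).EachPage(func(page pagination.Page) (bool, error) {
		fs, err := flavors.ExtractFlavors(page)
		if err != nil {
			return false, err
		}
		for _, f := range fs {
			fmt.Printf("%s: %d vCPU, %d MB RAM, %d GB disk\n", f.Name, f.VCPUs, f.RAM, f.Disk)
		}
		return true, nil
	})
}

// GetFlavor fetches a single flavor by its ID.
func GetFlavor(client *gophercloud.ServiceClient, id string) (*flavors.Flavor, error) {
	return flavors.Get(client, id).Extract()
}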
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go
-deleted file mode 100644
-index fbd7c33..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests_test.go
-+++ /dev/null
-@@ -1,129 +0,0 @@
--package flavors
--
--import (
--	"fmt"
--	"net/http"
--	"reflect"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--const tokenID = "blerb"
--
--func TestListFlavors(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, `
--					{
--						"flavors": [
--							{
--								"id": "1",
--								"name": "m1.tiny",
--								"disk": 1,
--								"ram": 512,
--								"vcpus": 1
--							},
--							{
--								"id": "2",
--								"name": "m2.small",
--								"disk": 10,
--								"ram": 1024,
--								"vcpus": 2
--							}
--						],
--						"flavors_links": [
--							{
--								"href": "%s/flavors/detail?marker=2",
--								"rel": "next"
--							}
--						]
--					}
--				`, th.Server.URL)
--		case "2":
--			fmt.Fprintf(w, `{ "flavors": [] }`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--
--	pages := 0
--	err := ListDetail(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--
--		actual, err := ExtractFlavors(page)
--		if err != nil {
--			return false, err
--		}
--
--		expected := []Flavor{
--			Flavor{ID: "1", Name: "m1.tiny", Disk: 1, RAM: 512, VCPUs: 1},
--			Flavor{ID: "2", Name: "m2.small", Disk: 10, RAM: 1024, VCPUs: 2},
--		}
--
--		if !reflect.DeepEqual(expected, actual) {
--			t.Errorf("Expected %#v, but was %#v", expected, actual)
--		}
--
--		return true, nil
--	})
--	if err != nil {
--		t.Fatal(err)
--	}
--	if pages != 1 {
--		t.Errorf("Expected one page, got %d", pages)
--	}
--}
--
--func TestGetFlavor(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/flavors/12345", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"flavor": {
--					"id": "1",
--					"name": "m1.tiny",
--					"disk": 1,
--					"ram": 512,
--					"vcpus": 1,
--					"rxtx_factor": 1
--				}
--			}
--		`)
--	})
--
--	actual, err := Get(fake.ServiceClient(), "12345").Extract()
--	if err != nil {
--		t.Fatalf("Unable to get flavor: %v", err)
--	}
--
--	expected := &Flavor{
--		ID:         "1",
--		Name:       "m1.tiny",
--		Disk:       1,
--		RAM:        512,
--		VCPUs:      1,
--		RxTxFactor: 1,
--	}
--	if !reflect.DeepEqual(expected, actual) {
--		t.Errorf("Expected %#v, but was %#v", expected, actual)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go
-deleted file mode 100644
-index 8dddd70..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go
-+++ /dev/null
-@@ -1,122 +0,0 @@
--package flavors
--
--import (
--	"errors"
--	"reflect"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ErrCannotInterpret is returned by an Extract call if the response body doesn't have the expected structure.
--var ErrCannotInterpret = errors.New("Unable to interpret a response body.")
--
--// GetResult temporarily holds the response from a Get call.
--type GetResult struct {
--	gophercloud.Result
--}
--
--// Extract provides access to the individual Flavor returned by the Get function.
--func (gr GetResult) Extract() (*Flavor, error) {
--	if gr.Err != nil {
--		return nil, gr.Err
--	}
--
--	var result struct {
--		Flavor Flavor `mapstructure:"flavor"`
--	}
--
--	cfg := &mapstructure.DecoderConfig{
--		DecodeHook: defaulter,
--		Result:     &result,
--	}
--	decoder, err := mapstructure.NewDecoder(cfg)
--	if err != nil {
--		return nil, err
--	}
--	err = decoder.Decode(gr.Body)
--	return &result.Flavor, err
--}
--
--// Flavor records represent (virtual) hardware configurations for server resources in a region.
--type Flavor struct {
--	// The Id field contains the flavor's unique identifier.
--	// For example, this identifier will be useful when specifying which hardware configuration to use for a new server instance.
--	ID string `mapstructure:"id"`
--
--	// The Disk and RAM fields provide a measure of disk space and memory offered by the flavor, in GB and MB, respectively.
--	Disk int `mapstructure:"disk"`
--	RAM  int `mapstructure:"ram"`
--
--	// The Name field provides a human-readable moniker for the flavor.
--	Name string `mapstructure:"name"`
--
--	RxTxFactor float64 `mapstructure:"rxtx_factor"`
--
--	// Swap indicates how much space is reserved for swap.
--	// If not provided, this field will be set to 0.
--	Swap int `mapstructure:"swap"`
--
--	// VCPUs indicates how many (virtual) CPUs are available for this flavor.
--	VCPUs int `mapstructure:"vcpus"`
--}
--
--// FlavorPage contains a single page of the response from a List call.
--type FlavorPage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty determines if a page contains any results.
--func (p FlavorPage) IsEmpty() (bool, error) {
--	flavors, err := ExtractFlavors(p)
--	if err != nil {
--		return true, err
--	}
--	return len(flavors) == 0, nil
--}
--
--// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
--func (p FlavorPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"flavors_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--func defaulter(from, to reflect.Kind, v interface{}) (interface{}, error) {
--	if (from == reflect.String) && (to == reflect.Int) {
--		return 0, nil
--	}
--	return v, nil
--}
--
--// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation.
--func ExtractFlavors(page pagination.Page) ([]Flavor, error) {
--	casted := page.(FlavorPage).Body
--	var container struct {
--		Flavors []Flavor `mapstructure:"flavors"`
--	}
--
--	cfg := &mapstructure.DecoderConfig{
--		DecodeHook: defaulter,
--		Result:     &container,
--	}
--	decoder, err := mapstructure.NewDecoder(cfg)
--	if err != nil {
--		return container.Flavors, err
--	}
--	err = decoder.Decode(casted)
--	if err != nil {
--		return container.Flavors, err
--	}
--
--	return container.Flavors, nil
--}
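The defaulter hook above silently maps any string that arrives where an int is expected to 0, presumably to tolerate responses where a numeric field such as swap comes back as a string. A standalone sketch of that behaviour:

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/mapstructure"
)

type flavor struct {
	Swap int `mapstructure:"swap"`
	RAM  int `mapstructure:"ram"`
}

// defaulter is the same hook as above: a string arriving where an int is
// expected decodes to 0 rather than producing an error.
func defaulter(from, to reflect.Kind, v interface{}) (interface{}, error) {
	if from == reflect.String && to == reflect.Int {
		return 0, nil
	}
	return v, nil
}

func main() {
	// "swap" arriving as an empty string is the assumed motivating case.
	body := map[string]interface{}{"swap": "", "ram": 512}

	var out flavor
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: defaulter,
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(body); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Swap:0 RAM:512}
}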
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go
-deleted file mode 100644
-index 683c107..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package flavors
--
--import (
--	"github.com/rackspace/gophercloud"
--)
--
--func getURL(client *gophercloud.ServiceClient, id string) string {
--	return client.ServiceURL("flavors", id)
--}
--
--func listURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("flavors", "detail")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go
-deleted file mode 100644
-index 069da24..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package flavors
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "flavors/foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "flavors/detail"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go
-deleted file mode 100644
-index 0edaa3f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--// Package images provides information and interaction with the image API
--// resource in the OpenStack Compute service.
--//
--// An image is a collection of files used to create or rebuild a server.
--// Operators provide a number of pre-built OS images by default. You may also
--// create custom images from cloud servers you have launched.
--package images
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go
-deleted file mode 100644
-index bc61ddb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--package images
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToImageListQuery() (string, error)
--}
--
--// ListOpts contains options for limiting the number of Images returned from a call to ListDetail.
--type ListOpts struct {
--	// When the image last changed status (in date-time format).
--	ChangesSince string `q:"changes-since"`
--	// The number of Images to return.
--	Limit int `q:"limit"`
--	// UUID of the Image at which to set a marker.
--	Marker string `q:"marker"`
--	// The name of the Image.
--	Name string `q:"name"`
--	// The name of the Server (in URL format).
--	Server string `q:"server"`
--	// The current status of the Image.
--	Status string `q:"status"`
--	// The value of the type of image (e.g. BASE, SERVER, ALL)
--	Type string `q:"type"`
--}
--
--// ToImageListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToImageListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// ListDetail enumerates the available images.
--func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listDetailURL(client)
--	if opts != nil {
--		query, err := opts.ToImageListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return ImagePage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	return pagination.NewPager(client, url, createPage)
--}
--
--// Get acquires additional detail about a specific image by ID.
--// Use ExtractImage() to interpret the result as an openstack Image.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var result GetResult
--	_, result.Err = perigee.Request("GET", getURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		Results:     &result.Body,
--		OkCodes:     []int{200},
--	})
--	return result
--}
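A minimal usage sketch for the images calls removed above, again assuming an authenticated compute ServiceClient; the Limit and Status values are illustrative:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
	"github.com/rackspace/gophercloud/pagination"
)

// ListActiveImages walks the image list two entries per page and prints the
// ACTIVE ones. client is an assumed, already-authenticated compute client.
func ListActiveImages(client *gophercloud.ServiceClient) error {
	opts := &images.ListOpts{Limit: 2, Status: "ACTIVE"}
	return images.ListDetail(client, opts).EachPage(func(page pagination.Page) (bool, error) {
		imgs, err := images.ExtractImages(page)
		if err != nil {
			return false, err
		}
		for _, img := range imgs {
			fmt.Println(img.ID, img.Name, img.Status)
		}
		return true, nil
	})
}

// GetImage fetches additional detail about one image by its ID.
func GetImage(client *gophercloud.ServiceClient, id string) (*images.Image, error) {
	return images.Get(client, id).Extract()
}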
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go
-deleted file mode 100644
-index 9a05f97..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go
-+++ /dev/null
-@@ -1,175 +0,0 @@
--package images
--
--import (
--	"encoding/json"
--	"fmt"
--	"net/http"
--	"reflect"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListImages(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, `
--				{
--					"images": [
--						{
--							"status": "ACTIVE",
--							"updated": "2014-09-23T12:54:56Z",
--							"id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7",
--							"OS-EXT-IMG-SIZE:size": 476704768,
--							"name": "F17-x86_64-cfntools",
--							"created": "2014-09-23T12:54:52Z",
--							"minDisk": 0,
--							"progress": 100,
--							"minRam": 0,
--							"metadata": {}
--						},
--						{
--							"status": "ACTIVE",
--							"updated": "2014-09-23T12:51:43Z",
--							"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--							"OS-EXT-IMG-SIZE:size": 13167616,
--							"name": "cirros-0.3.2-x86_64-disk",
--							"created": "2014-09-23T12:51:42Z",
--							"minDisk": 0,
--							"progress": 100,
--							"minRam": 0,
--							"metadata": {}
--						}
--					]
--				}
--			`)
--		case "2":
--			fmt.Fprintf(w, `{ "images": [] }`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--
--	pages := 0
--	options := &ListOpts{Limit: 2}
--	err := ListDetail(fake.ServiceClient(), options).EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--
--		actual, err := ExtractImages(page)
--		if err != nil {
--			return false, err
--		}
--
--		expected := []Image{
--			Image{
--				ID:       "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7",
--				Name:     "F17-x86_64-cfntools",
--				Created:  "2014-09-23T12:54:52Z",
--				Updated:  "2014-09-23T12:54:56Z",
--				MinDisk:  0,
--				MinRAM:   0,
--				Progress: 100,
--				Status:   "ACTIVE",
--			},
--			Image{
--				ID:       "f90f6034-2570-4974-8351-6b49732ef2eb",
--				Name:     "cirros-0.3.2-x86_64-disk",
--				Created:  "2014-09-23T12:51:42Z",
--				Updated:  "2014-09-23T12:51:43Z",
--				MinDisk:  0,
--				MinRAM:   0,
--				Progress: 100,
--				Status:   "ACTIVE",
--			},
--		}
--
--		if !reflect.DeepEqual(expected, actual) {
--			t.Errorf("Unexpected page contents: expected %#v, got %#v", expected, actual)
--		}
--
--		return false, nil
--	})
--
--	if err != nil {
--		t.Fatalf("EachPage error: %v", err)
--	}
--	if pages != 1 {
--		t.Errorf("Expected one page, got %d", pages)
--	}
--}
--
--func TestGetImage(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"image": {
--					"status": "ACTIVE",
--					"updated": "2014-09-23T12:54:56Z",
--					"id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7",
--					"OS-EXT-IMG-SIZE:size": 476704768,
--					"name": "F17-x86_64-cfntools",
--					"created": "2014-09-23T12:54:52Z",
--					"minDisk": 0,
--					"progress": 100,
--					"minRam": 0,
--					"metadata": {}
--				}
--			}
--		`)
--	})
--
--	actual, err := Get(fake.ServiceClient(), "12345678").Extract()
--	if err != nil {
--		t.Fatalf("Unexpected error from Get: %v", err)
--	}
--
--	expected := &Image{
--		Status:   "ACTIVE",
--		Updated:  "2014-09-23T12:54:56Z",
--		ID:       "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7",
--		Name:     "F17-x86_64-cfntools",
--		Created:  "2014-09-23T12:54:52Z",
--		MinDisk:  0,
--		Progress: 100,
--		MinRAM:   0,
--	}
--
--	if !reflect.DeepEqual(expected, actual) {
--		t.Errorf("Expected %#v, but got %#v", expected, actual)
--	}
--}
--
--func TestNextPageURL(t *testing.T) {
--	var page ImagePage
--	var body map[string]interface{}
--	bodyString := []byte(`{"images":{"links":[{"href":"http://192.154.23.87/12345/images/image3","rel":"bookmark"}]}, "images_links":[{"href":"http://192.154.23.87/12345/images/image4","rel":"next"}]}`)
--	err := json.Unmarshal(bodyString, &body)
--	if err != nil {
--		t.Fatalf("Error unmarshaling data into page body: %v", err)
--	}
--	page.Body = body
--
--	expected := "http://192.154.23.87/12345/images/image4"
--	actual, err := page.NextPageURL()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go
-deleted file mode 100644
-index 493d511..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go
-+++ /dev/null
-@@ -1,90 +0,0 @@
--package images
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// GetResult temporarily stores a Get response.
--type GetResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets a GetResult as an Image.
--func (gr GetResult) Extract() (*Image, error) {
--	if gr.Err != nil {
--		return nil, gr.Err
--	}
--
--	var decoded struct {
--		Image Image `mapstructure:"image"`
--	}
--
--	err := mapstructure.Decode(gr.Body, &decoded)
--	return &decoded.Image, err
--}
--
--// Image is used for JSON (un)marshalling.
--// It provides a description of an OS image.
--type Image struct {
--	// ID contains the image's unique identifier.
--	ID string
--
--	Created string
--
--	// MinDisk and MinRAM specify the minimum resources a server must provide to be able to install the image.
--	MinDisk int
--	MinRAM  int
--
--	// Name provides a human-readable moniker for the OS image.
--	Name string
--
--	// The Progress and Status fields indicate image-creation status.
--	// Any usable image will have 100% progress.
--	Progress int
--	Status   string
--
--	Updated string
--}
--
--// ImagePage contains a single page of results from a List operation.
--// Use ExtractImages to convert it into a slice of usable structs.
--type ImagePage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty returns true if a page contains no Image results.
--func (page ImagePage) IsEmpty() (bool, error) {
--	images, err := ExtractImages(page)
--	if err != nil {
--		return true, err
--	}
--	return len(images) == 0, nil
--}
--
--// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
--func (page ImagePage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"images_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(page.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// ExtractImages converts a page of List results into a slice of usable Image structs.
--func ExtractImages(page pagination.Page) ([]Image, error) {
--	casted := page.(ImagePage).Body
--	var results struct {
--		Images []Image `mapstructure:"images"`
--	}
--
--	err := mapstructure.Decode(casted, &results)
--	return results.Images, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go
-deleted file mode 100644
-index 9b3c86d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package images
--
--import "github.com/rackspace/gophercloud"
--
--func listDetailURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("images", "detail")
--}
--
--func getURL(client *gophercloud.ServiceClient, id string) string {
--	return client.ServiceURL("images", id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go
-deleted file mode 100644
-index b1ab3d6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package images
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "images/foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestListDetailURL(t *testing.T) {
--	actual := listDetailURL(endpointClient())
--	expected := endpoint + "images/detail"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go
-deleted file mode 100644
-index fe45671..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go
-+++ /dev/null
-@@ -1,6 +0,0 @@
--// Package servers provides information and interaction with the server API
--// resource in the OpenStack Compute service.
--//
--// A server is a virtual machine instance in the compute system. In order for
--// one to be provisioned, a valid flavor and image are required.
--package servers
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go
-deleted file mode 100644
-index e872b07..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go
-+++ /dev/null
-@@ -1,459 +0,0 @@
--// +build fixtures
--
--package servers
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ServerListBody contains the canned body of a servers.List response.
--const ServerListBody = `
--{
--	"servers": [
--		{
--			"status": "ACTIVE",
--			"updated": "2014-09-25T13:10:10Z",
--			"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
--			"OS-EXT-SRV-ATTR:host": "devstack",
--			"addresses": {
--				"private": [
--					{
--						"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b",
--						"version": 4,
--						"addr": "10.0.0.32",
--						"OS-EXT-IPS:type": "fixed"
--					}
--				]
--			},
--			"links": [
--				{
--					"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--					"rel": "self"
--				},
--				{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--					"rel": "bookmark"
--				}
--			],
--			"key_name": null,
--			"image": {
--				"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--				"links": [
--					{
--						"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"OS-EXT-STS:task_state": null,
--			"OS-EXT-STS:vm_state": "active",
--			"OS-EXT-SRV-ATTR:instance_name": "instance-0000001e",
--			"OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000",
--			"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
--			"flavor": {
--				"id": "1",
--				"links": [
--					{
--						"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--			"security_groups": [
--				{
--					"name": "default"
--				}
--			],
--			"OS-SRV-USG:terminated_at": null,
--			"OS-EXT-AZ:availability_zone": "nova",
--			"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
--			"name": "herp",
--			"created": "2014-09-25T13:10:02Z",
--			"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
--			"OS-DCF:diskConfig": "MANUAL",
--			"os-extended-volumes:volumes_attached": [],
--			"accessIPv4": "",
--			"accessIPv6": "",
--			"progress": 0,
--			"OS-EXT-STS:power_state": 1,
--			"config_drive": "",
--			"metadata": {}
--		},
--		{
--			"status": "ACTIVE",
--			"updated": "2014-09-25T13:04:49Z",
--			"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
--			"OS-EXT-SRV-ATTR:host": "devstack",
--			"addresses": {
--				"private": [
--					{
--						"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be",
--						"version": 4,
--						"addr": "10.0.0.31",
--						"OS-EXT-IPS:type": "fixed"
--					}
--				]
--			},
--			"links": [
--				{
--					"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--					"rel": "self"
--				},
--				{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--					"rel": "bookmark"
--				}
--			],
--			"key_name": null,
--			"image": {
--				"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--				"links": [
--					{
--						"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"OS-EXT-STS:task_state": null,
--			"OS-EXT-STS:vm_state": "active",
--			"OS-EXT-SRV-ATTR:instance_name": "instance-0000001d",
--			"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
--			"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
--			"flavor": {
--				"id": "1",
--				"links": [
--					{
--						"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--			"security_groups": [
--				{
--					"name": "default"
--				}
--			],
--			"OS-SRV-USG:terminated_at": null,
--			"OS-EXT-AZ:availability_zone": "nova",
--			"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
--			"name": "derp",
--			"created": "2014-09-25T13:04:41Z",
--			"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
--			"OS-DCF:diskConfig": "MANUAL",
--			"os-extended-volumes:volumes_attached": [],
--			"accessIPv4": "",
--			"accessIPv6": "",
--			"progress": 0,
--			"OS-EXT-STS:power_state": 1,
--			"config_drive": "",
--			"metadata": {}
--		}
--	]
--}
--`
--
--// SingleServerBody is the canned body of a Get request on an existing server.
--const SingleServerBody = `
--{
--	"server": {
--		"status": "ACTIVE",
--		"updated": "2014-09-25T13:04:49Z",
--		"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
--		"OS-EXT-SRV-ATTR:host": "devstack",
--		"addresses": {
--			"private": [
--				{
--					"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be",
--					"version": 4,
--					"addr": "10.0.0.31",
--					"OS-EXT-IPS:type": "fixed"
--				}
--			]
--		},
--		"links": [
--			{
--				"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--				"rel": "self"
--			},
--			{
--				"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--				"rel": "bookmark"
--			}
--		],
--		"key_name": null,
--		"image": {
--			"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--			"links": [
--				{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--					"rel": "bookmark"
--				}
--			]
--		},
--		"OS-EXT-STS:task_state": null,
--		"OS-EXT-STS:vm_state": "active",
--		"OS-EXT-SRV-ATTR:instance_name": "instance-0000001d",
--		"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
--		"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
--		"flavor": {
--			"id": "1",
--			"links": [
--				{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
--					"rel": "bookmark"
--				}
--			]
--		},
--		"id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--		"security_groups": [
--			{
--				"name": "default"
--			}
--		],
--		"OS-SRV-USG:terminated_at": null,
--		"OS-EXT-AZ:availability_zone": "nova",
--		"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
--		"name": "derp",
--		"created": "2014-09-25T13:04:41Z",
--		"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
--		"OS-DCF:diskConfig": "MANUAL",
--		"os-extended-volumes:volumes_attached": [],
--		"accessIPv4": "",
--		"accessIPv6": "",
--		"progress": 0,
--		"OS-EXT-STS:power_state": 1,
--		"config_drive": "",
--		"metadata": {}
--	}
--}
--`
--
--var (
--	// ServerHerp is a Server struct that should correspond to the first result in ServerListBody.
--	ServerHerp = Server{
--		Status:  "ACTIVE",
--		Updated: "2014-09-25T13:10:10Z",
--		HostID:  "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
--		Addresses: map[string]interface{}{
--			"private": []interface{}{
--				map[string]interface{}{
--					"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b",
--					"version":                 float64(4),
--					"addr":                    "10.0.0.32",
--					"OS-EXT-IPS:type":         "fixed",
--				},
--			},
--		},
--		Links: []interface{}{
--			map[string]interface{}{
--				"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--				"rel":  "self",
--			},
--			map[string]interface{}{
--				"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--				"rel":  "bookmark",
--			},
--		},
--		Image: map[string]interface{}{
--			"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--			"links": []interface{}{
--				map[string]interface{}{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--					"rel":  "bookmark",
--				},
--			},
--		},
--		Flavor: map[string]interface{}{
--			"id": "1",
--			"links": []interface{}{
--				map[string]interface{}{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
--					"rel":  "bookmark",
--				},
--			},
--		},
--		ID:       "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
--		UserID:   "9349aff8be7545ac9d2f1d00999a23cd",
--		Name:     "herp",
--		Created:  "2014-09-25T13:10:02Z",
--		TenantID: "fcad67a6189847c4aecfa3c81a05783b",
--		Metadata: map[string]interface{}{},
--	}
--
--	// ServerDerp is a Server struct that should correspond to the second server in ServerListBody.
--	ServerDerp = Server{
--		Status:  "ACTIVE",
--		Updated: "2014-09-25T13:04:49Z",
--		HostID:  "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
--		Addresses: map[string]interface{}{
--			"private": []interface{}{
--				map[string]interface{}{
--					"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be",
--					"version":                 float64(4),
--					"addr":                    "10.0.0.31",
--					"OS-EXT-IPS:type":         "fixed",
--				},
--			},
--		},
--		Links: []interface{}{
--			map[string]interface{}{
--				"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--				"rel":  "self",
--			},
--			map[string]interface{}{
--				"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--				"rel":  "bookmark",
--			},
--		},
--		Image: map[string]interface{}{
--			"id": "f90f6034-2570-4974-8351-6b49732ef2eb",
--			"links": []interface{}{
--				map[string]interface{}{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--					"rel":  "bookmark",
--				},
--			},
--		},
--		Flavor: map[string]interface{}{
--			"id": "1",
--			"links": []interface{}{
--				map[string]interface{}{
--					"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
--					"rel":  "bookmark",
--				},
--			},
--		},
--		ID:       "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
--		UserID:   "9349aff8be7545ac9d2f1d00999a23cd",
--		Name:     "derp",
--		Created:  "2014-09-25T13:04:41Z",
--		TenantID: "fcad67a6189847c4aecfa3c81a05783b",
--		Metadata: map[string]interface{}{},
--	}
--)
--
--// HandleServerCreationSuccessfully sets up the test server to respond to a server creation request
--// with a given response.
--func HandleServerCreationSuccessfully(t *testing.T, response string) {
--	th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{
--			"server": {
--				"name": "derp",
--				"imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb",
--				"flavorRef": "1"
--			}
--		}`)
--
--		w.WriteHeader(http.StatusAccepted)
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, response)
--	})
--}
--
--// HandleServerListSuccessfully sets up the test server to respond to a server List request.
--func HandleServerListSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, ServerListBody)
--		case "9e5476bd-a4ec-4653-93d6-72c93aa682ba":
--			fmt.Fprintf(w, `{ "servers": [] }`)
--		default:
--			t.Fatalf("/servers/detail invoked with unexpected marker=[%s]", marker)
--		}
--	})
--}
--
--// HandleServerDeletionSuccessfully sets up the test server to respond to a server deletion request.
--func HandleServerDeletionSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/asdfasdfasdf", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleServerGetSuccessfully sets up the test server to respond to a server Get request.
--func HandleServerGetSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--
--		fmt.Fprintf(w, SingleServerBody)
--	})
--}
--
--// HandleServerUpdateSuccessfully sets up the test server to respond to a server Update request.
--func HandleServerUpdateSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestJSONRequest(t, r, `{ "server": { "name": "new-name" } }`)
--
--		fmt.Fprintf(w, SingleServerBody)
--	})
--}
--
--// HandleAdminPasswordChangeSuccessfully sets up the test server to respond to a server password
--// change request.
--func HandleAdminPasswordChangeSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "changePassword": { "adminPass": "new-password" } }`)
--
--		w.WriteHeader(http.StatusAccepted)
--	})
--}
--
--// HandleRebootSuccessfully sets up the test server to respond to a reboot request with success.
--func HandleRebootSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "reboot": { "type": "SOFT" } }`)
--
--		w.WriteHeader(http.StatusAccepted)
--	})
--}
--
--// HandleRebuildSuccessfully sets up the test server to respond to a rebuild request with success.
--func HandleRebuildSuccessfully(t *testing.T, response string) {
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `
--			{
--				"rebuild": {
--					"name": "new-name",
--					"adminPass": "swordfish",
--					"imageRef": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--					"accessIPv4": "1.2.3.4"
--				}
--			}
--		`)
--
--		w.WriteHeader(http.StatusAccepted)
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, response)
--	})
--}
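A minimal sketch tying the server fixtures above to the package API, assuming an authenticated compute ServiceClient; Create, its Extract, and ExtractServers follow the same Result/pagination pattern as the other packages, but those definitions fall outside this hunk, so treat the exact calls as assumptions:

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
	"github.com/rackspace/gophercloud/pagination"
)

// BootAndList boots one server (mirroring the request body expected by
// HandleServerCreationSuccessfully above) and then lists ACTIVE servers.
// client is an assumed, already-authenticated compute client.
func BootAndList(client *gophercloud.ServiceClient) error {
	_, err := servers.Create(client, servers.CreateOpts{
		Name:      "derp",
		ImageRef:  "f90f6034-2570-4974-8351-6b49732ef2eb",
		FlavorRef: "1",
	}).Extract()
	if err != nil {
		return err
	}

	opts := servers.ListOpts{Status: "ACTIVE"}
	return servers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {
		srvs, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for _, s := range srvs {
			fmt.Println(s.ID, s.Name, s.Status)
		}
		return true, nil
	})
}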
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go
-deleted file mode 100644
-index 95a4188..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go
-+++ /dev/null
-@@ -1,538 +0,0 @@
--package servers
--
--import (
--	"encoding/base64"
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToServerListQuery() (string, error)
--}
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the server attributes you want to see returned. Marker and Limit are used
--// for pagination.
--type ListOpts struct {
--	// A time/date stamp for when the server last changed status.
--	ChangesSince string `q:"changes-since"`
--
--	// Name of the image in URL format.
--	Image string `q:"image"`
--
--	// Name of the flavor in URL format.
--	Flavor string `q:"flavor"`
--
--	// Name of the server as a string; can be queried with regular expressions.
--	// Realize that ?name=bob returns both bob and bobb. If you need to match bob
--	// only, you can use a regular expression matching the syntax of the
--	// underlying database server implemented for Compute.
--	Name string `q:"name"`
--
--	// Value of the status of the server so that you can filter on "ACTIVE" for example.
--	Status string `q:"status"`
--
--	// Name of the host as a string.
--	Host string `q:"host"`
--
--	// UUID of the server at which you want to set a marker.
--	Marker string `q:"marker"`
--
--	// Integer value for the limit of values to return.
--	Limit int `q:"limit"`
--}
--
--// ToServerListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToServerListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List makes a request against the API to list servers accessible to you.
--func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listDetailURL(client)
--
--	if opts != nil {
--		query, err := opts.ToServerListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	createPageFn := func(r pagination.PageResult) pagination.Page {
--		return ServerPage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	return pagination.NewPager(client, url, createPageFn)
--}
--
--// CreateOptsBuilder describes struct types that can be accepted by the Create call.
--// The CreateOpts struct in this package does.
--type CreateOptsBuilder interface {
--	ToServerCreateMap() (map[string]interface{}, error)
--}
--
--// Network is used within CreateOpts to control a new server's network attachments.
--type Network struct {
--	// UUID of a nova-network to attach to the newly provisioned server.
--	// Required unless Port is provided.
--	UUID string
--
--	// Port of a neutron network to attach to the newly provisioned server.
--	// Required unless UUID is provided.
--	Port string
--
--	// FixedIP [optional] specifies a fixed IPv4 address to be used on this network.
--	FixedIP string
--}
--
--// CreateOpts specifies server creation parameters.
--type CreateOpts struct {
--	// Name [required] is the name to assign to the newly launched server.
--	Name string
--
--	// ImageRef [required] is the ID or full URL to the image that contains the server's OS and initial state.
--	// Optional if using the boot-from-volume extension.
--	ImageRef string
--
--	// FlavorRef [required] is the ID or full URL to the flavor that describes the server's specs.
--	FlavorRef string
--
--	// SecurityGroups [optional] lists the names of the security groups to which this server should belong.
--	SecurityGroups []string
--
--	// UserData [optional] contains configuration information or scripts to use upon launch.
--	// Create will base64-encode it for you.
--	UserData []byte
--
--	// AvailabilityZone [optional] in which to launch the server.
--	AvailabilityZone string
--
--	// Networks [optional] dictates how this server will be attached to available networks.
--	// By default, the server will be attached to all isolated networks for the tenant.
--	Networks []Network
--
--	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
--	Metadata map[string]string
--
--	// Personality [optional] includes the path and contents of a file to inject into the server at launch.
--	// The maximum size of the file is 255 bytes (decoded).
--	Personality []byte
--
--	// ConfigDrive [optional] enables metadata injection through a configuration drive.
--	ConfigDrive bool
--}
--
--// ToServerCreateMap assembles a request body based on the contents of a CreateOpts.
--func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
--	server := make(map[string]interface{})
--
--	server["name"] = opts.Name
--	server["imageRef"] = opts.ImageRef
--	server["flavorRef"] = opts.FlavorRef
--
--	if opts.UserData != nil {
--		encoded := base64.StdEncoding.EncodeToString(opts.UserData)
--		server["user_data"] = &encoded
--	}
--	if opts.Personality != nil {
--		encoded := base64.StdEncoding.EncodeToString(opts.Personality)
--		server["personality"] = &encoded
--	}
--	if opts.ConfigDrive {
--		server["config_drive"] = "true"
--	}
--	if opts.AvailabilityZone != "" {
--		server["availability_zone"] = opts.AvailabilityZone
--	}
--	if opts.Metadata != nil {
--		server["metadata"] = opts.Metadata
--	}
--
--	if len(opts.SecurityGroups) > 0 {
--		securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups))
--		for i, groupName := range opts.SecurityGroups {
--			securityGroups[i] = map[string]interface{}{"name": groupName}
--		}
--		// Attach the assembled list to the request body; otherwise the
--		// security groups are built but silently dropped.
--		server["security_groups"] = securityGroups
--	}
--
--	if len(opts.Networks) > 0 {
--		networks := make([]map[string]interface{}, len(opts.Networks))
--		for i, net := range opts.Networks {
--			networks[i] = make(map[string]interface{})
--			if net.UUID != "" {
--				networks[i]["uuid"] = net.UUID
--			}
--			if net.Port != "" {
--				networks[i]["port"] = net.Port
--			}
--			if net.FixedIP != "" {
--				networks[i]["fixed_ip"] = net.FixedIP
--			}
--		}
--		server["networks"] = networks
--	}
--
--	return map[string]interface{}{"server": server}, nil
--}
--
--// Create requests a server to be provisioned to the user in the current tenant.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToServerCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", listURL(client), perigee.Options{
--		Results:     &res.Body,
--		ReqBody:     reqBody,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--	return res
--}
--
--// Delete requests that a server previously provisioned be removed from your account.
--func Delete(client *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(client, id), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
--
--// Get requests details on a single server, by ID.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	var result GetResult
--	_, result.Err = perigee.Request("GET", getURL(client, id), perigee.Options{
--		Results:     &result.Body,
--		MoreHeaders: client.AuthenticatedHeaders(),
--	})
--	return result
--}
--
--// UpdateOptsBuilder allows extensions to add additional attributes to the Update request.
--type UpdateOptsBuilder interface {
--	ToServerUpdateMap() map[string]interface{}
--}
--
--// UpdateOpts specifies the base attributes that may be updated on an existing server.
--type UpdateOpts struct {
--	// Name [optional] changes the displayed name of the server.
--	// The server host name will *not* change.
--	// Server names are not constrained to be unique, even within the same tenant.
--	Name string
--
--	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
--	AccessIPv4 string
--
--	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
--	AccessIPv6 string
--}
--
--// ToServerUpdateMap formats an UpdateOpts structure into a request body.
--func (opts UpdateOpts) ToServerUpdateMap() map[string]interface{} {
--	server := make(map[string]string)
--	if opts.Name != "" {
--		server["name"] = opts.Name
--	}
--	if opts.AccessIPv4 != "" {
--		server["accessIPv4"] = opts.AccessIPv4
--	}
--	if opts.AccessIPv6 != "" {
--		server["accessIPv6"] = opts.AccessIPv6
--	}
--	return map[string]interface{}{"server": server}
--}
--
--// Update requests that various attributes of the indicated server be changed.
--func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
--	var result UpdateResult
--	_, result.Err = perigee.Request("PUT", updateURL(client, id), perigee.Options{
--		Results:     &result.Body,
--		ReqBody:     opts.ToServerUpdateMap(),
--		MoreHeaders: client.AuthenticatedHeaders(),
--	})
--	return result
--}
--
--// ChangeAdminPassword alters the administrator or root password for a specified server.
--func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) ActionResult {
--	var req struct {
--		ChangePassword struct {
--			AdminPass string `json:"adminPass"`
--		} `json:"changePassword"`
--	}
--
--	req.ChangePassword.AdminPass = newPassword
--
--	var res ActionResult
--
--	_, res.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody:     req,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--
--	return res
--}
--
--// ErrArgument errors occur when an argument supplied to a package function
--// fails to fall within acceptable values.  For example, the Reboot() function
--// expects the "how" parameter to be one of HardReboot or SoftReboot.  These
--// constants are (currently) strings, leading someone to wonder if they can pass
--// other string values instead, perhaps in an effort to break the API of their
--// provider.  Reboot() returns this error in this situation.
--//
--// Function identifies which function was called/which function is generating
--// the error.
--// Argument identifies which formal argument was responsible for producing the
--// error.
--// Value provides the value as it was passed into the function.
--type ErrArgument struct {
--	Function, Argument string
--	Value              interface{}
--}
--
--// Error yields a useful diagnostic for debugging purposes.
--func (e *ErrArgument) Error() string {
--	return fmt.Sprintf("Bad argument in call to %s, formal parameter %s, value %#v", e.Function, e.Argument, e.Value)
--}
--
--func (e *ErrArgument) String() string {
--	return e.Error()
--}
--
--// RebootMethod describes the mechanisms by which a server reboot can be requested.
--type RebootMethod string
--
--// These constants determine how a server should be rebooted.
--// See the Reboot() function for further details.
--const (
--	SoftReboot RebootMethod = "SOFT"
--	HardReboot RebootMethod = "HARD"
--	OSReboot                = SoftReboot
--	PowerCycle              = HardReboot
--)
--
--// Reboot requests that a given server reboot.
--// Two methods exist for rebooting a server:
--//
--// HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the machine, or if a VM,
--// terminating it at the hypervisor level.
--// It's done. Kaput. Full stop.
--// Then, after a brief while, power is restored or the VM instance restarted.
--//
--// SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures.
--// E.g., in Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine.
--func Reboot(client *gophercloud.ServiceClient, id string, how RebootMethod) ActionResult {
--	var res ActionResult
--
--	if (how != SoftReboot) && (how != HardReboot) {
--		res.Err = &ErrArgument{
--			Function: "Reboot",
--			Argument: "how",
--			Value:    how,
--		}
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody: struct {
--			C map[string]string `json:"reboot"`
--		}{
--			map[string]string{"type": string(how)},
--		},
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--
--	return res
--}
--
--// RebuildOptsBuilder is an interface that allows extensions to override the
--// default behaviour of rebuild options
--type RebuildOptsBuilder interface {
--	ToServerRebuildMap() (map[string]interface{}, error)
--}
--
--// RebuildOpts represents the configuration options used in a server rebuild
--// operation
--type RebuildOpts struct {
--	// Required. The ID of the image you want your server to be provisioned on
--	ImageID string
--
--	// Name to set the server to
--	Name string
--
--	// Required. The server's admin password
--	AdminPass string
--
--	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
--	AccessIPv4 string
--
--	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
--	AccessIPv6 string
--
--	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
--	Metadata map[string]string
--
--	// Personality [optional] includes the path and contents of a file to inject into the server at launch.
--	// The maximum size of the file is 255 bytes (decoded).
--	Personality []byte
--}
--
--// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON
--func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
--	var err error
--	server := make(map[string]interface{})
--
--	if opts.AdminPass == "" {
--		err = fmt.Errorf("AdminPass is required")
--	}
--
--	if opts.ImageID == "" {
--		err = fmt.Errorf("ImageID is required")
--	}
--
--	if err != nil {
--		return server, err
--	}
--
--	server["name"] = opts.Name
--	server["adminPass"] = opts.AdminPass
--	server["imageRef"] = opts.ImageID
--
--	if opts.AccessIPv4 != "" {
--		server["accessIPv4"] = opts.AccessIPv4
--	}
--
--	if opts.AccessIPv6 != "" {
--		server["accessIPv6"] = opts.AccessIPv6
--	}
--
--	if opts.Metadata != nil {
--		server["metadata"] = opts.Metadata
--	}
--
--	if opts.Personality != nil {
--		encoded := base64.StdEncoding.EncodeToString(opts.Personality)
--		server["personality"] = &encoded
--	}
--
--	return map[string]interface{}{"rebuild": server}, nil
--}
--
--// Rebuild will reprovision the server according to the configuration options
--// provided in the RebuildOpts struct.
--func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) RebuildResult {
--	var result RebuildResult
--
--	if id == "" {
--		result.Err = fmt.Errorf("ID is required")
--		return result
--	}
--
--	reqBody, err := opts.ToServerRebuildMap()
--	if err != nil {
--		result.Err = err
--		return result
--	}
--
--	_, result.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody:     &reqBody,
--		Results:     &result.Body,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--
--	return result
--}
--
--// ResizeOptsBuilder is an interface that allows extensions to override the default structure of
--// a Resize request.
--type ResizeOptsBuilder interface {
--	ToServerResizeMap() (map[string]interface{}, error)
--}
--
--// ResizeOpts represents the configuration options used to control a Resize operation.
--type ResizeOpts struct {
--	// FlavorRef is the ID of the flavor you wish your server to become.
--	FlavorRef string
--}
--
--// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON request body to the
--// Resize request.
--func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) {
--	resize := map[string]interface{}{
--		"flavorRef": opts.FlavorRef,
--	}
--
--	return map[string]interface{}{"resize": resize}, nil
--}
--
--// Resize instructs the provider to change the flavor of the server.
--// Note that this implies rebuilding it.
--// Unfortunately, one cannot pass rebuild parameters to the resize function.
--// When the resize completes, the server will be in RESIZE_VERIFY state.
--// While in this state, you can explore the use of the new server's configuration.
--// If you like it, call ConfirmResize() to commit the resize permanently.
--// Otherwise, call RevertResize() to restore the old configuration.
--func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) ActionResult {
--	var res ActionResult
--	reqBody, err := opts.ToServerResizeMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody:     reqBody,
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--
--	return res
--}
--
--// ConfirmResize confirms a previous resize operation on a server.
--// See Resize() for more details.
--func ConfirmResize(client *gophercloud.ServiceClient, id string) ActionResult {
--	var res ActionResult
--
--	_, res.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody:     map[string]interface{}{"confirmResize": nil},
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--
--	return res
--}
--
--// RevertResize cancels a previous resize operation on a server.
--// See Resize() for more details.
--func RevertResize(client *gophercloud.ServiceClient, id string) ActionResult {
--	var res ActionResult
--
--	_, res.Err = perigee.Request("POST", actionURL(client, id), perigee.Options{
--		ReqBody:     map[string]interface{}{"revertResize": nil},
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{202},
--	})
--
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go
-deleted file mode 100644
-index 392e2d8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go
-+++ /dev/null
-@@ -1,176 +0,0 @@
--package servers
--
--import (
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListServers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleServerListSuccessfully(t)
--
--	pages := 0
--	err := List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		pages++
--
--		actual, err := ExtractServers(page)
--		if err != nil {
--			return false, err
--		}
--
--		if len(actual) != 2 {
--			t.Fatalf("Expected 2 servers, got %d", len(actual))
--		}
--		th.CheckDeepEquals(t, ServerHerp, actual[0])
--		th.CheckDeepEquals(t, ServerDerp, actual[1])
--
--		return true, nil
--	})
--
--	th.AssertNoErr(t, err)
--
--	if pages != 1 {
--		t.Errorf("Expected 1 page, saw %d", pages)
--	}
--}
--
--func TestCreateServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleServerCreationSuccessfully(t, SingleServerBody)
--
--	actual, err := Create(client.ServiceClient(), CreateOpts{
--		Name:      "derp",
--		ImageRef:  "f90f6034-2570-4974-8351-6b49732ef2eb",
--		FlavorRef: "1",
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	th.CheckDeepEquals(t, ServerDerp, *actual)
--}
--
--func TestDeleteServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleServerDeletionSuccessfully(t)
--
--	res := Delete(client.ServiceClient(), "asdfasdfasdf")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestGetServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleServerGetSuccessfully(t)
--
--	client := client.ServiceClient()
--	actual, err := Get(client, "1234asdf").Extract()
--	if err != nil {
--		t.Fatalf("Unexpected Get error: %v", err)
--	}
--
--	th.CheckDeepEquals(t, ServerDerp, *actual)
--}
--
--func TestUpdateServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleServerUpdateSuccessfully(t)
--
--	client := client.ServiceClient()
--	actual, err := Update(client, "1234asdf", UpdateOpts{Name: "new-name"}).Extract()
--	if err != nil {
--		t.Fatalf("Unexpected Update error: %v", err)
--	}
--
--	th.CheckDeepEquals(t, ServerDerp, *actual)
--}
--
--func TestChangeServerAdminPassword(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleAdminPasswordChangeSuccessfully(t)
--
--	res := ChangeAdminPassword(client.ServiceClient(), "1234asdf", "new-password")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestRebootServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleRebootSuccessfully(t)
--
--	res := Reboot(client.ServiceClient(), "1234asdf", SoftReboot)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestRebuildServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleRebuildSuccessfully(t, SingleServerBody)
--
--	opts := RebuildOpts{
--		Name:       "new-name",
--		AdminPass:  "swordfish",
--		ImageID:    "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--		AccessIPv4: "1.2.3.4",
--	}
--
--	actual, err := Rebuild(client.ServiceClient(), "1234asdf", opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.CheckDeepEquals(t, ServerDerp, *actual)
--}
--
--func TestResizeServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "resize": { "flavorRef": "2" } }`)
--
--		w.WriteHeader(http.StatusAccepted)
--	})
--
--	res := Resize(client.ServiceClient(), "1234asdf", ResizeOpts{FlavorRef: "2"})
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestConfirmResize(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "confirmResize": null }`)
--
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := ConfirmResize(client.ServiceClient(), "1234asdf")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestRevertResize(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		th.TestJSONRequest(t, r, `{ "revertResize": null }`)
--
--		w.WriteHeader(http.StatusAccepted)
--	})
--
--	res := RevertResize(client.ServiceClient(), "1234asdf")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go
-deleted file mode 100644
-index 53946ba..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go
-+++ /dev/null
-@@ -1,150 +0,0 @@
--package servers
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type serverResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets any serverResult as a Server, if possible.
--func (r serverResult) Extract() (*Server, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var response struct {
--		Server Server `mapstructure:"server"`
--	}
--
--	err := mapstructure.Decode(r.Body, &response)
--	return &response.Server, err
--}
--
--// CreateResult temporarily contains the response from a Create call.
--type CreateResult struct {
--	serverResult
--}
--
--// GetResult temporarily contains the response from a Get call.
--type GetResult struct {
--	serverResult
--}
--
--// UpdateResult temporarily contains the response from an Update call.
--type UpdateResult struct {
--	serverResult
--}
--
--// DeleteResult temporarily contains the response from a Delete call.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// RebuildResult temporarily contains the response from a Rebuild call.
--type RebuildResult struct {
--	serverResult
--}
--
--// ActionResult represents the result of server action operations, like reboot
--type ActionResult struct {
--	gophercloud.ErrResult
--}
--
--// Server exposes only the standard OpenStack fields corresponding to a given server on the user's account.
--type Server struct {
--	// ID uniquely identifies this server amongst all other servers, including those not accessible to the current tenant.
--	ID string
--
--	// TenantID identifies the tenant owning this server resource.
--	TenantID string `mapstructure:"tenant_id"`
--
--	// UserID uniquely identifies the user account owning the tenant.
--	UserID string `mapstructure:"user_id"`
--
--	// Name contains the human-readable name for the server.
--	Name string
--
--	// Updated and Created contain ISO-8601 timestamps of when the state of the server last changed, and when it was created.
--	Updated string
--	Created string
--
--	HostID string
--
--	// Status contains the current operational status of the server, such as IN_PROGRESS or ACTIVE.
--	Status string
--
--	// Progress ranges from 0..100.
--	// A request made against the server completes only once Progress reaches 100.
--	Progress int
--
--	// AccessIPv4 and AccessIPv6 contain the IP addresses of the server, suitable for remote access for administration.
--	AccessIPv4, AccessIPv6 string
--
--	// Image refers to a JSON object, which itself indicates the OS image used to deploy the server.
--	Image map[string]interface{}
--
--	// Flavor refers to a JSON object, which itself indicates the hardware configuration of the deployed server.
--	Flavor map[string]interface{}
--
--	// Addresses includes a list of all IP addresses assigned to the server, keyed by pool.
--	Addresses map[string]interface{}
--
--	// Metadata includes a list of all user-specified key-value pairs attached to the server.
--	Metadata map[string]interface{}
--
--	// Links includes HTTP references to the server itself, useful for passing along to other APIs that might want a server reference.
--	Links []interface{}
--
--	// KeyName indicates which public key was injected into the server on launch.
--	KeyName string `json:"key_name" mapstructure:"key_name"`
--
--	// AdminPass will generally be empty ("").  However, it will contain the administrative password chosen when a new server is provisioned without an explicit AdminPass setting.
--	// Note that this is the ONLY time this field will be valid.
--	AdminPass string `json:"adminPass" mapstructure:"adminPass"`
--}
--
--// ServerPage abstracts the raw results of making a List() request against the API.
--// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the
--// data provided through the ExtractServers call.
--type ServerPage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty returns true if a page contains no Server results.
--func (page ServerPage) IsEmpty() (bool, error) {
--	servers, err := ExtractServers(page)
--	if err != nil {
--		return true, err
--	}
--	return len(servers) == 0, nil
--}
--
--// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
--func (page ServerPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"servers_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(page.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities.
--func ExtractServers(page pagination.Page) ([]Server, error) {
--	casted := page.(ServerPage).Body
--
--	var response struct {
--		Servers []Server `mapstructure:"servers"`
--	}
--	err := mapstructure.Decode(casted, &response)
--	return response.Servers, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go
-deleted file mode 100644
-index 57587ab..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go
-+++ /dev/null
-@@ -1,31 +0,0 @@
--package servers
--
--import "github.com/rackspace/gophercloud"
--
--func createURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("servers")
--}
--
--func listURL(client *gophercloud.ServiceClient) string {
--	return createURL(client)
--}
--
--func listDetailURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("servers", "detail")
--}
--
--func deleteURL(client *gophercloud.ServiceClient, id string) string {
--	return client.ServiceURL("servers", id)
--}
--
--func getURL(client *gophercloud.ServiceClient, id string) string {
--	return deleteURL(client, id)
--}
--
--func updateURL(client *gophercloud.ServiceClient, id string) string {
--	return deleteURL(client, id)
--}
--
--func actionURL(client *gophercloud.ServiceClient, id string) string {
--	return client.ServiceURL("servers", id, "action")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go
-deleted file mode 100644
-index cc895c9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls_test.go
-+++ /dev/null
-@@ -1,56 +0,0 @@
--package servers
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "servers"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "servers"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestListDetailURL(t *testing.T) {
--	actual := listDetailURL(endpointClient())
--	expected := endpoint + "servers/detail"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "servers/foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "servers/foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "servers/foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestActionURL(t *testing.T) {
--	actual := actionURL(endpointClient(), "foo")
--	expected := endpoint + "servers/foo/action"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go
-deleted file mode 100644
-index e6baf74..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--package servers
--
--import "github.com/rackspace/gophercloud"
--
--// WaitForStatus will continually poll a server until it successfully transitions to a specified
--// status. It will do this for at most the number of seconds specified.
--func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
--	return gophercloud.WaitFor(secs, func() (bool, error) {
--		current, err := Get(c, id).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		if current.Status == status {
--			return true, nil
--		}
--
--		return false, nil
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util_test.go
-deleted file mode 100644
-index e192ae3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package servers
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--	"time"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestWaitForStatus(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/4321", func(w http.ResponseWriter, r *http.Request) {
--		time.Sleep(2 * time.Second)
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--		{
--			"server": {
--				"name": "the-server",
--				"id": "4321",
--				"status": "ACTIVE"
--			}
--		}`)
--	})
--
--	err := WaitForStatus(client.ServiceClient(), "4321", "ACTIVE", 0)
--	if err == nil {
--		t.Errorf("Expected error: 'Time Out in WaitFor'")
--	}
--
--	err = WaitForStatus(client.ServiceClient(), "4321", "ACTIVE", 3)
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go
-deleted file mode 100644
-index 5a311e4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go
-+++ /dev/null
-@@ -1,124 +0,0 @@
--package openstack
--
--import (
--	"fmt"
--
--	"github.com/rackspace/gophercloud"
--	tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--	endpoints3 "github.com/rackspace/gophercloud/openstack/identity/v3/endpoints"
--	services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired
--// during the v2 identity service. The specified EndpointOpts are used to identify a unique,
--// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
--// criteria and when none do. The minimum that can be specified is a Type, but you will also often
--// need to specify a Name and/or a Region depending on what's available on your OpenStack
--// deployment.
--func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
--	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
--	var endpoints = make([]tokens2.Endpoint, 0, 1)
--	for _, entry := range catalog.Entries {
--		if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
--			for _, endpoint := range entry.Endpoints {
--				if opts.Region == "" || endpoint.Region == opts.Region {
--					endpoints = append(endpoints, endpoint)
--				}
--			}
--		}
--	}
--
--	// Report an error if the options were ambiguous.
--	if len(endpoints) > 1 {
--		return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints)
--	}
--
--	// Extract the appropriate URL from the matching Endpoint.
--	for _, endpoint := range endpoints {
--		switch opts.Availability {
--		case gophercloud.AvailabilityPublic:
--			return gophercloud.NormalizeURL(endpoint.PublicURL), nil
--		case gophercloud.AvailabilityInternal:
--			return gophercloud.NormalizeURL(endpoint.InternalURL), nil
--		case gophercloud.AvailabilityAdmin:
--			return gophercloud.NormalizeURL(endpoint.AdminURL), nil
--		default:
--			return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability)
--		}
--	}
--
--	// Report an error if there were no matching endpoints.
--	return "", gophercloud.ErrEndpointNotFound
--}
--
--// V3EndpointURL discovers the endpoint URL for a specific service using multiple calls against
--// an identity v3 service endpoint. The specified EndpointOpts are used to identify a unique,
--// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided
--// criteria and when none do. The minimum that can be specified is a Type, but you will also often
--// need to specify a Name and/or a Region depending on what's available on your OpenStack
--// deployment.
--func V3EndpointURL(v3Client *gophercloud.ServiceClient, opts gophercloud.EndpointOpts) (string, error) {
--	// Discover the service we're interested in.
--	var services = make([]services3.Service, 0, 1)
--	servicePager := services3.List(v3Client, services3.ListOpts{ServiceType: opts.Type})
--	err := servicePager.EachPage(func(page pagination.Page) (bool, error) {
--		part, err := services3.ExtractServices(page)
--		if err != nil {
--			return false, err
--		}
--
--		for _, service := range part {
--			if service.Name == opts.Name {
--				services = append(services, service)
--			}
--		}
--
--		return true, nil
--	})
--	if err != nil {
--		return "", err
--	}
--
--	if len(services) == 0 {
--		return "", gophercloud.ErrServiceNotFound
--	}
--	if len(services) > 1 {
--		return "", fmt.Errorf("Discovered %d matching services: %#v", len(services), services)
--	}
--	service := services[0]
--
--	// Enumerate the endpoints available for this service.
--	var endpoints []endpoints3.Endpoint
--	endpointPager := endpoints3.List(v3Client, endpoints3.ListOpts{
--		Availability: opts.Availability,
--		ServiceID:    service.ID,
--	})
--	err = endpointPager.EachPage(func(page pagination.Page) (bool, error) {
--		part, err := endpoints3.ExtractEndpoints(page)
--		if err != nil {
--			return false, err
--		}
--
--		for _, endpoint := range part {
--			if opts.Region == "" || endpoint.Region == opts.Region {
--				endpoints = append(endpoints, endpoint)
--			}
--		}
--
--		return true, nil
--	})
--	if err != nil {
--		return "", err
--	}
--
--	if len(endpoints) == 0 {
--		return "", gophercloud.ErrEndpointNotFound
--	}
--	if len(endpoints) > 1 {
--		return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints)
--	}
--	endpoint := endpoints[0]
--
--	return gophercloud.NormalizeURL(endpoint.URL), nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go
-deleted file mode 100644
-index 4e0569a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go
-+++ /dev/null
-@@ -1,225 +0,0 @@
--package openstack
--
--import (
--	"fmt"
--	"net/http"
--	"strings"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// Service catalog fixtures take too much vertical space!
--var catalog2 = tokens2.ServiceCatalog{
--	Entries: []tokens2.CatalogEntry{
--		tokens2.CatalogEntry{
--			Type: "same",
--			Name: "same",
--			Endpoints: []tokens2.Endpoint{
--				tokens2.Endpoint{
--					Region:      "same",
--					PublicURL:   "https://public.correct.com/",
--					InternalURL: "https://internal.correct.com/",
--					AdminURL:    "https://admin.correct.com/",
--				},
--				tokens2.Endpoint{
--					Region:    "different",
--					PublicURL: "https://badregion.com/",
--				},
--			},
--		},
--		tokens2.CatalogEntry{
--			Type: "same",
--			Name: "different",
--			Endpoints: []tokens2.Endpoint{
--				tokens2.Endpoint{
--					Region:    "same",
--					PublicURL: "https://badname.com/",
--				},
--				tokens2.Endpoint{
--					Region:    "different",
--					PublicURL: "https://badname.com/+badregion",
--				},
--			},
--		},
--		tokens2.CatalogEntry{
--			Type: "different",
--			Name: "different",
--			Endpoints: []tokens2.Endpoint{
--				tokens2.Endpoint{
--					Region:    "same",
--					PublicURL: "https://badtype.com/+badname",
--				},
--				tokens2.Endpoint{
--					Region:    "different",
--					PublicURL: "https://badtype.com/+badregion+badname",
--				},
--			},
--		},
--	},
--}
--
--func TestV2EndpointExact(t *testing.T) {
--	expectedURLs := map[gophercloud.Availability]string{
--		gophercloud.AvailabilityPublic:   "https://public.correct.com/",
--		gophercloud.AvailabilityAdmin:    "https://admin.correct.com/",
--		gophercloud.AvailabilityInternal: "https://internal.correct.com/",
--	}
--
--	for availability, expected := range expectedURLs {
--		actual, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{
--			Type:         "same",
--			Name:         "same",
--			Region:       "same",
--			Availability: availability,
--		})
--		th.AssertNoErr(t, err)
--		th.CheckEquals(t, expected, actual)
--	}
--}
--
--func TestV2EndpointNone(t *testing.T) {
--	_, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{
--		Type:         "nope",
--		Availability: gophercloud.AvailabilityPublic,
--	})
--	th.CheckEquals(t, gophercloud.ErrEndpointNotFound, err)
--}
--
--func TestV2EndpointMultiple(t *testing.T) {
--	_, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{
--		Type:         "same",
--		Region:       "same",
--		Availability: gophercloud.AvailabilityPublic,
--	})
--	if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") {
--		t.Errorf("Received unexpected error: %v", err)
--	}
--}
--
--func TestV2EndpointBadAvailability(t *testing.T) {
--	_, err := V2EndpointURL(&catalog2, gophercloud.EndpointOpts{
--		Type:         "same",
--		Name:         "same",
--		Region:       "same",
--		Availability: "wat",
--	})
--	th.CheckEquals(t, err.Error(), "Unexpected availability in endpoint query: wat")
--}
--
--func setupV3Responses(t *testing.T) {
--	// Mock the service query.
--	th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"links": {
--					"next": null,
--					"previous": null
--				},
--				"services": [
--					{
--						"description": "Correct",
--						"id": "1234",
--						"name": "same",
--						"type": "same"
--					},
--					{
--						"description": "Bad Name",
--						"id": "9876",
--						"name": "different",
--						"type": "same"
--					}
--				]
--			}
--		`)
--	})
--
--	// Mock the endpoint query.
--	th.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestFormValues(t, r, map[string]string{
--			"service_id": "1234",
--			"interface":  "public",
--		})
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"endpoints": [
--					{
--						"id": "12",
--						"interface": "public",
--						"name": "the-right-one",
--						"region": "same",
--						"service_id": "1234",
--						"url": "https://correct:9000/"
--					},
--					{
--						"id": "14",
--						"interface": "public",
--						"name": "bad-region",
--						"region": "different",
--						"service_id": "1234",
--						"url": "https://bad-region:9001/"
--					}
--				],
--				"links": {
--					"next": null,
--					"previous": null
--				}
--			}
--    `)
--	})
--}
--
--func TestV3EndpointExact(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	setupV3Responses(t)
--
--	actual, err := V3EndpointURL(fake.ServiceClient(), gophercloud.EndpointOpts{
--		Type:         "same",
--		Name:         "same",
--		Region:       "same",
--		Availability: gophercloud.AvailabilityPublic,
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, actual, "https://correct:9000/")
--}
--
--func TestV3EndpointNoService(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--      {
--        "links": {
--          "next": null,
--          "previous": null
--        },
--        "services": []
--      }
--    `)
--	})
--
--	_, err := V3EndpointURL(fake.ServiceClient(), gophercloud.EndpointOpts{
--		Type:         "nope",
--		Name:         "same",
--		Region:       "same",
--		Availability: gophercloud.AvailabilityPublic,
--	})
--	th.CheckEquals(t, gophercloud.ErrServiceNotFound, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go
-deleted file mode 100644
-index fd6e80e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate.go
-+++ /dev/null
-@@ -1,52 +0,0 @@
--package extensions
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtensionPage is a single page of Extension results.
--type ExtensionPage struct {
--	common.ExtensionPage
--}
--
--// IsEmpty returns true if the current page contains no Extensions.
--func (page ExtensionPage) IsEmpty() (bool, error) {
--	is, err := ExtractExtensions(page)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the
--// elements into a slice of Extension structs.
--func ExtractExtensions(page pagination.Page) ([]common.Extension, error) {
--	// Identity v2 adds an intermediate "values" object.
--
--	var resp struct {
--		Extensions struct {
--			Values []common.Extension `mapstructure:"values"`
--		} `mapstructure:"extensions"`
--	}
--
--	err := mapstructure.Decode(page.(ExtensionPage).Body, &resp)
--	return resp.Extensions.Values, err
--}
--
--// Get retrieves information for a specific extension using its alias.
--func Get(c *gophercloud.ServiceClient, alias string) common.GetResult {
--	return common.Get(c, alias)
--}
--
--// List returns a Pager which allows you to iterate over the full collection of extensions.
--// It does not accept query parameters.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return common.List(c).WithPageCreator(func(r pagination.PageResult) pagination.Page {
--		return ExtensionPage{
--			ExtensionPage: common.ExtensionPage{SinglePageBase: pagination.SinglePageBase(r)},
--		}
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go
-deleted file mode 100644
-index 504118a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/delegate_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package extensions
--
--import (
--	"testing"
--
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListExtensionsSuccessfully(t)
--
--	count := 0
--	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, common.ExpectedExtensions, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	common.HandleGetExtensionSuccessfully(t)
--
--	actual, err := Get(client.ServiceClient(), "agent").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, common.SingleExtension, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go
-deleted file mode 100644
-index 791e4e3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package extensions provides information and interaction with the
--// different extensions available for the OpenStack Identity service.
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go
-deleted file mode 100644
-index 96cb7d2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/fixtures.go
-+++ /dev/null
-@@ -1,60 +0,0 @@
--// +build fixtures
--
--package extensions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ListOutput provides a single Extension result. It differs from the delegated implementation
--// by the introduction of an intermediate "values" member.
--const ListOutput = `
--{
--	"extensions": {
--		"values": [
--			{
--				"updated": "2013-01-20T00:00:00-00:00",
--				"name": "Neutron Service Type Management",
--				"links": [],
--				"namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--				"alias": "service-type",
--				"description": "API for retrieving service providers for Neutron advanced services"
--			}
--		]
--	}
--}
--`
--
--// HandleListExtensionsSuccessfully creates an HTTP handler that returns ListOutput for a List
--// call.
--func HandleListExtensionsSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--
--		fmt.Fprintf(w, `
--{
--  "extensions": {
--    "values": [
--      {
--        "updated": "2013-01-20T00:00:00-00:00",
--        "name": "Neutron Service Type Management",
--        "links": [],
--        "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--        "alias": "service-type",
--        "description": "API for retrieving service providers for Neutron advanced services"
--      }
--    ]
--  }
--}
--    `)
--	})
--
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go
-deleted file mode 100644
-index 0c2d49d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--// Package tenants provides information and interaction with the
--// tenants API resource for the OpenStack Identity service.
--//
--// See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
--// and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants
--// for more information.
--package tenants
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go
-deleted file mode 100644
-index 7f044ac..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go
-+++ /dev/null
-@@ -1,65 +0,0 @@
--// +build fixtures
--
--package tenants
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ListOutput provides a single page of Tenant results.
--const ListOutput = `
--{
--	"tenants": [
--		{
--			"id": "1234",
--			"name": "Red Team",
--			"description": "The team that is red",
--			"enabled": true
--		},
--		{
--			"id": "9876",
--			"name": "Blue Team",
--			"description": "The team that is blue",
--			"enabled": false
--		}
--	]
--}
--`
--
--// RedTeam is a Tenant fixture.
--var RedTeam = Tenant{
--	ID:          "1234",
--	Name:        "Red Team",
--	Description: "The team that is red",
--	Enabled:     true,
--}
--
--// BlueTeam is a Tenant fixture.
--var BlueTeam = Tenant{
--	ID:          "9876",
--	Name:        "Blue Team",
--	Description: "The team that is blue",
--	Enabled:     false,
--}
--
--// ExpectedTenantSlice is the slice of tenants expected to be returned from ListOutput.
--var ExpectedTenantSlice = []Tenant{RedTeam, BlueTeam}
--
--// HandleListTenantsSuccessfully creates an HTTP handler at `/tenants` on the test handler mux that
--// responds with a list of two tenants.
--func HandleListTenantsSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/tenants", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Set("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, ListOutput)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go
-deleted file mode 100644
-index 5a359f5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package tenants
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts filters the Tenants that are returned by the List call.
--type ListOpts struct {
--	// Marker is the ID of the last Tenant on the previous page.
--	Marker string `q:"marker"`
--
--	// Limit specifies the page size.
--	Limit int `q:"limit"`
--}
--
--// List enumerates the Tenants to which the current token has access.
--func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	url := listURL(client)
--	if opts != nil {
--		q, err := gophercloud.BuildQueryString(opts)
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += q.String()
--	}
--
--	return pagination.NewPager(client, url, createPage)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go
-deleted file mode 100644
-index e8f172d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package tenants
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListTenants(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListTenantsSuccessfully(t)
--
--	count := 0
--	err := List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--
--		actual, err := ExtractTenants(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, ExpectedTenantSlice, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go
-deleted file mode 100644
-index c1220c3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go
-+++ /dev/null
-@@ -1,62 +0,0 @@
--package tenants
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Tenant is a grouping of users in the identity service.
--type Tenant struct {
--	// ID is a unique identifier for this tenant.
--	ID string `mapstructure:"id"`
--
--	// Name is a friendlier user-facing name for this tenant.
--	Name string `mapstructure:"name"`
--
--	// Description is a human-readable explanation of this Tenant's purpose.
--	Description string `mapstructure:"description"`
--
--	// Enabled indicates whether or not a tenant is active.
--	Enabled bool `mapstructure:"enabled"`
--}
--
--// TenantPage is a single page of Tenant results.
--type TenantPage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty determines whether or not a page of Tenants contains any results.
--func (page TenantPage) IsEmpty() (bool, error) {
--	tenants, err := ExtractTenants(page)
--	if err != nil {
--		return false, err
--	}
--	return len(tenants) == 0, nil
--}
--
--// NextPageURL extracts the "next" link from the tenants_links section of the result.
--func (page TenantPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"tenants_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(page.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// ExtractTenants returns a slice of Tenants contained in a single page of results.
--func ExtractTenants(page pagination.Page) ([]Tenant, error) {
--	casted := page.(TenantPage).Body
--	var response struct {
--		Tenants []Tenant `mapstructure:"tenants"`
--	}
--
--	err := mapstructure.Decode(casted, &response)
--	return response.Tenants, err
--}
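// Editor's sketch, not part of the original patch: roughly how the removed
// tenants package was consumed. It assumes an already-authenticated
// *gophercloud.ServiceClient for the Identity v2 endpoint; "identityClient"
// and the package name are placeholders, not identifiers from the patch.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
	"github.com/rackspace/gophercloud/pagination"
)

// listAllTenants walks every page of tenants visible to the current token.
func listAllTenants(identityClient *gophercloud.ServiceClient) error {
	opts := &tenants.ListOpts{Limit: 20}
	return tenants.List(identityClient, opts).EachPage(func(page pagination.Page) (bool, error) {
		ts, err := tenants.ExtractTenants(page)
		if err != nil {
			return false, err
		}
		for _, tenant := range ts {
			fmt.Printf("%s: %s (enabled=%v)\n", tenant.ID, tenant.Name, tenant.Enabled)
		}
		return true, nil // keep paging
	})
}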
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go
-deleted file mode 100644
-index 1dd6ce0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--package tenants
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("tenants")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go
-deleted file mode 100644
-index 31cacc5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go
-+++ /dev/null
-@@ -1,5 +0,0 @@
--// Package tokens provides information and interaction with the token API
--// resource for the OpenStack Identity service.
--// For more information, see:
--// http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
--package tokens
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go
-deleted file mode 100644
-index 3a9172e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go
-+++ /dev/null
-@@ -1,30 +0,0 @@
--package tokens
--
--import (
--	"errors"
--	"fmt"
--)
--
--var (
--	// ErrUserIDProvided is returned if you attempt to authenticate with a UserID.
--	ErrUserIDProvided = unacceptedAttributeErr("UserID")
--
--	// ErrAPIKeyProvided is returned if you attempt to authenticate with an APIKey.
--	ErrAPIKeyProvided = unacceptedAttributeErr("APIKey")
--
--	// ErrDomainIDProvided is returned if you attempt to authenticate with a DomainID.
--	ErrDomainIDProvided = unacceptedAttributeErr("DomainID")
--
--	// ErrDomainNameProvided is returned if you attempt to authenticate with a DomainName.
--	ErrDomainNameProvided = unacceptedAttributeErr("DomainName")
--
--	// ErrUsernameRequired is returned if you attempt to authenticate without a Username.
--	ErrUsernameRequired = errors.New("You must supply a Username in your AuthOptions.")
--
--	// ErrPasswordRequired is returned if you don't provide a password.
--	ErrPasswordRequired = errors.New("Please supply a Password in your AuthOptions.")
--)
--
--func unacceptedAttributeErr(attribute string) error {
--	return fmt.Errorf("The base Identity V2 API does not accept authentication by %s", attribute)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go
-deleted file mode 100644
-index 1cb0d05..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go
-+++ /dev/null
-@@ -1,128 +0,0 @@
--// +build fixtures
--
--package tokens
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--	"time"
--
--	"github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--// ExpectedToken is the token that should be parsed from TokenCreationResponse.
--var ExpectedToken = &Token{
--	ID:        "aaaabbbbccccdddd",
--	ExpiresAt: time.Date(2014, time.January, 31, 15, 30, 58, 0, time.UTC),
--	Tenant: tenants.Tenant{
--		ID:          "fc394f2ab2df4114bde39905f800dc57",
--		Name:        "test",
--		Description: "There are many tenants. This one is yours.",
--		Enabled:     true,
--	},
--}
--
--// ExpectedServiceCatalog is the service catalog that should be parsed from TokenCreationResponse.
--var ExpectedServiceCatalog = &ServiceCatalog{
--	Entries: []CatalogEntry{
--		CatalogEntry{
--			Name: "inscrutablewalrus",
--			Type: "something",
--			Endpoints: []Endpoint{
--				Endpoint{
--					PublicURL: "http://something0:1234/v2/",
--					Region:    "region0",
--				},
--				Endpoint{
--					PublicURL: "http://something1:1234/v2/",
--					Region:    "region1",
--				},
--			},
--		},
--		CatalogEntry{
--			Name: "arbitrarypenguin",
--			Type: "else",
--			Endpoints: []Endpoint{
--				Endpoint{
--					PublicURL: "http://else0:4321/v3/",
--					Region:    "region0",
--				},
--			},
--		},
--	},
--}
--
--// TokenCreationResponse is a JSON response that contains ExpectedToken and ExpectedServiceCatalog.
--const TokenCreationResponse = `
--{
--	"access": {
--		"token": {
--			"issued_at": "2014-01-30T15:30:58.000000Z",
--			"expires": "2014-01-31T15:30:58Z",
--			"id": "aaaabbbbccccdddd",
--			"tenant": {
--				"description": "There are many tenants. This one is yours.",
--				"enabled": true,
--				"id": "fc394f2ab2df4114bde39905f800dc57",
--				"name": "test"
--			}
--		},
--		"serviceCatalog": [
--			{
--				"endpoints": [
--					{
--						"publicURL": "http://something0:1234/v2/",
--						"region": "region0"
--					},
--					{
--						"publicURL": "http://something1:1234/v2/",
--						"region": "region1"
--					}
--				],
--				"type": "something",
--				"name": "inscrutablewalrus"
--			},
--			{
--				"endpoints": [
--					{
--						"publicURL": "http://else0:4321/v3/",
--						"region": "region0"
--					}
--				],
--				"type": "else",
--				"name": "arbitrarypenguin"
--			}
--		]
--	}
--}
--`
--
--// HandleTokenPost expects a POST against a /tokens handler, ensures that the request body has been
--// constructed properly given certain auth options, and returns the result.
--func HandleTokenPost(t *testing.T, requestJSON string) {
--	th.Mux.HandleFunc("/tokens", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		if requestJSON != "" {
--			th.TestJSONRequest(t, r, requestJSON)
--		}
--
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, TokenCreationResponse)
--	})
--}
--
--// IsSuccessful ensures that a CreateResult was successful and contains the correct token and
--// service catalog.
--func IsSuccessful(t *testing.T, result CreateResult) {
--	token, err := result.ExtractToken()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, ExpectedToken, token)
--
--	serviceCatalog, err := result.ExtractServiceCatalog()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, ExpectedServiceCatalog, serviceCatalog)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go
-deleted file mode 100644
-index 87c923a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go
-+++ /dev/null
-@@ -1,87 +0,0 @@
--package tokens
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// AuthOptionsBuilder describes any argument that may be passed to the Create call.
--type AuthOptionsBuilder interface {
--
--	// ToTokenCreateMap assembles the Create request body, returning an error if parameters are
--	// missing or inconsistent.
--	ToTokenCreateMap() (map[string]interface{}, error)
--}
--
--// AuthOptions wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder
--// interface.
--type AuthOptions struct {
--	gophercloud.AuthOptions
--}
--
--// WrapOptions embeds a root AuthOptions struct in a package-specific one.
--func WrapOptions(original gophercloud.AuthOptions) AuthOptions {
--	return AuthOptions{AuthOptions: original}
--}
--
--// ToTokenCreateMap converts AuthOptions into nested maps that can be serialized into a JSON
--// request.
--func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) {
--	// Error out if an unsupported auth option is present.
--	if auth.UserID != "" {
--		return nil, ErrUserIDProvided
--	}
--	if auth.APIKey != "" {
--		return nil, ErrAPIKeyProvided
--	}
--	if auth.DomainID != "" {
--		return nil, ErrDomainIDProvided
--	}
--	if auth.DomainName != "" {
--		return nil, ErrDomainNameProvided
--	}
--
--	// Username and Password are always required.
--	if auth.Username == "" {
--		return nil, ErrUsernameRequired
--	}
--	if auth.Password == "" {
--		return nil, ErrPasswordRequired
--	}
--
--	// Populate the request map.
--	authMap := make(map[string]interface{})
--
--	authMap["passwordCredentials"] = map[string]interface{}{
--		"username": auth.Username,
--		"password": auth.Password,
--	}
--
--	if auth.TenantID != "" {
--		authMap["tenantId"] = auth.TenantID
--	}
--	if auth.TenantName != "" {
--		authMap["tenantName"] = auth.TenantName
--	}
--
--	return map[string]interface{}{"auth": authMap}, nil
--}
--
--// Create authenticates to the identity service and attempts to acquire a Token.
--// If successful, the CreateResult
--// Generally, rather than interact with this call directly, end users should call openstack.AuthenticatedClient(),
--// which abstracts all of the gory details about navigating service catalogs and such.
--func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) CreateResult {
--	request, err := auth.ToTokenCreateMap()
--	if err != nil {
--		return CreateResult{gophercloud.Result{Err: err}}
--	}
--
--	var result CreateResult
--	_, result.Err = perigee.Request("POST", CreateURL(client), perigee.Options{
--		ReqBody: &request,
--		Results: &result.Body,
--		OkCodes: []int{200, 203},
--	})
--	return result
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go
-deleted file mode 100644
-index 2f02825..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests_test.go
-+++ /dev/null
-@@ -1,140 +0,0 @@
--package tokens
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func tokenPost(t *testing.T, options gophercloud.AuthOptions, requestJSON string) CreateResult {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleTokenPost(t, requestJSON)
--
--	return Create(client.ServiceClient(), AuthOptions{options})
--}
--
--func tokenPostErr(t *testing.T, options gophercloud.AuthOptions, expectedErr error) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleTokenPost(t, "")
--
--	actualErr := Create(client.ServiceClient(), AuthOptions{options}).Err
--	th.CheckEquals(t, expectedErr, actualErr)
--}
--
--func TestCreateWithPassword(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		Password: "swordfish",
--	}
--
--	IsSuccessful(t, tokenPost(t, options, `
--    {
--      "auth": {
--        "passwordCredentials": {
--          "username": "me",
--          "password": "swordfish"
--        }
--      }
--    }
--  `))
--}
--
--func TestCreateTokenWithTenantID(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		Password: "opensesame",
--		TenantID: "fc394f2ab2df4114bde39905f800dc57",
--	}
--
--	IsSuccessful(t, tokenPost(t, options, `
--    {
--      "auth": {
--        "tenantId": "fc394f2ab2df4114bde39905f800dc57",
--        "passwordCredentials": {
--          "username": "me",
--          "password": "opensesame"
--        }
--      }
--    }
--  `))
--}
--
--func TestCreateTokenWithTenantName(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username:   "me",
--		Password:   "opensesame",
--		TenantName: "demo",
--	}
--
--	IsSuccessful(t, tokenPost(t, options, `
--    {
--      "auth": {
--        "tenantName": "demo",
--        "passwordCredentials": {
--          "username": "me",
--          "password": "opensesame"
--        }
--      }
--    }
--  `))
--}
--
--func TestProhibitUserID(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		UserID:   "1234",
--		Password: "thing",
--	}
--
--	tokenPostErr(t, options, ErrUserIDProvided)
--}
--
--func TestProhibitAPIKey(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		Password: "thing",
--		APIKey:   "123412341234",
--	}
--
--	tokenPostErr(t, options, ErrAPIKeyProvided)
--}
--
--func TestProhibitDomainID(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		Password: "thing",
--		DomainID: "1234",
--	}
--
--	tokenPostErr(t, options, ErrDomainIDProvided)
--}
--
--func TestProhibitDomainName(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username:   "me",
--		Password:   "thing",
--		DomainName: "wat",
--	}
--
--	tokenPostErr(t, options, ErrDomainNameProvided)
--}
--
--func TestRequireUsername(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Password: "thing",
--	}
--
--	tokenPostErr(t, options, ErrUsernameRequired)
--}
--
--func TestRequirePassword(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--	}
--
--	tokenPostErr(t, options, ErrPasswordRequired)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go
-deleted file mode 100644
-index 1eddb9d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go
-+++ /dev/null
-@@ -1,133 +0,0 @@
--package tokens
--
--import (
--	"time"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
--)
--
--// Token provides only the most basic information related to an authentication token.
--type Token struct {
--	// ID provides the primary means of identifying a user to the OpenStack API.
--	// OpenStack defines this field as an opaque value, so do not depend on its content.
--	// It is safe, however, to compare for equality.
--	ID string
--
--	// ExpiresAt provides a timestamp in ISO 8601 format, indicating when the authentication token becomes invalid.
--	// After this point in time, future API requests made using this authentication token will respond with errors.
--	// The caller must then either reauthenticate manually or, preferably, rely on automatic re-authentication.
--	// See the AuthOptions structure for more details.
--	ExpiresAt time.Time
--
--	// Tenant provides information about the tenant to which this token grants access.
--	Tenant tenants.Tenant
--}
--
--// Endpoint represents a single API endpoint offered by a service.
--// It provides the public and internal URLs, if supported, along with a region specifier, again if provided.
--// The significance of the Region field will depend upon your provider.
--//
--// In addition, the interface offered by the service will have version information associated with it
--// through the VersionId, VersionInfo, and VersionList fields, if provided or supported.
--//
--// In all cases, fields which aren't supported by the provider and service combined will assume a zero-value ("").
--type Endpoint struct {
--	TenantID    string `mapstructure:"tenantId"`
--	PublicURL   string `mapstructure:"publicURL"`
--	InternalURL string `mapstructure:"internalURL"`
--	AdminURL    string `mapstructure:"adminURL"`
--	Region      string `mapstructure:"region"`
--	VersionID   string `mapstructure:"versionId"`
--	VersionInfo string `mapstructure:"versionInfo"`
--	VersionList string `mapstructure:"versionList"`
--}
--
--// CatalogEntry provides a type-safe interface to an Identity API V2 service catalog listing.
--// Each class of service, such as cloud DNS or block storage services, will have a single
--// CatalogEntry representing it.
--//
--// Note: when looking for the desired service, try, whenever possible, to key off the type field.
--// Otherwise, you'll tie the representation of the service to a specific provider.
--type CatalogEntry struct {
--	// Name will contain the provider-specified name for the service.
--	Name string `mapstructure:"name"`
--
--	// Type will contain a type string if OpenStack defines a type for the service.
--	// Otherwise, for provider-specific services, the provider may assign their own type strings.
--	Type string `mapstructure:"type"`
--
--	// Endpoints will let the caller iterate over all the different endpoints that may exist for
--	// the service.
--	Endpoints []Endpoint `mapstructure:"endpoints"`
--}
--
--// ServiceCatalog provides a view into the service catalog from a previous, successful authentication.
--type ServiceCatalog struct {
--	Entries []CatalogEntry
--}
--
--// CreateResult defers the interpretation of a created token.
--// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog.
--type CreateResult struct {
--	gophercloud.Result
--}
--
--// ExtractToken returns the just-created Token from a CreateResult.
--func (result CreateResult) ExtractToken() (*Token, error) {
--	if result.Err != nil {
--		return nil, result.Err
--	}
--
--	var response struct {
--		Access struct {
--			Token struct {
--				Expires string         `mapstructure:"expires"`
--				ID      string         `mapstructure:"id"`
--				Tenant  tenants.Tenant `mapstructure:"tenant"`
--			} `mapstructure:"token"`
--		} `mapstructure:"access"`
--	}
--
--	err := mapstructure.Decode(result.Body, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	expiresTs, err := time.Parse(gophercloud.RFC3339Milli, response.Access.Token.Expires)
--	if err != nil {
--		return nil, err
--	}
--
--	return &Token{
--		ID:        response.Access.Token.ID,
--		ExpiresAt: expiresTs,
--		Tenant:    response.Access.Token.Tenant,
--	}, nil
--}
--
--// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token.
--func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
--	if result.Err != nil {
--		return nil, result.Err
--	}
--
--	var response struct {
--		Access struct {
--			Entries []CatalogEntry `mapstructure:"serviceCatalog"`
--		} `mapstructure:"access"`
--	}
--
--	err := mapstructure.Decode(result.Body, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	return &ServiceCatalog{Entries: response.Access.Entries}, nil
--}
--
--// createErr quickly packs an error in a CreateResult.
--func createErr(err error) CreateResult {
--	return CreateResult{gophercloud.Result{Err: err}}
--}
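// Editor's sketch, not part of the original patch: acquiring a v2 token with
// the removed Create/ExtractToken/ExtractServiceCatalog helpers shown above.
// "identityClient" is a placeholder for an unauthenticated
// *gophercloud.ServiceClient pointed at the Identity v2 API; the credentials
// are dummy values.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
)

func authenticateV2(identityClient *gophercloud.ServiceClient) error {
	opts := gophercloud.AuthOptions{
		Username:   "me",
		Password:   "swordfish",
		TenantName: "demo",
	}

	result := tokens.Create(identityClient, tokens.WrapOptions(opts))

	token, err := result.ExtractToken()
	if err != nil {
		return err
	}
	catalog, err := result.ExtractServiceCatalog()
	if err != nil {
		return err
	}

	fmt.Printf("token %s expires at %s; catalog has %d entries\n",
		token.ID, token.ExpiresAt, len(catalog.Entries))
	return nil
}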
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go
-deleted file mode 100644
-index cd4c696..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--package tokens
--
--import "github.com/rackspace/gophercloud"
--
--// CreateURL generates the URL used to create new Tokens.
--func CreateURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("tokens")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go
-deleted file mode 100644
-index 8516394..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/doc.go
-+++ /dev/null
-@@ -1,6 +0,0 @@
--// Package endpoints provides information and interaction with the service
--// endpoints API resource in the OpenStack Identity service.
--//
--// For more information, see:
--// http://developer.openstack.org/api-ref-identity-v3.html#endpoints-v3
--package endpoints
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go
-deleted file mode 100644
-index 854957f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/errors.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--package endpoints
--
--import "fmt"
--
--func requiredAttribute(attribute string) error {
--	return fmt.Errorf("You must specify %s for this endpoint.", attribute)
--}
--
--var (
--	// ErrAvailabilityRequired is reported if an Endpoint is created without an Availability.
--	ErrAvailabilityRequired = requiredAttribute("an availability")
--
--	// ErrNameRequired is reported if an Endpoint is created without a Name.
--	ErrNameRequired = requiredAttribute("a name")
--
--	// ErrURLRequired is reported if an Endpoint is created without a URL.
--	ErrURLRequired = requiredAttribute("a URL")
--
--	// ErrServiceIDRequired is reported if an Endpoint is created without a ServiceID.
--	ErrServiceIDRequired = requiredAttribute("a serviceID")
--)
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go
-deleted file mode 100644
-index 7bdb7ce..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go
-+++ /dev/null
-@@ -1,133 +0,0 @@
--package endpoints
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// EndpointOpts contains the subset of Endpoint attributes that should be used to create or update an Endpoint.
--type EndpointOpts struct {
--	Availability gophercloud.Availability
--	Name         string
--	Region       string
--	URL          string
--	ServiceID    string
--}
--
--// Create inserts a new Endpoint into the service catalog.
--// Within EndpointOpts, Region may be omitted by being left as "", but all other fields are required.
--func Create(client *gophercloud.ServiceClient, opts EndpointOpts) CreateResult {
--	// Redefined so that Region can be re-typed as a *string, which can be omitted from the JSON output.
--	type endpoint struct {
--		Interface string  `json:"interface"`
--		Name      string  `json:"name"`
--		Region    *string `json:"region,omitempty"`
--		URL       string  `json:"url"`
--		ServiceID string  `json:"service_id"`
--	}
--
--	type request struct {
--		Endpoint endpoint `json:"endpoint"`
--	}
--
--	// Ensure that EndpointOpts is fully populated.
--	if opts.Availability == "" {
--		return createErr(ErrAvailabilityRequired)
--	}
--	if opts.Name == "" {
--		return createErr(ErrNameRequired)
--	}
--	if opts.URL == "" {
--		return createErr(ErrURLRequired)
--	}
--	if opts.ServiceID == "" {
--		return createErr(ErrServiceIDRequired)
--	}
--
--	// Populate the request body.
--	reqBody := request{
--		Endpoint: endpoint{
--			Interface: string(opts.Availability),
--			Name:      opts.Name,
--			URL:       opts.URL,
--			ServiceID: opts.ServiceID,
--		},
--	}
--	reqBody.Endpoint.Region = gophercloud.MaybeString(opts.Region)
--
--	var result CreateResult
--	_, result.Err = perigee.Request("POST", listURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &result.Body,
--		OkCodes:     []int{201},
--	})
--	return result
--}
--
--// ListOpts allows finer control over the endpoints returned by a List call.
--// All fields are optional.
--type ListOpts struct {
--	Availability gophercloud.Availability `q:"interface"`
--	ServiceID    string                   `q:"service_id"`
--	Page         int                      `q:"page"`
--	PerPage      int                      `q:"per_page"`
--}
--
--// List enumerates endpoints in a paginated collection, optionally filtered by ListOpts criteria.
--func List(client *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	u := listURL(client)
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u += q.String()
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return EndpointPage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	return pagination.NewPager(client, u, createPage)
--}
--
--// Update changes an existing endpoint with new data.
--// All fields are optional in the provided EndpointOpts.
--func Update(client *gophercloud.ServiceClient, endpointID string, opts EndpointOpts) UpdateResult {
--	type endpoint struct {
--		Interface *string `json:"interface,omitempty"`
--		Name      *string `json:"name,omitempty"`
--		Region    *string `json:"region,omitempty"`
--		URL       *string `json:"url,omitempty"`
--		ServiceID *string `json:"service_id,omitempty"`
--	}
--
--	type request struct {
--		Endpoint endpoint `json:"endpoint"`
--	}
--
--	reqBody := request{Endpoint: endpoint{}}
--	reqBody.Endpoint.Interface = gophercloud.MaybeString(string(opts.Availability))
--	reqBody.Endpoint.Name = gophercloud.MaybeString(opts.Name)
--	reqBody.Endpoint.Region = gophercloud.MaybeString(opts.Region)
--	reqBody.Endpoint.URL = gophercloud.MaybeString(opts.URL)
--	reqBody.Endpoint.ServiceID = gophercloud.MaybeString(opts.ServiceID)
--
--	var result UpdateResult
--	_, result.Err = perigee.Request("PATCH", endpointURL(client, endpointID), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &result.Body,
--		OkCodes:     []int{200},
--	})
--	return result
--}
--
--// Delete removes an endpoint from the service catalog.
--func Delete(client *gophercloud.ServiceClient, endpointID string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", endpointURL(client, endpointID), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
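// Editor's sketch, not part of the original patch: registering an endpoint via
// the removed v3 endpoints package. Extract() on the CreateResult comes from
// the results.go deleted further below; "identityClient" and the literal
// values are placeholders.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v3/endpoints"
)

func registerEndpoint(identityClient *gophercloud.ServiceClient, serviceID string) error {
	created, err := endpoints.Create(identityClient, endpoints.EndpointOpts{
		Availability: gophercloud.AvailabilityPublic,
		Name:         "example-endpoint",
		Region:       "region0", // optional; may be left ""
		URL:          "https://1.2.3.4:9000/",
		ServiceID:    serviceID,
	}).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("created endpoint %s in region %s\n", created.ID, created.Region)
	return nil
}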
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go
-deleted file mode 100644
-index 80687c4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests_test.go
-+++ /dev/null
-@@ -1,226 +0,0 @@
--package endpoints
--
--import (
--	"fmt"
--	"net/http"
--	"reflect"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestCreateSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "POST")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		testhelper.TestJSONRequest(t, r, `
--      {
--        "endpoint": {
--          "interface": "public",
--          "name": "the-endiest-of-points",
--          "region": "underground",
--          "url": "https://1.2.3.4:9000/",
--          "service_id": "asdfasdfasdfasdf"
--        }
--      }
--    `)
--
--		w.WriteHeader(http.StatusCreated)
--		fmt.Fprintf(w, `
--      {
--        "endpoint": {
--          "id": "12",
--          "interface": "public",
--          "links": {
--            "self": "https://localhost:5000/v3/endpoints/12"
--          },
--          "name": "the-endiest-of-points",
--          "region": "underground",
--          "service_id": "asdfasdfasdfasdf",
--          "url": "https://1.2.3.4:9000/"
--        }
--      }
--    `)
--	})
--
--	actual, err := Create(client.ServiceClient(), EndpointOpts{
--		Availability: gophercloud.AvailabilityPublic,
--		Name:         "the-endiest-of-points",
--		Region:       "underground",
--		URL:          "https://1.2.3.4:9000/",
--		ServiceID:    "asdfasdfasdfasdf",
--	}).Extract()
--	if err != nil {
--		t.Fatalf("Unable to create an endpoint: %v", err)
--	}
--
--	expected := &Endpoint{
--		ID:           "12",
--		Availability: gophercloud.AvailabilityPublic,
--		Name:         "the-endiest-of-points",
--		Region:       "underground",
--		ServiceID:    "asdfasdfasdfasdf",
--		URL:          "https://1.2.3.4:9000/",
--	}
--
--	if !reflect.DeepEqual(actual, expected) {
--		t.Errorf("Expected %#v, was %#v", expected, actual)
--	}
--}
--
--func TestListEndpoints(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "GET")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"endpoints": [
--					{
--						"id": "12",
--						"interface": "public",
--						"links": {
--							"self": "https://localhost:5000/v3/endpoints/12"
--						},
--						"name": "the-endiest-of-points",
--						"region": "underground",
--						"service_id": "asdfasdfasdfasdf",
--						"url": "https://1.2.3.4:9000/"
--					},
--					{
--						"id": "13",
--						"interface": "internal",
--						"links": {
--							"self": "https://localhost:5000/v3/endpoints/13"
--						},
--						"name": "shhhh",
--						"region": "underground",
--						"service_id": "asdfasdfasdfasdf",
--						"url": "https://1.2.3.4:9001/"
--					}
--				],
--				"links": {
--					"next": null,
--					"previous": null
--				}
--			}
--		`)
--	})
--
--	count := 0
--	List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractEndpoints(page)
--		if err != nil {
--			t.Errorf("Failed to extract endpoints: %v", err)
--			return false, err
--		}
--
--		expected := []Endpoint{
--			Endpoint{
--				ID:           "12",
--				Availability: gophercloud.AvailabilityPublic,
--				Name:         "the-endiest-of-points",
--				Region:       "underground",
--				ServiceID:    "asdfasdfasdfasdf",
--				URL:          "https://1.2.3.4:9000/",
--			},
--			Endpoint{
--				ID:           "13",
--				Availability: gophercloud.AvailabilityInternal,
--				Name:         "shhhh",
--				Region:       "underground",
--				ServiceID:    "asdfasdfasdfasdf",
--				URL:          "https://1.2.3.4:9001/",
--			},
--		}
--
--		if !reflect.DeepEqual(expected, actual) {
--			t.Errorf("Expected %#v, got %#v", expected, actual)
--		}
--
--		return true, nil
--	})
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestUpdateEndpoint(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/endpoints/12", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "PATCH")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		testhelper.TestJSONRequest(t, r, `
--		{
--	    "endpoint": {
--	      "name": "renamed",
--				"region": "somewhere-else"
--	    }
--		}
--	`)
--
--		fmt.Fprintf(w, `
--		{
--			"endpoint": {
--				"id": "12",
--				"interface": "public",
--				"links": {
--					"self": "https://localhost:5000/v3/endpoints/12"
--				},
--				"name": "renamed",
--				"region": "somewhere-else",
--				"service_id": "asdfasdfasdfasdf",
--				"url": "https://1.2.3.4:9000/"
--			}
--		}
--	`)
--	})
--
--	actual, err := Update(client.ServiceClient(), "12", EndpointOpts{
--		Name:   "renamed",
--		Region: "somewhere-else",
--	}).Extract()
--	if err != nil {
--		t.Fatalf("Unexpected error from Update: %v", err)
--	}
--
--	expected := &Endpoint{
--		ID:           "12",
--		Availability: gophercloud.AvailabilityPublic,
--		Name:         "renamed",
--		Region:       "somewhere-else",
--		ServiceID:    "asdfasdfasdfasdf",
--		URL:          "https://1.2.3.4:9000/",
--	}
--	if !reflect.DeepEqual(expected, actual) {
--		t.Errorf("Expected %#v, was %#v", expected, actual)
--	}
--}
--
--func TestDeleteEndpoint(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/endpoints/34", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "DELETE")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(client.ServiceClient(), "34")
--	testhelper.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go
-deleted file mode 100644
-index 1281122..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/results.go
-+++ /dev/null
-@@ -1,82 +0,0 @@
--package endpoints
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete Endpoint.
--// An error is returned if the original call or the extraction failed.
--func (r commonResult) Extract() (*Endpoint, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Endpoint `json:"endpoint"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return &res.Endpoint, err
--}
--
--// CreateResult is the deferred result of a Create call.
--type CreateResult struct {
--	commonResult
--}
--
--// createErr quickly wraps an error in a CreateResult.
--func createErr(err error) CreateResult {
--	return CreateResult{commonResult{gophercloud.Result{Err: err}}}
--}
--
--// UpdateResult is the deferred result of an Update call.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult is the deferred result of a Delete call.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// Endpoint describes the entry point for another service's API.
--type Endpoint struct {
--	ID           string                   `mapstructure:"id" json:"id"`
--	Availability gophercloud.Availability `mapstructure:"interface" json:"interface"`
--	Name         string                   `mapstructure:"name" json:"name"`
--	Region       string                   `mapstructure:"region" json:"region"`
--	ServiceID    string                   `mapstructure:"service_id" json:"service_id"`
--	URL          string                   `mapstructure:"url" json:"url"`
--}
--
--// EndpointPage is a single page of Endpoint results.
--type EndpointPage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty returns true if no Endpoints were returned.
--func (p EndpointPage) IsEmpty() (bool, error) {
--	es, err := ExtractEndpoints(p)
--	if err != nil {
--		return true, err
--	}
--	return len(es) == 0, nil
--}
--
--// ExtractEndpoints extracts an Endpoint slice from a Page.
--func ExtractEndpoints(page pagination.Page) ([]Endpoint, error) {
--	var response struct {
--		Endpoints []Endpoint `mapstructure:"endpoints"`
--	}
--
--	err := mapstructure.Decode(page.(EndpointPage).Body, &response)
--
--	return response.Endpoints, err
--}
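// Editor's sketch, not part of the original patch: paging through the public
// endpoints of one service with the removed List/ExtractEndpoints helpers.
// "identityClient" is a placeholder for an authenticated Identity v3 client.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v3/endpoints"
	"github.com/rackspace/gophercloud/pagination"
)

func printServiceEndpoints(identityClient *gophercloud.ServiceClient, serviceID string) error {
	opts := endpoints.ListOpts{
		Availability: gophercloud.AvailabilityPublic, // maps to the "interface" query parameter
		ServiceID:    serviceID,
	}
	return endpoints.List(identityClient, opts).EachPage(func(page pagination.Page) (bool, error) {
		eps, err := endpoints.ExtractEndpoints(page)
		if err != nil {
			return false, err
		}
		for _, ep := range eps {
			fmt.Printf("%s (%s): %s\n", ep.Name, ep.Availability, ep.URL)
		}
		return true, nil
	})
}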
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go
-deleted file mode 100644
-index 547d7b1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package endpoints
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("endpoints")
--}
--
--func endpointURL(client *gophercloud.ServiceClient, endpointID string) string {
--	return client.ServiceURL("endpoints", endpointID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go
-deleted file mode 100644
-index 0b183b7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/urls_test.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package endpoints
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--)
--
--func TestGetListURL(t *testing.T) {
--	client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"}
--	url := listURL(&client)
--	if url != "http://localhost:5000/v3/endpoints" {
--		t.Errorf("Unexpected list URL generated: [%s]", url)
--	}
--}
--
--func TestGetEndpointURL(t *testing.T) {
--	client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"}
--	url := endpointURL(&client, "1234")
--	if url != "http://localhost:5000/v3/endpoints/1234" {
--		t.Errorf("Unexpected service URL generated: [%s]", url)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go
-deleted file mode 100644
-index fa56411..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package services provides information and interaction with the services API
--// resource for the OpenStack Identity service.
--package services
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go
-deleted file mode 100644
-index 1d9aaa8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--package services
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type response struct {
--	Service Service `json:"service"`
--}
--
--// Create adds a new service of the requested type to the catalog.
--func Create(client *gophercloud.ServiceClient, serviceType string) CreateResult {
--	type request struct {
--		Type string `json:"type"`
--	}
--
--	req := request{Type: serviceType}
--
--	var result CreateResult
--	_, result.Err = perigee.Request("POST", listURL(client), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     &req,
--		Results:     &result.Body,
--		OkCodes:     []int{201},
--	})
--	return result
--}
--
--// ListOpts allows you to query the List method.
--type ListOpts struct {
--	ServiceType string `q:"type"`
--	PerPage     int    `q:"perPage"`
--	Page        int    `q:"page"`
--}
--
--// List enumerates the services available to a specific user.
--func List(client *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	u := listURL(client)
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u += q.String()
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return ServicePage{pagination.LinkedPageBase{PageResult: r}}
--	}
--
--	return pagination.NewPager(client, u, createPage)
--}
--
--// Get returns additional information about a service, given its ID.
--func Get(client *gophercloud.ServiceClient, serviceID string) GetResult {
--	var result GetResult
--	_, result.Err = perigee.Request("GET", serviceURL(client, serviceID), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		Results:     &result.Body,
--		OkCodes:     []int{200},
--	})
--	return result
--}
--
--// Update changes the service type of an existing service.
--func Update(client *gophercloud.ServiceClient, serviceID string, serviceType string) UpdateResult {
--	type request struct {
--		Type string `json:"type"`
--	}
--
--	req := request{Type: serviceType}
--
--	var result UpdateResult
--	_, result.Err = perigee.Request("PATCH", serviceURL(client, serviceID), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		ReqBody:     &req,
--		Results:     &result.Body,
--		OkCodes:     []int{200},
--	})
--	return result
--}
--
--// Delete removes an existing service.
--// It either deletes all associated endpoints, or fails until all endpoints are deleted.
--func Delete(client *gophercloud.ServiceClient, serviceID string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", serviceURL(client, serviceID), perigee.Options{
--		MoreHeaders: client.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
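// Editor's sketch, not part of the original patch: basic lifecycle of a v3
// service catalog entry using the removed services package; Extract() comes
// from the results.go deleted below and "identityClient" is a placeholder.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v3/services"
)

func createInspectDelete(identityClient *gophercloud.ServiceClient) error {
	created, err := services.Create(identityClient, "compute").Extract()
	if err != nil {
		return err
	}

	fetched, err := services.Get(identityClient, created.ID).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("service %s has type %q\n", fetched.ID, fetched.Type)

	// Per the comment above, Delete fails while endpoints still reference the service.
	return services.Delete(identityClient, fetched.ID).Err
}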
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
-deleted file mode 100644
-index 32e6d1b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests_test.go
-+++ /dev/null
-@@ -1,209 +0,0 @@
--package services
--
--import (
--	"fmt"
--	"net/http"
--	"reflect"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	"github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestCreateSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "POST")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		testhelper.TestJSONRequest(t, r, `{ "type": "compute" }`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--		fmt.Fprintf(w, `{
--        "service": {
--          "description": "Here's your service",
--          "id": "1234",
--          "name": "InscrutableOpenStackProjectName",
--          "type": "compute"
--        }
--    }`)
--	})
--
--	result, err := Create(client.ServiceClient(), "compute").Extract()
--	if err != nil {
--		t.Fatalf("Unexpected error from Create: %v", err)
--	}
--
--	if result.Description == nil || *result.Description != "Here's your service" {
--		t.Errorf("Service description was unexpected [%s]", result.Description)
--	}
--	if result.ID != "1234" {
--		t.Errorf("Service ID was unexpected [%s]", result.ID)
--	}
--	if result.Name != "InscrutableOpenStackProjectName" {
--		t.Errorf("Service name was unexpected [%s]", result.Name)
--	}
--	if result.Type != "compute" {
--		t.Errorf("Service type was unexpected [%s]", result.Type)
--	}
--}
--
--func TestListSinglePage(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "GET")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"links": {
--					"next": null,
--					"previous": null
--				},
--				"services": [
--					{
--						"description": "Service One",
--						"id": "1234",
--						"name": "service-one",
--						"type": "identity"
--					},
--					{
--						"description": "Service Two",
--						"id": "9876",
--						"name": "service-two",
--						"type": "compute"
--					}
--				]
--			}
--		`)
--	})
--
--	count := 0
--	err := List(client.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractServices(page)
--		if err != nil {
--			return false, err
--		}
--
--		desc0 := "Service One"
--		desc1 := "Service Two"
--		expected := []Service{
--			Service{
--				Description: &desc0,
--				ID:          "1234",
--				Name:        "service-one",
--				Type:        "identity",
--			},
--			Service{
--				Description: &desc1,
--				ID:          "9876",
--				Name:        "service-two",
--				Type:        "compute",
--			},
--		}
--
--		if !reflect.DeepEqual(expected, actual) {
--			t.Errorf("Expected %#v, got %#v", expected, actual)
--		}
--
--		return true, nil
--	})
--	if err != nil {
--		t.Errorf("Unexpected error while paging: %v", err)
--	}
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGetSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "GET")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"service": {
--						"description": "Service One",
--						"id": "12345",
--						"name": "service-one",
--						"type": "identity"
--				}
--			}
--		`)
--	})
--
--	result, err := Get(client.ServiceClient(), "12345").Extract()
--	if err != nil {
--		t.Fatalf("Error fetching service information: %v", err)
--	}
--
--	if result.ID != "12345" {
--		t.Errorf("Unexpected service ID: %s", result.ID)
--	}
--	if *result.Description != "Service One" {
--		t.Errorf("Unexpected service description: [%s]", *result.Description)
--	}
--	if result.Name != "service-one" {
--		t.Errorf("Unexpected service name: [%s]", result.Name)
--	}
--	if result.Type != "identity" {
--		t.Errorf("Unexpected service type: [%s]", result.Type)
--	}
--}
--
--func TestUpdateSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "PATCH")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--		testhelper.TestJSONRequest(t, r, `{ "type": "lasermagic" }`)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `
--			{
--				"service": {
--						"id": "12345",
--						"type": "lasermagic"
--				}
--			}
--		`)
--	})
--
--	result, err := Update(client.ServiceClient(), "12345", "lasermagic").Extract()
--	if err != nil {
--		t.Fatalf("Unable to update service: %v", err)
--	}
--
--	if result.ID != "12345" {
--		t.Fatalf("Expected ID 12345, was %s", result.ID)
--	}
--}
--
--func TestDeleteSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	testhelper.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "DELETE")
--		testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(client.ServiceClient(), "12345")
--	testhelper.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
-deleted file mode 100644
-index 1d0d141..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/results.go
-+++ /dev/null
-@@ -1,80 +0,0 @@
--package services
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete Service.
--// An error is returned if the original call or the extraction failed.
--func (r commonResult) Extract() (*Service, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Service `json:"service"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return &res.Service, err
--}
--
--// CreateResult is the deferred result of a Create call.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult is the deferred result of a Get call.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult is the deferred result of an Update call.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult is the deferred result of a Delete call.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// Service is the result of a list or information query.
--type Service struct {
--	Description *string `json:"description,omitempty"`
--	ID          string  `json:"id"`
--	Name        string  `json:"name"`
--	Type        string  `json:"type"`
--}
--
--// ServicePage is a single page of Service results.
--type ServicePage struct {
--	pagination.LinkedPageBase
--}
--
--// IsEmpty returns true if the page contains no results.
--func (p ServicePage) IsEmpty() (bool, error) {
--	services, err := ExtractServices(p)
--	if err != nil {
--		return true, err
--	}
--	return len(services) == 0, nil
--}
--
--// ExtractServices extracts a slice of Services from a Collection acquired from List.
--func ExtractServices(page pagination.Page) ([]Service, error) {
--	var response struct {
--		Services []Service `mapstructure:"services"`
--	}
--
--	err := mapstructure.Decode(page.(ServicePage).Body, &response)
--	return response.Services, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go
-deleted file mode 100644
-index 85443a4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package services
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(client *gophercloud.ServiceClient) string {
--	return client.ServiceURL("services")
--}
--
--func serviceURL(client *gophercloud.ServiceClient, serviceID string) string {
--	return client.ServiceURL("services", serviceID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go
-deleted file mode 100644
-index 5a31b32..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/urls_test.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package services
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--)
--
--func TestListURL(t *testing.T) {
--	client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"}
--	url := listURL(&client)
--	if url != "http://localhost:5000/v3/services" {
--		t.Errorf("Unexpected list URL generated: [%s]", url)
--	}
--}
--
--func TestServiceURL(t *testing.T) {
--	client := gophercloud.ServiceClient{Endpoint: "http://localhost:5000/v3/"}
--	url := serviceURL(&client, "1234")
--	if url != "http://localhost:5000/v3/services/1234" {
--		t.Errorf("Unexpected service URL generated: [%s]", url)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go
-deleted file mode 100644
-index 76ff5f4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go
-+++ /dev/null
-@@ -1,6 +0,0 @@
--// Package tokens provides information and interaction with the token API
--// resource for the OpenStack Identity service.
--//
--// For more information, see:
--// http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3
--package tokens
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go
-deleted file mode 100644
-index 4476109..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package tokens
--
--import (
--	"errors"
--	"fmt"
--)
--
--func unacceptedAttributeErr(attribute string) error {
--	return fmt.Errorf("The base Identity V3 API does not accept authentication by %s", attribute)
--}
--
--func redundantWithTokenErr(attribute string) error {
--	return fmt.Errorf("%s may not be provided when authenticating with a TokenID", attribute)
--}
--
--func redundantWithUserID(attribute string) error {
--	return fmt.Errorf("%s may not be provided when authenticating with a UserID", attribute)
--}
--
--var (
--	// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
--	ErrAPIKeyProvided = unacceptedAttributeErr("APIKey")
--
--	// ErrTenantIDProvided indicates that a TenantID was provided but can't be used.
--	ErrTenantIDProvided = unacceptedAttributeErr("TenantID")
--
--	// ErrTenantNameProvided indicates that a TenantName was provided but can't be used.
--	ErrTenantNameProvided = unacceptedAttributeErr("TenantName")
--
--	// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead.
--	ErrUsernameWithToken = redundantWithTokenErr("Username")
--
--	// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead.
--	ErrUserIDWithToken = redundantWithTokenErr("UserID")
--
--	// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead.
--	ErrDomainIDWithToken = redundantWithTokenErr("DomainID")
--
--	// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.
--	ErrDomainNameWithToken = redundantWithTokenErr("DomainName")
--
--	// ErrUsernameOrUserID indicates that neither Username nor UserID was specified, or that both were given at once.
--	ErrUsernameOrUserID = errors.New("Exactly one of Username and UserID must be provided for password authentication")
--
--	// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used.
--	ErrDomainIDWithUserID = redundantWithUserID("DomainID")
--
--	// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used.
--	ErrDomainNameWithUserID = redundantWithUserID("DomainName")
--
--	// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it.
--	// It may also indicate that both a DomainID and a DomainName were provided at once.
--	ErrDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName to authenticate by Username")
--
--	// ErrMissingPassword indicates that no password was provided and no token is available.
--	ErrMissingPassword = errors.New("You must provide a password to authenticate")
--
--	// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present.
--	ErrScopeDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName in a Scope with ProjectName")
--
--	// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
--	ErrScopeProjectIDOrProjectName = errors.New("You must provide at most one of ProjectID or ProjectName in a Scope")
--
--	// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope.
--	ErrScopeProjectIDAlone = errors.New("ProjectID must be supplied alone in a Scope")
--
--	// ErrScopeDomainName indicates that a DomainName was provided alone in a Scope.
--	ErrScopeDomainName = errors.New("DomainName must be supplied with a ProjectName or ProjectID in a Scope.")
--
--	// ErrScopeEmpty indicates that no credentials were provided in a Scope.
--	ErrScopeEmpty = errors.New("You must provide either a Project or Domain in a Scope")
--)
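These sentinels are plain errors.New values returned unwrapped by the Create call in the requests.go hunk that follows, so callers can compare them with ==, exactly as the deleted requests_test.go does. A minimal sketch of that pattern (the endpoint below is a placeholder, not taken from the patch):

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
)

func main() {
	// No password and no stored TokenID: Create fails before any HTTP request is made.
	client := &gophercloud.ServiceClient{
		ProviderClient: &gophercloud.ProviderClient{},
		Endpoint:       "http://identity.example.com:5000/v3/", // placeholder endpoint
	}

	_, err := tokens.Create(client, gophercloud.AuthOptions{}, nil).Extract()
	if err == tokens.ErrMissingPassword {
		fmt.Println("caught expected sentinel:", err)
	}
}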
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
-deleted file mode 100644
-index 5ca1031..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go
-+++ /dev/null
-@@ -1,286 +0,0 @@
--package tokens
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// Scope allows a created token to be limited to a specific domain or project.
--type Scope struct {
--	ProjectID   string
--	ProjectName string
--	DomainID    string
--	DomainName  string
--}
--
--func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string {
--	h := c.AuthenticatedHeaders()
--	h["X-Subject-Token"] = subjectToken
--	return h
--}
--
--// Create authenticates and either generates a new token, or changes the Scope of an existing token.
--func Create(c *gophercloud.ServiceClient, options gophercloud.AuthOptions, scope *Scope) CreateResult {
--	type domainReq struct {
--		ID   *string `json:"id,omitempty"`
--		Name *string `json:"name,omitempty"`
--	}
--
--	type projectReq struct {
--		Domain *domainReq `json:"domain,omitempty"`
--		Name   *string    `json:"name,omitempty"`
--		ID     *string    `json:"id,omitempty"`
--	}
--
--	type userReq struct {
--		ID       *string    `json:"id,omitempty"`
--		Name     *string    `json:"name,omitempty"`
--		Password string     `json:"password"`
--		Domain   *domainReq `json:"domain,omitempty"`
--	}
--
--	type passwordReq struct {
--		User userReq `json:"user"`
--	}
--
--	type tokenReq struct {
--		ID string `json:"id"`
--	}
--
--	type identityReq struct {
--		Methods  []string     `json:"methods"`
--		Password *passwordReq `json:"password,omitempty"`
--		Token    *tokenReq    `json:"token,omitempty"`
--	}
--
--	type scopeReq struct {
--		Domain  *domainReq  `json:"domain,omitempty"`
--		Project *projectReq `json:"project,omitempty"`
--	}
--
--	type authReq struct {
--		Identity identityReq `json:"identity"`
--		Scope    *scopeReq   `json:"scope,omitempty"`
--	}
--
--	type request struct {
--		Auth authReq `json:"auth"`
--	}
--
--	// Populate the request structure based on the provided arguments. Create and return an error
--	// if insufficient or incompatible information is present.
--	var req request
--
--	// Test first for unrecognized arguments.
--	if options.APIKey != "" {
--		return createErr(ErrAPIKeyProvided)
--	}
--	if options.TenantID != "" {
--		return createErr(ErrTenantIDProvided)
--	}
--	if options.TenantName != "" {
--		return createErr(ErrTenantNameProvided)
--	}
--
--	if options.Password == "" {
--		if c.TokenID != "" {
--			// Because we aren't using password authentication, it's an error to also provide any of the user-based authentication
--			// parameters.
--			if options.Username != "" {
--				return createErr(ErrUsernameWithToken)
--			}
--			if options.UserID != "" {
--				return createErr(ErrUserIDWithToken)
--			}
--			if options.DomainID != "" {
--				return createErr(ErrDomainIDWithToken)
--			}
--			if options.DomainName != "" {
--				return createErr(ErrDomainNameWithToken)
--			}
--
--			// Configure the request for Token authentication.
--			req.Auth.Identity.Methods = []string{"token"}
--			req.Auth.Identity.Token = &tokenReq{
--				ID: c.TokenID,
--			}
--		} else {
--			// If no password or token ID are available, authentication can't continue.
--			return createErr(ErrMissingPassword)
--		}
--	} else {
--		// Password authentication.
--		req.Auth.Identity.Methods = []string{"password"}
--
--		// At least one of Username and UserID must be specified.
--		if options.Username == "" && options.UserID == "" {
--			return createErr(ErrUsernameOrUserID)
--		}
--
--		if options.Username != "" {
--			// If Username is provided, UserID may not be provided.
--			if options.UserID != "" {
--				return createErr(ErrUsernameOrUserID)
--			}
--
--			// Either DomainID or DomainName must also be specified.
--			if options.DomainID == "" && options.DomainName == "" {
--				return createErr(ErrDomainIDOrDomainName)
--			}
--
--			if options.DomainID != "" {
--				if options.DomainName != "" {
--					return createErr(ErrDomainIDOrDomainName)
--				}
--
--				// Configure the request for Username and Password authentication with a DomainID.
--				req.Auth.Identity.Password = &passwordReq{
--					User: userReq{
--						Name:     &options.Username,
--						Password: options.Password,
--						Domain:   &domainReq{ID: &options.DomainID},
--					},
--				}
--			}
--
--			if options.DomainName != "" {
--				// Configure the request for Username and Password authentication with a DomainName.
--				req.Auth.Identity.Password = &passwordReq{
--					User: userReq{
--						Name:     &options.Username,
--						Password: options.Password,
--						Domain:   &domainReq{Name: &options.DomainName},
--					},
--				}
--			}
--		}
--
--		if options.UserID != "" {
--			// If UserID is specified, neither DomainID nor DomainName may be.
--			if options.DomainID != "" {
--				return createErr(ErrDomainIDWithUserID)
--			}
--			if options.DomainName != "" {
--				return createErr(ErrDomainNameWithUserID)
--			}
--
--			// Configure the request for UserID and Password authentication.
--			req.Auth.Identity.Password = &passwordReq{
--				User: userReq{ID: &options.UserID, Password: options.Password},
--			}
--		}
--	}
--
--	// Add a "scope" element if a Scope has been provided.
--	if scope != nil {
--		if scope.ProjectName != "" {
--			// ProjectName provided: either DomainID or DomainName must also be supplied.
--			// ProjectID may not be supplied.
--			if scope.DomainID == "" && scope.DomainName == "" {
--				return createErr(ErrScopeDomainIDOrDomainName)
--			}
--			if scope.ProjectID != "" {
--				return createErr(ErrScopeProjectIDOrProjectName)
--			}
--
--			if scope.DomainID != "" {
--				// ProjectName + DomainID
--				req.Auth.Scope = &scopeReq{
--					Project: &projectReq{
--						Name:   &scope.ProjectName,
--						Domain: &domainReq{ID: &scope.DomainID},
--					},
--				}
--			}
--
--			if scope.DomainName != "" {
--				// ProjectName + DomainName
--				req.Auth.Scope = &scopeReq{
--					Project: &projectReq{
--						Name:   &scope.ProjectName,
--						Domain: &domainReq{Name: &scope.DomainName},
--					},
--				}
--			}
--		} else if scope.ProjectID != "" {
--			// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
--			if scope.DomainID != "" {
--				return createErr(ErrScopeProjectIDAlone)
--			}
--			if scope.DomainName != "" {
--				return createErr(ErrScopeProjectIDAlone)
--			}
--
--			// ProjectID
--			req.Auth.Scope = &scopeReq{
--				Project: &projectReq{ID: &scope.ProjectID},
--			}
--		} else if scope.DomainID != "" {
--			// DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
--			if scope.DomainName != "" {
--				return createErr(ErrScopeDomainIDOrDomainName)
--			}
--
--			// DomainID
--			req.Auth.Scope = &scopeReq{
--				Domain: &domainReq{ID: &scope.DomainID},
--			}
--		} else if scope.DomainName != "" {
--			return createErr(ErrScopeDomainName)
--		} else {
--			return createErr(ErrScopeEmpty)
--		}
--	}
--
--	var result CreateResult
--	var response *perigee.Response
--	response, result.Err = perigee.Request("POST", tokenURL(c), perigee.Options{
--		ReqBody: &req,
--		Results: &result.Body,
--		OkCodes: []int{201},
--	})
--	if result.Err != nil {
--		return result
--	}
--	result.Header = response.HttpResponse.Header
--	return result
--}
--
--// Get validates and retrieves information about another token.
--func Get(c *gophercloud.ServiceClient, token string) GetResult {
--	var result GetResult
--	var response *perigee.Response
--	response, result.Err = perigee.Request("GET", tokenURL(c), perigee.Options{
--		MoreHeaders: subjectTokenHeaders(c, token),
--		Results:     &result.Body,
--		OkCodes:     []int{200, 203},
--	})
--	if result.Err != nil {
--		return result
--	}
--	result.Header = response.HttpResponse.Header
--	return result
--}
--
--// Validate determines if a specified token is valid or not.
--func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
--	response, err := perigee.Request("HEAD", tokenURL(c), perigee.Options{
--		MoreHeaders: subjectTokenHeaders(c, token),
--		OkCodes:     []int{204, 404},
--	})
--	if err != nil {
--		return false, err
--	}
--
--	return response.StatusCode == 204, nil
--}
--
--// Revoke immediately makes specified token invalid.
--func Revoke(c *gophercloud.ServiceClient, token string) RevokeResult {
--	var res RevokeResult
--	_, res.Err = perigee.Request("DELETE", tokenURL(c), perigee.Options{
--		MoreHeaders: subjectTokenHeaders(c, token),
--		OkCodes:     []int{204},
--	})
--	return res
--}
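Taken together, the file above exposes Create, Get, Validate, and Revoke against Keystone v3. A minimal usage sketch, assuming a hand-built service client as in the deleted tests (the endpoint and credentials below are placeholders):

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
)

func main() {
	// Hand-built client, as in the deleted requests_test.go; endpoint and credentials are placeholders.
	client := &gophercloud.ServiceClient{
		ProviderClient: &gophercloud.ProviderClient{},
		Endpoint:       "http://identity.example.com:5000/v3/",
	}

	// Password authentication by UserID, scoped to a single project.
	opts := gophercloud.AuthOptions{UserID: "me", Password: "secret"}
	scope := &tokens.Scope{ProjectID: "123456"}

	token, err := tokens.Create(client, opts, scope).Extract()
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("issued", token.ID, "expiring at", token.ExpiresAt)

	// Validate does a HEAD on /auth/tokens: true on 204, false on 404.
	if ok, err := tokens.Validate(client, token.ID); err == nil {
		fmt.Println("still valid:", ok)
	}

	// Revoke invalidates the token immediately via DELETE.
	if res := tokens.Revoke(client, token.ID); res.Err != nil {
		fmt.Println("revoke failed:", res.Err)
	}
}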
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
-deleted file mode 100644
-index 2b26e4a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests_test.go
-+++ /dev/null
-@@ -1,514 +0,0 @@
--package tokens
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--	"time"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--// authTokenPost verifies that providing certain AuthOptions and Scope results in an expected JSON structure.
--func authTokenPost(t *testing.T, options gophercloud.AuthOptions, scope *Scope, requestJSON string) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	client := gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{
--			TokenID: "12345abcdef",
--		},
--		Endpoint: testhelper.Endpoint(),
--	}
--
--	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "POST")
--		testhelper.TestHeader(t, r, "Content-Type", "application/json")
--		testhelper.TestHeader(t, r, "Accept", "application/json")
--		testhelper.TestJSONRequest(t, r, requestJSON)
--
--		w.WriteHeader(http.StatusCreated)
--		fmt.Fprintf(w, `{
--			"token": {
--				"expires_at": "2014-10-02T13:45:00.000000Z"
--			}
--		}`)
--	})
--
--	_, err := Create(&client, options, scope).Extract()
--	if err != nil {
--		t.Errorf("Create returned an error: %v", err)
--	}
--}
--
--func authTokenPostErr(t *testing.T, options gophercloud.AuthOptions, scope *Scope, includeToken bool, expectedErr error) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	client := gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{},
--		Endpoint:       testhelper.Endpoint(),
--	}
--	if includeToken {
--		client.TokenID = "abcdef123456"
--	}
--
--	_, err := Create(&client, options, scope).Extract()
--	if err == nil {
--		t.Errorf("Create did NOT return an error")
--	}
--	if err != expectedErr {
--		t.Errorf("Create returned an unexpected error: wanted %v, got %v", expectedErr, err)
--	}
--}
--
--func TestCreateUserIDAndPassword(t *testing.T) {
--	authTokenPost(t, gophercloud.AuthOptions{UserID: "me", Password: "squirrel!"}, nil, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": { "id": "me", "password": "squirrel!" }
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateUsernameDomainIDPassword(t *testing.T) {
--	authTokenPost(t, gophercloud.AuthOptions{Username: "fakey", Password: "notpassword", DomainID: "abc123"}, nil, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"domain": {
--								"id": "abc123"
--							},
--							"name": "fakey",
--							"password": "notpassword"
--						}
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateUsernameDomainNamePassword(t *testing.T) {
--	authTokenPost(t, gophercloud.AuthOptions{Username: "frank", Password: "swordfish", DomainName: "spork.net"}, nil, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"domain": {
--								"name": "spork.net"
--							},
--							"name": "frank",
--							"password": "swordfish"
--						}
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateTokenID(t *testing.T) {
--	authTokenPost(t, gophercloud.AuthOptions{}, nil, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["token"],
--					"token": {
--						"id": "12345abcdef"
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateProjectIDScope(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"}
--	scope := &Scope{ProjectID: "123456"}
--	authTokenPost(t, options, scope, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"id": "fenris",
--							"password": "g0t0h311"
--						}
--					}
--				},
--				"scope": {
--					"project": {
--						"id": "123456"
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateDomainIDScope(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"}
--	scope := &Scope{DomainID: "1000"}
--	authTokenPost(t, options, scope, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"id": "fenris",
--							"password": "g0t0h311"
--						}
--					}
--				},
--				"scope": {
--					"domain": {
--						"id": "1000"
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateProjectNameAndDomainIDScope(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"}
--	scope := &Scope{ProjectName: "world-domination", DomainID: "1000"}
--	authTokenPost(t, options, scope, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"id": "fenris",
--							"password": "g0t0h311"
--						}
--					}
--				},
--				"scope": {
--					"project": {
--						"domain": {
--							"id": "1000"
--						},
--						"name": "world-domination"
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateProjectNameAndDomainNameScope(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "fenris", Password: "g0t0h311"}
--	scope := &Scope{ProjectName: "world-domination", DomainName: "evil-plans"}
--	authTokenPost(t, options, scope, `
--		{
--			"auth": {
--				"identity": {
--					"methods": ["password"],
--					"password": {
--						"user": {
--							"id": "fenris",
--							"password": "g0t0h311"
--						}
--					}
--				},
--				"scope": {
--					"project": {
--						"domain": {
--							"name": "evil-plans"
--						},
--						"name": "world-domination"
--					}
--				}
--			}
--		}
--	`)
--}
--
--func TestCreateExtractsTokenFromResponse(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	client := gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{},
--		Endpoint:       testhelper.Endpoint(),
--	}
--
--	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("X-Subject-Token", "aaa111")
--
--		w.WriteHeader(http.StatusCreated)
--		fmt.Fprintf(w, `{
--			"token": {
--				"expires_at": "2014-10-02T13:45:00.000000Z"
--			}
--		}`)
--	})
--
--	options := gophercloud.AuthOptions{UserID: "me", Password: "shhh"}
--	token, err := Create(&client, options, nil).Extract()
--	if err != nil {
--		t.Fatalf("Create returned an error: %v", err)
--	}
--
--	if token.ID != "aaa111" {
--		t.Errorf("Expected token to be aaa111, but was %s", token.ID)
--	}
--}
--
--func TestCreateFailureEmptyAuth(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{}, nil, false, ErrMissingPassword)
--}
--
--func TestCreateFailureAPIKey(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{APIKey: "something"}, nil, false, ErrAPIKeyProvided)
--}
--
--func TestCreateFailureTenantID(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{TenantID: "something"}, nil, false, ErrTenantIDProvided)
--}
--
--func TestCreateFailureTenantName(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{TenantName: "something"}, nil, false, ErrTenantNameProvided)
--}
--
--func TestCreateFailureTokenIDUsername(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{Username: "something"}, nil, true, ErrUsernameWithToken)
--}
--
--func TestCreateFailureTokenIDUserID(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{UserID: "something"}, nil, true, ErrUserIDWithToken)
--}
--
--func TestCreateFailureTokenIDDomainID(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{DomainID: "something"}, nil, true, ErrDomainIDWithToken)
--}
--
--func TestCreateFailureTokenIDDomainName(t *testing.T) {
--	authTokenPostErr(t, gophercloud.AuthOptions{DomainName: "something"}, nil, true, ErrDomainNameWithToken)
--}
--
--func TestCreateFailureMissingUser(t *testing.T) {
--	options := gophercloud.AuthOptions{Password: "supersecure"}
--	authTokenPostErr(t, options, nil, false, ErrUsernameOrUserID)
--}
--
--func TestCreateFailureBothUser(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Password: "supersecure",
--		Username: "oops",
--		UserID:   "redundancy",
--	}
--	authTokenPostErr(t, options, nil, false, ErrUsernameOrUserID)
--}
--
--func TestCreateFailureMissingDomain(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Password: "supersecure",
--		Username: "notuniqueenough",
--	}
--	authTokenPostErr(t, options, nil, false, ErrDomainIDOrDomainName)
--}
--
--func TestCreateFailureBothDomain(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Password:   "supersecure",
--		Username:   "someone",
--		DomainID:   "hurf",
--		DomainName: "durf",
--	}
--	authTokenPostErr(t, options, nil, false, ErrDomainIDOrDomainName)
--}
--
--func TestCreateFailureUserIDDomainID(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		UserID:   "100",
--		Password: "stuff",
--		DomainID: "oops",
--	}
--	authTokenPostErr(t, options, nil, false, ErrDomainIDWithUserID)
--}
--
--func TestCreateFailureUserIDDomainName(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		UserID:     "100",
--		Password:   "sssh",
--		DomainName: "oops",
--	}
--	authTokenPostErr(t, options, nil, false, ErrDomainNameWithUserID)
--}
--
--func TestCreateFailureScopeProjectNameAlone(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{ProjectName: "notenough"}
--	authTokenPostErr(t, options, scope, false, ErrScopeDomainIDOrDomainName)
--}
--
--func TestCreateFailureScopeProjectNameAndID(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{ProjectName: "whoops", ProjectID: "toomuch", DomainID: "1234"}
--	authTokenPostErr(t, options, scope, false, ErrScopeProjectIDOrProjectName)
--}
--
--func TestCreateFailureScopeProjectIDAndDomainID(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{ProjectID: "toomuch", DomainID: "notneeded"}
--	authTokenPostErr(t, options, scope, false, ErrScopeProjectIDAlone)
--}
--
--func TestCreateFailureScopeProjectIDAndDomainName(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{ProjectID: "toomuch", DomainName: "notneeded"}
--	authTokenPostErr(t, options, scope, false, ErrScopeProjectIDAlone)
--}
--
--func TestCreateFailureScopeDomainIDAndDomainName(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{DomainID: "toomuch", DomainName: "notneeded"}
--	authTokenPostErr(t, options, scope, false, ErrScopeDomainIDOrDomainName)
--}
--
--func TestCreateFailureScopeDomainNameAlone(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{DomainName: "notenough"}
--	authTokenPostErr(t, options, scope, false, ErrScopeDomainName)
--}
--
--func TestCreateFailureEmptyScope(t *testing.T) {
--	options := gophercloud.AuthOptions{UserID: "myself", Password: "swordfish"}
--	scope := &Scope{}
--	authTokenPostErr(t, options, scope, false, ErrScopeEmpty)
--}
--
--func TestGetRequest(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	client := gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{
--			TokenID: "12345abcdef",
--		},
--		Endpoint: testhelper.Endpoint(),
--	}
--
--	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, "GET")
--		testhelper.TestHeader(t, r, "Content-Type", "")
--		testhelper.TestHeader(t, r, "Accept", "application/json")
--		testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef")
--		testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345")
--
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--			{ "token": { "expires_at": "2014-08-29T13:10:01.000000Z" } }
--		`)
--	})
--
--	token, err := Get(&client, "abcdef12345").Extract()
--	if err != nil {
--		t.Errorf("Info returned an error: %v", err)
--	}
--
--	expected, _ := time.Parse(time.UnixDate, "Fri Aug 29 13:10:01 UTC 2014")
--	if token.ExpiresAt != expected {
--		t.Errorf("Expected expiration time %s, but was %s", expected.Format(time.UnixDate), token.ExpiresAt.Format(time.UnixDate))
--	}
--}
--
--func prepareAuthTokenHandler(t *testing.T, expectedMethod string, status int) gophercloud.ServiceClient {
--	client := gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{
--			TokenID: "12345abcdef",
--		},
--		Endpoint: testhelper.Endpoint(),
--	}
--
--	testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
--		testhelper.TestMethod(t, r, expectedMethod)
--		testhelper.TestHeader(t, r, "Content-Type", "")
--		testhelper.TestHeader(t, r, "Accept", "application/json")
--		testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef")
--		testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345")
--
--		w.WriteHeader(status)
--	})
--
--	return client
--}
--
--func TestValidateRequestSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	client := prepareAuthTokenHandler(t, "HEAD", http.StatusNoContent)
--
--	ok, err := Validate(&client, "abcdef12345")
--	if err != nil {
--		t.Errorf("Unexpected error from Validate: %v", err)
--	}
--
--	if !ok {
--		t.Errorf("Validate returned false for a valid token")
--	}
--}
--
--func TestValidateRequestFailure(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	client := prepareAuthTokenHandler(t, "HEAD", http.StatusNotFound)
--
--	ok, err := Validate(&client, "abcdef12345")
--	if err != nil {
--		t.Errorf("Unexpected error from Validate: %v", err)
--	}
--
--	if ok {
--		t.Errorf("Validate returned true for an invalid token")
--	}
--}
--
--func TestValidateRequestError(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	client := prepareAuthTokenHandler(t, "HEAD", http.StatusUnauthorized)
--
--	_, err := Validate(&client, "abcdef12345")
--	if err == nil {
--		t.Errorf("Missing expected error from Validate")
--	}
--}
--
--func TestRevokeRequestSuccessful(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	client := prepareAuthTokenHandler(t, "DELETE", http.StatusNoContent)
--
--	res := Revoke(&client, "abcdef12345")
--	testhelper.AssertNoErr(t, res.Err)
--}
--
--func TestRevokeRequestError(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	client := prepareAuthTokenHandler(t, "DELETE", http.StatusNotFound)
--
--	res := Revoke(&client, "abcdef12345")
--	if res.Err == nil {
--		t.Errorf("Missing expected error from Revoke")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
-deleted file mode 100644
-index d1fff4c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go
-+++ /dev/null
-@@ -1,73 +0,0 @@
--package tokens
--
--import (
--	"time"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--)
--
--// commonResult is the deferred result of a Create or a Get call.
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract interprets a commonResult as a Token.
--func (r commonResult) Extract() (*Token, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var response struct {
--		Token struct {
--			ExpiresAt string `mapstructure:"expires_at"`
--		} `mapstructure:"token"`
--	}
--
--	var token Token
--
--	// Parse the token itself from the stored headers.
--	token.ID = r.Header.Get("X-Subject-Token")
--
--	err := mapstructure.Decode(r.Body, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	// Attempt to parse the timestamp.
--	token.ExpiresAt, err = time.Parse(gophercloud.RFC3339Milli, response.Token.ExpiresAt)
--
--	return &token, err
--}
--
--// CreateResult is the deferred response from a Create call.
--type CreateResult struct {
--	commonResult
--}
--
--// createErr quickly creates a CreateResult that reports an error.
--func createErr(err error) CreateResult {
--	return CreateResult{
--		commonResult: commonResult{Result: gophercloud.Result{Err: err}},
--	}
--}
--
--// GetResult is the deferred response from a Get call.
--type GetResult struct {
--	commonResult
--}
--
--// RevokeResult is the deferred response from a Revoke call.
--type RevokeResult struct {
--	commonResult
--}
--
--// Token is a string that grants a user access to a controlled set of services in an OpenStack provider.
--// Each Token is valid for a set length of time.
--type Token struct {
--	// ID is the issued token.
--	ID string
--
--	// ExpiresAt is the timestamp at which this token will no longer be accepted.
--	ExpiresAt time.Time
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
-deleted file mode 100644
-index 360b60a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--package tokens
--
--import "github.com/rackspace/gophercloud"
--
--func tokenURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("auth", "tokens")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
-deleted file mode 100644
-index 549c398..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls_test.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--package tokens
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestTokenURL(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	client := gophercloud.ServiceClient{Endpoint: testhelper.Endpoint()}
--
--	expected := testhelper.Endpoint() + "auth/tokens"
--	actual := tokenURL(&client)
--	if actual != expected {
--		t.Errorf("Expected URL %s, but was %s", expected, actual)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
-deleted file mode 100644
-index 0208ee2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/doc.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--// Package apiversions provides information and interaction with the different
--// API versions for the OpenStack Neutron service. This functionality is not
--// restricted to this particular version.
--package apiversions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
-deleted file mode 100644
-index 76bdb14..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/errors.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package apiversions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
-deleted file mode 100644
-index 9fb6de1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--package apiversions
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListVersions lists all the Neutron API versions available to end-users
--func ListVersions(c *gophercloud.ServiceClient) pagination.Pager {
--	return pagination.NewPager(c, apiVersionsURL(c), func(r pagination.PageResult) pagination.Page {
--		return APIVersionPage{pagination.SinglePageBase(r)}
--	})
--}
--
--// ListVersionResources lists all of the different API resources for a particular
--// API versions. Typical resources for Neutron might be: networks, subnets, etc.
--func ListVersionResources(c *gophercloud.ServiceClient, v string) pagination.Pager {
--	return pagination.NewPager(c, apiInfoURL(c, v), func(r pagination.PageResult) pagination.Page {
--		return APIVersionResourcePage{pagination.SinglePageBase(r)}
--	})
--}
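A short sketch of how these two pagers are consumed, using the ExtractAPIVersions and ExtractVersionResources helpers from the results.go hunk further down; the Neutron endpoint below is a placeholder, and the flow mirrors the deleted requests_test.go:

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/apiversions"
	"github.com/rackspace/gophercloud/pagination"
)

func main() {
	// Placeholder Neutron root endpoint; the pagers issue requests lazily as pages are walked.
	client := &gophercloud.ServiceClient{
		ProviderClient: &gophercloud.ProviderClient{},
		Endpoint:       "http://neutron.example.com:9696/",
	}

	// Walk every advertised API version.
	apiversions.ListVersions(client).EachPage(func(page pagination.Page) (bool, error) {
		versions, err := apiversions.ExtractAPIVersions(page)
		if err != nil {
			return false, err
		}
		for _, v := range versions {
			fmt.Println(v.ID, v.Status)
		}
		return true, nil
	})

	// Then walk the resources exposed by one version.
	apiversions.ListVersionResources(client, "v2.0").EachPage(func(page pagination.Page) (bool, error) {
		resources, err := apiversions.ExtractVersionResources(page)
		if err != nil {
			return false, err
		}
		for _, r := range resources {
			fmt.Println(r.Name, "->", r.Collection)
		}
		return true, nil
	})
}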
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go
-deleted file mode 100644
-index d35af9f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/requests_test.go
-+++ /dev/null
-@@ -1,182 +0,0 @@
--package apiversions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListVersions(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "versions": [
--        {
--            "status": "CURRENT",
--            "id": "v2.0",
--            "links": [
--                {
--                    "href": "http://23.253.228.211:9696/v2.0",
--                    "rel": "self"
--                }
--            ]
--        }
--    ]
--}`)
--	})
--
--	count := 0
--
--	ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractAPIVersions(page)
--		if err != nil {
--			t.Errorf("Failed to extract API versions: %v", err)
--			return false, err
--		}
--
--		expected := []APIVersion{
--			APIVersion{
--				Status: "CURRENT",
--				ID:     "v2.0",
--			},
--		}
--
--		th.AssertDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestNonJSONCannotBeExtractedIntoAPIVersions(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		w.WriteHeader(http.StatusOK)
--	})
--
--	ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		if _, err := ExtractAPIVersions(page); err == nil {
--			t.Fatalf("Expected error, got nil")
--		}
--		return true, nil
--	})
--}
--
--func TestAPIInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "resources": [
--        {
--            "links": [
--                {
--                    "href": "http://23.253.228.211:9696/v2.0/subnets",
--                    "rel": "self"
--                }
--            ],
--            "name": "subnet",
--            "collection": "subnets"
--        },
--        {
--            "links": [
--                {
--                    "href": "http://23.253.228.211:9696/v2.0/networks",
--                    "rel": "self"
--                }
--            ],
--            "name": "network",
--            "collection": "networks"
--        },
--        {
--            "links": [
--                {
--                    "href": "http://23.253.228.211:9696/v2.0/ports",
--                    "rel": "self"
--                }
--            ],
--            "name": "port",
--            "collection": "ports"
--        }
--    ]
--}
--			`)
--	})
--
--	count := 0
--
--	ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVersionResources(page)
--		if err != nil {
--			t.Errorf("Failed to extract version resources: %v", err)
--			return false, err
--		}
--
--		expected := []APIVersionResource{
--			APIVersionResource{
--				Name:       "subnet",
--				Collection: "subnets",
--			},
--			APIVersionResource{
--				Name:       "network",
--				Collection: "networks",
--			},
--			APIVersionResource{
--				Name:       "port",
--				Collection: "ports",
--			},
--		}
--
--		th.AssertDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestNonJSONCannotBeExtractedIntoAPIVersionResources(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		w.WriteHeader(http.StatusOK)
--	})
--
--	ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) {
--		if _, err := ExtractVersionResources(page); err == nil {
--			t.Fatalf("Expected error, got nil")
--		}
--		return true, nil
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go
-deleted file mode 100644
-index 9715934..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/results.go
-+++ /dev/null
-@@ -1,77 +0,0 @@
--package apiversions
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// APIVersion represents an API version for Neutron. It contains the status of
--// the API, and its unique ID.
--type APIVersion struct {
--	Status string `mapstructure:"status" json:"status"`
--	ID     string `mapstructure:"id" json:"id"`
--}
--
--// APIVersionPage is the page returned by a pager when traversing over a
--// collection of API versions.
--type APIVersionPage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty checks whether an APIVersionPage struct is empty.
--func (r APIVersionPage) IsEmpty() (bool, error) {
--	is, err := ExtractAPIVersions(r)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractAPIVersions takes a collection page, extracts all of the elements,
--// and returns them a slice of APIVersion structs. It is effectively a cast.
--func ExtractAPIVersions(page pagination.Page) ([]APIVersion, error) {
--	var resp struct {
--		Versions []APIVersion `mapstructure:"versions"`
--	}
--
--	err := mapstructure.Decode(page.(APIVersionPage).Body, &resp)
--
--	return resp.Versions, err
--}
--
--// APIVersionResource represents a generic API resource. It contains the name
--// of the resource and its plural collection name.
--type APIVersionResource struct {
--	Name       string `mapstructure:"name" json:"name"`
--	Collection string `mapstructure:"collection" json:"collection"`
--}
--
--// APIVersionResourcePage is a concrete type which embeds the common
--// SinglePageBase struct, and is used when traversing API versions collections.
--type APIVersionResourcePage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty is a concrete function which indicates whether an
--// APIVersionResourcePage is empty or not.
--func (r APIVersionResourcePage) IsEmpty() (bool, error) {
--	is, err := ExtractVersionResources(r)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractVersionResources accepts a Page struct, specifically an
--// APIVersionResourcePage struct, and extracts the elements into a slice of
--// APIVersionResource structs. In other words, the collection is mapped into
--// a relevant slice.
--func ExtractVersionResources(page pagination.Page) ([]APIVersionResource, error) {
--	var resp struct {
--		APIVersionResources []APIVersionResource `mapstructure:"resources"`
--	}
--
--	err := mapstructure.Decode(page.(APIVersionResourcePage).Body, &resp)
--
--	return resp.APIVersionResources, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go
-deleted file mode 100644
-index 58aa2b6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--package apiversions
--
--import (
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--)
--
--func apiVersionsURL(c *gophercloud.ServiceClient) string {
--	return c.Endpoint
--}
--
--func apiInfoURL(c *gophercloud.ServiceClient, version string) string {
--	return c.Endpoint + strings.TrimRight(version, "/") + "/"
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go
-deleted file mode 100644
-index 7dd069c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/apiversions/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package apiversions
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestAPIVersionsURL(t *testing.T) {
--	actual := apiVersionsURL(endpointClient())
--	expected := endpoint
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestAPIInfoURL(t *testing.T) {
--	actual := apiInfoURL(endpointClient(), "v2.0")
--	expected := endpoint + "v2.0/"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go
-deleted file mode 100644
-index 4160351..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go
-+++ /dev/null
-@@ -1,14 +0,0 @@
--package common
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--const TokenID = client.TokenID
--
--func ServiceClient() *gophercloud.ServiceClient {
--	sc := client.ServiceClient()
--	sc.ResourceBase = sc.Endpoint + "v2.0/"
--	return sc
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go
-deleted file mode 100644
-index d08e1fd..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate.go
-+++ /dev/null
-@@ -1,41 +0,0 @@
--package extensions
--
--import (
--	"github.com/rackspace/gophercloud"
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Extension is a single OpenStack extension.
--type Extension struct {
--	common.Extension
--}
--
--// GetResult wraps a GetResult from common.
--type GetResult struct {
--	common.GetResult
--}
--
--// ExtractExtensions interprets a Page as a slice of Extensions.
--func ExtractExtensions(page pagination.Page) ([]Extension, error) {
--	inner, err := common.ExtractExtensions(page)
--	if err != nil {
--		return nil, err
--	}
--	outer := make([]Extension, len(inner))
--	for index, ext := range inner {
--		outer[index] = Extension{ext}
--	}
--	return outer, nil
--}
--
--// Get retrieves information for a specific extension using its alias.
--func Get(c *gophercloud.ServiceClient, alias string) GetResult {
--	return GetResult{common.Get(c, alias)}
--}
--
--// List returns a Pager which allows you to iterate over the full collection of extensions.
--// It does not accept query parameters.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return common.List(c)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go
-deleted file mode 100644
-index 3d2ac78..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/delegate_test.go
-+++ /dev/null
-@@ -1,105 +0,0 @@
--package extensions
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/extensions", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--
--		fmt.Fprintf(w, `
--{
--    "extensions": [
--        {
--            "updated": "2013-01-20T00:00:00-00:00",
--            "name": "Neutron Service Type Management",
--            "links": [],
--            "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--            "alias": "service-type",
--            "description": "API for retrieving service providers for Neutron advanced services"
--        }
--    ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractExtensions(page)
--		if err != nil {
--			t.Errorf("Failed to extract extensions: %v", err)
--		}
--
--		expected := []Extension{
--			Extension{
--				common.Extension{
--					Updated:     "2013-01-20T00:00:00-00:00",
--					Name:        "Neutron Service Type Management",
--					Links:       []interface{}{},
--					Namespace:   "http://docs.openstack.org/ext/neutron/service-type/api/v1.0",
--					Alias:       "service-type",
--					Description: "API for retrieving service providers for Neutron advanced services",
--				},
--			},
--		}
--
--		th.AssertDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/extensions/agent", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "extension": {
--        "updated": "2013-02-03T10:00:00-00:00",
--        "name": "agent",
--        "links": [],
--        "namespace": "http://docs.openstack.org/ext/agent/api/v2.0",
--        "alias": "agent",
--        "description": "The agent management extension."
--    }
--}
--    `)
--	})
--
--	ext, err := Get(fake.ServiceClient(), "agent").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00")
--	th.AssertEquals(t, ext.Name, "agent")
--	th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0")
--	th.AssertEquals(t, ext.Alias, "agent")
--	th.AssertEquals(t, ext.Description, "The agent management extension.")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go
-deleted file mode 100644
-index dad3a84..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package external provides information and interaction with the external
--// extension for the OpenStack Networking service.
--package external
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go
-deleted file mode 100644
-index 2f04593..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go
-+++ /dev/null
-@@ -1,56 +0,0 @@
--package external
--
--import "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--// CreateOpts is the structure used when creating new external network
--// resources. It embeds networks.CreateOpts and so inherits all of its required
--// and optional fields, with the addition of the External field.
--type CreateOpts struct {
--	Parent   networks.CreateOpts
--	External bool
--}
--
--// ToNetworkCreateMap casts a CreateOpts struct to a map.
--func (o CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) {
--	outer, err := o.Parent.ToNetworkCreateMap()
--	if err != nil {
--		return nil, err
--	}
--
--	outer["network"].(map[string]interface{})["router:external"] = o.External
--
--	return outer, nil
--}
--
--// UpdateOpts is the structure used when updating existing external network
--// resources. It embeds networks.UpdateOpts and so inherits all of its required
--// and optional fields, with the addition of the External field.
--type UpdateOpts struct {
--	Parent   networks.UpdateOpts
--	External bool
--}
--
--// ToNetworkUpdateMap casts an UpdateOpts struct to a map.
--func (o UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) {
--	outer, err := o.Parent.ToNetworkUpdateMap()
--	if err != nil {
--		return nil, err
--	}
--
--	outer["network"].(map[string]interface{})["router:external"] = o.External
--
--	return outer, nil
--}
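The wrappers above simply splice the "router:external" key into the map built by the embedded networks options, so they drop straight into networks.Create and networks.Update. A minimal sketch of the create path, assuming a Neutron v2.0 client (placeholder endpoint), mirroring the deleted results_test.go:

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external"
	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
)

func main() {
	// Placeholder client; ResourceBase points at the v2.0 API root, as in the deleted common_tests.go.
	client := &gophercloud.ServiceClient{
		ProviderClient: &gophercloud.ProviderClient{},
		Endpoint:       "http://neutron.example.com:9696/",
		ResourceBase:   "http://neutron.example.com:9696/v2.0/",
	}

	// Embed the ordinary network options and flag the network as external.
	opts := external.CreateOpts{
		Parent:   networks.CreateOpts{Name: "ext_net", AdminStateUp: external.Up},
		External: true,
	}

	// The wrapper satisfies the same ToNetworkCreateMap contract, so it is passed to networks.Create as-is.
	res := networks.Create(client, opts)

	// ExtractCreate decodes the extended "router:external" attribute alongside the base fields.
	n, err := external.ExtractCreate(res)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("created", n.ID, "external:", n.External)
}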
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
-deleted file mode 100644
-index 1c173c0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go
-+++ /dev/null
-@@ -1,81 +0,0 @@
--package external
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// NetworkExternal represents a decorated form of a Network based on the
--// "external-net" extension.
--type NetworkExternal struct {
--	// UUID for the network
--	ID string `mapstructure:"id" json:"id"`
--
--	// Human-readable name for the network. Might not be unique.
--	Name string `mapstructure:"name" json:"name"`
--
--	// The administrative state of network. If false (down), the network does not forward packets.
--	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
--
--	// Indicates whether network is currently operational. Possible values include
--	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
--	Status string `mapstructure:"status" json:"status"`
--
--	// Subnets associated with this network.
--	Subnets []string `mapstructure:"subnets" json:"subnets"`
--
--	// Owner of network. Only admin users can specify a tenant_id other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--
--	// Specifies whether the network resource can be accessed by any tenant or not.
--	Shared bool `mapstructure:"shared" json:"shared"`
--
--	// Specifies whether the network is an external network or not.
--	External bool `mapstructure:"router:external" json:"router:external"`
--}
--
--func commonExtract(e error, response interface{}) (*NetworkExternal, error) {
--	if e != nil {
--		return nil, e
--	}
--
--	var res struct {
--		Network *NetworkExternal `json:"network"`
--	}
--
--	err := mapstructure.Decode(response, &res)
--
--	return res.Network, err
--}
--
--// ExtractGet decorates a GetResult struct returned from a networks.Get()
--// function with extended attributes.
--func ExtractGet(r networks.GetResult) (*NetworkExternal, error) {
--	return commonExtract(r.Err, r.Body)
--}
--
--// ExtractCreate decorates a CreateResult struct returned from a networks.Create()
--// function with extended attributes.
--func ExtractCreate(r networks.CreateResult) (*NetworkExternal, error) {
--	return commonExtract(r.Err, r.Body)
--}
--
--// ExtractUpdate decorates an UpdateResult struct returned from a
--// networks.Update() function with extended attributes.
--func ExtractUpdate(r networks.UpdateResult) (*NetworkExternal, error) {
--	return commonExtract(r.Err, r.Body)
--}
--
--// ExtractList accepts a Page struct, specifically a NetworkPage struct, and
--// extracts the elements into a slice of NetworkExtAttrs structs. In other
--// words, a generic collection is mapped into a relevant slice.
--func ExtractList(page pagination.Page) ([]NetworkExternal, error) {
--	var resp struct {
--		Networks []NetworkExternal `mapstructure:"networks" json:"networks"`
--	}
--
--	err := mapstructure.Decode(page.(networks.NetworkPage).Body, &resp)
--
--	return resp.Networks, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go
-deleted file mode 100644
-index 916cd2c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results_test.go
-+++ /dev/null
-@@ -1,254 +0,0 @@
--package external
--
--import (
--	"errors"
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "networks": [
--        {
--            "admin_state_up": true,
--            "id": "0f38d5ad-10a6-428f-a5fc-825cfe0f1970",
--            "name": "net1",
--            "router:external": false,
--            "shared": false,
--            "status": "ACTIVE",
--            "subnets": [
--                "25778974-48a8-46e7-8998-9dc8c70d2f06"
--            ],
--            "tenant_id": "b575417a6c444a6eb5cc3a58eb4f714a"
--        },
--        {
--            "admin_state_up": true,
--            "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c",
--            "name": "ext_net",
--            "router:external": true,
--            "shared": false,
--            "status": "ACTIVE",
--            "subnets": [
--                "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"
--            ],
--            "tenant_id": "5eb8995cf717462c9df8d1edfa498010"
--        }
--    ]
--}
--			`)
--	})
--
--	count := 0
--
--	networks.List(fake.ServiceClient(), networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractList(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		expected := []NetworkExternal{
--			NetworkExternal{
--				Status:       "ACTIVE",
--				Subnets:      []string{"25778974-48a8-46e7-8998-9dc8c70d2f06"},
--				Name:         "net1",
--				AdminStateUp: true,
--				TenantID:     "b575417a6c444a6eb5cc3a58eb4f714a",
--				Shared:       false,
--				ID:           "0f38d5ad-10a6-428f-a5fc-825cfe0f1970",
--				External:     false,
--			},
--			NetworkExternal{
--				Status:       "ACTIVE",
--				Subnets:      []string{"2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"},
--				Name:         "ext_net",
--				AdminStateUp: true,
--				TenantID:     "5eb8995cf717462c9df8d1edfa498010",
--				Shared:       false,
--				ID:           "8d05a1b1-297a-46ca-8974-17debf51ca3c",
--				External:     true,
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "admin_state_up": true,
--        "id": "8d05a1b1-297a-46ca-8974-17debf51ca3c",
--        "name": "ext_net",
--        "router:external": true,
--        "shared": false,
--        "status": "ACTIVE",
--        "subnets": [
--            "2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"
--        ],
--        "tenant_id": "5eb8995cf717462c9df8d1edfa498010"
--    }
--}
--			`)
--	})
--
--	res := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	n, err := ExtractGet(res)
--
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, true, n.External)
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "network": {
--        "admin_state_up": true,
--        "name": "ext_net",
--        "router:external": true
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--	"network": {
--			"admin_state_up": true,
--			"id": "8d05a1b1-297a-46ca-8974-17debf51ca3c",
--			"name": "ext_net",
--			"router:external": true,
--			"shared": false,
--			"status": "ACTIVE",
--			"subnets": [
--					"2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"
--			],
--			"tenant_id": "5eb8995cf717462c9df8d1edfa498010"
--	}
--}
--		`)
--	})
--
--	options := CreateOpts{networks.CreateOpts{Name: "ext_net", AdminStateUp: Up}, true}
--	res := networks.Create(fake.ServiceClient(), options)
--
--	n, err := ExtractCreate(res)
--
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, true, n.External)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--		"network": {
--				"router:external": true,
--				"name": "new_name"
--		}
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--	"network": {
--			"admin_state_up": true,
--			"id": "8d05a1b1-297a-46ca-8974-17debf51ca3c",
--			"name": "new_name",
--			"router:external": true,
--			"shared": false,
--			"status": "ACTIVE",
--			"subnets": [
--					"2f1fb918-9b0e-4bf9-9a50-6cebbb4db2c5"
--			],
--			"tenant_id": "5eb8995cf717462c9df8d1edfa498010"
--	}
--}
--		`)
--	})
--
--	options := UpdateOpts{networks.UpdateOpts{Name: "new_name"}, true}
--	res := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options)
--	n, err := ExtractUpdate(res)
--
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, true, n.External)
--}
--
--func TestExtractFnsReturnsErrWhenResultContainsErr(t *testing.T) {
--	gr := networks.GetResult{}
--	gr.Err = errors.New("")
--
--	if _, err := ExtractGet(gr); err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	ur := networks.UpdateResult{}
--	ur.Err = errors.New("")
--
--	if _, err := ExtractUpdate(ur); err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	cr := networks.CreateResult{}
--	cr.Err = errors.New("")
--
--	if _, err := ExtractCreate(cr); err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go
-deleted file mode 100644
-index d533458..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/doc.go
-+++ /dev/null
-@@ -1,5 +0,0 @@
--// Package layer3 provides access to the Layer-3 networking extension for the
--// OpenStack Neutron service. This extension allows API users to route packets
--// between subnets, forward packets from internal networks to external ones,
--// and access instances from external networks through floating IPs.
--package layer3
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
-deleted file mode 100644
-index d23f9e2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
-+++ /dev/null
-@@ -1,190 +0,0 @@
--package floatingips
--
--import (
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the floating IP attributes you want to see returned. SortKey allows you to
--// sort by a particular floating IP attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	ID                string `q:"id"`
--	FloatingNetworkID string `q:"floating_network_id"`
--	PortID            string `q:"port_id"`
--	FixedIP           string `q:"fixed_ip_address"`
--	FloatingIP        string `q:"floating_ip_address"`
--	TenantID          string `q:"tenant_id"`
--	Limit             int    `q:"limit"`
--	Marker            string `q:"marker"`
--	SortKey           string `q:"sort_key"`
--	SortDir           string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// floating IP resources. It accepts a ListOpts struct, which allows you to
--// filter and sort the returned collection for greater efficiency.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// CreateOpts contains all the values needed to create a new floating IP
--// resource. The only required fields are FloatingNetworkID and PortID which
--// refer to the external network and internal port respectively.
--type CreateOpts struct {
--	FloatingNetworkID string
--	FloatingIP        string
--	PortID            string
--	FixedIP           string
--	TenantID          string
--}
--
--var (
--	errFloatingNetworkIDRequired = fmt.Errorf("A NetworkID is required")
--	errPortIDRequired            = fmt.Errorf("A PortID is required")
--)
--
--// Create accepts a CreateOpts struct and uses the values provided to create a
--// new floating IP resource. You can create floating IPs on external networks
--// only. If you provide a FloatingNetworkID which refers to a network that is
--// not external (i.e. its `router:external' attribute is False), the operation
--// will fail and return a 400 error.
--//
--// If you do not specify a FloatingIP address value, the operation will
--// automatically allocate an available address for the new resource. If you do
--// choose to specify one, it must fall within the subnet range for the external
--// network - otherwise the operation returns a 400 error. If the FloatingIP
--// address is already in use, the operation returns a 409 error code.
--//
--// You can associate the new resource with an internal port by using the PortID
--// field. If you specify a PortID that is not valid, the operation will fail and
--// return a 404 error code.
--//
--// You must also configure an IP address for the port associated with the PortID
--// you have provided - this is what the FixedIP refers to: an IP fixed to a port.
--// Because a port might be associated with multiple IP addresses, you can use
--// the FixedIP field to associate a particular IP address rather than have the
--// API choose one for you. If you specify an IP address that is not valid, the
--// operation will fail and return a 400 error code. If the PortID and FixedIP
--// are already associated with another resource, the operation will fail and
--// return a 409 error code.
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	var res CreateResult
--
--	// Validate
--	if opts.FloatingNetworkID == "" {
--		res.Err = errFloatingNetworkIDRequired
--		return res
--	}
--	if opts.PortID == "" {
--		res.Err = errPortIDRequired
--		return res
--	}
--
--	// Define structures
--	type floatingIP struct {
--		FloatingNetworkID string `json:"floating_network_id"`
--		FloatingIP        string `json:"floating_ip_address,omitempty"`
--		PortID            string `json:"port_id"`
--		FixedIP           string `json:"fixed_ip_address,omitempty"`
--		TenantID          string `json:"tenant_id,omitempty"`
--	}
--	type request struct {
--		FloatingIP floatingIP `json:"floatingip"`
--	}
--
--	// Populate request body
--	reqBody := request{FloatingIP: floatingIP{
--		FloatingNetworkID: opts.FloatingNetworkID,
--		PortID:            opts.PortID,
--		FixedIP:           opts.FixedIP,
--		TenantID:          opts.TenantID,
--	}}
--
--	// Send request to API
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// Get retrieves a particular floating IP resource based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains the values used when updating a floating IP resource. The
--// only value that can be updated is which internal port the floating IP is
--// linked to. To associate the floating IP with a new internal port, provide its
--// ID. To disassociate the floating IP from all ports, provide an empty string.
--type UpdateOpts struct {
--	PortID string
--}
--
--// Update allows floating IP resources to be updated. Currently, the only way to
--// "update" a floating IP is to associate it with a new internal port, or
--// disassociate it from all ports. See UpdateOpts for instructions on how to
--// do this.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	type floatingIP struct {
--		PortID *string `json:"port_id"`
--	}
--
--	type request struct {
--		FloatingIP floatingIP `json:"floatingip"`
--	}
--
--	var portID *string
--	if opts.PortID == "" {
--		portID = nil
--	} else {
--		portID = &opts.PortID
--	}
--
--	reqBody := request{FloatingIP: floatingIP{PortID: portID}}
--
--	// Send request to API
--	var res UpdateResult
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--
--	return res
--}
--
--// Delete will permanently delete a particular floating IP resource. Please
--// ensure this is what you want - you can also disassociate the IP from existing
--// internal ports.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
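
A minimal usage sketch of the floating IP requests documented above, for orientation only. It assumes an already authenticated Neutron v2 *gophercloud.ServiceClient (obtaining one is outside this package), the network and port IDs are placeholders, and only functions defined in this package are used.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
)

// associateFloatingIP allocates a floating IP on the given external network,
// binds it to an internal port, then disassociates and deletes it again.
// The client is assumed to be an authenticated Neutron v2 service client.
func associateFloatingIP(client *gophercloud.ServiceClient, networkID, portID string) error {
	// FloatingNetworkID and PortID are the two required CreateOpts fields.
	fip, err := floatingips.Create(client, floatingips.CreateOpts{
		FloatingNetworkID: networkID,
		PortID:            portID,
	}).Extract()
	if err != nil {
		return err
	}
	fmt.Println("allocated", fip.FloatingIP)

	// An empty PortID in UpdateOpts disassociates the IP from all ports.
	if _, err := floatingips.Update(client, fip.ID, floatingips.UpdateOpts{}).Extract(); err != nil {
		return err
	}

	// Delete releases the address entirely.
	return floatingips.Delete(client, fip.ID).Err
}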
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
-deleted file mode 100644
-index 19614be..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests_test.go
-+++ /dev/null
-@@ -1,306 +0,0 @@
--package floatingips
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "floatingips": [
--        {
--            "floating_network_id": "6d67c30a-ddb4-49a1-bec3-a65b286b4170",
--            "router_id": null,
--            "fixed_ip_address": null,
--            "floating_ip_address": "192.0.0.4",
--            "tenant_id": "017d8de156df4177889f31a9bd6edc00",
--            "status": "DOWN",
--            "port_id": null,
--            "id": "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e"
--        },
--        {
--            "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64",
--            "router_id": "0a24cb83-faf5-4d7f-b723-3144ed8a2167",
--            "fixed_ip_address": "192.0.0.2",
--            "floating_ip_address": "10.0.0.3",
--            "tenant_id": "017d8de156df4177889f31a9bd6edc00",
--            "status": "DOWN",
--            "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25",
--            "id": "ada25a95-f321-4f59-b0e0-f3a970dd3d63"
--        }
--    ]
--}
--			`)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractFloatingIPs(page)
--		if err != nil {
--			t.Errorf("Failed to extract floating IPs: %v", err)
--			return false, err
--		}
--
--		expected := []FloatingIP{
--			FloatingIP{
--				FloatingNetworkID: "6d67c30a-ddb4-49a1-bec3-a65b286b4170",
--				FixedIP:           "",
--				FloatingIP:        "192.0.0.4",
--				TenantID:          "017d8de156df4177889f31a9bd6edc00",
--				Status:            "DOWN",
--				PortID:            "",
--				ID:                "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e",
--			},
--			FloatingIP{
--				FloatingNetworkID: "90f742b1-6d17-487b-ba95-71881dbc0b64",
--				FixedIP:           "192.0.0.2",
--				FloatingIP:        "10.0.0.3",
--				TenantID:          "017d8de156df4177889f31a9bd6edc00",
--				Status:            "DOWN",
--				PortID:            "74a342ce-8e07-4e91-880c-9f834b68fa25",
--				ID:                "ada25a95-f321-4f59-b0e0-f3a970dd3d63",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestInvalidNextPageURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `{"floatingips": [{}], "floatingips_links": {}}`)
--	})
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		ExtractFloatingIPs(page)
--		return true, nil
--	})
--}
--
--func TestRequiredFieldsForCreate(t *testing.T) {
--	res1 := Create(fake.ServiceClient(), CreateOpts{FloatingNetworkID: ""})
--	if res1.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	res2 := Create(fake.ServiceClient(), CreateOpts{FloatingNetworkID: "foo", PortID: ""})
--	if res2.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "floatingip": {
--        "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
--        "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab"
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "floatingip": {
--        "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
--        "tenant_id": "4969c491a3c74ee4af974e6d800c62de",
--        "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
--        "fixed_ip_address": "10.0.0.3",
--        "floating_ip_address": "",
--        "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
--        "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
--    }
--}
--		`)
--	})
--
--	options := CreateOpts{
--		FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57",
--		PortID:            "ce705c24-c1ef-408a-bda3-7bbd946164ab",
--	}
--
--	ip, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID)
--	th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID)
--	th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID)
--	th.AssertEquals(t, "", ip.FloatingIP)
--	th.AssertEquals(t, "ce705c24-c1ef-408a-bda3-7bbd946164ab", ip.PortID)
--	th.AssertEquals(t, "10.0.0.3", ip.FixedIP)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "floatingip": {
--        "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64",
--        "fixed_ip_address": "192.0.0.2",
--        "floating_ip_address": "10.0.0.3",
--        "tenant_id": "017d8de156df4177889f31a9bd6edc00",
--        "status": "DOWN",
--        "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25",
--        "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
--    }
--}
--      `)
--	})
--
--	ip, err := Get(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "90f742b1-6d17-487b-ba95-71881dbc0b64", ip.FloatingNetworkID)
--	th.AssertEquals(t, "10.0.0.3", ip.FloatingIP)
--	th.AssertEquals(t, "74a342ce-8e07-4e91-880c-9f834b68fa25", ip.PortID)
--	th.AssertEquals(t, "192.0.0.2", ip.FixedIP)
--	th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", ip.TenantID)
--	th.AssertEquals(t, "DOWN", ip.Status)
--	th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID)
--}
--
--func TestAssociate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--	"floatingip": {
--		"port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e"
--	}
--}
--		`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--	"floatingip": {
--			"router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
--			"tenant_id": "4969c491a3c74ee4af974e6d800c62de",
--			"floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
--			"fixed_ip_address": null,
--			"floating_ip_address": "172.24.4.228",
--			"port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e",
--			"id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
--	}
--}
--	`)
--	})
--
--	ip, err := Update(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7", UpdateOpts{PortID: "423abc8d-2991-4a55-ba98-2aaea84cc72e"}).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertDeepEquals(t, "423abc8d-2991-4a55-ba98-2aaea84cc72e", ip.PortID)
--}
--
--func TestDisassociate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "floatingip": {
--      "port_id": null
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "floatingip": {
--        "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
--        "tenant_id": "4969c491a3c74ee4af974e6d800c62de",
--        "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
--        "fixed_ip_address": null,
--        "floating_ip_address": "172.24.4.228",
--        "port_id": null,
--        "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
--    }
--}
--    `)
--	})
--
--	ip, err := Update(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7", UpdateOpts{}).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertDeepEquals(t, "", ip.FixedIP)
--	th.AssertDeepEquals(t, "", ip.PortID)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
-deleted file mode 100644
-index a1c7afe..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
-+++ /dev/null
-@@ -1,127 +0,0 @@
--package floatingips
--
--import (
--	"fmt"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// FloatingIP represents a floating IP resource. A floating IP is an external
--// IP address that is mapped to an internal port and, optionally, a specific
--// IP address on a private network. In other words, it enables access to an
--// instance on a private network from an external network. For this reason,
--// floating IPs can only be defined on networks where the `router:external'
--// attribute (provided by the external network extension) is set to True.
--type FloatingIP struct {
--	// Unique identifier for the floating IP instance.
--	ID string `json:"id" mapstructure:"id"`
--
--	// UUID of the external network where the floating IP is to be created.
--	FloatingNetworkID string `json:"floating_network_id" mapstructure:"floating_network_id"`
--
--	// Address of the floating IP on the external network.
--	FloatingIP string `json:"floating_ip_address" mapstructure:"floating_ip_address"`
--
--	// UUID of the port on an internal network that is associated with the floating IP.
--	PortID string `json:"port_id" mapstructure:"port_id"`
--
--	// The specific IP address of the internal port which should be associated
--	// with the floating IP.
--	FixedIP string `json:"fixed_ip_address" mapstructure:"fixed_ip_address"`
--
--	// Owner of the floating IP. Only admin users can specify a tenant identifier
--	// other than its own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--
--	// The condition of the API resource.
--	Status string `json:"status" mapstructure:"status"`
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a FloatingIP resource.
--func (r commonResult) Extract() (*FloatingIP, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		FloatingIP *FloatingIP `json:"floatingip"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--	if err != nil {
--		return nil, fmt.Errorf("Error decoding Neutron floating IP: %v", err)
--	}
--
--	return res.FloatingIP, nil
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// FloatingIPPage is the page returned by a pager when traversing over a
--// collection of floating IPs.
--type FloatingIPPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of floating IPs has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p FloatingIPPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"floatingips_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a FloatingIPPage struct is empty.
--func (p FloatingIPPage) IsEmpty() (bool, error) {
--	is, err := ExtractFloatingIPs(p)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage struct,
--// and extracts the elements into a slice of FloatingIP structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractFloatingIPs(page pagination.Page) ([]FloatingIP, error) {
--	var resp struct {
--		FloatingIPs []FloatingIP `mapstructure:"floatingips" json:"floatingips"`
--	}
--
--	err := mapstructure.Decode(page.(FloatingIPPage).Body, &resp)
--
--	return resp.FloatingIPs, err
--}
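
A short sketch of how the pagination types above fit together with List from requests.go; it mirrors the pattern used in the package's tests and assumes the same authenticated client as before.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
	"github.com/rackspace/gophercloud/pagination"
)

// listDownIPs walks every page of floating IPs and prints the ones whose
// status is DOWN. Status is filtered client-side because ListOpts has no
// status field.
func listDownIPs(client *gophercloud.ServiceClient) error {
	pager := floatingips.List(client, floatingips.ListOpts{})
	return pager.EachPage(func(page pagination.Page) (bool, error) {
		ips, err := floatingips.ExtractFloatingIPs(page)
		if err != nil {
			return false, err
		}
		for _, ip := range ips {
			if ip.Status == "DOWN" {
				fmt.Println(ip.ID, ip.FloatingIP)
			}
		}
		return true, nil // keep iterating to the next page
	})
}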
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
-deleted file mode 100644
-index 355f20d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package floatingips
--
--import "github.com/rackspace/gophercloud"
--
--const resourcePath = "floatingips"
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(resourcePath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
-deleted file mode 100644
-index e3a1441..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
-+++ /dev/null
-@@ -1,246 +0,0 @@
--package routers
--
--import (
--	"errors"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the router attributes you want to see returned. SortKey allows you to
--// sort by a particular router attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	ID           string `q:"id"`
--	Name         string `q:"name"`
--	AdminStateUp *bool  `q:"admin_state_up"`
--	Status       string `q:"status"`
--	TenantID     string `q:"tenant_id"`
--	Limit        int    `q:"limit"`
--	Marker       string `q:"marker"`
--	SortKey      string `q:"sort_key"`
--	SortDir      string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// routers. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those routers that are owned by the
--// tenant who submits the request, unless an admin user submits the request.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return RouterPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// CreateOpts contains all the values needed to create a new router. There are
--// no required values.
--type CreateOpts struct {
--	Name         string
--	AdminStateUp *bool
--	TenantID     string
--	GatewayInfo  *GatewayInfo
--}
--
--// Create accepts a CreateOpts struct and uses the values to create a new
--// logical router. When it is created, the router does not have an internal
--// interface - it is not associated to any subnet.
--//
--// You can optionally specify an external gateway for a router using the
--// GatewayInfo struct. The external gateway for the router must be plugged into
--// an external network (it is external if its `router:external' field is set to
--// true).
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	type router struct {
--		Name         *string      `json:"name,omitempty"`
--		AdminStateUp *bool        `json:"admin_state_up,omitempty"`
--		TenantID     *string      `json:"tenant_id,omitempty"`
--		GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
--	}
--
--	type request struct {
--		Router router `json:"router"`
--	}
--
--	reqBody := request{Router: router{
--		Name:         gophercloud.MaybeString(opts.Name),
--		AdminStateUp: opts.AdminStateUp,
--		TenantID:     gophercloud.MaybeString(opts.TenantID),
--	}}
--
--	if opts.GatewayInfo != nil {
--		reqBody.Router.GatewayInfo = opts.GatewayInfo
--	}
--
--	var res CreateResult
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--	return res
--}
--
--// Get retrieves a particular router based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains the values used when updating a router.
--type UpdateOpts struct {
--	Name         string
--	AdminStateUp *bool
--	GatewayInfo  *GatewayInfo
--}
--
--// Update allows routers to be updated. You can update the name, administrative
--// state, and the external gateway. For more information about how to set the
--// external gateway for a router, see Create. This operation does not enable
--// the update of router interfaces. To do this, use the AddInterface and
--// RemoveInterface functions.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	type router struct {
--		Name         *string      `json:"name,omitempty"`
--		AdminStateUp *bool        `json:"admin_state_up,omitempty"`
--		GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
--	}
--
--	type request struct {
--		Router router `json:"router"`
--	}
--
--	reqBody := request{Router: router{
--		Name:         gophercloud.MaybeString(opts.Name),
--		AdminStateUp: opts.AdminStateUp,
--	}}
--
--	if opts.GatewayInfo != nil {
--		reqBody.Router.GatewayInfo = opts.GatewayInfo
--	}
--
--	// Send request to API
--	var res UpdateResult
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--
--	return res
--}
--
--// Delete will permanently delete a particular router based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
--
--var errInvalidInterfaceOpts = errors.New("When adding a router interface you must provide either a subnet ID or a port ID")
--
--// InterfaceOpts allows you to work with operations that either add or remove
--// an internal interface from a router.
--type InterfaceOpts struct {
--	SubnetID string
--	PortID   string
--}
--
--// AddInterface attaches a subnet to an internal router interface. You must
--// specify either a SubnetID or PortID in the request body. If you specify both,
--// the operation will fail and an error will be returned.
--//
--// If you specify a SubnetID, the gateway IP address for that particular subnet
--// is used to create the router interface. Alternatively, if you specify a
--// PortID, the IP address associated with the port is used to create the router
--// interface.
--//
--// If you reference a port that is associated with multiple IP addresses, or
--// if the port is associated with zero IP addresses, the operation will fail and
--// a 400 Bad Request error will be returned.
--//
--// If you reference a port already in use, the operation will fail and a 409
--// Conflict error will be returned.
--//
--// The PortID that is returned after using Extract() on the result of this
--// operation can either be the same PortID that was passed in or the
--// identifier of a new port created by this operation. After the operation
--// completes, the device ID of the port is set to the router ID, and the
--// device owner attribute is set to `network:router_interface'.
--func AddInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts) InterfaceResult {
--	var res InterfaceResult
--
--	// Validate
--	if (opts.SubnetID == "" && opts.PortID == "") || (opts.SubnetID != "" && opts.PortID != "") {
--		res.Err = errInvalidInterfaceOpts
--		return res
--	}
--
--	type request struct {
--		SubnetID string `json:"subnet_id,omitempty"`
--		PortID   string `json:"port_id,omitempty"`
--	}
--
--	body := request{SubnetID: opts.SubnetID, PortID: opts.PortID}
--
--	_, res.Err = perigee.Request("PUT", addInterfaceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &body,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--
--	return res
--}
--
--// RemoveInterface removes an internal router interface, which detaches a
--// subnet from the router. You must specify either a SubnetID or PortID, since
--// these values are used to identify the router interface to remove.
--//
--// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you
--// choose to specify both, the subnet ID must correspond to the subnet ID of
--// the first IP address on the port specified by the port ID. Otherwise, the
--// operation will fail and return a 409 Conflict error.
--//
--// If the router, subnet or port which are referenced do not exist or are not
--// visible to you, the operation will fail and a 404 Not Found error will be
--// returned. After this operation completes, the port connecting the router
--// with the subnet is removed from the subnet for the network.
--func RemoveInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts) InterfaceResult {
--	var res InterfaceResult
--
--	type request struct {
--		SubnetID string `json:"subnet_id,omitempty"`
--		PortID   string `json:"port_id,omitempty"`
--	}
--
--	body := request{SubnetID: opts.SubnetID, PortID: opts.PortID}
--
--	_, res.Err = perigee.Request("PUT", removeInterfaceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &body,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--
--	return res
--}
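
Similarly, a hedged sketch of the router workflow described above: create a router with an external gateway, then attach a subnet through AddInterface. The client is again assumed to be an authenticated Neutron v2 service client and the UUID arguments are placeholders.

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers"
)

// createRouterWithInterface creates a router whose external gateway is the
// given network, then plugs the given subnet into it. InterfaceOpts must
// carry either a SubnetID or a PortID, never both, as documented above.
func createRouterWithInterface(client *gophercloud.ServiceClient, extNetID, subnetID string) (*routers.Router, error) {
	up := true
	r, err := routers.Create(client, routers.CreateOpts{
		Name:         "demo-router", // placeholder name
		AdminStateUp: &up,
		GatewayInfo:  &routers.GatewayInfo{NetworkID: extNetID},
	}).Extract()
	if err != nil {
		return nil, err
	}

	_, err = routers.AddInterface(client, r.ID, routers.InterfaceOpts{SubnetID: subnetID}).Extract()
	return r, err
}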
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go
-deleted file mode 100644
-index c34264d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests_test.go
-+++ /dev/null
-@@ -1,338 +0,0 @@
--package routers
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/routers", rootURL(fake.ServiceClient()))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "routers": [
--        {
--            "status": "ACTIVE",
--            "external_gateway_info": null,
--            "name": "second_routers",
--            "admin_state_up": true,
--            "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3",
--            "id": "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b"
--        },
--        {
--            "status": "ACTIVE",
--            "external_gateway_info": {
--                "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"
--            },
--            "name": "router1",
--            "admin_state_up": true,
--            "tenant_id": "33a40233088643acb66ff6eb0ebea679",
--            "id": "a9254bdb-2613-4a13-ac4c-adc581fba50d"
--        }
--    ]
--}
--			`)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractRouters(page)
--		if err != nil {
--			t.Errorf("Failed to extract routers: %v", err)
--			return false, err
--		}
--
--		expected := []Router{
--			Router{
--				Status:       "ACTIVE",
--				GatewayInfo:  GatewayInfo{NetworkID: ""},
--				AdminStateUp: true,
--				Name:         "second_routers",
--				ID:           "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b",
--				TenantID:     "6b96ff0cb17a4b859e1e575d221683d3",
--			},
--			Router{
--				Status:       "ACTIVE",
--				GatewayInfo:  GatewayInfo{NetworkID: "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"},
--				AdminStateUp: true,
--				Name:         "router1",
--				ID:           "a9254bdb-2613-4a13-ac4c-adc581fba50d",
--				TenantID:     "33a40233088643acb66ff6eb0ebea679",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "router":{
--      "name": "foo_router",
--      "admin_state_up": false,
--      "external_gateway_info":{
--         "network_id":"8ca37218-28ff-41cb-9b10-039601ea7e6b"
--      }
--   }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "router": {
--        "status": "ACTIVE",
--        "external_gateway_info": {
--            "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b"
--        },
--        "name": "foo_router",
--        "admin_state_up": false,
--        "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3",
--        "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
--    }
--}
--		`)
--	})
--
--	asu := false
--	gwi := GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"}
--
--	options := CreateOpts{
--		Name:         "foo_router",
--		AdminStateUp: &asu,
--		GatewayInfo:  &gwi,
--	}
--	r, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "foo_router", r.Name)
--	th.AssertEquals(t, false, r.AdminStateUp)
--	th.AssertDeepEquals(t, GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"}, r.GatewayInfo)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers/a07eea83-7710-4860-931b-5fe220fae533", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "router": {
--        "status": "ACTIVE",
--        "external_gateway_info": {
--            "network_id": "85d76829-6415-48ff-9c63-5c5ca8c61ac6"
--        },
--        "name": "router1",
--        "admin_state_up": true,
--        "tenant_id": "d6554fe62e2f41efbb6e026fad5c1542",
--        "id": "a07eea83-7710-4860-931b-5fe220fae533"
--    }
--}
--			`)
--	})
--
--	n, err := Get(fake.ServiceClient(), "a07eea83-7710-4860-931b-5fe220fae533").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertDeepEquals(t, n.GatewayInfo, GatewayInfo{NetworkID: "85d76829-6415-48ff-9c63-5c5ca8c61ac6"})
--	th.AssertEquals(t, n.Name, "router1")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.TenantID, "d6554fe62e2f41efbb6e026fad5c1542")
--	th.AssertEquals(t, n.ID, "a07eea83-7710-4860-931b-5fe220fae533")
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "router": {
--			"name": "new_name",
--        "external_gateway_info": {
--            "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b"
--        }
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "router": {
--        "status": "ACTIVE",
--        "external_gateway_info": {
--            "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b"
--        },
--        "name": "new_name",
--        "admin_state_up": true,
--        "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3",
--        "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e"
--    }
--}
--		`)
--	})
--
--	gwi := GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"}
--	options := UpdateOpts{Name: "new_name", GatewayInfo: &gwi}
--
--	n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Name, "new_name")
--	th.AssertDeepEquals(t, n.GatewayInfo, GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"})
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestAddInterface(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/add_router_interface", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"
--}
--	`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188",
--    "tenant_id": "017d8de156df4177889f31a9bd6edc00",
--    "port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31",
--    "id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770"
--}
--`)
--	})
--
--	opts := InterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"}
--	res, err := AddInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID)
--	th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID)
--	th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID)
--	th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID)
--}
--
--func TestAddInterfaceRequiredOpts(t *testing.T) {
--	_, err := AddInterface(fake.ServiceClient(), "foo", InterfaceOpts{}).Extract()
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	_, err = AddInterface(fake.ServiceClient(), "foo", InterfaceOpts{SubnetID: "bar", PortID: "baz"}).Extract()
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestRemoveInterface(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/remove_router_interface", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--		"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"
--}
--	`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--		"subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188",
--		"tenant_id": "017d8de156df4177889f31a9bd6edc00",
--		"port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31",
--		"id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770"
--}
--`)
--	})
--
--	opts := InterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"}
--	res, err := RemoveInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID)
--	th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID)
--	th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID)
--	th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
-deleted file mode 100644
-index bdad4cb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
-+++ /dev/null
-@@ -1,161 +0,0 @@
--package routers
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// GatewayInfo represents the information of an external gateway for any
--// particular network router.
--type GatewayInfo struct {
--	NetworkID string `json:"network_id" mapstructure:"network_id"`
--}
--
--// Router represents a Neutron router. A router is a logical entity that
--// forwards packets across internal subnets and NATs (network address
--// translation) them on external networks through an appropriate gateway.
--//
--// A router has an interface for each subnet with which it is associated. By
--// default, the IP address of such interface is the subnet's gateway IP. Also,
--// whenever a router is associated with a subnet, a port for that router
--// interface is added to the subnet's network.
--type Router struct {
--	// Indicates whether or not a router is currently operational.
--	Status string `json:"status" mapstructure:"status"`
--
--	// Information on external gateway for the router.
--	GatewayInfo GatewayInfo `json:"external_gateway_info" mapstructure:"external_gateway_info"`
--
--	// Administrative state of the router.
--	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
--
--	// Human readable name for the router. Does not have to be unique.
--	Name string `json:"name" mapstructure:"name"`
--
--	// Unique identifier for the router.
--	ID string `json:"id" mapstructure:"id"`
--
--	// Owner of the router. Only admin users can specify a tenant identifier
--	// other than its own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--}
--
--// RouterPage is the page returned by a pager when traversing over a
--// collection of routers.
--type RouterPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of routers has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p RouterPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"routers_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a RouterPage struct is empty.
--func (p RouterPage) IsEmpty() (bool, error) {
--	is, err := ExtractRouters(p)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractRouters accepts a Page struct, specifically a RouterPage struct,
--// and extracts the elements into a slice of Router structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractRouters(page pagination.Page) ([]Router, error) {
--	var resp struct {
--		Routers []Router `mapstructure:"routers" json:"routers"`
--	}
--
--	err := mapstructure.Decode(page.(RouterPage).Body, &resp)
--
--	return resp.Routers, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a router.
--func (r commonResult) Extract() (*Router, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Router *Router `json:"router"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Router, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// InterfaceInfo represents information about a particular router interface. As
--// mentioned above, in order for a router to forward to a subnet, it needs an
--// interface.
--type InterfaceInfo struct {
--	// The ID of the subnet which this interface is associated with.
--	SubnetID string `json:"subnet_id" mapstructure:"subnet_id"`
--
--	// The ID of the port that is a part of the subnet.
--	PortID string `json:"port_id" mapstructure:"port_id"`
--
--	// The UUID of the interface.
--	ID string `json:"id" mapstructure:"id"`
--
--	// Owner of the interface.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--}
--
--// InterfaceResult represents the result of interface operations, such as
--// AddInterface() and RemoveInterface().
--type InterfaceResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts an information struct.
--func (r InterfaceResult) Extract() (*InterfaceInfo, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res *InterfaceInfo
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
-deleted file mode 100644
-index bc22c2a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--package routers
--
--import "github.com/rackspace/gophercloud"
--
--const resourcePath = "routers"
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(resourcePath, id)
--}
--
--func addInterfaceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(resourcePath, id, "add_router_interface")
--}
--
--func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(resourcePath, id, "remove_router_interface")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
-deleted file mode 100644
-index bc1fc28..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package lbaas provides information and interaction with the Load Balancer
--// as a Service extension for the OpenStack Networking service.
--package lbaas
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
-deleted file mode 100644
-index 58ec580..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
-+++ /dev/null
-@@ -1,139 +0,0 @@
--package members
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the pool member attributes you want to see returned. SortKey allows you to
--// sort by a particular member attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Status       string `q:"status"`
--	Weight       int    `q:"weight"`
--	AdminStateUp *bool  `q:"admin_state_up"`
--	TenantID     string `q:"tenant_id"`
--	PoolID       string `q:"pool_id"`
--	Address      string `q:"address"`
--	ProtocolPort int    `q:"protocol_port"`
--	ID           string `q:"id"`
--	Limit        int    `q:"limit"`
--	Marker       string `q:"marker"`
--	SortKey      string `q:"sort_key"`
--	SortDir      string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// members. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those members that are owned by the
--// tenant who submits the request, unless an admin user submits the request.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return MemberPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// CreateOpts contains all the values needed to create a new pool member.
--type CreateOpts struct {
--	// Only required if the caller has an admin role and wants to create a
--	// pool member for another tenant.
--	TenantID string
--
--	// Required. The IP address of the member.
--	Address string
--
--	// Required. The port on which the application is hosted.
--	ProtocolPort int
--
--	// Required. The pool to which this member will belong.
--	PoolID string
--}
--
--// Create accepts a CreateOpts struct and uses the values to create a new
--// load balancer pool member.
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	type member struct {
--		TenantID     string `json:"tenant_id"`
--		ProtocolPort int    `json:"protocol_port"`
--		Address      string `json:"address"`
--		PoolID       string `json:"pool_id"`
--	}
--	type request struct {
--		Member member `json:"member"`
--	}
--
--	reqBody := request{Member: member{
--		Address:      opts.Address,
--		TenantID:     opts.TenantID,
--		ProtocolPort: opts.ProtocolPort,
--		PoolID:       opts.PoolID,
--	}}
--
--	var res CreateResult
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--	return res
--}
--
--// Get retrieves a particular pool member based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains the values used when updating a pool member.
--type UpdateOpts struct {
--	// The administrative state of the member, which is up (true) or down (false).
--	AdminStateUp bool
--}
--
--// Update allows members to be updated.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	type member struct {
--		AdminStateUp bool `json:"admin_state_up"`
--	}
--	type request struct {
--		Member member `json:"member"`
--	}
--
--	reqBody := request{Member: member{AdminStateUp: opts.AdminStateUp}}
--
--	// Send request to API
--	var res UpdateResult
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Delete will permanently delete a particular member based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
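
For reference, a minimal sketch of how the removed lbaas/members package was typically driven end to end (assuming an already-authenticated *gophercloud.ServiceClient and an existing pool; the address, port and function name are illustrative, not taken from this patch):

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
)

// memberLifecycle creates a member in an existing pool, administratively
// disables it, and then deletes it again.
func memberLifecycle(client *gophercloud.ServiceClient, poolID string) error {
	m, err := members.Create(client, members.CreateOpts{
		Address:      "10.0.0.10", // illustrative backend address
		ProtocolPort: 8080,        // illustrative backend port
		PoolID:       poolID,
	}).Extract()
	if err != nil {
		return err
	}

	// Take the member out of rotation before removing it.
	if _, err := members.Update(client, m.ID, members.UpdateOpts{AdminStateUp: false}).Extract(); err != nil {
		return err
	}
	return members.Delete(client, m.ID).Err
}
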
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go
-deleted file mode 100644
-index dc1ece3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests_test.go
-+++ /dev/null
-@@ -1,243 +0,0 @@
--package members
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/members", rootURL(fake.ServiceClient()))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "members":[
--      {
--         "status":"ACTIVE",
--         "weight":1,
--         "admin_state_up":true,
--         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--         "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab",
--         "address":"10.0.0.4",
--         "protocol_port":80,
--         "id":"701b531b-111a-4f21-ad85-4795b7b12af6"
--      },
--      {
--         "status":"ACTIVE",
--         "weight":1,
--         "admin_state_up":true,
--         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--         "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab",
--         "address":"10.0.0.3",
--         "protocol_port":80,
--         "id":"beb53b4d-230b-4abd-8118-575b8fa006ef"
--      }
--   ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractMembers(page)
--		if err != nil {
--			t.Errorf("Failed to extract members: %v", err)
--			return false, err
--		}
--
--		expected := []Member{
--			Member{
--				Status:       "ACTIVE",
--				Weight:       1,
--				AdminStateUp: true,
--				TenantID:     "83657cfcdfe44cd5920adaf26c48ceea",
--				PoolID:       "72741b06-df4d-4715-b142-276b6bce75ab",
--				Address:      "10.0.0.4",
--				ProtocolPort: 80,
--				ID:           "701b531b-111a-4f21-ad85-4795b7b12af6",
--			},
--			Member{
--				Status:       "ACTIVE",
--				Weight:       1,
--				AdminStateUp: true,
--				TenantID:     "83657cfcdfe44cd5920adaf26c48ceea",
--				PoolID:       "72741b06-df4d-4715-b142-276b6bce75ab",
--				Address:      "10.0.0.3",
--				ProtocolPort: 80,
--				ID:           "beb53b4d-230b-4abd-8118-575b8fa006ef",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--  "member": {
--    "tenant_id": "453105b9-1754-413f-aab1-55f1af620750",
--		"pool_id": "foo",
--    "address": "192.0.2.14",
--    "protocol_port":8080
--  }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--  "member": {
--    "id": "975592ca-e308-48ad-8298-731935ee9f45",
--    "address": "192.0.2.14",
--    "protocol_port": 8080,
--    "tenant_id": "453105b9-1754-413f-aab1-55f1af620750",
--    "admin_state_up":true,
--    "weight": 1,
--    "status": "DOWN"
--  }
--}
--    `)
--	})
--
--	options := CreateOpts{
--		TenantID:     "453105b9-1754-413f-aab1-55f1af620750",
--		Address:      "192.0.2.14",
--		ProtocolPort: 8080,
--		PoolID:       "foo",
--	}
--	_, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/members/975592ca-e308-48ad-8298-731935ee9f45", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "member":{
--      "id":"975592ca-e308-48ad-8298-731935ee9f45",
--      "address":"192.0.2.14",
--      "protocol_port":8080,
--      "tenant_id":"453105b9-1754-413f-aab1-55f1af620750",
--      "admin_state_up":true,
--      "weight":1,
--      "status":"DOWN"
--   }
--}
--      `)
--	})
--
--	m, err := Get(fake.ServiceClient(), "975592ca-e308-48ad-8298-731935ee9f45").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "975592ca-e308-48ad-8298-731935ee9f45", m.ID)
--	th.AssertEquals(t, "192.0.2.14", m.Address)
--	th.AssertEquals(t, 8080, m.ProtocolPort)
--	th.AssertEquals(t, "453105b9-1754-413f-aab1-55f1af620750", m.TenantID)
--	th.AssertEquals(t, true, m.AdminStateUp)
--	th.AssertEquals(t, 1, m.Weight)
--	th.AssertEquals(t, "DOWN", m.Status)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "member":{
--      "admin_state_up":false
--   }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "member":{
--      "status":"PENDING_UPDATE",
--      "protocol_port":8080,
--      "weight":1,
--      "admin_state_up":false,
--      "tenant_id":"4fd44f30292945e481c7b8a0c8908869",
--      "pool_id":"7803631d-f181-4500-b3a2-1b68ba2a75fd",
--      "address":"10.0.0.5",
--      "status_description":null,
--      "id":"48a471ea-64f1-4eb6-9be7-dae6bbe40a0f"
--   }
--}
--    `)
--	})
--
--	options := UpdateOpts{AdminStateUp: false}
--
--	_, err := Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
-deleted file mode 100644
-index 3cad339..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
-+++ /dev/null
-@@ -1,122 +0,0 @@
--package members
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Member represents the application running on a backend server.
--type Member struct {
--	// The status of the member. Indicates whether the member is operational.
--	Status string
--
--	// Weight of member.
--	Weight int
--
--	// The administrative state of the member, which is up (true) or down (false).
--	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
--
--	// Owner of the member. Only an administrative user can specify a tenant ID
--	// other than its own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--
--	// The pool to which the member belongs.
--	PoolID string `json:"pool_id" mapstructure:"pool_id"`
--
--	// The IP address of the member.
--	Address string
--
--	// The port on which the application is hosted.
--	ProtocolPort int `json:"protocol_port" mapstructure:"protocol_port"`
--
--	// The unique ID for the member.
--	ID string
--}
--
--// MemberPage is the page returned by a pager when traversing over a
--// collection of pool members.
--type MemberPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of members has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p MemberPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"members_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a MemberPage struct is empty.
--func (p MemberPage) IsEmpty() (bool, error) {
--	is, err := ExtractMembers(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractMembers accepts a Page struct, specifically a MemberPage struct,
--// and extracts the elements into a slice of Member structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractMembers(page pagination.Page) ([]Member, error) {
--	var resp struct {
--		Members []Member `mapstructure:"members" json:"members"`
--	}
--
--	err := mapstructure.Decode(page.(MemberPage).Body, &resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return resp.Members, nil
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a member.
--func (r commonResult) Extract() (*Member, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Member *Member `json:"member"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Member, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
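
The results types above pair with List from requests.go; a minimal paging sketch (assuming an authenticated *gophercloud.ServiceClient; the printing is illustrative):

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
	"github.com/rackspace/gophercloud/pagination"
)

// listPoolMembers walks every page of members in one pool and prints them.
func listPoolMembers(client *gophercloud.ServiceClient, poolID string) error {
	pager := members.List(client, members.ListOpts{PoolID: poolID})
	return pager.EachPage(func(page pagination.Page) (bool, error) {
		ms, err := members.ExtractMembers(page)
		if err != nil {
			return false, err
		}
		for _, m := range ms {
			fmt.Printf("%s %s:%d status=%s\n", m.ID, m.Address, m.ProtocolPort, m.Status)
		}
		return true, nil // true keeps the pager iterating
	})
}
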
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
-deleted file mode 100644
-index 94b57e4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package members
--
--import "github.com/rackspace/gophercloud"
--
--const (
--	rootPath     = "lb"
--	resourcePath = "members"
--)
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath, resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, resourcePath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
-deleted file mode 100644
-index e2b590e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
-+++ /dev/null
-@@ -1,282 +0,0 @@
--package monitors
--
--import (
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the health monitor attributes you want to see returned. SortKey allows you to
--// sort by a particular monitor attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	ID            string `q:"id"`
--	TenantID      string `q:"tenant_id"`
--	Type          string `q:"type"`
--	Delay         int    `q:"delay"`
--	Timeout       int    `q:"timeout"`
--	MaxRetries    int    `q:"max_retries"`
--	HTTPMethod    string `q:"http_method"`
--	URLPath       string `q:"url_path"`
--	ExpectedCodes string `q:"expected_codes"`
--	AdminStateUp  *bool  `q:"admin_state_up"`
--	Status        string `q:"status"`
--	Limit         int    `q:"limit"`
--	Marker        string `q:"marker"`
--	SortKey       string `q:"sort_key"`
--	SortDir       string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// health monitors. It accepts a ListOpts struct, which allows you to filter
--// and sort the returned collection for greater efficiency.
--//
--// Default policy settings return only those monitors that are owned by the
--// tenant who submits the request, unless an admin user submits the request.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Constants that represent approved monitoring types.
--const (
--	TypePING  = "PING"
--	TypeTCP   = "TCP"
--	TypeHTTP  = "HTTP"
--	TypeHTTPS = "HTTPS"
--)
--
--var (
--	errValidTypeRequired     = fmt.Errorf("A valid Type is required. Supported values are PING, TCP, HTTP and HTTPS")
--	errDelayRequired         = fmt.Errorf("Delay is required")
--	errTimeoutRequired       = fmt.Errorf("Timeout is required")
--	errMaxRetriesRequired    = fmt.Errorf("MaxRetries is required")
--	errURLPathRequired       = fmt.Errorf("URL path is required")
--	errExpectedCodesRequired = fmt.Errorf("ExpectedCodes is required")
--	errDelayMustGETimeout    = fmt.Errorf("Delay must be greater than or equal to timeout")
--)
--
--// CreateOpts contains all the values needed to create a new health monitor.
--type CreateOpts struct {
--	// Required for admins. Indicates the owner of the monitor.
--	TenantID string
--
--	// Required. The type of probe, which is PING, TCP, HTTP, or HTTPS, that is
--	// sent by the load balancer to verify the member state.
--	Type string
--
--	// Required. The time, in seconds, between sending probes to members.
--	Delay int
--
--	// Required. Maximum number of seconds for a monitor to wait for a ping reply
--	// before it times out. The value must be less than the delay value.
--	Timeout int
--
--	// Required. Number of permissible ping failures before changing the member's
--	// status to INACTIVE. Must be a number between 1 and 10.
--	MaxRetries int
--
--	// Required for HTTP(S) types. URI path that will be accessed if monitor type
--	// is HTTP or HTTPS.
--	URLPath string
--
--	// Required for HTTP(S) types. The HTTP method used for requests by the
--	// monitor. If this attribute is not specified, it defaults to "GET".
--	HTTPMethod string
--
--	// Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
--	// monitor. You can either specify a single status like "200", or a range
--	// like "200-202".
--	ExpectedCodes string
--
--	AdminStateUp *bool
--}
--
--// Create is an operation which provisions a new health monitor. There are
--// different types of monitor you can provision: PING, TCP or HTTP(S). Below
--// are examples of how to create each one.
--//
--// Here is an example config struct to use when creating a PING or TCP monitor:
--//
--// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
--// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}
--//
--// Here is an example config struct to use when creating a HTTP(S) monitor:
--//
--// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
--//  HTTPMethod: "HEAD", URLPath: "/", ExpectedCodes: "200"}
--//
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	var res CreateResult
--
--	// Validate inputs
--	allowed := map[string]bool{TypeHTTP: true, TypeHTTPS: true, TypeTCP: true, TypePING: true}
--	if opts.Type == "" || allowed[opts.Type] == false {
--		res.Err = errValidTypeRequired
--	}
--	if opts.Delay == 0 {
--		res.Err = errDelayRequired
--	}
--	if opts.Timeout == 0 {
--		res.Err = errTimeoutRequired
--	}
--	if opts.MaxRetries == 0 {
--		res.Err = errMaxRetriesRequired
--	}
--	if opts.Type == TypeHTTP || opts.Type == TypeHTTPS {
--		if opts.URLPath == "" {
--			res.Err = errURLPathRequired
--		}
--		if opts.ExpectedCodes == "" {
--			res.Err = errExpectedCodesRequired
--		}
--	}
--	if opts.Delay < opts.Timeout {
--		res.Err = errDelayMustGETimeout
--	}
--	if res.Err != nil {
--		return res
--	}
--
--	type monitor struct {
--		Type          string  `json:"type"`
--		Delay         int     `json:"delay"`
--		Timeout       int     `json:"timeout"`
--		MaxRetries    int     `json:"max_retries"`
--		TenantID      *string `json:"tenant_id,omitempty"`
--		URLPath       *string `json:"url_path,omitempty"`
--		ExpectedCodes *string `json:"expected_codes,omitempty"`
--		HTTPMethod    *string `json:"http_method,omitempty"`
--		AdminStateUp  *bool   `json:"admin_state_up,omitempty"`
--	}
--
--	type request struct {
--		Monitor monitor `json:"health_monitor"`
--	}
--
--	reqBody := request{Monitor: monitor{
--		Type:          opts.Type,
--		Delay:         opts.Delay,
--		Timeout:       opts.Timeout,
--		MaxRetries:    opts.MaxRetries,
--		TenantID:      gophercloud.MaybeString(opts.TenantID),
--		URLPath:       gophercloud.MaybeString(opts.URLPath),
--		ExpectedCodes: gophercloud.MaybeString(opts.ExpectedCodes),
--		HTTPMethod:    gophercloud.MaybeString(opts.HTTPMethod),
--		AdminStateUp:  opts.AdminStateUp,
--	}}
--
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// Get retrieves a particular health monitor based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains all the values needed to update an existing health
--// monitor. Attributes that appear in CreateOpts but are not listed here are
--// immutable and cannot be updated.
--type UpdateOpts struct {
--	// Required. The time, in seconds, between sending probes to members.
--	Delay int
--
--	// Required. Maximum number of seconds for a monitor to wait for a ping reply
--	// before it times out. The value must be less than the delay value.
--	Timeout int
--
--	// Required. Number of permissible ping failures before changing the member's
--	// status to INACTIVE. Must be a number between 1 and 10.
--	MaxRetries int
--
--	// Required for HTTP(S) types. URI path that will be accessed if monitor type
--	// is HTTP or HTTPS.
--	URLPath string
--
--	// Required for HTTP(S) types. The HTTP method used for requests by the
--	// monitor. If this attribute is not specified, it defaults to "GET".
--	HTTPMethod string
--
--	// Required for HTTP(S) types. Expected HTTP codes for a passing HTTP(S)
--	// monitor. You can either specify a single status like "200", or a range
--	// like "200-202".
--	ExpectedCodes string
--
--	AdminStateUp *bool
--}
--
--// Update is an operation which modifies the attributes of the specified monitor.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	var res UpdateResult
--
--	if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout {
--		res.Err = errDelayMustGETimeout
--	}
--
--	type monitor struct {
--		Delay         int     `json:"delay"`
--		Timeout       int     `json:"timeout"`
--		MaxRetries    int     `json:"max_retries"`
--		URLPath       *string `json:"url_path,omitempty"`
--		ExpectedCodes *string `json:"expected_codes,omitempty"`
--		HTTPMethod    *string `json:"http_method,omitempty"`
--		AdminStateUp  *bool   `json:"admin_state_up,omitempty"`
--	}
--
--	type request struct {
--		Monitor monitor `json:"health_monitor"`
--	}
--
--	reqBody := request{Monitor: monitor{
--		Delay:         opts.Delay,
--		Timeout:       opts.Timeout,
--		MaxRetries:    opts.MaxRetries,
--		URLPath:       gophercloud.MaybeString(opts.URLPath),
--		ExpectedCodes: gophercloud.MaybeString(opts.ExpectedCodes),
--		HTTPMethod:    gophercloud.MaybeString(opts.HTTPMethod),
--		AdminStateUp:  opts.AdminStateUp,
--	}}
--
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 202},
--	})
--
--	return res
--}
--
--// Delete will permanently delete a particular monitor based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
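
A minimal sketch of the two monitor shapes the validation in Create above distinguishes (assuming an authenticated *gophercloud.ServiceClient; the probe values and URL path are illustrative):

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
)

// createMonitors provisions one PING probe and one HTTP probe.
func createMonitors(client *gophercloud.ServiceClient) error {
	// PING and TCP monitors only need Type, Delay, Timeout and MaxRetries,
	// with Delay >= Timeout.
	if _, err := monitors.Create(client, monitors.CreateOpts{
		Type:       monitors.TypePING,
		Delay:      20,
		Timeout:    10,
		MaxRetries: 3,
	}).Extract(); err != nil {
		return err
	}

	// HTTP(S) monitors must additionally carry URLPath and ExpectedCodes.
	_, err := monitors.Create(client, monitors.CreateOpts{
		Type:          monitors.TypeHTTP,
		Delay:         20,
		Timeout:       10,
		MaxRetries:    3,
		HTTPMethod:    "HEAD",
		URLPath:       "/healthz", // illustrative health-check path
		ExpectedCodes: "200-299",
	}).Extract()
	return err
}
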
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go
-deleted file mode 100644
-index 79a99bf..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests_test.go
-+++ /dev/null
-@@ -1,312 +0,0 @@
--package monitors
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/health_monitors", rootURL(fake.ServiceClient()))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "health_monitors":[
--      {
--         "admin_state_up":true,
--         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--         "delay":10,
--         "max_retries":1,
--         "timeout":1,
--         "type":"PING",
--         "id":"466c8345-28d8-4f84-a246-e04380b0461d"
--      },
--      {
--         "admin_state_up":true,
--         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--         "delay":5,
--         "expected_codes":"200",
--         "max_retries":2,
--         "http_method":"GET",
--         "timeout":2,
--         "url_path":"/",
--         "type":"HTTP",
--         "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7"
--      }
--   ]
--}
--			`)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractMonitors(page)
--		if err != nil {
--			t.Errorf("Failed to extract monitors: %v", err)
--			return false, err
--		}
--
--		expected := []Monitor{
--			Monitor{
--				AdminStateUp: true,
--				TenantID:     "83657cfcdfe44cd5920adaf26c48ceea",
--				Delay:        10,
--				MaxRetries:   1,
--				Timeout:      1,
--				Type:         "PING",
--				ID:           "466c8345-28d8-4f84-a246-e04380b0461d",
--			},
--			Monitor{
--				AdminStateUp:  true,
--				TenantID:      "83657cfcdfe44cd5920adaf26c48ceea",
--				Delay:         5,
--				ExpectedCodes: "200",
--				MaxRetries:    2,
--				Timeout:       2,
--				URLPath:       "/",
--				Type:          "HTTP",
--				HTTPMethod:    "GET",
--				ID:            "5d4b5228-33b0-4e60-b225-9b727c1a20e7",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestDelayMustBeGreaterOrEqualThanTimeout(t *testing.T) {
--	_, err := Create(fake.ServiceClient(), CreateOpts{
--		Type:          "HTTP",
--		Delay:         1,
--		Timeout:       10,
--		MaxRetries:    5,
--		URLPath:       "/check",
--		ExpectedCodes: "200-299",
--	}).Extract()
--
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	_, err = Update(fake.ServiceClient(), "453105b9-1754-413f-aab1-55f1af620750", UpdateOpts{
--		Delay:   1,
--		Timeout: 10,
--	}).Extract()
--
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "health_monitor":{
--      "type":"HTTP",
--      "tenant_id":"453105b9-1754-413f-aab1-55f1af620750",
--      "delay":20,
--      "timeout":10,
--      "max_retries":5,
--      "url_path":"/check",
--      "expected_codes":"200-299"
--   }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--   "health_monitor":{
--      "id":"f3eeab00-8367-4524-b662-55e64d4cacb5",
--      "tenant_id":"453105b9-1754-413f-aab1-55f1af620750",
--      "type":"HTTP",
--      "delay":20,
--      "timeout":10,
--      "max_retries":5,
--      "http_method":"GET",
--      "url_path":"/check",
--      "expected_codes":"200-299",
--      "admin_state_up":true,
--      "status":"ACTIVE"
--   }
--}
--		`)
--	})
--
--	_, err := Create(fake.ServiceClient(), CreateOpts{
--		Type:          "HTTP",
--		TenantID:      "453105b9-1754-413f-aab1-55f1af620750",
--		Delay:         20,
--		Timeout:       10,
--		MaxRetries:    5,
--		URLPath:       "/check",
--		ExpectedCodes: "200-299",
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--}
--
--func TestRequiredCreateOpts(t *testing.T) {
--	res := Create(fake.ServiceClient(), CreateOpts{})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Type: TypeHTTP})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/health_monitors/f3eeab00-8367-4524-b662-55e64d4cacb5", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "health_monitor":{
--      "id":"f3eeab00-8367-4524-b662-55e64d4cacb5",
--      "tenant_id":"453105b9-1754-413f-aab1-55f1af620750",
--      "type":"HTTP",
--      "delay":20,
--      "timeout":10,
--      "max_retries":5,
--      "http_method":"GET",
--      "url_path":"/check",
--      "expected_codes":"200-299",
--      "admin_state_up":true,
--      "status":"ACTIVE"
--   }
--}
--			`)
--	})
--
--	hm, err := Get(fake.ServiceClient(), "f3eeab00-8367-4524-b662-55e64d4cacb5").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "f3eeab00-8367-4524-b662-55e64d4cacb5", hm.ID)
--	th.AssertEquals(t, "453105b9-1754-413f-aab1-55f1af620750", hm.TenantID)
--	th.AssertEquals(t, "HTTP", hm.Type)
--	th.AssertEquals(t, 20, hm.Delay)
--	th.AssertEquals(t, 10, hm.Timeout)
--	th.AssertEquals(t, 5, hm.MaxRetries)
--	th.AssertEquals(t, "GET", hm.HTTPMethod)
--	th.AssertEquals(t, "/check", hm.URLPath)
--	th.AssertEquals(t, "200-299", hm.ExpectedCodes)
--	th.AssertEquals(t, true, hm.AdminStateUp)
--	th.AssertEquals(t, "ACTIVE", hm.Status)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "health_monitor":{
--      "delay": 3,
--      "timeout": 20,
--      "max_retries": 10,
--      "url_path": "/another_check",
--      "expected_codes": "301"
--   }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusAccepted)
--
--		fmt.Fprintf(w, `
--{
--    "health_monitor": {
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "delay": 3,
--        "max_retries": 10,
--        "http_method": "GET",
--        "timeout": 20,
--        "pools": [
--            {
--                "status": "PENDING_CREATE",
--                "status_description": null,
--                "pool_id": "6e55751f-6ad4-4e53-b8d4-02e442cd21df"
--            }
--        ],
--        "type": "PING",
--        "id": "b05e44b5-81f9-4551-b474-711a722698f7"
--    }
--}
--		`)
--	})
--
--	_, err := Update(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7", UpdateOpts{
--		Delay:         3,
--		Timeout:       20,
--		MaxRetries:    10,
--		URLPath:       "/another_check",
--		ExpectedCodes: "301",
--	}).Extract()
--
--	th.AssertNoErr(t, err)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go
-deleted file mode 100644
-index d595abd..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go
-+++ /dev/null
-@@ -1,147 +0,0 @@
--package monitors
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Monitor represents a load balancer health monitor. A health monitor is used
--// to determine whether or not back-end members of the VIP's pool are usable
--// for processing a request. A pool can have several health monitors associated
--// with it. There are different types of health monitors supported:
--//
--// PING: used to ping the members using ICMP.
--// TCP: used to connect to the members using TCP.
--// HTTP: used to send an HTTP request to the member.
--// HTTPS: used to send a secure HTTP request to the member.
--//
--// When a pool has several monitors associated with it, each member of the pool
--// is monitored by all these monitors. If any monitor declares the member as
--// unhealthy, then the member status is changed to INACTIVE and the member
--// won't participate in its pool's load balancing. In other words, ALL monitors
--// must declare the member to be healthy for it to stay ACTIVE.
--type Monitor struct {
--	// The unique ID for the monitor.
--	ID string
--
--	// Owner of the monitor. Only an administrative user can specify a tenant ID
--	// other than its own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--
--	// The type of probe sent by the load balancer to verify the member state,
--	// which is PING, TCP, HTTP, or HTTPS.
--	Type string
--
--	// The time, in seconds, between sending probes to members.
--	Delay int
--
--	// The maximum number of seconds for a monitor to wait for a connection to be
--	// established before it times out. This value must be less than the delay value.
--	Timeout int
--
--	// Number of allowed connection failures before changing the status of the
--	// member to INACTIVE. A valid value is from 1 to 10.
--	MaxRetries int `json:"max_retries" mapstructure:"max_retries"`
--
--	// The HTTP method that the monitor uses for requests.
--	HTTPMethod string `json:"http_method" mapstructure:"http_method"`
--
--	// The HTTP path of the request sent by the monitor to test the health of a
--	// member. Must be a string beginning with a forward slash (/).
--	URLPath string `json:"url_path" mapstructure:"url_path"`
--
--	// Expected HTTP codes for a passing HTTP(S) monitor.
--	ExpectedCodes string `json:"expected_codes" mapstructure:"expected_codes"`
--
--	// The administrative state of the health monitor, which is up (true) or down (false).
--	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
--
--	// The status of the health monitor. Indicates whether the health monitor is
--	// operational.
--	Status string
--}
--
--// MonitorPage is the page returned by a pager when traversing over a
--// collection of health monitors.
--type MonitorPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of monitors has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p MonitorPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"health_monitors_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a MonitorPage struct is empty.
--func (p MonitorPage) IsEmpty() (bool, error) {
--	is, err := ExtractMonitors(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct,
--// and extracts the elements into a slice of Monitor structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractMonitors(page pagination.Page) ([]Monitor, error) {
--	var resp struct {
--		Monitors []Monitor `mapstructure:"health_monitors" json:"health_monitors"`
--	}
--
--	err := mapstructure.Decode(page.(MonitorPage).Body, &resp)
--
--	return resp.Monitors, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a monitor.
--func (r commonResult) Extract() (*Monitor, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Monitor *Monitor `json:"health_monitor" mapstructure:"health_monitor"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Monitor, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
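
A minimal sketch of reading a monitor back through the Extract helper above (assuming an authenticated *gophercloud.ServiceClient and a known monitor ID; the printing is illustrative):

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
)

// describeMonitor fetches one health monitor and prints its probe settings.
func describeMonitor(client *gophercloud.ServiceClient, id string) error {
	hm, err := monitors.Get(client, id).Extract()
	if err != nil {
		return err
	}
	// Delay is the gap between probes, Timeout the per-probe wait, and
	// MaxRetries the failures tolerated before a member goes INACTIVE.
	fmt.Printf("%s %s: delay=%ds timeout=%ds retries=%d status=%s\n",
		hm.Type, hm.ID, hm.Delay, hm.Timeout, hm.MaxRetries, hm.Status)
	return nil
}
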
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
-deleted file mode 100644
-index 46e84bb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package monitors
--
--import "github.com/rackspace/gophercloud"
--
--const (
--	rootPath     = "lb"
--	resourcePath = "health_monitors"
--)
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath, resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, resourcePath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
-deleted file mode 100644
-index ca8d33b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
-+++ /dev/null
-@@ -1,205 +0,0 @@
--package pools
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the pool attributes you want to see returned. SortKey allows you to
--// sort by a particular pool attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Status       string `q:"status"`
--	LBMethod     string `q:"lb_method"`
--	Protocol     string `q:"protocol"`
--	SubnetID     string `q:"subnet_id"`
--	TenantID     string `q:"tenant_id"`
--	AdminStateUp *bool  `q:"admin_state_up"`
--	Name         string `q:"name"`
--	ID           string `q:"id"`
--	VIPID        string `q:"vip_id"`
--	Limit        int    `q:"limit"`
--	Marker       string `q:"marker"`
--	SortKey      string `q:"sort_key"`
--	SortDir      string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// pools. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those pools that are owned by the
--// tenant who submits the request, unless an admin user submits the request.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return PoolPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Supported attributes for create/update operations.
--const (
--	LBMethodRoundRobin       = "ROUND_ROBIN"
--	LBMethodLeastConnections = "LEAST_CONNECTIONS"
--
--	ProtocolTCP   = "TCP"
--	ProtocolHTTP  = "HTTP"
--	ProtocolHTTPS = "HTTPS"
--)
--
--// CreateOpts contains all the values needed to create a new pool.
--type CreateOpts struct {
--	// Only required if the caller has an admin role and wants to create a pool
--	// for another tenant.
--	TenantID string
--
--	// Required. Name of the pool.
--	Name string
--
--	// Required. The protocol used by the pool members, you can use either
--	// ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS.
--	Protocol string
--
--	// The network on which the members of the pool will be located. Only members
--	// that are on this network can be added to the pool.
--	SubnetID string
--
--	// The algorithm used to distribute load between the members of the pool. The
--	// current specification supports LBMethodRoundRobin and
--	// LBMethodLeastConnections as valid values for this attribute.
--	LBMethod string
--}
--
--// Create accepts a CreateOpts struct and uses the values to create a new
--// load balancer pool.
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	type pool struct {
--		Name     string `json:"name"`
--		TenantID string `json:"tenant_id,omitempty"`
--		Protocol string `json:"protocol"`
--		SubnetID string `json:"subnet_id"`
--		LBMethod string `json:"lb_method"`
--	}
--	type request struct {
--		Pool pool `json:"pool"`
--	}
--
--	reqBody := request{Pool: pool{
--		Name:     opts.Name,
--		TenantID: opts.TenantID,
--		Protocol: opts.Protocol,
--		SubnetID: opts.SubnetID,
--		LBMethod: opts.LBMethod,
--	}}
--
--	var res CreateResult
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--	return res
--}
--
--// Get retrieves a particular pool based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains the values used when updating a pool.
--type UpdateOpts struct {
--	// Required. Name of the pool.
--	Name string
--
--	// The algorithm used to distribute load between the members of the pool. The
--	// current specification supports LBMethodRoundRobin and
--	// LBMethodLeastConnections as valid values for this attribute.
--	LBMethod string
--}
--
--// Update allows pools to be updated.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	type pool struct {
--		Name     string `json:"name"`
--		LBMethod string `json:"lb_method"`
--	}
--	type request struct {
--		Pool pool `json:"pool"`
--	}
--
--	reqBody := request{Pool: pool{
--		Name:     opts.Name,
--		LBMethod: opts.LBMethod,
--	}}
--
--	// Send request to API
--	var res UpdateResult
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Delete will permanently delete a particular pool based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
--
--// AssociateMonitor will associate a health monitor with a particular pool.
--// Once associated, the health monitor will start monitoring the members of the
--// pool and will deactivate these members if they are deemed unhealthy. A
--// member can be deactivated (status set to INACTIVE) if any of the health
--// monitors finds it unhealthy.
--func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) AssociateResult {
--	type hm struct {
--		ID string `json:"id"`
--	}
--	type request struct {
--		Monitor hm `json:"health_monitor"`
--	}
--
--	reqBody := request{hm{ID: monitorID}}
--
--	var res AssociateResult
--	_, res.Err = perigee.Request("POST", associateURL(c, poolID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--	return res
--}
--
--// DisassociateMonitor will disassociate a health monitor from a particular
--// pool. When disassociation is successful, the health monitor will no longer
--// check the health of the members of the pool.
--func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) AssociateResult {
--	var res AssociateResult
--	_, res.Err = perigee.Request("DELETE", disassociateURL(c, poolID, monitorID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
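
A minimal sketch tying the pools package above to the monitors package (assuming an authenticated *gophercloud.ServiceClient plus existing subnet and monitor IDs; the pool name is illustrative):

package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
)

// createPool provisions an HTTP round-robin pool on a subnet and attaches an
// existing health monitor to it, returning the new pool's ID.
func createPool(client *gophercloud.ServiceClient, subnetID, monitorID string) (string, error) {
	p, err := pools.Create(client, pools.CreateOpts{
		Name:     "web_pool", // illustrative pool name
		Protocol: pools.ProtocolHTTP,
		SubnetID: subnetID,
		LBMethod: pools.LBMethodRoundRobin,
	}).Extract()
	if err != nil {
		return "", err
	}

	// Members of the pool are now health-checked by the associated monitor.
	if _, err := pools.AssociateMonitor(client, p.ID, monitorID).Extract(); err != nil {
		return "", err
	}
	return p.ID, nil
}
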
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
-deleted file mode 100644
-index 6da29a6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests_test.go
-+++ /dev/null
-@@ -1,317 +0,0 @@
--package pools
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/pools", rootURL(fake.ServiceClient()))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "pools":[
--      {
--         "status":"ACTIVE",
--         "lb_method":"ROUND_ROBIN",
--         "protocol":"HTTP",
--         "description":"",
--         "health_monitors":[
--            "466c8345-28d8-4f84-a246-e04380b0461d",
--            "5d4b5228-33b0-4e60-b225-9b727c1a20e7"
--         ],
--         "members":[
--            "701b531b-111a-4f21-ad85-4795b7b12af6",
--            "beb53b4d-230b-4abd-8118-575b8fa006ef"
--         ],
--         "status_description": null,
--         "id":"72741b06-df4d-4715-b142-276b6bce75ab",
--         "vip_id":"4ec89087-d057-4e2c-911f-60a3b47ee304",
--         "name":"app_pool",
--         "admin_state_up":true,
--         "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861",
--         "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--         "health_monitors_status": [],
--         "provider": "haproxy"
--      }
--   ]
--}
--			`)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractPools(page)
--		if err != nil {
--			t.Errorf("Failed to extract pools: %v", err)
--			return false, err
--		}
--
--		expected := []Pool{
--			Pool{
--				Status:      "ACTIVE",
--				LBMethod:    "ROUND_ROBIN",
--				Protocol:    "HTTP",
--				Description: "",
--				MonitorIDs: []string{
--					"466c8345-28d8-4f84-a246-e04380b0461d",
--					"5d4b5228-33b0-4e60-b225-9b727c1a20e7",
--				},
--				SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
--				TenantID:     "83657cfcdfe44cd5920adaf26c48ceea",
--				AdminStateUp: true,
--				Name:         "app_pool",
--				MemberIDs: []string{
--					"701b531b-111a-4f21-ad85-4795b7b12af6",
--					"beb53b4d-230b-4abd-8118-575b8fa006ef",
--				},
--				ID:       "72741b06-df4d-4715-b142-276b6bce75ab",
--				VIPID:    "4ec89087-d057-4e2c-911f-60a3b47ee304",
--				Provider: "haproxy",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "pool": {
--        "lb_method": "ROUND_ROBIN",
--        "protocol": "HTTP",
--        "name": "Example pool",
--        "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9",
--        "tenant_id": "2ffc6e22aae24e4795f87155d24c896f"
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "pool": {
--        "status": "PENDING_CREATE",
--        "lb_method": "ROUND_ROBIN",
--        "protocol": "HTTP",
--        "description": "",
--        "health_monitors": [],
--        "members": [],
--        "status_description": null,
--        "id": "69055154-f603-4a28-8951-7cc2d9e54a9a",
--        "vip_id": null,
--        "name": "Example pool",
--        "admin_state_up": true,
--        "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9",
--        "tenant_id": "2ffc6e22aae24e4795f87155d24c896f",
--        "health_monitors_status": []
--    }
--}
--		`)
--	})
--
--	options := CreateOpts{
--		LBMethod: LBMethodRoundRobin,
--		Protocol: "HTTP",
--		Name:     "Example pool",
--		SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9",
--		TenantID: "2ffc6e22aae24e4795f87155d24c896f",
--	}
--	p, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "PENDING_CREATE", p.Status)
--	th.AssertEquals(t, "ROUND_ROBIN", p.LBMethod)
--	th.AssertEquals(t, "HTTP", p.Protocol)
--	th.AssertEquals(t, "", p.Description)
--	th.AssertDeepEquals(t, []string{}, p.MonitorIDs)
--	th.AssertDeepEquals(t, []string{}, p.MemberIDs)
--	th.AssertEquals(t, "69055154-f603-4a28-8951-7cc2d9e54a9a", p.ID)
--	th.AssertEquals(t, "Example pool", p.Name)
--	th.AssertEquals(t, "1981f108-3c48-48d2-b908-30f7d28532c9", p.SubnetID)
--	th.AssertEquals(t, "2ffc6e22aae24e4795f87155d24c896f", p.TenantID)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "pool":{
--      "id":"332abe93-f488-41ba-870b-2ac66be7f853",
--      "tenant_id":"19eaa775-cf5d-49bc-902e-2f85f668d995",
--      "name":"Example pool",
--      "description":"",
--      "protocol":"tcp",
--      "lb_algorithm":"ROUND_ROBIN",
--      "session_persistence":{
--      },
--      "healthmonitor_id":null,
--      "members":[
--      ],
--      "admin_state_up":true,
--      "status":"ACTIVE"
--   }
--}
--			`)
--	})
--
--	n, err := Get(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.ID, "332abe93-f488-41ba-870b-2ac66be7f853")
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "pool":{
--      "name":"SuperPool",
--      "lb_method": "LEAST_CONNECTIONS"
--   }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--   "pool":{
--      "status":"PENDING_UPDATE",
--      "lb_method":"LEAST_CONNECTIONS",
--      "protocol":"TCP",
--      "description":"",
--      "health_monitors":[
--
--      ],
--      "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861",
--      "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
--      "admin_state_up":true,
--      "name":"SuperPool",
--      "members":[
--
--      ],
--      "id":"61b1f87a-7a21-4ad3-9dda-7f81d249944f",
--      "vip_id":null
--   }
--}
--		`)
--	})
--
--	options := UpdateOpts{Name: "SuperPool", LBMethod: LBMethodLeastConnections}
--
--	n, err := Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "SuperPool", n.Name)
--	th.AssertDeepEquals(t, "LEAST_CONNECTIONS", n.LBMethod)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestAssociateHealthMonitor(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--   "health_monitor":{
--      "id":"b624decf-d5d3-4c66-9a3d-f047e7786181"
--   }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--	})
--
--	_, err := AssociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181").Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestDisassociateHealthMonitor(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors/b624decf-d5d3-4c66-9a3d-f047e7786181", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := DisassociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
-deleted file mode 100644
-index 07ec85e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
-+++ /dev/null
-@@ -1,146 +0,0 @@
--package pools
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// Pool represents a logical set of devices, such as web servers, that you
--// group together to receive and process traffic. The load balancing function
--// chooses a member of the pool according to the configured load balancing
--// method to handle the new requests or connections received on the VIP address.
--// There is only one pool per virtual IP.
--type Pool struct {
--	// The status of the pool. Indicates whether the pool is operational.
--	Status string
--
--	// The load-balancer algorithm, which is round-robin, least-connections, and
--	// so on. This value, which must be supported, is dependent on the provider.
--	// Round-robin must be supported.
--	LBMethod string `json:"lb_method" mapstructure:"lb_method"`
--
--	// The protocol of the pool, which is TCP, HTTP, or HTTPS.
--	Protocol string
--
--	// Description for the pool.
--	Description string
--
--	// The IDs of associated monitors which check the health of the pool members.
--	MonitorIDs []string `json:"health_monitors" mapstructure:"health_monitors"`
--
--	// The network on which the members of the pool will be located. Only members
--	// that are on this network can be added to the pool.
--	SubnetID string `json:"subnet_id" mapstructure:"subnet_id"`
--
--	// Owner of the pool. Only an administrative user can specify a tenant ID
--	// other than its own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--
--	// The administrative state of the pool, which is up (true) or down (false).
--	AdminStateUp bool `json:"admin_state_up" mapstructure:"admin_state_up"`
--
--	// Pool name. Does not have to be unique.
--	Name string
--
--	// List of member IDs that belong to the pool.
--	MemberIDs []string `json:"members" mapstructure:"members"`
--
--	// The unique ID for the pool.
--	ID string
--
--	// The ID of the virtual IP associated with this pool
--	VIPID string `json:"vip_id" mapstructure:"vip_id"`
--
--	// The provider
--	Provider string
--}
--
--// PoolPage is the page returned by a pager when traversing over a
--// collection of pools.
--type PoolPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of pools has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p PoolPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"pools_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a PoolPage struct is empty.
--func (p PoolPage) IsEmpty() (bool, error) {
--	is, err := ExtractPools(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractPools accepts a Page struct, specifically a RouterPage struct,
--// and extracts the elements into a slice of Router structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractPools(page pagination.Page) ([]Pool, error) {
--	var resp struct {
--		Pools []Pool `mapstructure:"pools" json:"pools"`
--	}
--
--	err := mapstructure.Decode(page.(PoolPage).Body, &resp)
--
--	return resp.Pools, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a router.
--func (r commonResult) Extract() (*Pool, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Pool *Pool `json:"pool"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Pool, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// AssociateResult represents the result of an association operation.
--type AssociateResult struct {
--	commonResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
-deleted file mode 100644
-index 6cd15b0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
-+++ /dev/null
-@@ -1,25 +0,0 @@
--package pools
--
--import "github.com/rackspace/gophercloud"
--
--const (
--	rootPath     = "lb"
--	resourcePath = "pools"
--	monitorPath  = "health_monitors"
--)
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath, resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, resourcePath, id)
--}
--
--func associateURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, resourcePath, id, monitorPath)
--}
--
--func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string {
--	return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID)
--}
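
For reference, a minimal sketch of how the pools API removed above was typically driven, mirroring the request bodies exercised in its tests. It assumes an already-authenticated Neutron *gophercloud.ServiceClient named client plus existing pool and monitor IDs (placeholders here); LBMethodLeastConnections is the exported constant used unqualified in the Update test.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
)

// reconfigurePool renames a pool, switches it to least-connections balancing,
// then attaches and detaches a health monitor. poolID and monitorID are
// placeholders for existing resources.
func reconfigurePool(client *gophercloud.ServiceClient, poolID, monitorID string) error {
	opts := pools.UpdateOpts{Name: "SuperPool", LBMethod: pools.LBMethodLeastConnections}
	p, err := pools.Update(client, poolID, opts).Extract()
	if err != nil {
		return err
	}
	fmt.Println("pool", p.ID, "is now", p.Status)

	// Associate a health monitor with the pool, then remove it again.
	if _, err := pools.AssociateMonitor(client, poolID, monitorID).Extract(); err != nil {
		return err
	}
	return pools.DisassociateMonitor(client, poolID, monitorID).Err
}
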
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
-deleted file mode 100644
-index ec929d6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
-+++ /dev/null
-@@ -1,273 +0,0 @@
--package vips
--
--import (
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the virtual IP attributes you want to see returned. SortKey allows you to
--// sort by a particular network attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	ID              string `q:"id"`
--	Name            string `q:"name"`
--	AdminStateUp    *bool  `q:"admin_state_up"`
--	Status          string `q:"status"`
--	TenantID        string `q:"tenant_id"`
--	SubnetID        string `q:"subnet_id"`
--	Address         string `q:"address"`
--	PortID          string `q:"port_id"`
--	Protocol        string `q:"protocol"`
--	ProtocolPort    int    `q:"protocol_port"`
--	ConnectionLimit int    `q:"connection_limit"`
--	Limit           int    `q:"limit"`
--	Marker          string `q:"marker"`
--	SortKey         string `q:"sort_key"`
--	SortDir         string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// routers. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those routers that are owned by the
--// tenant who submits the request, unless an admin user submits the request.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return VIPPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--var (
--	errNameRequired         = fmt.Errorf("Name is required")
--	errSubnetIDRequried     = fmt.Errorf("SubnetID is required")
--	errProtocolRequired     = fmt.Errorf("Protocol is required")
--	errProtocolPortRequired = fmt.Errorf("Protocol port is required")
--	errPoolIDRequired       = fmt.Errorf("PoolID is required")
--)
--
--// CreateOpts contains all the values needed to create a new virtual IP.
--type CreateOpts struct {
--	// Required. Human-readable name for the VIP. Does not have to be unique.
--	Name string
--
--	// Required. The network on which to allocate the VIP's address. A tenant can
--	// only create VIPs on networks authorized by policy (e.g. networks that
--	// belong to them or networks that are shared).
--	SubnetID string
--
--	// Required. The protocol - can either be TCP, HTTP or HTTPS.
--	Protocol string
--
--	// Required. The port on which to listen for client traffic.
--	ProtocolPort int
--
--	// Required. The ID of the pool with which the VIP is associated.
--	PoolID string
--
--	// Required for admins. Indicates the owner of the VIP.
--	TenantID string
--
--	// Optional. The IP address of the VIP.
--	Address string
--
--	// Optional. Human-readable description for the VIP.
--	Description string
--
--	// Optional. Omit this field to prevent session persistence.
--	Persistence *SessionPersistence
--
--	// Optional. The maximum number of connections allowed for the VIP.
--	ConnLimit *int
--
--	// Optional. The administrative state of the VIP. A valid value is true (UP)
--	// or false (DOWN).
--	AdminStateUp *bool
--}
--
--// Create is an operation which provisions a new virtual IP based on the
--// configuration defined in the CreateOpts struct. Once the request is
--// validated and progress has started on the provisioning process, a
--// CreateResult will be returned.
--//
--// Please note that the PoolID should refer to a pool that is not already
--// associated with another vip. If the pool is already used by another vip,
--// the operation will fail and a 409 Conflict error will be returned.
--//
--// Users with an admin role can create VIPs on behalf of other tenants by
--// specifying a TenantID attribute different than their own.
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	var res CreateResult
--
--	// Validate required opts
--	if opts.Name == "" {
--		res.Err = errNameRequired
--		return res
--	}
--	if opts.SubnetID == "" {
--		res.Err = errSubnetIDRequried
--		return res
--	}
--	if opts.Protocol == "" {
--		res.Err = errProtocolRequired
--		return res
--	}
--	if opts.ProtocolPort == 0 {
--		res.Err = errProtocolPortRequired
--		return res
--	}
--	if opts.PoolID == "" {
--		res.Err = errPoolIDRequired
--		return res
--	}
--
--	type vip struct {
--		Name         string              `json:"name"`
--		SubnetID     string              `json:"subnet_id"`
--		Protocol     string              `json:"protocol"`
--		ProtocolPort int                 `json:"protocol_port"`
--		PoolID       string              `json:"pool_id"`
--		Description  *string             `json:"description,omitempty"`
--		TenantID     *string             `json:"tenant_id,omitempty"`
--		Address      *string             `json:"address,omitempty"`
--		Persistence  *SessionPersistence `json:"session_persistence,omitempty"`
--		ConnLimit    *int                `json:"connection_limit,omitempty"`
--		AdminStateUp *bool               `json:"admin_state_up,omitempty"`
--	}
--
--	type request struct {
--		VirtualIP vip `json:"vip"`
--	}
--
--	reqBody := request{VirtualIP: vip{
--		Name:         opts.Name,
--		SubnetID:     opts.SubnetID,
--		Protocol:     opts.Protocol,
--		ProtocolPort: opts.ProtocolPort,
--		PoolID:       opts.PoolID,
--		Description:  gophercloud.MaybeString(opts.Description),
--		TenantID:     gophercloud.MaybeString(opts.TenantID),
--		Address:      gophercloud.MaybeString(opts.Address),
--		ConnLimit:    opts.ConnLimit,
--		AdminStateUp: opts.AdminStateUp,
--	}}
--
--	if opts.Persistence != nil {
--		reqBody.VirtualIP.Persistence = opts.Persistence
--	}
--
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// Get retrieves a particular virtual IP based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// UpdateOpts contains all the values needed to update an existing virtual IP.
--// Attributes not listed here but appear in CreateOpts are immutable and cannot
--// be updated.
--type UpdateOpts struct {
--	// Human-readable name for the VIP. Does not have to be unique.
--	Name string
--
--	// Required. The ID of the pool with which the VIP is associated.
--	PoolID string
--
--	// Optional. Human-readable description for the VIP.
--	Description string
--
--	// Optional. Omit this field to prevent session persistence.
--	Persistence *SessionPersistence
--
--	// Optional. The maximum number of connections allowed for the VIP.
--	ConnLimit *int
--
--	// Optional. The administrative state of the VIP. A valid value is true (UP)
--	// or false (DOWN).
--	AdminStateUp *bool
--}
--
--// Update is an operation which modifies the attributes of the specified VIP.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResult {
--	type vip struct {
--		Name         string              `json:"name,omitempty"`
--		PoolID       string              `json:"pool_id,omitempty"`
--		Description  *string             `json:"description,omitempty"`
--		Persistence  *SessionPersistence `json:"session_persistence,omitempty"`
--		ConnLimit    *int                `json:"connection_limit,omitempty"`
--		AdminStateUp *bool               `json:"admin_state_up,omitempty"`
--	}
--
--	type request struct {
--		VirtualIP vip `json:"vip"`
--	}
--
--	reqBody := request{VirtualIP: vip{
--		Name:         opts.Name,
--		PoolID:       opts.PoolID,
--		Description:  gophercloud.MaybeString(opts.Description),
--		ConnLimit:    opts.ConnLimit,
--		AdminStateUp: opts.AdminStateUp,
--	}}
--
--	if opts.Persistence != nil {
--		reqBody.VirtualIP.Persistence = opts.Persistence
--	}
--
--	var res UpdateResult
--	_, res.Err = perigee.Request("PUT", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 202},
--	})
--
--	return res
--}
--
--// Delete will permanently delete a particular virtual IP based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
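
For reference, a hedged sketch of calling the Create operation defined above with all five required fields set; the client is an assumed, pre-authenticated *gophercloud.ServiceClient, and the subnet and pool IDs are placeholder values taken from the test fixtures that follow.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
)

// createVIP provisions a virtual IP in front of an existing pool. Name,
// SubnetID, Protocol, ProtocolPort and PoolID are the required fields;
// AdminStateUp and Persistence are optional.
func createVIP(client *gophercloud.ServiceClient) error {
	opts := vips.CreateOpts{
		Name:         "NewVip",
		SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
		Protocol:     "HTTP",
		ProtocolPort: 80,
		PoolID:       "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
		AdminStateUp: vips.Up,
		Persistence:  &vips.SessionPersistence{Type: "SOURCE_IP"},
	}
	vip, err := vips.Create(client, opts).Extract()
	if err != nil {
		return err
	}
	fmt.Println("VIP", vip.ID, "created with status", vip.Status)
	return nil
}
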
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go
-deleted file mode 100644
-index 430f1a1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests_test.go
-+++ /dev/null
-@@ -1,336 +0,0 @@
--package vips
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/vips", rootURL(fake.ServiceClient()))
--	th.AssertEquals(t, th.Endpoint()+"v2.0/lb/vips/foo", resourceURL(fake.ServiceClient(), "foo"))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--  "vips":[
--         {
--           "id": "db902c0c-d5ff-4753-b465-668ad9656918",
--           "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c",
--           "name": "web_vip",
--           "description": "lb config for the web tier",
--           "subnet_id": "96a4386a-f8c3-42ed-afce-d7954eee77b3",
--           "address" : "10.30.176.47",
--           "port_id" : "cd1f7a47-4fa6-449c-9ee7-632838aedfea",
--           "protocol": "HTTP",
--           "protocol_port": 80,
--           "pool_id" : "cfc6589d-f949-4c66-99d2-c2da56ef3764",
--           "admin_state_up": true,
--           "status": "ACTIVE"
--         },
--         {
--           "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
--           "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c",
--           "name": "db_vip",
--					 "description": "lb config for the db tier",
--           "subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
--           "address" : "10.30.176.48",
--           "port_id" : "cd1f7a47-4fa6-449c-9ee7-632838aedfea",
--           "protocol": "TCP",
--           "protocol_port": 3306,
--           "pool_id" : "41efe233-7591-43c5-9cf7-923964759f9e",
--           "session_persistence" : {"type" : "SOURCE_IP"},
--           "connection_limit" : 2000,
--           "admin_state_up": true,
--           "status": "INACTIVE"
--         }
--      ]
--}
--			`)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVIPs(page)
--		if err != nil {
--			t.Errorf("Failed to extract LBs: %v", err)
--			return false, err
--		}
--
--		expected := []VirtualIP{
--			VirtualIP{
--				ID:           "db902c0c-d5ff-4753-b465-668ad9656918",
--				TenantID:     "310df60f-2a10-4ee5-9554-98393092194c",
--				Name:         "web_vip",
--				Description:  "lb config for the web tier",
--				SubnetID:     "96a4386a-f8c3-42ed-afce-d7954eee77b3",
--				Address:      "10.30.176.47",
--				PortID:       "cd1f7a47-4fa6-449c-9ee7-632838aedfea",
--				Protocol:     "HTTP",
--				ProtocolPort: 80,
--				PoolID:       "cfc6589d-f949-4c66-99d2-c2da56ef3764",
--				Persistence:  SessionPersistence{},
--				ConnLimit:    0,
--				AdminStateUp: true,
--				Status:       "ACTIVE",
--			},
--			VirtualIP{
--				ID:           "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
--				TenantID:     "310df60f-2a10-4ee5-9554-98393092194c",
--				Name:         "db_vip",
--				Description:  "lb config for the db tier",
--				SubnetID:     "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
--				Address:      "10.30.176.48",
--				PortID:       "cd1f7a47-4fa6-449c-9ee7-632838aedfea",
--				Protocol:     "TCP",
--				ProtocolPort: 3306,
--				PoolID:       "41efe233-7591-43c5-9cf7-923964759f9e",
--				Persistence:  SessionPersistence{Type: "SOURCE_IP"},
--				ConnLimit:    2000,
--				AdminStateUp: true,
--				Status:       "INACTIVE",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "vip": {
--        "protocol": "HTTP",
--        "name": "NewVip",
--        "admin_state_up": true,
--        "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861",
--        "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
--        "protocol_port": 80,
--				"session_persistence": {"type": "SOURCE_IP"}
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "vip": {
--        "status": "PENDING_CREATE",
--        "protocol": "HTTP",
--        "description": "",
--        "admin_state_up": true,
--        "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861",
--        "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea",
--        "connection_limit": -1,
--        "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
--        "address": "10.0.0.11",
--        "protocol_port": 80,
--        "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5",
--        "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2",
--        "name": "NewVip"
--    }
--}
--		`)
--	})
--
--	opts := CreateOpts{
--		Protocol:     "HTTP",
--		Name:         "NewVip",
--		AdminStateUp: Up,
--		SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
--		PoolID:       "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
--		ProtocolPort: 80,
--		Persistence:  &SessionPersistence{Type: "SOURCE_IP"},
--	}
--
--	r, err := Create(fake.ServiceClient(), opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "PENDING_CREATE", r.Status)
--	th.AssertEquals(t, "HTTP", r.Protocol)
--	th.AssertEquals(t, "", r.Description)
--	th.AssertEquals(t, true, r.AdminStateUp)
--	th.AssertEquals(t, "8032909d-47a1-4715-90af-5153ffe39861", r.SubnetID)
--	th.AssertEquals(t, "83657cfcdfe44cd5920adaf26c48ceea", r.TenantID)
--	th.AssertEquals(t, -1, r.ConnLimit)
--	th.AssertEquals(t, "61b1f87a-7a21-4ad3-9dda-7f81d249944f", r.PoolID)
--	th.AssertEquals(t, "10.0.0.11", r.Address)
--	th.AssertEquals(t, 80, r.ProtocolPort)
--	th.AssertEquals(t, "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", r.PortID)
--	th.AssertEquals(t, "c987d2be-9a3c-4ac9-a046-e8716b1350e2", r.ID)
--	th.AssertEquals(t, "NewVip", r.Name)
--}
--
--func TestRequiredCreateOpts(t *testing.T) {
--	res := Create(fake.ServiceClient(), CreateOpts{})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Name: "foo"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar", ProtocolPort: 80})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "vip": {
--        "status": "ACTIVE",
--        "protocol": "HTTP",
--        "description": "",
--        "admin_state_up": true,
--        "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861",
--        "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea",
--        "connection_limit": 1000,
--        "pool_id": "72741b06-df4d-4715-b142-276b6bce75ab",
--        "session_persistence": {
--            "cookie_name": "MyAppCookie",
--            "type": "APP_COOKIE"
--        },
--        "address": "10.0.0.10",
--        "protocol_port": 80,
--        "port_id": "b5a743d6-056b-468b-862d-fb13a9aa694e",
--        "id": "4ec89087-d057-4e2c-911f-60a3b47ee304",
--        "name": "my-vip"
--    }
--}
--			`)
--	})
--
--	vip, err := Get(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "ACTIVE", vip.Status)
--	th.AssertEquals(t, "HTTP", vip.Protocol)
--	th.AssertEquals(t, "", vip.Description)
--	th.AssertEquals(t, true, vip.AdminStateUp)
--	th.AssertEquals(t, 1000, vip.ConnLimit)
--	th.AssertEquals(t, SessionPersistence{Type: "APP_COOKIE", CookieName: "MyAppCookie"}, vip.Persistence)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "vip": {
--        "connection_limit": 1000,
--				"session_persistence": {"type": "SOURCE_IP"}
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusAccepted)
--
--		fmt.Fprintf(w, `
--{
--    "vip": {
--        "status": "PENDING_UPDATE",
--        "protocol": "HTTP",
--        "description": "",
--        "admin_state_up": true,
--        "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861",
--        "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea",
--        "connection_limit": 1000,
--        "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
--        "address": "10.0.0.11",
--        "protocol_port": 80,
--        "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5",
--        "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2",
--        "name": "NewVip"
--    }
--}
--		`)
--	})
--
--	i1000 := 1000
--	options := UpdateOpts{
--		ConnLimit:   &i1000,
--		Persistence: &SessionPersistence{Type: "SOURCE_IP"},
--	}
--	vip, err := Update(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304", options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "PENDING_UPDATE", vip.Status)
--	th.AssertEquals(t, 1000, vip.ConnLimit)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
-deleted file mode 100644
-index e1092e7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
-+++ /dev/null
-@@ -1,166 +0,0 @@
--package vips
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// SessionPersistence represents the session persistence feature of the load
--// balancing service. It attempts to force connections or requests in the same
--// session to be processed by the same member as long as it is active. Three
--// types of persistence are supported:
--//
--// SOURCE_IP:   With this mode, all connections originating from the same source
--//              IP address, will be handled by the same member of the pool.
--// HTTP_COOKIE: With this persistence mode, the load balancing function will
--//              create a cookie on the first request from a client. Subsequent
--//              requests containing the same cookie value will be handled by
--//              the same member of the pool.
--// APP_COOKIE:  With this persistence mode, the load balancing function will
--//              rely on a cookie established by the backend application. All
--//              requests carrying the same cookie value will be handled by the
--//              same member of the pool.
--type SessionPersistence struct {
--	// The type of persistence mode
--	Type string `mapstructure:"type" json:"type"`
--
--	// Name of cookie if persistence mode is set appropriately
--	CookieName string `mapstructure:"cookie_name" json:"cookie_name,omitempty"`
--}
--
--// VirtualIP is the primary load balancing configuration object that specifies
--// the virtual IP address and port on which client traffic is received, as well
--// as other details such as the load balancing method to be used, protocol, etc.
--// This entity is sometimes known in LB products under the name of a "virtual
--// server", a "vserver" or a "listener".
--type VirtualIP struct {
--	// The unique ID for the VIP.
--	ID string `mapstructure:"id" json:"id"`
--
--	// Owner of the VIP. Only an admin user can specify a tenant ID other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--
--	// Human-readable name for the VIP. Does not have to be unique.
--	Name string `mapstructure:"name" json:"name"`
--
--	// Human-readable description for the VIP.
--	Description string `mapstructure:"description" json:"description"`
--
--	// The ID of the subnet on which to allocate the VIP address.
--	SubnetID string `mapstructure:"subnet_id" json:"subnet_id"`
--
--	// The IP address of the VIP.
--	Address string `mapstructure:"address" json:"address"`
--
--	// The protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS.
--	Protocol string `mapstructure:"protocol" json:"protocol"`
--
--	// The port on which to listen to client traffic that is associated with the
--	// VIP address. A valid value is from 0 to 65535.
--	ProtocolPort int `mapstructure:"protocol_port" json:"protocol_port"`
--
--	// The ID of the pool with which the VIP is associated.
--	PoolID string `mapstructure:"pool_id" json:"pool_id"`
--
--	// The ID of the port which belongs to the load balancer
--	PortID string `mapstructure:"port_id" json:"port_id"`
--
--	// Indicates whether connections in the same session will be processed by the
--	// same pool member or not.
--	Persistence SessionPersistence `mapstructure:"session_persistence" json:"session_persistence"`
--
--	// The maximum number of connections allowed for the VIP. Default is -1,
--	// meaning no limit.
--	ConnLimit int `mapstructure:"connection_limit" json:"connection_limit"`
--
--	// The administrative state of the VIP. A valid value is true (UP) or false (DOWN).
--	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
--
--	// The status of the VIP. Indicates whether the VIP is operational.
--	Status string `mapstructure:"status" json:"status"`
--}
--
--// VIPPage is the page returned by a pager when traversing over a
--// collection of routers.
--type VIPPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of routers has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p VIPPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"vips_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a RouterPage struct is empty.
--func (p VIPPage) IsEmpty() (bool, error) {
--	is, err := ExtractVIPs(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractVIPs accepts a Page struct, specifically a VIPPage struct,
--// and extracts the elements into a slice of VirtualIP structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractVIPs(page pagination.Page) ([]VirtualIP, error) {
--	var resp struct {
--		VIPs []VirtualIP `mapstructure:"vips" json:"vips"`
--	}
--
--	err := mapstructure.Decode(page.(VIPPage).Body, &resp)
--
--	return resp.VIPs, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a router.
--func (r commonResult) Extract() (*VirtualIP, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		VirtualIP *VirtualIP `mapstructure:"vip" json:"vip"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.VirtualIP, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
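
A short sketch of paging through VIPs with the List and ExtractVIPs helpers removed above, printing the persistence mode configured on each one; the client is again an assumed, pre-authenticated *gophercloud.ServiceClient.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
	"github.com/rackspace/gophercloud/pagination"
)

// listVIPs walks every page of VIPs visible to the tenant.
func listVIPs(client *gophercloud.ServiceClient) error {
	return vips.List(client, vips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
		all, err := vips.ExtractVIPs(page)
		if err != nil {
			return false, err
		}
		for _, vip := range all {
			fmt.Printf("%s %s persistence=%s\n", vip.Name, vip.Address, vip.Persistence.Type)
		}
		return true, nil
	})
}
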
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
-deleted file mode 100644
-index 2b6f67e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package vips
--
--import "github.com/rackspace/gophercloud"
--
--const (
--	rootPath     = "lb"
--	resourcePath = "vips"
--)
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath, resourcePath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, resourcePath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go
-deleted file mode 100644
-index 373da44..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/doc.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--// Package provider gives access to the provider Neutron plugin, allowing
--// network extended attributes. The provider extended attributes for networks
--// enable administrative users to specify how network objects map to the
--// underlying networking infrastructure. These extended attributes also appear
--// when administrative users query networks.
--//
--// For more information about extended attributes, see the NetworkExtAttrs
--// struct. The actual semantics of these attributes depend on the technology
--// back end of the particular plug-in. See the plug-in documentation and the
--// OpenStack Cloud Administrator Guide to understand which values should be
--// specific for each of these attributes when OpenStack Networking is deployed
--// with a particular plug-in. The examples shown in this chapter refer to the
--// Open vSwitch plug-in.
--//
--// The default policy settings enable only users with administrative rights to
--// specify these parameters in requests and to see their values in responses. By
--// default, the provider network extension attributes are completely hidden from
--// regular tenants. As a rule of thumb, if these attributes are not visible in a
--// GET /networks/<network-id> operation, this implies the user submitting the
--// request is not authorized to view or manipulate provider network attributes.
--package provider
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go
-deleted file mode 100644
-index 3453584..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go
-+++ /dev/null
-@@ -1,124 +0,0 @@
--package provider
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--// NetworkExtAttrs represents an extended form of a Network with additional fields.
--type NetworkExtAttrs struct {
--	// UUID for the network
--	ID string `mapstructure:"id" json:"id"`
--
--	// Human-readable name for the network. Might not be unique.
--	Name string `mapstructure:"name" json:"name"`
--
--	// The administrative state of network. If false (down), the network does not forward packets.
--	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
--
--	// Indicates whether network is currently operational. Possible values include
--	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
--	Status string `mapstructure:"status" json:"status"`
--
--	// Subnets associated with this network.
--	Subnets []string `mapstructure:"subnets" json:"subnets"`
--
--	// Owner of network. Only admin users can specify a tenant_id other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--
--	// Specifies whether the network resource can be accessed by any tenant or not.
--	Shared bool `mapstructure:"shared" json:"shared"`
--
--	// Specifies the nature of the physical network mapped to this network
--	// resource. Examples are flat, vlan, or gre.
--	NetworkType string `json:"provider:network_type" mapstructure:"provider:network_type"`
--
--	// Identifies the physical network on top of which this network object is
--	// being implemented. The OpenStack Networking API does not expose any facility
--	// for retrieving the list of available physical networks. As an example, in
--	// the Open vSwitch plug-in this is a symbolic name which is then mapped to
--	// specific bridges on each compute host through the Open vSwitch plug-in
--	// configuration file.
--	PhysicalNetwork string `json:"provider:physical_network" mapstructure:"provider:physical_network"`
--
--	// Identifies an isolated segment on the physical network; the nature of the
--	// segment depends on the segmentation model defined by network_type. For
--	// instance, if network_type is vlan, then this is a vlan identifier;
--	// otherwise, if network_type is gre, then this will be a gre key.
--	SegmentationID string `json:"provider:segmentation_id" mapstructure:"provider:segmentation_id"`
--}
--
--// ExtractGet decorates a GetResult struct returned from a networks.Get()
--// function with extended attributes.
--func ExtractGet(r networks.GetResult) (*NetworkExtAttrs, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Network *NetworkExtAttrs `json:"network"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Network, err
--}
--
--// ExtractCreate decorates a CreateResult struct returned from a networks.Create()
--// function with extended attributes.
--func ExtractCreate(r networks.CreateResult) (*NetworkExtAttrs, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Network *NetworkExtAttrs `json:"network"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Network, err
--}
--
--// ExtractUpdate decorates an UpdateResult struct returned from a
--// networks.Update() function with extended attributes.
--func ExtractUpdate(r networks.UpdateResult) (*NetworkExtAttrs, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Network *NetworkExtAttrs `json:"network"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Network, err
--}
--
--// ExtractList accepts a Page struct, specifically a NetworkPage struct, and
--// extracts the elements into a slice of NetworkExtAttrs structs. In other
--// words, a generic collection is mapped into a relevant slice.
--func ExtractList(page pagination.Page) ([]NetworkExtAttrs, error) {
--	var resp struct {
--		Networks []NetworkExtAttrs `mapstructure:"networks" json:"networks"`
--	}
--
--	err := mapstructure.Decode(page.(networks.NetworkPage).Body, &resp)
--
--	return resp.Networks, err
--}
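
A minimal sketch of the decoration pattern the provider package used: a plain networks.Get result is re-decoded with ExtractGet so the provider:* extended attributes become visible. Admin-level credentials and an existing network ID are assumed.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider"
	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
)

// showProviderAttrs fetches a network and re-decodes the result to expose
// provider:network_type, provider:physical_network and
// provider:segmentation_id.
func showProviderAttrs(client *gophercloud.ServiceClient, networkID string) error {
	n, err := provider.ExtractGet(networks.Get(client, networkID))
	if err != nil {
		return err
	}
	fmt.Println(n.NetworkType, n.PhysicalNetwork, n.SegmentationID)
	return nil
}
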
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go
-deleted file mode 100644
-index 9801b2e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go
-+++ /dev/null
-@@ -1,253 +0,0 @@
--package provider
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/networks"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "networks": [
--        {
--            "status": "ACTIVE",
--            "subnets": [
--                "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--            ],
--            "name": "private-network",
--            "admin_state_up": true,
--            "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--            "shared": true,
--            "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--            "provider:segmentation_id": null,
--            "provider:physical_network": null,
--            "provider:network_type": "local"
--        },
--        {
--            "status": "ACTIVE",
--            "subnets": [
--                "08eae331-0402-425a-923c-34f7cfe39c1b"
--            ],
--            "name": "private",
--            "admin_state_up": true,
--            "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
--            "shared": true,
--            "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--            "provider:segmentation_id": null,
--            "provider:physical_network": null,
--            "provider:network_type": "local"
--        }
--    ]
--}
--			`)
--	})
--
--	count := 0
--
--	networks.List(fake.ServiceClient(), networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractList(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		expected := []NetworkExtAttrs{
--			NetworkExtAttrs{
--				Status:          "ACTIVE",
--				Subnets:         []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"},
--				Name:            "private-network",
--				AdminStateUp:    true,
--				TenantID:        "4fd44f30292945e481c7b8a0c8908869",
--				Shared:          true,
--				ID:              "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--				NetworkType:     "local",
--				PhysicalNetwork: "",
--				SegmentationID:  "",
--			},
--			NetworkExtAttrs{
--				Status:          "ACTIVE",
--				Subnets:         []string{"08eae331-0402-425a-923c-34f7cfe39c1b"},
--				Name:            "private",
--				AdminStateUp:    true,
--				TenantID:        "26a7980765d0414dbc1fc1f88cdb7e6e",
--				Shared:          true,
--				ID:              "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--				NetworkType:     "local",
--				PhysicalNetwork: "",
--				SegmentationID:  "",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [
--            "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--        ],
--        "name": "private-network",
--        "provider:physical_network": null,
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "provider:network_type": "local",
--        "shared": true,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "provider:segmentation_id": null
--    }
--}
--			`)
--	})
--
--	res := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	n, err := ExtractGet(res)
--
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "", n.PhysicalNetwork)
--	th.AssertEquals(t, "local", n.NetworkType)
--	th.AssertEquals(t, "", n.SegmentationID)
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "network": {
--        "name": "sample_network",
--        "admin_state_up": true
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [
--            "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--        ],
--        "name": "private-network",
--        "provider:physical_network": null,
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "provider:network_type": "local",
--        "shared": true,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "provider:segmentation_id": null
--    }
--}
--		`)
--	})
--
--	options := networks.CreateOpts{Name: "sample_network", AdminStateUp: Up}
--	res := networks.Create(fake.ServiceClient(), options)
--	n, err := ExtractCreate(res)
--
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "", n.PhysicalNetwork)
--	th.AssertEquals(t, "local", n.NetworkType)
--	th.AssertEquals(t, "", n.SegmentationID)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--		"network": {
--				"name": "new_network_name",
--				"admin_state_up": false,
--				"shared": true
--		}
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [
--            "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--        ],
--        "name": "private-network",
--        "provider:physical_network": null,
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "provider:network_type": "local",
--        "shared": true,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "provider:segmentation_id": null
--    }
--}
--		`)
--	})
--
--	iTrue := true
--	options := networks.UpdateOpts{Name: "new_network_name", AdminStateUp: Down, Shared: &iTrue}
--	res := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options)
--	n, err := ExtractUpdate(res)
--
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "", n.PhysicalNetwork)
--	th.AssertEquals(t, "local", n.NetworkType)
--	th.AssertEquals(t, "", n.SegmentationID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
-deleted file mode 100644
-index 8ef455f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--// Package security contains functionality to work with security group and
--// security group rules Neutron resources.
--//
--// Security groups and security group rules allow administrators and tenants
--// to specify the type of traffic and direction (ingress/egress)
--// that is allowed to pass through a port. A security group is a container for
--// security group rules.
--//
--// When a port is created in Networking it is associated with a security group.
--// If a security group is not specified the port is associated with a 'default'
--// security group. By default, this group drops all ingress traffic and allows
--// all egress. Rules can be added to this group in order to change the behaviour.
--//
--// The basic characteristics of Neutron Security Groups are:
--//
--// For ingress traffic (to an instance)
--//  - Only traffic matched by security group rules is allowed.
--//  - When there is no rule defined, all traffic is dropped.
--//
--// For egress traffic (from an instance)
--//  - Only traffic matched by security group rules is allowed.
--//  - When there is no rule defined, all egress traffic is dropped.
--//  - When a new security group is created, rules to allow all egress traffic
--//    are automatically added.
--//
--// "default security group" is defined for each tenant.
--//  - For the default security group a rule which allows intercommunication
--//    among hosts associated with the default security group is defined by default.
--//  - As a result, all egress traffic and intercommunication in the default
--//    group are allowed and all ingress from outside of the default group is
--//    dropped by default (in the default security group).
--package security
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
-deleted file mode 100644
-index 0c970ae..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go
-+++ /dev/null
-@@ -1,107 +0,0 @@
--package groups
--
--import (
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the security group attributes you want to see returned. SortKey allows you to
--// sort by a particular network attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	ID       string `q:"id"`
--	Name     string `q:"name"`
--	TenantID string `q:"tenant_id"`
--	Limit    int    `q:"limit"`
--	Marker   string `q:"marker"`
--	SortKey  string `q:"sort_key"`
--	SortDir  string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// security groups. It accepts a ListOpts struct, which allows you to filter
--// and sort the returned collection for greater efficiency.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return SecGroupPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--var (
--	errNameRequired = fmt.Errorf("Name is required")
--)
--
--// CreateOpts contains all the values needed to create a new security group.
--type CreateOpts struct {
--	// Required. Human-readable name for the VIP. Does not have to be unique.
--	Name string
--
--	// Optional. Describes the security group.
--	Description string
--}
--
--// Create is an operation which provisions a new security group with default
--// security group rules for the IPv4 and IPv6 ether types.
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	var res CreateResult
--
--	// Validate required opts
--	if opts.Name == "" {
--		res.Err = errNameRequired
--		return res
--	}
--
--	type secgroup struct {
--		Name        string `json:"name"`
--		Description string `json:"description,omitempty"`
--	}
--
--	type request struct {
--		SecGroup secgroup `json:"security_group"`
--	}
--
--	reqBody := request{SecGroup: secgroup{
--		Name:        opts.Name,
--		Description: opts.Description,
--	}}
--
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// Get retrieves a particular security group based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Delete will permanently delete a particular security group based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
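
A hedged sketch of creating and then deleting a security group with the functions removed above. It assumes Extract returns a *SecGroup carrying the ID field shown in the list test, and that DeleteResult exposes Err in the same way as the sibling lbaas packages in this diff.

package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups"
)

// createAndDeleteGroup creates a security group (Name is the only required
// field) and removes it again.
func createAndDeleteGroup(client *gophercloud.ServiceClient) error {
	opts := groups.CreateOpts{Name: "new-webservers", Description: "security group for webservers"}
	sg, err := groups.Create(client, opts).Extract()
	if err != nil {
		return err
	}
	fmt.Println("created security group", sg.ID)
	return groups.Delete(client, sg.ID).Err
}
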
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go
-deleted file mode 100644
-index 5f074c7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests_test.go
-+++ /dev/null
-@@ -1,213 +0,0 @@
--package groups
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/security-groups", rootURL(fake.ServiceClient()))
--	th.AssertEquals(t, th.Endpoint()+"v2.0/security-groups/foo", resourceURL(fake.ServiceClient(), "foo"))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "security_groups": [
--        {
--            "description": "default",
--            "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--            "name": "default",
--            "security_group_rules": [],
--            "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--        }
--    ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractGroups(page)
--		if err != nil {
--			t.Errorf("Failed to extract secgroups: %v", err)
--			return false, err
--		}
--
--		expected := []SecGroup{
--			SecGroup{
--				Description: "default",
--				ID:          "85cc3048-abc3-43cc-89b3-377341426ac5",
--				Name:        "default",
--				Rules:       []rules.SecGroupRule{},
--				TenantID:    "e4f50856753b4dc6afee5fa6b9b6c550",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "security_group": {
--        "name": "new-webservers",
--        "description": "security group for webservers"
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "security_group": {
--        "description": "security group for webservers",
--        "id": "2076db17-a522-4506-91de-c6dd8e837028",
--        "name": "new-webservers",
--        "security_group_rules": [
--            {
--                "direction": "egress",
--                "ethertype": "IPv4",
--                "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d",
--                "port_range_max": null,
--                "port_range_min": null,
--                "protocol": null,
--                "remote_group_id": null,
--                "remote_ip_prefix": null,
--                "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028",
--                "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--            },
--            {
--                "direction": "egress",
--                "ethertype": "IPv6",
--                "id": "565b9502-12de-4ffd-91e9-68885cff6ae1",
--                "port_range_max": null,
--                "port_range_min": null,
--                "protocol": null,
--                "remote_group_id": null,
--                "remote_ip_prefix": null,
--                "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028",
--                "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--            }
--        ],
--        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--    }
--}
--    `)
--	})
--
--	opts := CreateOpts{Name: "new-webservers", Description: "security group for webservers"}
--	_, err := Create(fake.ServiceClient(), opts).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-groups/85cc3048-abc3-43cc-89b3-377341426ac5", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "security_group": {
--        "description": "default",
--        "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--        "name": "default",
--        "security_group_rules": [
--            {
--                "direction": "egress",
--                "ethertype": "IPv6",
--                "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
--                "port_range_max": null,
--                "port_range_min": null,
--                "protocol": null,
--                "remote_group_id": null,
--                "remote_ip_prefix": null,
--                "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--                "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--            },
--            {
--                "direction": "egress",
--                "ethertype": "IPv4",
--                "id": "93aa42e5-80db-4581-9391-3a608bd0e448",
--                "port_range_max": null,
--                "port_range_min": null,
--                "protocol": null,
--                "remote_group_id": null,
--                "remote_ip_prefix": null,
--                "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--                "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--            }
--        ],
--        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--    }
--}
--      `)
--	})
--
--	sg, err := Get(fake.ServiceClient(), "85cc3048-abc3-43cc-89b3-377341426ac5").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "default", sg.Description)
--	th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sg.ID)
--	th.AssertEquals(t, "default", sg.Name)
--	th.AssertEquals(t, 2, len(sg.Rules))
--	th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sg.TenantID)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-groups/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go
-deleted file mode 100644
-index 49db261..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/results.go
-+++ /dev/null
-@@ -1,108 +0,0 @@
--package groups
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// SecGroup represents a container for security group rules.
--type SecGroup struct {
--	// The UUID for the security group.
--	ID string
--
--	// Human-readable name for the security group. Might not be unique. Cannot be
--	// named "default" as that is automatically created for a tenant.
--	Name string
--
--	// The security group description.
--	Description string
--
--	// A slice of security group rules that dictate the permitted behaviour for
--	// traffic entering and leaving the group.
--	Rules []rules.SecGroupRule `json:"security_group_rules" mapstructure:"security_group_rules"`
--
--	// Owner of the security group. Only admin users can specify a TenantID
--	// other than their own.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--}
--
--// SecGroupPage is the page returned by a pager when traversing over a
--// collection of security groups.
--type SecGroupPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of security groups has
--// reached the end of a page and the pager seeks to traverse over a new one. In
--// order to do this, it needs to construct the next page's URL.
--func (p SecGroupPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"security_groups_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a SecGroupPage struct is empty.
--func (p SecGroupPage) IsEmpty() (bool, error) {
--	is, err := ExtractGroups(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct,
--// and extracts the elements into a slice of SecGroup structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractGroups(page pagination.Page) ([]SecGroup, error) {
--	var resp struct {
--		SecGroups []SecGroup `mapstructure:"security_groups" json:"security_groups"`
--	}
--
--	err := mapstructure.Decode(page.(SecGroupPage).Body, &resp)
--
--	return resp.SecGroups, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a security group.
--func (r commonResult) Extract() (*SecGroup, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		SecGroup *SecGroup `mapstructure:"security_group" json:"security_group"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.SecGroup, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
-deleted file mode 100644
-index 84f7324..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package groups
--
--import "github.com/rackspace/gophercloud"
--
--const rootPath = "security-groups"
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
-deleted file mode 100644
-index edaebe8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go
-+++ /dev/null
-@@ -1,183 +0,0 @@
--package rules
--
--import (
--	"fmt"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the security group attributes you want to see returned. SortKey allows you to
--// sort by a particular network attribute. SortDir sets the direction, and is
--// either `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Direction      string `q:"direction"`
--	EtherType      string `q:"ethertype"`
--	ID             string `q:"id"`
--	PortRangeMax   int    `q:"port_range_max"`
--	PortRangeMin   int    `q:"port_range_min"`
--	Protocol       string `q:"protocol"`
--	RemoteGroupID  string `q:"remote_group_id"`
--	RemoteIPPrefix string `q:"remote_ip_prefix"`
--	SecGroupID     string `q:"security_group_id"`
--	TenantID       string `q:"tenant_id"`
--	Limit          int    `q:"limit"`
--	Marker         string `q:"marker"`
--	SortKey        string `q:"sort_key"`
--	SortDir        string `q:"sort_dir"`
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// security group rules. It accepts a ListOpts struct, which allows you to filter
--// and sort the returned collection for greater efficiency.
--func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
--	q, err := gophercloud.BuildQueryString(&opts)
--	if err != nil {
--		return pagination.Pager{Err: err}
--	}
--	u := rootURL(c) + q.String()
--	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
--		return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Errors
--var (
--	errValidDirectionRequired = fmt.Errorf("A valid Direction is required")
--	errValidEtherTypeRequired = fmt.Errorf("A valid EtherType is required")
--	errSecGroupIDRequired     = fmt.Errorf("A valid SecGroupID is required")
--	errValidProtocolRequired  = fmt.Errorf("A valid Protocol is required")
--)
--
--// Constants useful for CreateOpts
--const (
--	DirIngress   = "ingress"
--	DirEgress    = "egress"
--	Ether4       = "IPv4"
--	Ether6       = "IPv6"
--	ProtocolTCP  = "tcp"
--	ProtocolUDP  = "udp"
--	ProtocolICMP = "icmp"
--)
--
--// CreateOpts contains all the values needed to create a new security group rule.
--type CreateOpts struct {
--	// Required. Must be either "ingress" or "egress": the direction in which the
--	// security group rule is applied.
--	Direction string
--
--	// Required. Must be "IPv4" or "IPv6", and addresses represented in CIDR must
--	// match the ingress or egress rules.
--	EtherType string
--
--	// Required. The security group ID to associate with this security group rule.
--	SecGroupID string
--
--	// Optional. The maximum port number in the range that is matched by the
--	// security group rule. The PortRangeMin attribute constrains the PortRangeMax
--	// attribute. If the protocol is ICMP, this value must be an ICMP type.
--	PortRangeMax int
--
--	// Optional. The minimum port number in the range that is matched by the
--	// security group rule. If the protocol is TCP or UDP, this value must be
--	// less than or equal to the value of the PortRangeMax attribute. If the
--	// protocol is ICMP, this value must be an ICMP type.
--	PortRangeMin int
--
--	// Optional. The protocol that is matched by the security group rule. Valid
--	// values are "tcp", "udp", "icmp" or an empty string.
--	Protocol string
--
--	// Optional. The remote group ID to be associated with this security group
--	// rule. You can specify either RemoteGroupID or RemoteIPPrefix.
--	RemoteGroupID string
--
--	// Optional. The remote IP prefix to be associated with this security group
--	// rule. You can specify either RemoteGroupID or RemoteIPPrefix. This
--	// attribute matches the specified IP prefix as the source IP address of the
--	// IP packet.
--	RemoteIPPrefix string
--}
--
--// Create is an operation which adds a new security group rule and associates
--// it with an existing security group (identified by opts.SecGroupID).
--func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult {
--	var res CreateResult
--
--	// Validate required opts
--	if opts.Direction != DirIngress && opts.Direction != DirEgress {
--		res.Err = errValidDirectionRequired
--		return res
--	}
--	if opts.EtherType != Ether4 && opts.EtherType != Ether6 {
--		res.Err = errValidEtherTypeRequired
--		return res
--	}
--	if opts.SecGroupID == "" {
--		res.Err = errSecGroupIDRequired
--		return res
--	}
--	if opts.Protocol != "" && opts.Protocol != ProtocolTCP && opts.Protocol != ProtocolUDP && opts.Protocol != ProtocolICMP {
--		res.Err = errValidProtocolRequired
--		return res
--	}
--
--	type secrule struct {
--		Direction      string `json:"direction"`
--		EtherType      string `json:"ethertype"`
--		SecGroupID     string `json:"security_group_id"`
--		PortRangeMax   int    `json:"port_range_max,omitempty"`
--		PortRangeMin   int    `json:"port_range_min,omitempty"`
--		Protocol       string `json:"protocol,omitempty"`
--		RemoteGroupID  string `json:"remote_group_id,omitempty"`
--		RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`
--	}
--
--	type request struct {
--		SecRule secrule `json:"security_group_rule"`
--	}
--
--	reqBody := request{SecRule: secrule{
--		Direction:      opts.Direction,
--		EtherType:      opts.EtherType,
--		SecGroupID:     opts.SecGroupID,
--		PortRangeMax:   opts.PortRangeMax,
--		PortRangeMin:   opts.PortRangeMin,
--		Protocol:       opts.Protocol,
--		RemoteGroupID:  opts.RemoteGroupID,
--		RemoteIPPrefix: opts.RemoteIPPrefix,
--	}}
--
--	_, res.Err = perigee.Request("POST", rootURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// Get retrieves a particular security group rule based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Delete will permanently delete a particular security group rule based on its unique ID.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", resourceURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
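
For reference, a minimal sketch of how the rule-creation API removed above was typically called. The `client` parameter and the helper name are assumptions; the constants and CreateOpts fields are taken directly from the code in this diff.

package example

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
)

// allowHTTP is a hypothetical helper that opens TCP/80 ingress on an existing
// security group. Direction, EtherType and SecGroupID are the fields that
// Create validates before issuing the POST.
func allowHTTP(client *gophercloud.ServiceClient, secGroupID string) (*rules.SecGroupRule, error) {
    return rules.Create(client, rules.CreateOpts{
        Direction:    rules.DirIngress,
        EtherType:    rules.Ether4,
        SecGroupID:   secGroupID,
        Protocol:     rules.ProtocolTCP,
        PortRangeMin: 80,
        PortRangeMax: 80,
    }).Extract()
}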
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go
-deleted file mode 100644
-index b5afef3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests_test.go
-+++ /dev/null
-@@ -1,243 +0,0 @@
--package rules
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestURLs(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.AssertEquals(t, th.Endpoint()+"v2.0/security-group-rules", rootURL(fake.ServiceClient()))
--	th.AssertEquals(t, th.Endpoint()+"v2.0/security-group-rules/foo", resourceURL(fake.ServiceClient(), "foo"))
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "security_group_rules": [
--        {
--            "direction": "egress",
--            "ethertype": "IPv6",
--            "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
--            "port_range_max": null,
--            "port_range_min": null,
--            "protocol": null,
--            "remote_group_id": null,
--            "remote_ip_prefix": null,
--            "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--            "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--        },
--        {
--            "direction": "egress",
--            "ethertype": "IPv4",
--            "id": "93aa42e5-80db-4581-9391-3a608bd0e448",
--            "port_range_max": null,
--            "port_range_min": null,
--            "protocol": null,
--            "remote_group_id": null,
--            "remote_ip_prefix": null,
--            "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--            "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--        }
--    ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractRules(page)
--		if err != nil {
--			t.Errorf("Failed to extract secrules: %v", err)
--			return false, err
--		}
--
--		expected := []SecGroupRule{
--			SecGroupRule{
--				Direction:      "egress",
--				EtherType:      "IPv6",
--				ID:             "3c0e45ff-adaf-4124-b083-bf390e5482ff",
--				PortRangeMax:   0,
--				PortRangeMin:   0,
--				Protocol:       "",
--				RemoteGroupID:  "",
--				RemoteIPPrefix: "",
--				SecGroupID:     "85cc3048-abc3-43cc-89b3-377341426ac5",
--				TenantID:       "e4f50856753b4dc6afee5fa6b9b6c550",
--			},
--			SecGroupRule{
--				Direction:      "egress",
--				EtherType:      "IPv4",
--				ID:             "93aa42e5-80db-4581-9391-3a608bd0e448",
--				PortRangeMax:   0,
--				PortRangeMin:   0,
--				Protocol:       "",
--				RemoteGroupID:  "",
--				RemoteIPPrefix: "",
--				SecGroupID:     "85cc3048-abc3-43cc-89b3-377341426ac5",
--				TenantID:       "e4f50856753b4dc6afee5fa6b9b6c550",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "security_group_rule": {
--        "direction": "ingress",
--        "port_range_min": 80,
--        "ethertype": "IPv4",
--        "port_range_max": 80,
--        "protocol": "tcp",
--        "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--        "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a"
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "security_group_rule": {
--        "direction": "ingress",
--        "ethertype": "IPv4",
--        "id": "2bc0accf-312e-429a-956e-e4407625eb62",
--        "port_range_max": 80,
--        "port_range_min": 80,
--        "protocol": "tcp",
--        "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--        "remote_ip_prefix": null,
--        "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a",
--        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--    }
--}
--    `)
--	})
--
--	opts := CreateOpts{
--		Direction:     "ingress",
--		PortRangeMin:  80,
--		EtherType:     "IPv4",
--		PortRangeMax:  80,
--		Protocol:      "tcp",
--		RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5",
--		SecGroupID:    "a7734e61-b545-452d-a3cd-0189cbd9747a",
--	}
--	_, err := Create(fake.ServiceClient(), opts).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestRequiredCreateOpts(t *testing.T) {
--	res := Create(fake.ServiceClient(), CreateOpts{Direction: "something"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: "something"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: Ether4})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--	res = Create(fake.ServiceClient(), CreateOpts{Direction: DirIngress, EtherType: Ether4, SecGroupID: "something", Protocol: "foo"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-group-rules/3c0e45ff-adaf-4124-b083-bf390e5482ff", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "security_group_rule": {
--        "direction": "egress",
--        "ethertype": "IPv6",
--        "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
--        "port_range_max": null,
--        "port_range_min": null,
--        "protocol": null,
--        "remote_group_id": null,
--        "remote_ip_prefix": null,
--        "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5",
--        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550"
--    }
--}
--      `)
--	})
--
--	sr, err := Get(fake.ServiceClient(), "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, "egress", sr.Direction)
--	th.AssertEquals(t, "IPv6", sr.EtherType)
--	th.AssertEquals(t, "3c0e45ff-adaf-4124-b083-bf390e5482ff", sr.ID)
--	th.AssertEquals(t, 0, sr.PortRangeMax)
--	th.AssertEquals(t, 0, sr.PortRangeMin)
--	th.AssertEquals(t, "", sr.Protocol)
--	th.AssertEquals(t, "", sr.RemoteGroupID)
--	th.AssertEquals(t, "", sr.RemoteIPPrefix)
--	th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sr.SecGroupID)
--	th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sr.TenantID)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/security-group-rules/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
-deleted file mode 100644
-index 6e13857..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
-+++ /dev/null
-@@ -1,133 +0,0 @@
--package rules
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// SecGroupRule represents a rule to dictate the behaviour of incoming or
--// outgoing traffic for a particular security group.
--type SecGroupRule struct {
--	// The UUID for this security group rule.
--	ID string
--
--	// The direction in which the security group rule is applied. The only values
--	// allowed are "ingress" or "egress". For a compute instance, an ingress
--	// security group rule is applied to incoming (ingress) traffic for that
--	// instance. An egress rule is applied to traffic leaving the instance.
--	Direction string
--
--	// Must be IPv4 or IPv6, and addresses represented in CIDR must match the
--	// ingress or egress rules.
--	EtherType string `json:"ethertype" mapstructure:"ethertype"`
--
--	// The security group ID to associate with this security group rule.
--	SecGroupID string `json:"security_group_id" mapstructure:"security_group_id"`
--
--	// The minimum port number in the range that is matched by the security group
--	// rule. If the protocol is TCP or UDP, this value must be less than or equal
--	// to the value of the PortRangeMax attribute. If the protocol is ICMP, this
--	// value must be an ICMP type.
--	PortRangeMin int `json:"port_range_min" mapstructure:"port_range_min"`
--
--	// The maximum port number in the range that is matched by the security group
--	// rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If
--	// the protocol is ICMP, this value must be an ICMP type.
--	PortRangeMax int `json:"port_range_max" mapstructure:"port_range_max"`
--
--	// The protocol that is matched by the security group rule. Valid values are
--	// "tcp", "udp", "icmp" or an empty string.
--	Protocol string
--
--	// The remote group ID to be associated with this security group rule. You
--	// can specify either RemoteGroupID or RemoteIPPrefix.
--	RemoteGroupID string `json:"remote_group_id" mapstructure:"remote_group_id"`
--
--	// The remote IP prefix to be associated with this security group rule. You
--	// can specify either RemoteGroupID or RemoteIPPrefix . This attribute
--	// matches the specified IP prefix as the source IP address of the IP packet.
--	RemoteIPPrefix string `json:"remote_ip_prefix" mapstructure:"remote_ip_prefix"`
--
--	// The owner of this security group rule.
--	TenantID string `json:"tenant_id" mapstructure:"tenant_id"`
--}
--
--// SecGroupRulePage is the page returned by a pager when traversing over a
--// collection of security group rules.
--type SecGroupRulePage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of security group rules has
--// reached the end of a page and the pager seeks to traverse over a new one. In
--// order to do this, it needs to construct the next page's URL.
--func (p SecGroupRulePage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"security_group_rules_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a SecGroupRulePage struct is empty.
--func (p SecGroupRulePage) IsEmpty() (bool, error) {
--	is, err := ExtractRules(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct,
--// and extracts the elements into a slice of SecGroupRule structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractRules(page pagination.Page) ([]SecGroupRule, error) {
--	var resp struct {
--		SecGroupRules []SecGroupRule `mapstructure:"security_group_rules" json:"security_group_rules"`
--	}
--
--	err := mapstructure.Decode(page.(SecGroupRulePage).Body, &resp)
--
--	return resp.SecGroupRules, err
--}
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a security rule.
--func (r commonResult) Extract() (*SecGroupRule, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		SecGroupRule *SecGroupRule `mapstructure:"security_group_rule" json:"security_group_rule"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.SecGroupRule, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
-deleted file mode 100644
-index 8e2b2bb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package rules
--
--import "github.com/rackspace/gophercloud"
--
--const rootPath = "security-group-rules"
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL(rootPath)
--}
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL(rootPath, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
-deleted file mode 100644
-index c87a7ce..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/doc.go
-+++ /dev/null
-@@ -1,9 +0,0 @@
--// Package networks contains functionality for working with Neutron network
--// resources. A network is an isolated virtual layer-2 broadcast domain that is
--// typically reserved for the tenant who created it (unless you configure the
--// network to be shared). Tenants can create multiple networks until the
--// per-tenant quota threshold is reached.
--//
--// In the v2.0 Networking API, the network is the main entity. Ports and subnets
--// are always associated with a network.
--package networks
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go
-deleted file mode 100644
-index 83c4a6a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/errors.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package networks
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go
-deleted file mode 100644
-index eaa7136..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go
-+++ /dev/null
-@@ -1,209 +0,0 @@
--package networks
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--type networkOpts struct {
--	AdminStateUp *bool
--	Name         string
--	Shared       *bool
--	TenantID     string
--}
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToNetworkListQuery() (string, error)
--}
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the network attributes you want to see returned. SortKey allows you to sort
--// by a particular network attribute. SortDir sets the direction, and is either
--// `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Status       string `q:"status"`
--	Name         string `q:"name"`
--	AdminStateUp *bool  `q:"admin_state_up"`
--	TenantID     string `q:"tenant_id"`
--	Shared       *bool  `q:"shared"`
--	ID           string `q:"id"`
--	Marker       string `q:"marker"`
--	Limit        int    `q:"limit"`
--	SortKey      string `q:"sort_key"`
--	SortDir      string `q:"sort_dir"`
--}
--
--// ToNetworkListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToNetworkListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// networks. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(c)
--	if opts != nil {
--		query, err := opts.ToNetworkListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
--		return NetworkPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Get retrieves a specific network based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// CreateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Create operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type CreateOptsBuilder interface {
--	ToNetworkCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts is the common options struct used in this package's Create
--// operation.
--type CreateOpts networkOpts
--
--// ToNetworkCreateMap casts a CreateOpts struct to a map.
--func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) {
--	n := make(map[string]interface{})
--
--	if opts.AdminStateUp != nil {
--		n["admin_state_up"] = &opts.AdminStateUp
--	}
--	if opts.Name != "" {
--		n["name"] = opts.Name
--	}
--	if opts.Shared != nil {
--		n["shared"] = &opts.Shared
--	}
--	if opts.TenantID != "" {
--		n["tenant_id"] = opts.TenantID
--	}
--
--	return map[string]interface{}{"network": n}, nil
--}
--
--// Create accepts a CreateOpts struct and creates a new network using the values
--// provided. This operation does not actually require a request body, i.e. the
--// CreateOpts struct argument can be empty.
--//
--// The tenant ID that is contained in the URI is the tenant that creates the
--// network. An admin user, however, has the option of specifying another tenant
--// ID in the CreateOpts struct.
--func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToNetworkCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	// Send request to API
--	_, res.Err = perigee.Request("POST", createURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--	return res
--}
--
--// UpdateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Update operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type UpdateOptsBuilder interface {
--	ToNetworkUpdateMap() (map[string]interface{}, error)
--}
--
--// UpdateOpts is the common options struct used in this package's Update
--// operation.
--type UpdateOpts networkOpts
--
--// ToNetworkUpdateMap casts a UpdateOpts struct to a map.
--func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) {
--	n := make(map[string]interface{})
--
--	if opts.AdminStateUp != nil {
--		n["admin_state_up"] = &opts.AdminStateUp
--	}
--	if opts.Name != "" {
--		n["name"] = opts.Name
--	}
--	if opts.Shared != nil {
--		n["shared"] = &opts.Shared
--	}
--
--	return map[string]interface{}{"network": n}, nil
--}
--
--// Update accepts a UpdateOpts struct and updates an existing network using the
--// values provided. For more information, see the Create function.
--func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--
--	reqBody, err := opts.ToNetworkUpdateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	// Send request to API
--	_, res.Err = perigee.Request("PUT", getURL(c, networkID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201},
--	})
--
--	return res
--}
--
--// Delete accepts a unique ID and deletes the network associated with it.
--func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, networkID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
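
For reference, the List call deleted above was normally consumed through the pager together with the ExtractNetworks helper from this package, roughly as in this sketch; the `client` parameter and helper name are assumptions.

package example

import (
    "fmt"

    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
    "github.com/rackspace/gophercloud/pagination"
)

// printActiveNetworks is a hypothetical helper that walks every page of the
// network listing and prints the ACTIVE ones.
func printActiveNetworks(client *gophercloud.ServiceClient) error {
    return networks.List(client, networks.ListOpts{Status: "ACTIVE"}).EachPage(
        func(page pagination.Page) (bool, error) {
            nets, err := networks.ExtractNetworks(page)
            if err != nil {
                return false, err
            }
            for _, n := range nets {
                fmt.Println(n.ID, n.Name)
            }
            return true, nil // keep iterating over any remaining pages
        })
}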
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go
-deleted file mode 100644
-index a263b7b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests_test.go
-+++ /dev/null
-@@ -1,275 +0,0 @@
--package networks
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "networks": [
--        {
--            "status": "ACTIVE",
--            "subnets": [
--                "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--            ],
--            "name": "private-network",
--            "admin_state_up": true,
--            "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--            "shared": true,
--            "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--        },
--        {
--            "status": "ACTIVE",
--            "subnets": [
--                "08eae331-0402-425a-923c-34f7cfe39c1b"
--            ],
--            "name": "private",
--            "admin_state_up": true,
--            "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
--            "shared": true,
--            "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324"
--        }
--    ]
--}
--			`)
--	})
--
--	client := fake.ServiceClient()
--	count := 0
--
--	List(client, ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNetworks(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		expected := []Network{
--			Network{
--				Status:       "ACTIVE",
--				Subnets:      []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"},
--				Name:         "private-network",
--				AdminStateUp: true,
--				TenantID:     "4fd44f30292945e481c7b8a0c8908869",
--				Shared:       true,
--				ID:           "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--			},
--			Network{
--				Status:       "ACTIVE",
--				Subnets:      []string{"08eae331-0402-425a-923c-34f7cfe39c1b"},
--				Name:         "private",
--				AdminStateUp: true,
--				TenantID:     "26a7980765d0414dbc1fc1f88cdb7e6e",
--				Shared:       true,
--				ID:           "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [
--            "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--        ],
--        "name": "private-network",
--        "admin_state_up": true,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "shared": true,
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--			`)
--	})
--
--	n, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertDeepEquals(t, n.Subnets, []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"})
--	th.AssertEquals(t, n.Name, "private-network")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.TenantID, "4fd44f30292945e481c7b8a0c8908869")
--	th.AssertEquals(t, n.Shared, true)
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "network": {
--        "name": "sample_network",
--        "admin_state_up": true
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [],
--        "name": "net1",
--        "admin_state_up": true,
--        "tenant_id": "9bacb3c5d39d41a79512987f338cf177",
--        "shared": false,
--        "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
--    }
--}
--		`)
--	})
--
--	iTrue := true
--	options := CreateOpts{Name: "sample_network", AdminStateUp: &iTrue}
--	n, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertDeepEquals(t, n.Subnets, []string{})
--	th.AssertEquals(t, n.Name, "net1")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.TenantID, "9bacb3c5d39d41a79512987f338cf177")
--	th.AssertEquals(t, n.Shared, false)
--	th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--}
--
--func TestCreateWithOptionalFields(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--	"network": {
--			"name": "sample_network",
--			"admin_state_up": true,
--			"shared": true,
--			"tenant_id": "12345"
--	}
--}
--		`)
--
--		w.WriteHeader(http.StatusCreated)
--	})
--
--	iTrue := true
--	options := CreateOpts{Name: "sample_network", AdminStateUp: &iTrue, Shared: &iTrue, TenantID: "12345"}
--	_, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--		"network": {
--				"name": "new_network_name",
--				"admin_state_up": false,
--				"shared": true
--		}
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "status": "ACTIVE",
--        "subnets": [],
--        "name": "new_network_name",
--        "admin_state_up": false,
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "shared": true,
--        "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
--    }
--}
--		`)
--	})
--
--	iTrue, iFalse := true, false
--	options := UpdateOpts{Name: "new_network_name", AdminStateUp: &iFalse, Shared: &iTrue}
--	n, err := Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Name, "new_network_name")
--	th.AssertEquals(t, n.AdminStateUp, false)
--	th.AssertEquals(t, n.Shared, true)
--	th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--	th.AssertNoErr(t, res.Err)
--}
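
Similarly, a sketch of the Update path exercised by the test above; the helper name and `client` parameter are assumptions, while the option fields match UpdateOpts from requests.go.

package example

import (
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
)

// renameAndShare is a hypothetical helper mirroring TestUpdate: it renames an
// existing network and marks it shared, leaving admin_state_up untouched.
func renameAndShare(client *gophercloud.ServiceClient, id string) (*networks.Network, error) {
    shared := true
    return networks.Update(client, id, networks.UpdateOpts{
        Name:   "new_network_name",
        Shared: &shared,
    }).Extract()
}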
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go
-deleted file mode 100644
-index 3ecedde..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/results.go
-+++ /dev/null
-@@ -1,116 +0,0 @@
--package networks
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a network resource.
--func (r commonResult) Extract() (*Network, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Network *Network `json:"network"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Network, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// Network represents, well, a network.
--type Network struct {
--	// UUID for the network
--	ID string `mapstructure:"id" json:"id"`
--
--	// Human-readable name for the network. Might not be unique.
--	Name string `mapstructure:"name" json:"name"`
--
--	// The administrative state of network. If false (down), the network does not forward packets.
--	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
--
--	// Indicates whether network is currently operational. Possible values include
--	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
--	Status string `mapstructure:"status" json:"status"`
--
--	// Subnets associated with this network.
--	Subnets []string `mapstructure:"subnets" json:"subnets"`
--
--	// Owner of network. Only admin users can specify a tenant_id other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--
--	// Specifies whether the network resource can be accessed by any tenant or not.
--	Shared bool `mapstructure:"shared" json:"shared"`
--}
--
--// NetworkPage is the page returned by a pager when traversing over a
--// collection of networks.
--type NetworkPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of networks has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p NetworkPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"networks_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a NetworkPage struct is empty.
--func (p NetworkPage) IsEmpty() (bool, error) {
--	is, err := ExtractNetworks(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct,
--// and extracts the elements into a slice of Network structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractNetworks(page pagination.Page) ([]Network, error) {
--	var resp struct {
--		Networks []Network `mapstructure:"networks" json:"networks"`
--	}
--
--	err := mapstructure.Decode(page.(NetworkPage).Body, &resp)
--
--	return resp.Networks, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
-deleted file mode 100644
-index 33c2387..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--package networks
--
--import "github.com/rackspace/gophercloud"
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("networks", id)
--}
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("networks")
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
-deleted file mode 100644
-index caf77db..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/urls_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package networks
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/networks/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "v2.0/networks"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "v2.0/networks"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/networks/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go
-deleted file mode 100644
-index f16a4bb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/doc.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// Package ports contains functionality for working with Neutron port resources.
--// A port represents a virtual switch port on a logical network switch. Virtual
--// instances attach their interfaces into ports. The logical port also defines
--// the MAC address and the IP address(es) to be assigned to the interfaces
--// plugged into them. When IP addresses are associated to a port, this also
--// implies the port is associated with a subnet, as the IP address was taken
--// from the allocation pool for a specific subnet.
--package ports
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go
-deleted file mode 100644
-index 111d977..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/errors.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package ports
--
--import "fmt"
--
--func err(str string) error {
--	return fmt.Errorf("%s", str)
--}
--
--var (
--	errNetworkIDRequired = err("A Network ID is required")
--)
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go
-deleted file mode 100644
-index 3399907..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go
-+++ /dev/null
-@@ -1,245 +0,0 @@
--package ports
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToPortListQuery() (string, error)
--}
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the port attributes you want to see returned. SortKey allows you to sort
--// by a particular port attribute. SortDir sets the direction, and is either
--// `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Status       string `q:"status"`
--	Name         string `q:"name"`
--	AdminStateUp *bool  `q:"admin_state_up"`
--	NetworkID    string `q:"network_id"`
--	TenantID     string `q:"tenant_id"`
--	DeviceOwner  string `q:"device_owner"`
--	MACAddress   string `q:"mac_address"`
--	ID           string `q:"id"`
--	DeviceID     string `q:"device_id"`
--	Limit        int    `q:"limit"`
--	Marker       string `q:"marker"`
--	SortKey      string `q:"sort_key"`
--	SortDir      string `q:"sort_dir"`
--}
--
--// ToPortListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToPortListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// ports. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those ports that are owned by the tenant
--// who submits the request, unless the request is submitted by a user with
--// administrative rights.
--func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(c)
--	if opts != nil {
--		query, err := opts.ToPortListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
--		return PortPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Get retrieves a specific port based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// CreateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Create operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type CreateOptsBuilder interface {
--	ToPortCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts represents the attributes used when creating a new port.
--type CreateOpts struct {
--	NetworkID      string
--	Name           string
--	AdminStateUp   *bool
--	MACAddress     string
--	FixedIPs       interface{}
--	DeviceID       string
--	DeviceOwner    string
--	TenantID       string
--	SecurityGroups []string
--}
--
--// ToPortCreateMap casts a CreateOpts struct to a map.
--func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) {
--	p := make(map[string]interface{})
--
--	if opts.NetworkID == "" {
--		return nil, errNetworkIDRequired
--	}
--	p["network_id"] = opts.NetworkID
--
--	if opts.DeviceID != "" {
--		p["device_id"] = opts.DeviceID
--	}
--	if opts.DeviceOwner != "" {
--		p["device_owner"] = opts.DeviceOwner
--	}
--	if opts.FixedIPs != nil {
--		p["fixed_ips"] = opts.FixedIPs
--	}
--	if opts.SecurityGroups != nil {
--		p["security_groups"] = opts.SecurityGroups
--	}
--	if opts.TenantID != "" {
--		p["tenant_id"] = opts.TenantID
--	}
--	if opts.AdminStateUp != nil {
--		p["admin_state_up"] = &opts.AdminStateUp
--	}
--	if opts.Name != "" {
--		p["name"] = opts.Name
--	}
--	if opts.MACAddress != "" {
--		p["mac_address"] = opts.MACAddress
--	}
--
--	return map[string]interface{}{"port": p}, nil
--}
--
--// Create accepts a CreateOpts struct and creates a new network using the values
--// provided. You must remember to provide a NetworkID value.
--func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToPortCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	// Response
--	_, res.Err = perigee.Request("POST", createURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--		DumpReqJson: true,
--	})
--
--	return res
--}
--
--// UpdateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Update operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type UpdateOptsBuilder interface {
--	ToPortUpdateMap() (map[string]interface{}, error)
--}
--
--// UpdateOpts represents the attributes used when updating an existing port.
--type UpdateOpts struct {
--	Name           string
--	AdminStateUp   *bool
--	FixedIPs       interface{}
--	DeviceID       string
--	DeviceOwner    string
--	SecurityGroups []string
--}
--
--// ToPortUpdateMap casts an UpdateOpts struct to a map.
--func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) {
--	p := make(map[string]interface{})
--
--	if opts.DeviceID != "" {
--		p["device_id"] = opts.DeviceID
--	}
--	if opts.DeviceOwner != "" {
--		p["device_owner"] = opts.DeviceOwner
--	}
--	if opts.FixedIPs != nil {
--		p["fixed_ips"] = opts.FixedIPs
--	}
--	if opts.SecurityGroups != nil {
--		p["security_groups"] = opts.SecurityGroups
--	}
--	if opts.AdminStateUp != nil {
--		p["admin_state_up"] = &opts.AdminStateUp
--	}
--	if opts.Name != "" {
--		p["name"] = opts.Name
--	}
--
--	return map[string]interface{}{"port": p}, nil
--}
--
--// Update accepts a UpdateOpts struct and updates an existing port using the
--// values provided.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--
--	reqBody, err := opts.ToPortUpdateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("PUT", updateURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201},
--	})
--	return res
--}
--
--// Delete accepts a unique ID and deletes the port associated with it.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go
-deleted file mode 100644
-index 9e323ef..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests_test.go
-+++ /dev/null
-@@ -1,321 +0,0 @@
--package ports
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "ports": [
--        {
--            "status": "ACTIVE",
--            "binding:host_id": "devstack",
--            "name": "",
--            "admin_state_up": true,
--            "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
--            "tenant_id": "",
--            "device_owner": "network:router_gateway",
--            "mac_address": "fa:16:3e:58:42:ed",
--            "fixed_ips": [
--                {
--                    "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062",
--                    "ip_address": "172.24.4.2"
--                }
--            ],
--            "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
--            "security_groups": [],
--            "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824"
--        }
--    ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractPorts(page)
--		if err != nil {
--			t.Errorf("Failed to extract subnets: %v", err)
--			return false, nil
--		}
--
--		expected := []Port{
--			Port{
--				Status:       "ACTIVE",
--				Name:         "",
--				AdminStateUp: true,
--				NetworkID:    "70c1db1f-b701-45bd-96e0-a313ee3430b3",
--				TenantID:     "",
--				DeviceOwner:  "network:router_gateway",
--				MACAddress:   "fa:16:3e:58:42:ed",
--				FixedIPs: []IP{
--					IP{
--						SubnetID:  "008ba151-0b8c-4a67-98b5-0d2b87666062",
--						IPAddress: "172.24.4.2",
--					},
--				},
--				ID:             "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
--				SecurityGroups: []string{},
--				DeviceID:       "9ae135f4-b6e0-4dad-9e91-3c223e385824",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "port": {
--        "status": "ACTIVE",
--        "name": "",
--        "admin_state_up": true,
--        "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
--        "tenant_id": "7e02058126cc4950b75f9970368ba177",
--        "device_owner": "network:router_interface",
--        "mac_address": "fa:16:3e:23:fd:d7",
--        "fixed_ips": [
--            {
--                "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2",
--                "ip_address": "10.0.0.1"
--            }
--        ],
--        "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2",
--        "security_groups": [],
--        "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e"
--    }
--}
--			`)
--	})
--
--	n, err := Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertEquals(t, n.Name, "")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7")
--	th.AssertEquals(t, n.TenantID, "7e02058126cc4950b75f9970368ba177")
--	th.AssertEquals(t, n.DeviceOwner, "network:router_interface")
--	th.AssertEquals(t, n.MACAddress, "fa:16:3e:23:fd:d7")
--	th.AssertDeepEquals(t, n.FixedIPs, []IP{
--		IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.1"},
--	})
--	th.AssertEquals(t, n.ID, "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2")
--	th.AssertDeepEquals(t, n.SecurityGroups, []string{})
--	th.AssertEquals(t, n.Status, "ACTIVE")
--	th.AssertEquals(t, n.DeviceID, "5e3898d7-11be-483e-9732-b2f5eccd2b2e")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "port": {
--        "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
--        "name": "private-port",
--        "admin_state_up": true,
--				"fixed_ips": [
--						{
--								"subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2",
--								"ip_address": "10.0.0.2"
--						}
--				],
--				"security_groups": ["foo"]
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "port": {
--        "status": "DOWN",
--        "name": "private-port",
--        "allowed_address_pairs": [],
--        "admin_state_up": true,
--        "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
--        "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa",
--        "device_owner": "",
--        "mac_address": "fa:16:3e:c9:cb:f0",
--        "fixed_ips": [
--            {
--                "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2",
--                "ip_address": "10.0.0.2"
--            }
--        ],
--        "id": "65c0ee9f-d634-4522-8954-51021b570b0d",
--        "security_groups": [
--            "f0ac4394-7e4a-4409-9701-ba8be283dbc3"
--        ],
--        "device_id": ""
--    }
--}
--		`)
--	})
--
--	asu := true
--	options := CreateOpts{
--		Name:         "private-port",
--		AdminStateUp: &asu,
--		NetworkID:    "a87cc70a-3e15-4acf-8205-9b711a3531b7",
--		FixedIPs: []IP{
--			IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"},
--		},
--		SecurityGroups: []string{"foo"},
--	}
--	n, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Status, "DOWN")
--	th.AssertEquals(t, n.Name, "private-port")
--	th.AssertEquals(t, n.AdminStateUp, true)
--	th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7")
--	th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa")
--	th.AssertEquals(t, n.DeviceOwner, "")
--	th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0")
--	th.AssertDeepEquals(t, n.FixedIPs, []IP{
--		IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"},
--	})
--	th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d")
--	th.AssertDeepEquals(t, n.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"})
--}
--
--func TestRequiredCreateOpts(t *testing.T) {
--	res := Create(fake.ServiceClient(), CreateOpts{})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--		"port": {
--				"name": "new_port_name",
--				"fixed_ips": [
--            {
--                "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2",
--                "ip_address": "10.0.0.3"
--            }
--        ],
--				"security_groups": [
--            "f0ac4394-7e4a-4409-9701-ba8be283dbc3"
--        ]
--		}
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "port": {
--        "status": "DOWN",
--        "name": "new_port_name",
--        "admin_state_up": true,
--        "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
--        "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa",
--        "device_owner": "",
--        "mac_address": "fa:16:3e:c9:cb:f0",
--        "fixed_ips": [
--            {
--                "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2",
--                "ip_address": "10.0.0.3"
--            }
--        ],
--        "id": "65c0ee9f-d634-4522-8954-51021b570b0d",
--        "security_groups": [
--            "f0ac4394-7e4a-4409-9701-ba8be283dbc3"
--        ],
--        "device_id": ""
--    }
--}
--		`)
--	})
--
--	options := UpdateOpts{
--		Name: "new_port_name",
--		FixedIPs: []IP{
--			IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"},
--		},
--		SecurityGroups: []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"},
--	}
--
--	s, err := Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, s.Name, "new_port_name")
--	th.AssertDeepEquals(t, s.FixedIPs, []IP{
--		IP{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"},
--	})
--	th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"})
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go
-deleted file mode 100644
-index 2511ff5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/results.go
-+++ /dev/null
-@@ -1,126 +0,0 @@
--package ports
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a port resource.
--func (r commonResult) Extract() (*Port, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Port *Port `json:"port"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Port, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// IP is a sub-struct that represents an individual IP.
--type IP struct {
--	SubnetID  string `mapstructure:"subnet_id" json:"subnet_id"`
--	IPAddress string `mapstructure:"ip_address" json:"ip_address,omitempty"`
--}
--
--// Port represents a Neutron port. See package documentation for a top-level
--// description of what this is.
--type Port struct {
--	// UUID for the port.
--	ID string `mapstructure:"id" json:"id"`
--	// Network that this port is associated with.
--	NetworkID string `mapstructure:"network_id" json:"network_id"`
--	// Human-readable name for the port. Might not be unique.
--	Name string `mapstructure:"name" json:"name"`
--	// Administrative state of port. If false (down), port does not forward packets.
--	AdminStateUp bool `mapstructure:"admin_state_up" json:"admin_state_up"`
--	// Indicates whether network is currently operational. Possible values include
--	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional values.
--	Status string `mapstructure:"status" json:"status"`
--	// Mac address to use on this port.
--	MACAddress string `mapstructure:"mac_address" json:"mac_address"`
--	// Specifies IP addresses for the port thus associating the port itself with
--	// the subnets where the IP addresses are picked from
--	FixedIPs []IP `mapstructure:"fixed_ips" json:"fixed_ips"`
--	// Owner of network. Only admin users can specify a tenant_id other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--	// Identifies the entity (e.g.: dhcp agent) using this port.
--	DeviceOwner string `mapstructure:"device_owner" json:"device_owner"`
--	// Specifies the IDs of any security groups associated with a port.
--	SecurityGroups []string `mapstructure:"security_groups" json:"security_groups"`
--	// Identifies the device (e.g., virtual server) using this port.
--	DeviceID string `mapstructure:"device_id" json:"device_id"`
--}
--
--// PortPage is the page returned by a pager when traversing over a collection
--// of network ports.
--type PortPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of ports has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p PortPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"ports_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a PortPage struct is empty.
--func (p PortPage) IsEmpty() (bool, error) {
--	is, err := ExtractPorts(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractPorts accepts a Page struct, specifically a PortPage struct,
--// and extracts the elements into a slice of Port structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractPorts(page pagination.Page) ([]Port, error) {
--	var resp struct {
--		Ports []Port `mapstructure:"ports" json:"ports"`
--	}
--
--	err := mapstructure.Decode(page.(PortPage).Body, &resp)
--
--	return resp.Ports, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go
-deleted file mode 100644
-index 6d0572f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls.go
-+++ /dev/null
-@@ -1,31 +0,0 @@
--package ports
--
--import "github.com/rackspace/gophercloud"
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("ports", id)
--}
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("ports")
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func updateURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go
-deleted file mode 100644
-index 7fadd4d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/urls_test.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--package ports
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"}
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "v2.0/ports"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/ports/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "v2.0/ports"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/ports/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/ports/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go
-deleted file mode 100644
-index 43e8296..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/doc.go
-+++ /dev/null
-@@ -1,10 +0,0 @@
--// Package subnets contains functionality for working with Neutron subnet
--// resources. A subnet represents an IP address block that can be used to
--// assign IP addresses to virtual instances. Each subnet must have a CIDR and
--// must be associated with a network. IPs can either be selected from the whole
--// subnet CIDR or from allocation pools specified by the user.
--//
--// A subnet can also have a gateway, a list of DNS name servers, and host routes.
--// This information is pushed to instances whose interfaces are associated with
--// the subnet.
--package subnets
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go
-deleted file mode 100644
-index 0db0a6e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/errors.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package subnets
--
--import "fmt"
--
--func err(str string) error {
--	return fmt.Errorf("%s", str)
--}
--
--var (
--	errNetworkIDRequired = err("A network ID is required")
--	errCIDRRequired      = err("A valid CIDR is required")
--	errInvalidIPType     = err("An IP type must either be 4 or 6")
--)
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go
-deleted file mode 100644
-index cd7c663..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go
-+++ /dev/null
-@@ -1,254 +0,0 @@
--package subnets
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// AdminState gives users a solid type to work with for create and update
--// operations. It is recommended that users use the `Up` and `Down` enums.
--type AdminState *bool
--
--// Convenience vars for AdminStateUp values.
--var (
--	iTrue  = true
--	iFalse = false
--
--	Up   AdminState = &iTrue
--	Down AdminState = &iFalse
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the
--// List request.
--type ListOptsBuilder interface {
--	ToSubnetListQuery() (string, error)
--}
--
--// ListOpts allows the filtering and sorting of paginated collections through
--// the API. Filtering is achieved by passing in struct field values that map to
--// the subnet attributes you want to see returned. SortKey allows you to sort
--// by a particular subnet attribute. SortDir sets the direction, and is either
--// `asc' or `desc'. Marker and Limit are used for pagination.
--type ListOpts struct {
--	Name       string `q:"name"`
--	EnableDHCP *bool  `q:"enable_dhcp"`
--	NetworkID  string `q:"network_id"`
--	TenantID   string `q:"tenant_id"`
--	IPVersion  int    `q:"ip_version"`
--	GatewayIP  string `q:"gateway_ip"`
--	CIDR       string `q:"cidr"`
--	ID         string `q:"id"`
--	Limit      int    `q:"limit"`
--	Marker     string `q:"marker"`
--	SortKey    string `q:"sort_key"`
--	SortDir    string `q:"sort_dir"`
--}
--
--// ToSubnetListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToSubnetListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// List returns a Pager which allows you to iterate over a collection of
--// subnets. It accepts a ListOpts struct, which allows you to filter and sort
--// the returned collection for greater efficiency.
--//
--// Default policy settings return only those subnets that are owned by the tenant
--// who submits the request, unless the request is submitted by a user with
--// administrative rights.
--func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	url := listURL(c)
--	if opts != nil {
--		query, err := opts.ToSubnetListQuery()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--	}
--
--	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
--		return SubnetPage{pagination.LinkedPageBase{PageResult: r}}
--	})
--}
--
--// Get retrieves a specific subnet based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// Valid IP types
--const (
--	IPv4 = 4
--	IPv6 = 6
--)
--
--// CreateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Create operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type CreateOptsBuilder interface {
--	ToSubnetCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts represents the attributes used when creating a new subnet.
--type CreateOpts struct {
--	// Required
--	NetworkID string
--	CIDR      string
--	// Optional
--	Name            string
--	TenantID        string
--	AllocationPools []AllocationPool
--	GatewayIP       string
--	IPVersion       int
--	EnableDHCP      *bool
--	DNSNameservers  []string
--	HostRoutes      []HostRoute
--}
--
--// ToSubnetCreateMap casts a CreateOpts struct to a map.
--func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) {
--	s := make(map[string]interface{})
--
--	if opts.NetworkID == "" {
--		return nil, errNetworkIDRequired
--	}
--	if opts.CIDR == "" {
--		return nil, errCIDRRequired
--	}
--	if opts.IPVersion != 0 && opts.IPVersion != IPv4 && opts.IPVersion != IPv6 {
--		return nil, errInvalidIPType
--	}
--
--	s["network_id"] = opts.NetworkID
--	s["cidr"] = opts.CIDR
--
--	if opts.EnableDHCP != nil {
--		s["enable_dhcp"] = &opts.EnableDHCP
--	}
--	if opts.Name != "" {
--		s["name"] = opts.Name
--	}
--	if opts.GatewayIP != "" {
--		s["gateway_ip"] = opts.GatewayIP
--	}
--	if opts.TenantID != "" {
--		s["tenant_id"] = opts.TenantID
--	}
--	if opts.IPVersion != 0 {
--		s["ip_version"] = opts.IPVersion
--	}
--	if len(opts.AllocationPools) != 0 {
--		s["allocation_pools"] = opts.AllocationPools
--	}
--	if len(opts.DNSNameservers) != 0 {
--		s["dns_nameservers"] = opts.DNSNameservers
--	}
--	if len(opts.HostRoutes) != 0 {
--		s["host_routes"] = opts.HostRoutes
--	}
--
--	return map[string]interface{}{"subnet": s}, nil
--}
--
--// Create accepts a CreateOpts struct and creates a new subnet using the values
--// provided. You must remember to provide a valid NetworkID, CIDR and IP version.
--func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToSubnetCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("POST", createURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{201},
--	})
--
--	return res
--}
--
--// UpdateOptsBuilder allows extensions to add additional parameters to the
--// Update request.
--type UpdateOptsBuilder interface {
--	ToSubnetUpdateMap() (map[string]interface{}, error)
--}
--
--// UpdateOpts represents the attributes used when updating an existing subnet.
--type UpdateOpts struct {
--	Name           string
--	GatewayIP      string
--	DNSNameservers []string
--	HostRoutes     []HostRoute
--	EnableDHCP     *bool
--}
--
--// ToSubnetUpdateMap casts an UpdateOpts struct to a map.
--func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) {
--	s := make(map[string]interface{})
--
--	if opts.EnableDHCP != nil {
--		s["enable_dhcp"] = &opts.EnableDHCP
--	}
--	if opts.Name != "" {
--		s["name"] = opts.Name
--	}
--	if opts.GatewayIP != "" {
--		s["gateway_ip"] = opts.GatewayIP
--	}
--	if len(opts.DNSNameservers) != 0 {
--		s["dns_nameservers"] = opts.DNSNameservers
--	}
--	if len(opts.HostRoutes) != 0 {
--		s["host_routes"] = opts.HostRoutes
--	}
--
--	return map[string]interface{}{"subnet": s}, nil
--}
--
--// Update accepts a UpdateOpts struct and updates an existing subnet using the
--// values provided.
--func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--
--	reqBody, err := opts.ToSubnetUpdateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	_, res.Err = perigee.Request("PUT", updateURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201},
--	})
--
--	return res
--}
--
--// Delete accepts a unique ID and deletes the subnet associated with it.
--func Delete(c *gophercloud.ServiceClient, id string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go
-deleted file mode 100644
-index 987064a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests_test.go
-+++ /dev/null
-@@ -1,362 +0,0 @@
--package subnets
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	fake "github.com/rackspace/gophercloud/openstack/networking/v2/common"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "subnets": [
--        {
--            "name": "private-subnet",
--            "enable_dhcp": true,
--            "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--            "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
--            "dns_nameservers": [],
--            "allocation_pools": [
--                {
--                    "start": "10.0.0.2",
--                    "end": "10.0.0.254"
--                }
--            ],
--            "host_routes": [],
--            "ip_version": 4,
--            "gateway_ip": "10.0.0.1",
--            "cidr": "10.0.0.0/24",
--            "id": "08eae331-0402-425a-923c-34f7cfe39c1b"
--        },
--        {
--            "name": "my_subnet",
--            "enable_dhcp": true,
--            "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--            "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--            "dns_nameservers": [],
--            "allocation_pools": [
--                {
--                    "start": "192.0.0.2",
--                    "end": "192.255.255.254"
--                }
--            ],
--            "host_routes": [],
--            "ip_version": 4,
--            "gateway_ip": "192.0.0.1",
--            "cidr": "192.0.0.0/8",
--            "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--        }
--    ]
--}
--      `)
--	})
--
--	count := 0
--
--	List(fake.ServiceClient(), ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractSubnets(page)
--		if err != nil {
--			t.Errorf("Failed to extract subnets: %v", err)
--			return false, nil
--		}
--
--		expected := []Subnet{
--			Subnet{
--				Name:           "private-subnet",
--				EnableDHCP:     true,
--				NetworkID:      "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--				TenantID:       "26a7980765d0414dbc1fc1f88cdb7e6e",
--				DNSNameservers: []string{},
--				AllocationPools: []AllocationPool{
--					AllocationPool{
--						Start: "10.0.0.2",
--						End:   "10.0.0.254",
--					},
--				},
--				HostRoutes: []HostRoute{},
--				IPVersion:  4,
--				GatewayIP:  "10.0.0.1",
--				CIDR:       "10.0.0.0/24",
--				ID:         "08eae331-0402-425a-923c-34f7cfe39c1b",
--			},
--			Subnet{
--				Name:           "my_subnet",
--				EnableDHCP:     true,
--				NetworkID:      "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--				TenantID:       "4fd44f30292945e481c7b8a0c8908869",
--				DNSNameservers: []string{},
--				AllocationPools: []AllocationPool{
--					AllocationPool{
--						Start: "192.0.0.2",
--						End:   "192.255.255.254",
--					},
--				},
--				HostRoutes: []HostRoute{},
--				IPVersion:  4,
--				GatewayIP:  "192.0.0.1",
--				CIDR:       "192.0.0.0/8",
--				ID:         "54d6f61d-db07-451c-9ab3-b9609b6b6f0b",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	if count != 1 {
--		t.Errorf("Expected 1 page, got %d", count)
--	}
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/subnets/54d6f61d-db07-451c-9ab3-b9609b6b6f0b", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "subnet": {
--        "name": "my_subnet",
--        "enable_dhcp": true,
--        "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "dns_nameservers": [],
--        "allocation_pools": [
--            {
--                "start": "192.0.0.2",
--                "end": "192.255.255.254"
--            }
--        ],
--        "host_routes": [],
--        "ip_version": 4,
--        "gateway_ip": "192.0.0.1",
--        "cidr": "192.0.0.0/8",
--        "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
--    }
--}
--			`)
--	})
--
--	s, err := Get(fake.ServiceClient(), "54d6f61d-db07-451c-9ab3-b9609b6b6f0b").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, s.Name, "my_subnet")
--	th.AssertEquals(t, s.EnableDHCP, true)
--	th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869")
--	th.AssertDeepEquals(t, s.DNSNameservers, []string{})
--	th.AssertDeepEquals(t, s.AllocationPools, []AllocationPool{
--		AllocationPool{
--			Start: "192.0.0.2",
--			End:   "192.255.255.254",
--		},
--	})
--	th.AssertDeepEquals(t, s.HostRoutes, []HostRoute{})
--	th.AssertEquals(t, s.IPVersion, 4)
--	th.AssertEquals(t, s.GatewayIP, "192.0.0.1")
--	th.AssertEquals(t, s.CIDR, "192.0.0.0/8")
--	th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0b")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "subnet": {
--        "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "ip_version": 4,
--        "cidr": "192.168.199.0/24",
--				"dns_nameservers": ["foo"],
--				"allocation_pools": [
--						{
--								"start": "192.168.199.2",
--								"end": "192.168.199.254"
--						}
--				],
--				"host_routes": [{"destination":"","nexthop": "bar"}]
--    }
--}
--			`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "subnet": {
--        "name": "",
--        "enable_dhcp": true,
--        "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--        "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
--        "dns_nameservers": [],
--        "allocation_pools": [
--            {
--                "start": "192.168.199.2",
--                "end": "192.168.199.254"
--            }
--        ],
--        "host_routes": [],
--        "ip_version": 4,
--        "gateway_ip": "192.168.199.1",
--        "cidr": "192.168.199.0/24",
--        "id": "3b80198d-4f7b-4f77-9ef5-774d54e17126"
--    }
--}
--		`)
--	})
--
--	opts := CreateOpts{
--		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--		IPVersion: 4,
--		CIDR:      "192.168.199.0/24",
--		AllocationPools: []AllocationPool{
--			AllocationPool{
--				Start: "192.168.199.2",
--				End:   "192.168.199.254",
--			},
--		},
--		DNSNameservers: []string{"foo"},
--		HostRoutes: []HostRoute{
--			HostRoute{NextHop: "bar"},
--		},
--	}
--	s, err := Create(fake.ServiceClient(), opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, s.Name, "")
--	th.AssertEquals(t, s.EnableDHCP, true)
--	th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869")
--	th.AssertDeepEquals(t, s.DNSNameservers, []string{})
--	th.AssertDeepEquals(t, s.AllocationPools, []AllocationPool{
--		AllocationPool{
--			Start: "192.168.199.2",
--			End:   "192.168.199.254",
--		},
--	})
--	th.AssertDeepEquals(t, s.HostRoutes, []HostRoute{})
--	th.AssertEquals(t, s.IPVersion, 4)
--	th.AssertEquals(t, s.GatewayIP, "192.168.199.1")
--	th.AssertEquals(t, s.CIDR, "192.168.199.0/24")
--	th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126")
--}
--
--func TestRequiredCreateOpts(t *testing.T) {
--	res := Create(fake.ServiceClient(), CreateOpts{})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	res = Create(fake.ServiceClient(), CreateOpts{NetworkID: "foo"})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	res = Create(fake.ServiceClient(), CreateOpts{NetworkID: "foo", CIDR: "bar", IPVersion: 40})
--	if res.Err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "subnet": {
--        "name": "my_new_subnet",
--				"dns_nameservers": ["foo"],
--				"host_routes": [{"destination":"","nexthop": "bar"}]
--    }
--}
--		`)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "subnet": {
--        "name": "my_new_subnet",
--        "enable_dhcp": true,
--        "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--        "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
--        "dns_nameservers": [],
--        "allocation_pools": [
--            {
--                "start": "10.0.0.2",
--                "end": "10.0.0.254"
--            }
--        ],
--        "host_routes": [],
--        "ip_version": 4,
--        "gateway_ip": "10.0.0.1",
--        "cidr": "10.0.0.0/24",
--        "id": "08eae331-0402-425a-923c-34f7cfe39c1b"
--    }
--}
--	`)
--	})
--
--	opts := UpdateOpts{
--		Name:           "my_new_subnet",
--		DNSNameservers: []string{"foo"},
--		HostRoutes: []HostRoute{
--			HostRoute{NextHop: "bar"},
--		},
--	}
--	s, err := Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, s.Name, "my_new_subnet")
--	th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go
-deleted file mode 100644
-index 1910f17..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/results.go
-+++ /dev/null
-@@ -1,132 +0,0 @@
--package subnets
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a subnet resource.
--func (r commonResult) Extract() (*Subnet, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Subnet *Subnet `json:"subnet"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Subnet, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// AllocationPool represents a sub-range of cidr available for dynamic
--// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"}
--type AllocationPool struct {
--	Start string `json:"start"`
--	End   string `json:"end"`
--}
--
--// HostRoute represents a route that should be used by devices with IPs from
--// a subnet (not including local subnet route).
--type HostRoute struct {
--	DestinationCIDR string `json:"destination"`
--	NextHop         string `json:"nexthop"`
--}
--
--// Subnet represents a subnet. See package documentation for a top-level
--// description of what this is.
--type Subnet struct {
--	// UUID representing the subnet
--	ID string `mapstructure:"id" json:"id"`
--	// UUID of the parent network
--	NetworkID string `mapstructure:"network_id" json:"network_id"`
--	// Human-readable name for the subnet. Might not be unique.
--	Name string `mapstructure:"name" json:"name"`
--	// IP version, either `4' or `6'
--	IPVersion int `mapstructure:"ip_version" json:"ip_version"`
--	// CIDR representing IP range for this subnet, based on IP version
--	CIDR string `mapstructure:"cidr" json:"cidr"`
--	// Default gateway used by devices in this subnet
--	GatewayIP string `mapstructure:"gateway_ip" json:"gateway_ip"`
--	// DNS name servers used by hosts in this subnet.
--	DNSNameservers []string `mapstructure:"dns_nameservers" json:"dns_nameservers"`
--	// Sub-ranges of CIDR available for dynamic allocation to ports. See AllocationPool.
--	AllocationPools []AllocationPool `mapstructure:"allocation_pools" json:"allocation_pools"`
--	// Routes that should be used by devices with IPs from this subnet (not including local subnet route).
--	HostRoutes []HostRoute `mapstructure:"host_routes" json:"host_routes"`
--	// Specifies whether DHCP is enabled for this subnet or not.
--	EnableDHCP bool `mapstructure:"enable_dhcp" json:"enable_dhcp"`
--	// Owner of network. Only admin users can specify a tenant_id other than its own.
--	TenantID string `mapstructure:"tenant_id" json:"tenant_id"`
--}
--
--// SubnetPage is the page returned by a pager when traversing over a collection
--// of subnets.
--type SubnetPage struct {
--	pagination.LinkedPageBase
--}
--
--// NextPageURL is invoked when a paginated collection of subnets has reached
--// the end of a page and the pager seeks to traverse over a new one. In order
--// to do this, it needs to construct the next page's URL.
--func (p SubnetPage) NextPageURL() (string, error) {
--	type resp struct {
--		Links []gophercloud.Link `mapstructure:"subnets_links"`
--	}
--
--	var r resp
--	err := mapstructure.Decode(p.Body, &r)
--	if err != nil {
--		return "", err
--	}
--
--	return gophercloud.ExtractNextURL(r.Links)
--}
--
--// IsEmpty checks whether a SubnetPage struct is empty.
--func (p SubnetPage) IsEmpty() (bool, error) {
--	is, err := ExtractSubnets(p)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct,
--// and extracts the elements into a slice of Subnet structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractSubnets(page pagination.Page) ([]Subnet, error) {
--	var resp struct {
--		Subnets []Subnet `mapstructure:"subnets" json:"subnets"`
--	}
--
--	err := mapstructure.Decode(page.(SubnetPage).Body, &resp)
--
--	return resp.Subnets, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go
-deleted file mode 100644
-index 0d02368..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls.go
-+++ /dev/null
-@@ -1,31 +0,0 @@
--package subnets
--
--import "github.com/rackspace/gophercloud"
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("subnets", id)
--}
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("subnets")
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func updateURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go
-deleted file mode 100644
-index aeeddf3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/urls_test.go
-+++ /dev/null
-@@ -1,44 +0,0 @@
--package subnets
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint, ResourceBase: endpoint + "v2.0/"}
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "v2.0/subnets"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/subnets/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "v2.0/subnets"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/subnets/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "v2.0/subnets/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go
-deleted file mode 100644
-index f5f894a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/doc.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// Package accounts contains functionality for working with Object Storage
--// account resources. An account is the top-level resource the object storage
--// hierarchy: containers belong to accounts, objects belong to containers.
--//
--// Another way of thinking of an account is like a namespace for all your
--// resources. It is synonymous with a project or tenant in other OpenStack
--// services.
--package accounts
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go
-deleted file mode 100644
-index 3dad0c5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/fixtures.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--// +build fixtures
--
--package accounts
--
--import (
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// HandleGetAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that
--// responds with a `Get` response.
--func HandleGetAccountSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "X-Account-Meta-Gophercloud-Test", "accounts")
--
--		w.Header().Set("X-Account-Container-Count", "2")
--		w.Header().Set("X-Account-Bytes-Used", "14")
--		w.Header().Set("X-Account-Meta-Subject", "books")
--
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleUpdateAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that
--// responds with a `Update` response.
--func HandleUpdateAccountSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "HEAD")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.Header().Set("X-Account-Meta-Foo", "bar")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go
-deleted file mode 100644
-index e6f5f95..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go
-+++ /dev/null
-@@ -1,106 +0,0 @@
--package accounts
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// GetOptsBuilder allows extensions to add additional headers to the Get
--// request.
--type GetOptsBuilder interface {
--	ToAccountGetMap() (map[string]string, error)
--}
--
--// GetOpts is a structure that contains parameters for getting an account's
--// metadata.
--type GetOpts struct {
--	Newest bool `h:"X-Newest"`
--}
--
--// ToAccountGetMap formats a GetOpts into a map[string]string of headers.
--func (opts GetOpts) ToAccountGetMap() (map[string]string, error) {
--	return gophercloud.BuildHeaders(opts)
--}
--
--// Get is a function that retrieves an account's metadata. To extract just the
--// custom metadata, call the ExtractMetadata method on the GetResult. To extract
--// all the headers that are returned (including the metadata), call the
--// ExtractHeader method on the GetResult.
--func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) GetResult {
--	var res GetResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToAccountGetMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	resp, err := perigee.Request("HEAD", getURL(c), perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// UpdateOptsBuilder allows extensions to add additional headers to the Update
--// request.
--type UpdateOptsBuilder interface {
--	ToAccountUpdateMap() (map[string]string, error)
--}
--
--// UpdateOpts is a structure that contains parameters for updating, creating, or
--// deleting an account's metadata.
--type UpdateOpts struct {
--	Metadata          map[string]string
--	ContentType       string `h:"Content-Type"`
--	DetectContentType bool   `h:"X-Detect-Content-Type"`
--	TempURLKey        string `h:"X-Account-Meta-Temp-URL-Key"`
--	TempURLKey2       string `h:"X-Account-Meta-Temp-URL-Key-2"`
--}
--
--// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers.
--func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) {
--	headers, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		headers["X-Account-Meta-"+k] = v
--	}
--	return headers, err
--}
--
--// Update is a function that creates, updates, or deletes an account's metadata.
--// To extract the headers returned, call the Extract method on the UpdateResult.
--func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToAccountUpdateMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	resp, err := perigee.Request("POST", updateURL(c), perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
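
For orientation, a minimal sketch of how the accounts calls defined above fit together: Get issues a HEAD against the account endpoint and ExtractMetadata (defined in results.go further down) pulls the X-Account-Meta-* headers out of the result, while Update POSTs new metadata headers. The package and helper names and the metadata value are illustrative only; the *gophercloud.ServiceClient is assumed to have been built and authenticated elsewhere in the removed library, and error handling is abbreviated.

    package example

    import (
        "fmt"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
    )

    // readAndTagAccount reads the account's custom metadata with Get and then
    // sets one key with Update, mirroring the two functions defined above.
    func readAndTagAccount(client *gophercloud.ServiceClient) error {
        meta, err := accounts.Get(client, accounts.GetOpts{Newest: true}).ExtractMetadata()
        if err != nil {
            return err
        }
        fmt.Println("account metadata:", meta)

        res := accounts.Update(client, accounts.UpdateOpts{
            Metadata: map[string]string{"Subject": "books"},
        })
        return res.Err
    }
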
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go
-deleted file mode 100644
-index d6dc26b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests_test.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package accounts
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--var metadata = map[string]string{"gophercloud-test": "accounts"}
--
--func TestUpdateAccount(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleGetAccountSuccessfully(t)
--
--	options := &UpdateOpts{Metadata: map[string]string{"gophercloud-test": "accounts"}}
--	res := Update(fake.ServiceClient(), options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestGetAccount(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleUpdateAccountSuccessfully(t)
--
--	expected := map[string]string{"Foo": "bar"}
--	actual, err := Get(fake.ServiceClient(), &GetOpts{}).ExtractMetadata()
--	if err != nil {
--		t.Fatalf("Unable to get account metadata: %v", err)
--	}
--	th.CheckDeepEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go
-deleted file mode 100644
-index abae026..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/results.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package accounts
--
--import (
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--)
--
--// GetResult is returned from a call to the Get function.
--type GetResult struct {
--	gophercloud.HeaderResult
--}
--
--// ExtractMetadata is a function that takes a GetResult (of type *http.Response)
--// and returns the custom metadata associated with the account.
--func (gr GetResult) ExtractMetadata() (map[string]string, error) {
--	if gr.Err != nil {
--		return nil, gr.Err
--	}
--
--	metadata := make(map[string]string)
--	for k, v := range gr.Header {
--		if strings.HasPrefix(k, "X-Account-Meta-") {
--			key := strings.TrimPrefix(k, "X-Account-Meta-")
--			metadata[key] = v[0]
--		}
--	}
--	return metadata, nil
--}
--
--// UpdateResult is returned from a call to the Update function.
--type UpdateResult struct {
--	gophercloud.HeaderResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
-deleted file mode 100644
-index 9952fe4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package accounts
--
--import "github.com/rackspace/gophercloud"
--
--func getURL(c *gophercloud.ServiceClient) string {
--	return c.Endpoint
--}
--
--func updateURL(c *gophercloud.ServiceClient) string {
--	return getURL(c)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
-deleted file mode 100644
-index 074d52d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package accounts
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient())
--	expected := endpoint
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient())
--	expected := endpoint
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
-deleted file mode 100644
-index 5fed553..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/doc.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--// Package containers contains functionality for working with Object Storage
--// container resources. A container serves as a logical namespace for objects
--// that are placed inside it - an object with the same name in two different
--// containers represents two different objects.
--//
--// In addition to containing objects, you can also use the container to control
--// access to objects by using an access control list (ACL).
--package containers
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go
-deleted file mode 100644
-index 1c0a915..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/fixtures.go
-+++ /dev/null
-@@ -1,132 +0,0 @@
--// +build fixtures
--
--package containers
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// ExpectedListInfo is the result expected from a call to `List` when full
--// info is requested.
--var ExpectedListInfo = []Container{
--	Container{
--		Count: 0,
--		Bytes: 0,
--		Name:  "janeausten",
--	},
--	Container{
--		Count: 1,
--		Bytes: 14,
--		Name:  "marktwain",
--	},
--}
--
--// ExpectedListNames is the result expected from a call to `List` when just
--// container names are requested.
--var ExpectedListNames = []string{"janeausten", "marktwain"}
--
--// HandleListContainerInfoSuccessfully creates an HTTP handler at `/` on the test handler mux that
--// responds with a `List` response when full info is requested.
--func HandleListContainerInfoSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--
--		w.Header().Set("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, `[
--        {
--          "count": 0,
--          "bytes": 0,
--          "name": "janeausten"
--        },
--        {
--          "count": 1,
--          "bytes": 14,
--          "name": "marktwain"
--        }
--      ]`)
--		case "marktwain":
--			fmt.Fprintf(w, `[]`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--}
--
--// HandleListContainerNamesSuccessfully creates an HTTP handler at `/` on the test handler mux that
--// responds with a `ListNames` response when only container names are requested.
--func HandleListContainerNamesSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "text/plain")
--
--		w.Header().Set("Content-Type", "text/plain")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, "janeausten\nmarktwain\n")
--		case "marktwain":
--			fmt.Fprintf(w, ``)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--}
--
--// HandleCreateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with a `Create` response.
--func HandleCreateContainerSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--
--		w.Header().Add("X-Container-Meta-Foo", "bar")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleDeleteContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with a `Delete` response.
--func HandleDeleteContainerSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleUpdateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with an `Update` response.
--func HandleUpdateContainerSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleGetContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with a `Get` response.
--func HandleGetContainerSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "HEAD")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go
-deleted file mode 100644
-index 9f3b2af..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go
-+++ /dev/null
-@@ -1,204 +0,0 @@
--package containers
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the List
--// request.
--type ListOptsBuilder interface {
--	ToContainerListParams() (bool, string, error)
--}
--
--// ListOpts is a structure that holds options for listing containers.
--type ListOpts struct {
--	Full      bool
--	Limit     int    `q:"limit"`
--	Marker    string `q:"marker"`
--	EndMarker string `q:"end_marker"`
--	Format    string `q:"format"`
--	Prefix    string `q:"prefix"`
--	Delimiter string `q:"delimiter"`
--}
--
--// ToContainerListParams formats a ListOpts into a query string and boolean
--// representing whether to list complete information for each container.
--func (opts ListOpts) ToContainerListParams() (bool, string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return false, "", err
--	}
--	return opts.Full, q.String(), nil
--}
--
--// List is a function that retrieves containers associated with the account as
--// well as account metadata. It returns a pager which can be iterated with the
--// EachPage function.
--func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
--	headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"}
--
--	url := listURL(c)
--	if opts != nil {
--		full, query, err := opts.ToContainerListParams()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--
--		if full {
--			headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"}
--		}
--	}
--
--	createPage := func(r pagination.PageResult) pagination.Page {
--		p := ContainerPage{pagination.MarkerPageBase{PageResult: r}}
--		p.MarkerPageBase.Owner = p
--		return p
--	}
--
--	pager := pagination.NewPager(c, url, createPage)
--	pager.Headers = headers
--	return pager
--}
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToContainerCreateMap() (map[string]string, error)
--}
--
--// CreateOpts is a structure that holds parameters for creating a container.
--type CreateOpts struct {
--	Metadata          map[string]string
--	ContainerRead     string `h:"X-Container-Read"`
--	ContainerSyncTo   string `h:"X-Container-Sync-To"`
--	ContainerSyncKey  string `h:"X-Container-Sync-Key"`
--	ContainerWrite    string `h:"X-Container-Write"`
--	ContentType       string `h:"Content-Type"`
--	DetectContentType bool   `h:"X-Detect-Content-Type"`
--	IfNoneMatch       string `h:"If-None-Match"`
--	VersionsLocation  string `h:"X-Versions-Location"`
--}
--
--// ToContainerCreateMap formats a CreateOpts into a map of headers.
--func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Container-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Create is a function that creates a new container.
--func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToContainerCreateMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	resp, err := perigee.Request("PUT", createURL(c, containerName), perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{201, 202, 204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// Delete is a function that deletes a container.
--func Delete(c *gophercloud.ServiceClient, containerName string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, containerName), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{202, 204},
--	})
--	return res
--}
--
--// UpdateOptsBuilder allows extensions to add additional parameters to the
--// Update request.
--type UpdateOptsBuilder interface {
--	ToContainerUpdateMap() (map[string]string, error)
--}
--
--// UpdateOpts is a structure that holds parameters for updating, creating, or
--// deleting a container's metadata.
--type UpdateOpts struct {
--	Metadata               map[string]string
--	ContainerRead          string `h:"X-Container-Read"`
--	ContainerSyncTo        string `h:"X-Container-Sync-To"`
--	ContainerSyncKey       string `h:"X-Container-Sync-Key"`
--	ContainerWrite         string `h:"X-Container-Write"`
--	ContentType            string `h:"Content-Type"`
--	DetectContentType      bool   `h:"X-Detect-Content-Type"`
--	RemoveVersionsLocation string `h:"X-Remove-Versions-Location"`
--	VersionsLocation       string `h:"X-Versions-Location"`
--}
--
--// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
--func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Container-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Update is a function that creates, updates, or deletes a container's
--// metadata.
--func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToContainerUpdateMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	resp, err := perigee.Request("POST", updateURL(c, containerName), perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{202, 204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// Get is a function that retrieves the metadata of a container. To extract just
--// the custom metadata, pass the GetResult response to the ExtractMetadata
--// function.
--func Get(c *gophercloud.ServiceClient, containerName string) GetResult {
--	var res GetResult
--	resp, err := perigee.Request("HEAD", getURL(c, containerName), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{200, 204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
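
A similar sketch for the container calls defined above: List returns a pager whose pages are decoded with ExtractNames (defined in results.go further down), and Create PUTs a new container carrying metadata headers. The helper name, the container name "backups", and the metadata value are illustrative; the ServiceClient is again assumed to be authenticated already.

    package example

    import (
        "fmt"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
        "github.com/rackspace/gophercloud/pagination"
    )

    // listThenCreate pages through container names and then creates a new
    // container with one custom metadata key.
    func listThenCreate(client *gophercloud.ServiceClient) error {
        err := containers.List(client, &containers.ListOpts{Full: false}).EachPage(
            func(page pagination.Page) (bool, error) {
                names, err := containers.ExtractNames(page)
                if err != nil {
                    return false, err
                }
                fmt.Println("containers:", names)
                return true, nil // true = continue to the next page
            })
        if err != nil {
            return err
        }

        res := containers.Create(client, "backups", containers.CreateOpts{
            Metadata: map[string]string{"Foo": "bar"},
        })
        return res.Err
    }
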
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go
-deleted file mode 100644
-index d0ce7f1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests_test.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--package containers
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--var metadata = map[string]string{"gophercloud-test": "containers"}
--
--func TestListContainerInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListContainerInfoSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), &ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, ExpectedListInfo, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestListContainerNames(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListContainerNamesSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), &ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNames(page)
--		if err != nil {
--			t.Errorf("Failed to extract container names: %v", err)
--			return false, err
--		}
--
--		th.CheckDeepEquals(t, ExpectedListNames, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestCreateContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleCreateContainerSuccessfully(t)
--
--	options := CreateOpts{ContentType: "application/json", Metadata: map[string]string{"foo": "bar"}}
--	res := Create(fake.ServiceClient(), "testContainer", options)
--	th.CheckNoErr(t, res.Err)
--	th.CheckEquals(t, "bar", res.Header["X-Container-Meta-Foo"][0])
--}
--
--func TestDeleteContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleDeleteContainerSuccessfully(t)
--
--	res := Delete(fake.ServiceClient(), "testContainer")
--	th.CheckNoErr(t, res.Err)
--}
--
--func TestUpdateContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleUpdateContainerSuccessfully(t)
--
--	options := &UpdateOpts{Metadata: map[string]string{"foo": "bar"}}
--	res := Update(fake.ServiceClient(), "testContainer", options)
--	th.CheckNoErr(t, res.Err)
--}
--
--func TestGetContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleGetContainerSuccessfully(t)
--
--	_, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata()
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
-deleted file mode 100644
-index 74f3286..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/results.go
-+++ /dev/null
-@@ -1,139 +0,0 @@
--package containers
--
--import (
--	"fmt"
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Container represents a container resource.
--type Container struct {
--	// The total number of bytes stored in the container.
--	Bytes int `json:"bytes" mapstructure:"bytes"`
--
--	// The total number of objects stored in the container.
--	Count int `json:"count" mapstructure:"count"`
--
--	// The name of the container.
--	Name string `json:"name" mapstructure:"name"`
--}
--
--// ContainerPage is the page returned by a pager when traversing over a
--// collection of containers.
--type ContainerPage struct {
--	pagination.MarkerPageBase
--}
--
--// IsEmpty returns true if a ListResult contains no container names.
--func (r ContainerPage) IsEmpty() (bool, error) {
--	names, err := ExtractNames(r)
--	if err != nil {
--		return true, err
--	}
--	return len(names) == 0, nil
--}
--
--// LastMarker returns the last container name in a ListResult.
--func (r ContainerPage) LastMarker() (string, error) {
--	names, err := ExtractNames(r)
--	if err != nil {
--		return "", err
--	}
--	if len(names) == 0 {
--		return "", nil
--	}
--	return names[len(names)-1], nil
--}
--
--// ExtractInfo is a function that takes a ListResult and returns the containers' information.
--func ExtractInfo(page pagination.Page) ([]Container, error) {
--	untyped := page.(ContainerPage).Body.([]interface{})
--	results := make([]Container, len(untyped))
--	for index, each := range untyped {
--		container := each.(map[string]interface{})
--		err := mapstructure.Decode(container, &results[index])
--		if err != nil {
--			return results, err
--		}
--	}
--	return results, nil
--}
--
--// ExtractNames is a function that takes a ListResult and returns the containers' names.
--func ExtractNames(page pagination.Page) ([]string, error) {
--	casted := page.(ContainerPage)
--	ct := casted.Header.Get("Content-Type")
--
--	switch {
--	case strings.HasPrefix(ct, "application/json"):
--		parsed, err := ExtractInfo(page)
--		if err != nil {
--			return nil, err
--		}
--
--		names := make([]string, 0, len(parsed))
--		for _, container := range parsed {
--			names = append(names, container.Name)
--		}
--		return names, nil
--	case strings.HasPrefix(ct, "text/plain"):
--		names := make([]string, 0, 50)
--
--		body := string(page.(ContainerPage).Body.([]uint8))
--		for _, name := range strings.Split(body, "\n") {
--			if len(name) > 0 {
--				names = append(names, name)
--			}
--		}
--
--		return names, nil
--	default:
--		return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
--	}
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	gophercloud.HeaderResult
--}
--
--// ExtractMetadata is a function that takes a GetResult (of type *http.Response)
--// and returns the custom metadata associated with the container.
--func (gr GetResult) ExtractMetadata() (map[string]string, error) {
--	if gr.Err != nil {
--		return nil, gr.Err
--	}
--	metadata := make(map[string]string)
--	for k, v := range gr.Header {
--		if strings.HasPrefix(k, "X-Container-Meta-") {
--			key := strings.TrimPrefix(k, "X-Container-Meta-")
--			metadata[key] = v[0]
--		}
--	}
--	return metadata, nil
--}
--
--// CreateResult represents the result of a create operation. To extract the
--// headers from the HTTP response, you can invoke the 'ExtractHeader'
--// method on the result struct.
--type CreateResult struct {
--	gophercloud.HeaderResult
--}
--
--// UpdateResult represents the result of an update operation. To extract the
--// headers from the HTTP response, you can invoke the 'ExtractHeader'
--// method on the result struct.
--type UpdateResult struct {
--	gophercloud.HeaderResult
--}
--
--// DeleteResult represents the result of a delete operation. To extract the
--// headers from the HTTP response, you can invoke the 'ExtractHeader'
--// method on the result struct.
--type DeleteResult struct {
--	gophercloud.HeaderResult
--}
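
A companion sketch for the full-information path described above: setting Full to true makes List request JSON, and ExtractInfo decodes each page into the Container structs defined in this file. The helper name and the printed format are illustrative only.

    package example

    import (
        "fmt"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
        "github.com/rackspace/gophercloud/pagination"
    )

    // listContainerDetails asks for full listings (Full: true switches the
    // Accept header to application/json) and decodes each page into the
    // Container structs defined above.
    func listContainerDetails(client *gophercloud.ServiceClient) error {
        return containers.List(client, &containers.ListOpts{Full: true}).EachPage(
            func(page pagination.Page) (bool, error) {
                infos, err := containers.ExtractInfo(page)
                if err != nil {
                    return false, err
                }
                for _, c := range infos {
                    fmt.Printf("%s: %d objects, %d bytes\n", c.Name, c.Count, c.Bytes)
                }
                return true, nil
            })
    }
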
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go
-deleted file mode 100644
-index f864f84..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls.go
-+++ /dev/null
-@@ -1,23 +0,0 @@
--package containers
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return c.Endpoint
--}
--
--func createURL(c *gophercloud.ServiceClient, container string) string {
--	return c.ServiceURL(container)
--}
--
--func getURL(c *gophercloud.ServiceClient, container string) string {
--	return createURL(c, container)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, container string) string {
--	return createURL(c, container)
--}
--
--func updateURL(c *gophercloud.ServiceClient, container string) string {
--	return createURL(c, container)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go
-deleted file mode 100644
-index d043a2a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/urls_test.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--package containers
--
--import (
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"testing"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go
-deleted file mode 100644
-index 30a9add..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/doc.go
-+++ /dev/null
-@@ -1,5 +0,0 @@
--// Package objects contains functionality for working with Object Storage
--// object resources. An object is a resource that represents and contains data
--// - such as documents, images, and so on. You can also store custom metadata
--// with an object.
--package objects
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go
-deleted file mode 100644
-index d951160..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/fixtures.go
-+++ /dev/null
-@@ -1,164 +0,0 @@
--// +build fixtures
--
--package objects
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--// HandleDownloadObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with a `Download` response.
--func HandleDownloadObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, "Successful download with Gophercloud")
--	})
--}
--
--// ExpectedListInfo is the result expected from a call to `List` when full
--// info is requested.
--var ExpectedListInfo = []Object{
--	Object{
--		Hash:         "451e372e48e0f6b1114fa0724aa79fa1",
--		LastModified: "2009-11-10 23:00:00 +0000 UTC",
--		Bytes:        14,
--		Name:         "goodbye",
--		ContentType:  "application/octet-stream",
--	},
--	Object{
--		Hash:         "451e372e48e0f6b1114fa0724aa79fa1",
--		LastModified: "2009-11-10 23:00:00 +0000 UTC",
--		Bytes:        14,
--		Name:         "hello",
--		ContentType:  "application/octet-stream",
--	},
--}
--
--// ExpectedListNames is the result expected from a call to `List` when just
--// object names are requested.
--var ExpectedListNames = []string{"hello", "goodbye"}
--
--// HandleListObjectsInfoSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with a `List` response when full info is requested.
--func HandleListObjectsInfoSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--
--		w.Header().Set("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, `[
--      {
--        "hash": "451e372e48e0f6b1114fa0724aa79fa1",
--        "last_modified": "2009-11-10 23:00:00 +0000 UTC",
--        "bytes": 14,
--        "name": "goodbye",
--        "content_type": "application/octet-stream"
--      },
--      {
--        "hash": "451e372e48e0f6b1114fa0724aa79fa1",
--        "last_modified": "2009-11-10 23:00:00 +0000 UTC",
--        "bytes": 14,
--        "name": "hello",
--        "content_type": "application/octet-stream"
--      }
--    ]`)
--		case "hello":
--			fmt.Fprintf(w, `[]`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--}
--
--// HandleListObjectNamesSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that
--// responds with a `List` response when only object names are requested.
--func HandleListObjectNamesSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "text/plain")
--
--		w.Header().Set("Content-Type", "text/plain")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, "hello\ngoodbye\n")
--		case "goodbye":
--			fmt.Fprintf(w, "")
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--}
--
--// HandleCreateObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with a `Create` response.
--func HandleCreateObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusCreated)
--	})
--}
--
--// HandleCopyObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with a `Copy` response.
--func HandleCopyObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "COPY")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestHeader(t, r, "Destination", "/newTestContainer/newTestObject")
--		w.WriteHeader(http.StatusCreated)
--	})
--}
--
--// HandleDeleteObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with a `Delete` response.
--func HandleDeleteObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
--
--// HandleUpdateObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with an `Update` response.
--func HandleUpdateObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestHeader(t, r, "X-Object-Meta-Gophercloud-Test", "objects")
--		w.WriteHeader(http.StatusAccepted)
--	})
--}
--
--// HandleGetObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that
--// responds with a `Get` response.
--func HandleGetObjectSuccessfully(t *testing.T) {
--	th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "HEAD")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--		w.Header().Add("X-Object-Meta-Gophercloud-Test", "objects")
--		w.WriteHeader(http.StatusNoContent)
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go
-deleted file mode 100644
-index 9778de3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go
-+++ /dev/null
-@@ -1,416 +0,0 @@
--package objects
--
--import (
--	"fmt"
--	"io"
--	"time"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOptsBuilder allows extensions to add additional parameters to the List
--// request.
--type ListOptsBuilder interface {
--	ToObjectListParams() (bool, string, error)
--}
--
--// ListOpts is a structure that holds parameters for listing objects.
--type ListOpts struct {
--	Full      bool
--	Limit     int    `q:"limit"`
--	Marker    string `q:"marker"`
--	EndMarker string `q:"end_marker"`
--	Format    string `q:"format"`
--	Prefix    string `q:"prefix"`
--	Delimiter string `q:"delimiter"`
--	Path      string `q:"path"`
--}
--
--// ToObjectListParams formats a ListOpts into a query string and boolean
--// representing whether to list complete information for each object.
--func (opts ListOpts) ToObjectListParams() (bool, string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return false, "", err
--	}
--	return opts.Full, q.String(), nil
--}
--
--// List is a function that retrieves all objects in a container. It also returns the details
--// for the container. To extract only the object information or names, pass the ListResult
--// response to the ExtractInfo or ExtractNames function, respectively.
--func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager {
--	headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"}
--
--	url := listURL(c, containerName)
--	if opts != nil {
--		full, query, err := opts.ToObjectListParams()
--		if err != nil {
--			return pagination.Pager{Err: err}
--		}
--		url += query
--
--		if full {
--			headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"}
--		}
--	}
--
--	createPage := func(r pagination.PageResult) pagination.Page {
--		p := ObjectPage{pagination.MarkerPageBase{PageResult: r}}
--		p.MarkerPageBase.Owner = p
--		return p
--	}
--
--	pager := pagination.NewPager(c, url, createPage)
--	pager.Headers = headers
--	return pager
--}
--
--// DownloadOptsBuilder allows extensions to add additional parameters to the
--// Download request.
--type DownloadOptsBuilder interface {
--	ToObjectDownloadParams() (map[string]string, string, error)
--}
--
--// DownloadOpts is a structure that holds parameters for downloading an object.
--type DownloadOpts struct {
--	IfMatch           string    `h:"If-Match"`
--	IfModifiedSince   time.Time `h:"If-Modified-Since"`
--	IfNoneMatch       string    `h:"If-None-Match"`
--	IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"`
--	Range             string    `h:"Range"`
--	Expires           string    `q:"expires"`
--	MultipartManifest string    `q:"multipart-manifest"`
--	Signature         string    `q:"signature"`
--}
--
--// ToObjectDownloadParams formats a DownloadOpts into a query string and map of
--// headers.
--func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return nil, "", err
--	}
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, q.String(), err
--	}
--	return h, q.String(), nil
--}
--
--// Download is a function that retrieves the content and metadata for an object.
--// To extract just the content, pass the DownloadResult response to the
--// ExtractContent function.
--func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) DownloadResult {
--	var res DownloadResult
--
--	url := downloadURL(c, containerName, objectName)
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, query, err := opts.ToObjectDownloadParams()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--
--		url += query
--	}
--
--	resp, err := perigee.Request("GET", url, perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{200},
--	})
--
--	res.Body = resp.HttpResponse.Body
--	res.Err = err
--	res.Header = resp.HttpResponse.Header
--
--	return res
--}
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToObjectCreateParams() (map[string]string, string, error)
--}
--
--// CreateOpts is a structure that holds parameters for creating an object.
--type CreateOpts struct {
--	Metadata           map[string]string
--	ContentDisposition string `h:"Content-Disposition"`
--	ContentEncoding    string `h:"Content-Encoding"`
--	ContentLength      int    `h:"Content-Length"`
--	ContentType        string `h:"Content-Type"`
--	CopyFrom           string `h:"X-Copy-From"`
--	DeleteAfter        int    `h:"X-Delete-After"`
--	DeleteAt           int    `h:"X-Delete-At"`
--	DetectContentType  string `h:"X-Detect-Content-Type"`
--	ETag               string `h:"ETag"`
--	IfNoneMatch        string `h:"If-None-Match"`
--	ObjectManifest     string `h:"X-Object-Manifest"`
--	TransferEncoding   string `h:"Transfer-Encoding"`
--	Expires            string `q:"expires"`
--	MultipartManifest  string `q:"multipart-manifest"`
--	Signature          string `q:"signature"`
--}
--
--// ToObjectCreateParams formats a CreateOpts into a query string and map of
--// headers.
--func (opts CreateOpts) ToObjectCreateParams() (map[string]string, string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return nil, "", err
--	}
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, q.String(), err
--	}
--
--	for k, v := range opts.Metadata {
--		h["X-Object-Meta-"+k] = v
--	}
--
--	return h, q.String(), nil
--}
--
--// Create is a function that creates a new object or replaces an existing object.
--func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.Reader, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	url := createURL(c, containerName, objectName)
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, query, err := opts.ToObjectCreateParams()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--
--		url += query
--	}
--
--	contentType := h["Content-Type"]
--
--	resp, err := perigee.Request("PUT", url, perigee.Options{
--		ContentType: contentType,
--		ReqBody:     content,
--		MoreHeaders: h,
--		OkCodes:     []int{201, 202},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// CopyOptsBuilder allows extensions to add additional parameters to the
--// Copy request.
--type CopyOptsBuilder interface {
--	ToObjectCopyMap() (map[string]string, error)
--}
--
--// CopyOpts is a structure that holds parameters for copying one object to
--// another.
--type CopyOpts struct {
--	Metadata           map[string]string
--	ContentDisposition string `h:"Content-Disposition"`
--	ContentEncoding    string `h:"Content-Encoding"`
--	ContentType        string `h:"Content-Type"`
--	Destination        string `h:"Destination,required"`
--}
--
--// ToObjectCopyMap formats a CopyOpts into a map of headers.
--func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) {
--	if opts.Destination == "" {
--		return nil, fmt.Errorf("Required CopyOpts field 'Destination' not set.")
--	}
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Object-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Copy is a function that copies one object to another.
--func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) CopyResult {
--	var res CopyResult
--	h := c.AuthenticatedHeaders()
--
--	headers, err := opts.ToObjectCopyMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	for k, v := range headers {
--		h[k] = v
--	}
--
--	url := copyURL(c, containerName, objectName)
--	resp, err := perigee.Request("COPY", url, perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{201},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// DeleteOptsBuilder allows extensions to add additional parameters to the
--// Delete request.
--type DeleteOptsBuilder interface {
--	ToObjectDeleteQuery() (string, error)
--}
--
--// DeleteOpts is a structure that holds parameters for deleting an object.
--type DeleteOpts struct {
--	MultipartManifest string `q:"multipart-manifest"`
--}
--
--// ToObjectDeleteQuery formats a DeleteOpts into a query string.
--func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// Delete is a function that deletes an object.
--func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) DeleteResult {
--	var res DeleteResult
--	url := deleteURL(c, containerName, objectName)
--
--	if opts != nil {
--		query, err := opts.ToObjectDeleteQuery()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--		url += query
--	}
--
--	resp, err := perigee.Request("DELETE", url, perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// GetOptsBuilder allows extensions to add additional parameters to the
--// Get request.
--type GetOptsBuilder interface {
--	ToObjectGetQuery() (string, error)
--}
--
--// GetOpts is a structure that holds parameters for getting an object's metadata.
--type GetOpts struct {
--	Expires   string `q:"expires"`
--	Signature string `q:"signature"`
--}
--
--// ToObjectGetQuery formats a GetOpts into a query string.
--func (opts GetOpts) ToObjectGetQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// Get is a function that retrieves the metadata of an object. To extract just the custom
--// metadata, pass the GetResult response to the ExtractMetadata function.
--func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) GetResult {
--	var res GetResult
--	url := getURL(c, containerName, objectName)
--
--	if opts != nil {
--		query, err := opts.ToObjectGetQuery()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--		url += query
--	}
--
--	resp, err := perigee.Request("HEAD", url, perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{200, 204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
--
--// UpdateOptsBuilder allows extensions to add additional parameters to the
--// Update request.
--type UpdateOptsBuilder interface {
--	ToObjectUpdateMap() (map[string]string, error)
--}
--
--// UpdateOpts is a structure that holds parameters for updating, creating, or deleting an
--// object's metadata.
--type UpdateOpts struct {
--	Metadata           map[string]string
--	ContentDisposition string `h:"Content-Disposition"`
--	ContentEncoding    string `h:"Content-Encoding"`
--	ContentType        string `h:"Content-Type"`
--	DeleteAfter        int    `h:"X-Delete-After"`
--	DeleteAt           int    `h:"X-Delete-At"`
--	DetectContentType  bool   `h:"X-Detect-Content-Type"`
--}
--
--// ToObjectUpdateMap formats a UpdateOpts into a map of headers.
--func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Object-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Update is a function that creates, updates, or deletes an object's metadata.
--func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToObjectUpdateMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	url := updateURL(c, containerName, objectName)
--	resp, err := perigee.Request("POST", url, perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{202},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
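
A minimal sketch of the object calls defined above: Create streams an io.Reader up with a PUT, and Download followed by ExtractContent (from results.go below) reads the object back. The container and object names match the test fixtures; the helper name, body text, and content type are illustrative, and the ServiceClient is assumed to be authenticated.

    package example

    import (
        "fmt"
        "strings"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
    )

    // uploadThenFetch PUTs a small object and immediately reads it back.
    func uploadThenFetch(client *gophercloud.ServiceClient) error {
        content := strings.NewReader("Did gyre and gimble in the wabe")
        res := objects.Create(client, "testContainer", "testObject", content, &objects.CreateOpts{
            ContentType: "text/plain",
        })
        if res.Err != nil {
            return res.Err
        }

        body, err := objects.Download(client, "testContainer", "testObject", nil).ExtractContent()
        if err != nil {
            return err
        }
        fmt.Println(string(body))
        return nil
    }
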
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go
-deleted file mode 100644
-index c3c28a7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests_test.go
-+++ /dev/null
-@@ -1,132 +0,0 @@
--package objects
--
--import (
--	"bytes"
--	"io"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestDownloadReader(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleDownloadObjectSuccessfully(t)
--
--	response := Download(fake.ServiceClient(), "testContainer", "testObject", nil)
--	defer response.Body.Close()
--
--	// Check reader
--	buf := bytes.NewBuffer(make([]byte, 0))
--	io.CopyN(buf, response.Body, 10)
--	th.CheckEquals(t, "Successful", string(buf.Bytes()))
--}
--
--func TestDownloadExtraction(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleDownloadObjectSuccessfully(t)
--
--	response := Download(fake.ServiceClient(), "testContainer", "testObject", nil)
--
--	// Check []byte extraction
--	bytes, err := response.ExtractContent()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, "Successful download with Gophercloud", string(bytes))
--}
--
--func TestListObjectInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListObjectsInfoSuccessfully(t)
--
--	count := 0
--	options := &ListOpts{Full: true}
--	err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, ExpectedListInfo, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestListObjectNames(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleListObjectNamesSuccessfully(t)
--
--	count := 0
--	options := &ListOpts{Full: false}
--	err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNames(page)
--		if err != nil {
--			t.Errorf("Failed to extract object names: %v", err)
--			return false, err
--		}
--
--		th.CheckDeepEquals(t, ExpectedListNames, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestCreateObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleCreateObjectSuccessfully(t)
--
--	content := bytes.NewBufferString("Did gyre and gimble in the wabe")
--	options := &CreateOpts{ContentType: "application/json"}
--	res := Create(fake.ServiceClient(), "testContainer", "testObject", content, options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestCopyObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleCopyObjectSuccessfully(t)
--
--	options := &CopyOpts{Destination: "/newTestContainer/newTestObject"}
--	res := Copy(fake.ServiceClient(), "testContainer", "testObject", options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestDeleteObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleDeleteObjectSuccessfully(t)
--
--	res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestUpdateObjectMetadata(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleUpdateObjectSuccessfully(t)
--
--	options := &UpdateOpts{Metadata: map[string]string{"Gophercloud-Test": "objects"}}
--	res := Update(fake.ServiceClient(), "testContainer", "testObject", options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestGetObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	HandleGetObjectSuccessfully(t)
--
--	expected := map[string]string{"Gophercloud-Test": "objects"}
--	actual, err := Get(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractMetadata()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
-deleted file mode 100644
-index 102d94c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/results.go
-+++ /dev/null
-@@ -1,162 +0,0 @@
--package objects
--
--import (
--	"fmt"
--	"io"
--	"io/ioutil"
--	"strings"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Object is a structure that holds information related to a storage object.
--type Object struct {
--	Bytes        int    `json:"bytes" mapstructure:"bytes"`
--	ContentType  string `json:"content_type" mapstructure:"content_type"`
--	Hash         string `json:"hash" mapstructure:"hash"`
--	LastModified string `json:"last_modified" mapstructure:"last_modified"`
--	Name         string `json:"name" mapstructure:"name"`
--}
--
--// ObjectPage is a single page of objects that is returned from a call to the
--// List function.
--type ObjectPage struct {
--	pagination.MarkerPageBase
--}
--
--// IsEmpty returns true if a ListResult contains no object names.
--func (r ObjectPage) IsEmpty() (bool, error) {
--	names, err := ExtractNames(r)
--	if err != nil {
--		return true, err
--	}
--	return len(names) == 0, nil
--}
--
--// LastMarker returns the last object name in a ListResult.
--func (r ObjectPage) LastMarker() (string, error) {
--	names, err := ExtractNames(r)
--	if err != nil {
--		return "", err
--	}
--	if len(names) == 0 {
--		return "", nil
--	}
--	return names[len(names)-1], nil
--}
--
--// ExtractInfo is a function that takes a page of objects and returns their full information.
--func ExtractInfo(page pagination.Page) ([]Object, error) {
--	untyped := page.(ObjectPage).Body.([]interface{})
--	results := make([]Object, len(untyped))
--	for index, each := range untyped {
--		object := each.(map[string]interface{})
--		err := mapstructure.Decode(object, &results[index])
--		if err != nil {
--			return results, err
--		}
--	}
--	return results, nil
--}
--
--// ExtractNames is a function that takes a page of objects and returns only their names.
--func ExtractNames(page pagination.Page) ([]string, error) {
--	casted := page.(ObjectPage)
--	ct := casted.Header.Get("Content-Type")
--	switch {
--	case strings.HasPrefix(ct, "application/json"):
--		parsed, err := ExtractInfo(page)
--		if err != nil {
--			return nil, err
--		}
--
--		names := make([]string, 0, len(parsed))
--		for _, object := range parsed {
--			names = append(names, object.Name)
--		}
--
--		return names, nil
--	case strings.HasPrefix(ct, "text/plain"):
--		names := make([]string, 0, 50)
--
--		body := string(page.(ObjectPage).Body.([]uint8))
--		for _, name := range strings.Split(body, "\n") {
--			if len(name) > 0 {
--				names = append(names, name)
--			}
--		}
--
--		return names, nil
--	case strings.HasPrefix(ct, "text/html"):
--		return []string{}, nil
--	default:
--		return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
--	}
--}
--
--// DownloadResult is a *http.Response that is returned from a call to the Download function.
--type DownloadResult struct {
--	gophercloud.HeaderResult
--	Body io.ReadCloser
--}
--
--// ExtractContent is a function that takes a DownloadResult's io.Reader body
--// and reads all available data into a slice of bytes. Please be aware that an
--// io.Reader is forward-only - meaning that it can only be read
--// once and not rewound. You can recreate a reader from the output of this
--// function by using bytes.NewReader(downloadBytes)
--func (dr DownloadResult) ExtractContent() ([]byte, error) {
--	if dr.Err != nil {
--		return nil, dr.Err
--	}
--	body, err := ioutil.ReadAll(dr.Body)
--	if err != nil {
--		return nil, err
--	}
--	dr.Body.Close()
--	return body, nil
--}
--
--// GetResult is a *http.Response that is returned from a call to the Get function.
--type GetResult struct {
--	gophercloud.HeaderResult
--}
--
--// ExtractMetadata is a function that takes a GetResult (of type *http.Response)
--// and returns the custom metadata associated with the object.
--func (gr GetResult) ExtractMetadata() (map[string]string, error) {
--	if gr.Err != nil {
--		return nil, gr.Err
--	}
--	metadata := make(map[string]string)
--	for k, v := range gr.Header {
--		if strings.HasPrefix(k, "X-Object-Meta-") {
--			key := strings.TrimPrefix(k, "X-Object-Meta-")
--			metadata[key] = v[0]
--		}
--	}
--	return metadata, nil
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	gophercloud.HeaderResult
--}
--
--// UpdateResult represents the result of an update operation.
--type UpdateResult struct {
--	gophercloud.HeaderResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.HeaderResult
--}
--
--// CopyResult represents the result of a copy operation.
--type CopyResult struct {
--	gophercloud.HeaderResult
--}
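
As background for the helpers being deleted above, the sketch below shows a page handler built only on the ExtractInfo helper and the Object fields defined in this file; the pager that would feed it (the package's List call) is outside this hunk, and the package name is arbitrary.

    package objectsexample

    import (
        "fmt"

        "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
        "github.com/rackspace/gophercloud/pagination"
    )

    // printObjects is a handler suitable for Pager.EachPage. It decodes each
    // page with ExtractInfo and prints the name, size, and content type of
    // every object on it, returning true so iteration continues.
    func printObjects(page pagination.Page) (bool, error) {
        objs, err := objects.ExtractInfo(page)
        if err != nil {
            return false, err
        }
        for _, o := range objs {
            fmt.Printf("%s\t%d bytes\t%s\n", o.Name, o.Bytes, o.ContentType)
        }
        return true, nil
    }
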
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go
-deleted file mode 100644
-index d2ec62c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package objects
--
--import (
--	"github.com/rackspace/gophercloud"
--)
--
--func listURL(c *gophercloud.ServiceClient, container string) string {
--	return c.ServiceURL(container)
--}
--
--func copyURL(c *gophercloud.ServiceClient, container, object string) string {
--	return c.ServiceURL(container, object)
--}
--
--func createURL(c *gophercloud.ServiceClient, container, object string) string {
--	return copyURL(c, container, object)
--}
--
--func getURL(c *gophercloud.ServiceClient, container, object string) string {
--	return copyURL(c, container, object)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, container, object string) string {
--	return copyURL(c, container, object)
--}
--
--func downloadURL(c *gophercloud.ServiceClient, container, object string) string {
--	return copyURL(c, container, object)
--}
--
--func updateURL(c *gophercloud.ServiceClient, container, object string) string {
--	return copyURL(c, container, object)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go
-deleted file mode 100644
-index 1dcfe35..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/urls_test.go
-+++ /dev/null
-@@ -1,56 +0,0 @@
--package objects
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestCopyURL(t *testing.T) {
--	actual := copyURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestDownloadURL(t *testing.T) {
--	actual := downloadURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo", "bar")
--	expected := endpoint + "foo/bar"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version.go
-deleted file mode 100644
-index a0d5b26..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--package utils
--
--import (
--	"fmt"
--	"strings"
--
--	"github.com/racker/perigee"
--)
--
--// Version is a supported API version, corresponding to a vN package within the appropriate service.
--type Version struct {
--	ID       string
--	Suffix   string
--	Priority int
--}
--
--var goodStatus = map[string]bool{
--	"current":   true,
--	"supported": true,
--	"stable":    true,
--}
--
--// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's
--// published versions.
--// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint.
--func ChooseVersion(identityBase string, identityEndpoint string, recognized []*Version) (*Version, string, error) {
--	type linkResp struct {
--		Href string `json:"href"`
--		Rel  string `json:"rel"`
--	}
--
--	type valueResp struct {
--		ID     string     `json:"id"`
--		Status string     `json:"status"`
--		Links  []linkResp `json:"links"`
--	}
--
--	type versionsResp struct {
--		Values []valueResp `json:"values"`
--	}
--
--	type response struct {
--		Versions versionsResp `json:"versions"`
--	}
--
--	normalize := func(endpoint string) string {
--		if !strings.HasSuffix(endpoint, "/") {
--			return endpoint + "/"
--		}
--		return endpoint
--	}
--	identityEndpoint = normalize(identityEndpoint)
--
--	// If a full endpoint is specified, check version suffixes for a match first.
--	for _, v := range recognized {
--		if strings.HasSuffix(identityEndpoint, v.Suffix) {
--			return v, identityEndpoint, nil
--		}
--	}
--
--	var resp response
--	_, err := perigee.Request("GET", identityBase, perigee.Options{
--		Results: &resp,
--		OkCodes: []int{200, 300},
--	})
--
--	if err != nil {
--		return nil, "", err
--	}
--
--	byID := make(map[string]*Version)
--	for _, version := range recognized {
--		byID[version.ID] = version
--	}
--
--	var highest *Version
--	var endpoint string
--
--	for _, value := range resp.Versions.Values {
--		href := ""
--		for _, link := range value.Links {
--			if link.Rel == "self" {
--				href = normalize(link.Href)
--			}
--		}
--
--		if matching, ok := byID[value.ID]; ok {
--			// Prefer a version that exactly matches the provided endpoint.
--			if href == identityEndpoint {
--				if href == "" {
--					return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, identityBase)
--				}
--				return matching, href, nil
--			}
--
--			// Otherwise, find the highest-priority version with a whitelisted status.
--			if goodStatus[strings.ToLower(value.Status)] {
--				if highest == nil || matching.Priority > highest.Priority {
--					highest = matching
--					endpoint = href
--				}
--			}
--		}
--	}
--
--	if highest == nil {
--		return nil, "", fmt.Errorf("No supported version available from endpoint %s", identityBase)
--	}
--	if endpoint == "" {
--		return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, identityBase)
--	}
--
--	return highest, endpoint, nil
--}
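
A rough sketch of how the ChooseVersion helper above was typically called, mirroring the test that follows but with a hypothetical identity URL in place of the test server; the priority numbers are arbitrary.

    package main

    import (
        "fmt"
        "log"

        "github.com/rackspace/gophercloud/openstack/utils"
    )

    func main() {
        // Higher Priority wins among versions the endpoint reports as
        // "current", "supported", or "stable".
        v2 := &utils.Version{ID: "v2.0", Priority: 20, Suffix: "/v2.0/"}
        v3 := &utils.Version{ID: "v3.0", Priority: 30, Suffix: "/v3.0/"}

        // The base URL is hypothetical; an empty identityEndpoint means no
        // specific version was pinned, so the endpoint is queried.
        chosen, endpoint, err := utils.ChooseVersion("http://identity.example.com:5000/", "", []*utils.Version{v2, v3})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("using identity %s at %s\n", chosen.ID, endpoint)
    }
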
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go
-deleted file mode 100644
-index 9552696..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/utils/choose_version_test.go
-+++ /dev/null
-@@ -1,105 +0,0 @@
--package utils
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--func setupVersionHandler() {
--	testhelper.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintf(w, `
--			{
--				"versions": {
--					"values": [
--						{
--							"status": "stable",
--							"id": "v3.0",
--							"links": [
--								{ "href": "%s/v3.0", "rel": "self" }
--							]
--						},
--						{
--							"status": "stable",
--							"id": "v2.0",
--							"links": [
--								{ "href": "%s/v2.0", "rel": "self" }
--							]
--						}
--					]
--				}
--			}
--		`, testhelper.Server.URL, testhelper.Server.URL)
--	})
--}
--
--func TestChooseVersion(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	setupVersionHandler()
--
--	v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "blarg"}
--	v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "hargl"}
--
--	v, endpoint, err := ChooseVersion(testhelper.Endpoint(), "", []*Version{v2, v3})
--
--	if err != nil {
--		t.Fatalf("Unexpected error from ChooseVersion: %v", err)
--	}
--
--	if v != v3 {
--		t.Errorf("Expected %#v to win, but %#v did instead", v3, v)
--	}
--
--	expected := testhelper.Endpoint() + "v3.0/"
--	if endpoint != expected {
--		t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint)
--	}
--}
--
--func TestChooseVersionOpinionatedLink(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--	setupVersionHandler()
--
--	v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "nope"}
--	v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "northis"}
--
--	v, endpoint, err := ChooseVersion(testhelper.Endpoint(), testhelper.Endpoint()+"v2.0/", []*Version{v2, v3})
--	if err != nil {
--		t.Fatalf("Unexpected error from ChooseVersion: %v", err)
--	}
--
--	if v != v2 {
--		t.Errorf("Expected %#v to win, but %#v did instead", v2, v)
--	}
--
--	expected := testhelper.Endpoint() + "v2.0/"
--	if endpoint != expected {
--		t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint)
--	}
--}
--
--func TestChooseVersionFromSuffix(t *testing.T) {
--	testhelper.SetupHTTP()
--	defer testhelper.TeardownHTTP()
--
--	v2 := &Version{ID: "v2.0", Priority: 2, Suffix: "/v2.0/"}
--	v3 := &Version{ID: "v3.0", Priority: 3, Suffix: "/v3.0/"}
--
--	v, endpoint, err := ChooseVersion(testhelper.Endpoint(), testhelper.Endpoint()+"v2.0/", []*Version{v2, v3})
--	if err != nil {
--		t.Fatalf("Unexpected error from ChooseVersion: %v", err)
--	}
--
--	if v != v2 {
--		t.Errorf("Expected %#v to win, but %#v did instead", v2, v)
--	}
--
--	expected := testhelper.Endpoint() + "v2.0/"
--	if endpoint != expected {
--		t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/package.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/package.go
-deleted file mode 100644
-index e8c2e82..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/package.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--/*
--Package gophercloud provides a multi-vendor interface to OpenStack-compatible
--clouds. The library has a three-level hierarchy: providers, services, and
--resources.
--
--Provider structs represent the service providers that offer and manage a
--collection of services. Examples of providers include: OpenStack, Rackspace,
--HP. These are defined like so:
--
--  opts := gophercloud.AuthOptions{
--    IdentityEndpoint: "https://my-openstack.com:5000/v2.0",
--    Username: "{username}",
--    Password: "{password}",
--    TenantID: "{tenant_id}",
--  }
--
--  provider, err := openstack.AuthenticatedClient(opts)
--
--Service structs are specific to a provider and handle all of the logic and
--operations for a particular OpenStack service. Examples of services include:
--Compute, Object Storage, Block Storage. In order to define one, you need to
--pass in the parent provider, like so:
--
--  opts := gophercloud.EndpointOpts{Region: "RegionOne"}
--
--  client := openstack.NewComputeV2(provider, opts)
--
--Resource structs are the domain models that services make use of in order
--to work with and represent the state of API resources:
--
--  server, err := servers.Get(client, "{serverId}").Extract()
--
--Another convention is to return Result structs for API operations, which allow
--you to access the HTTP headers, response body, and associated errors with the
--network transaction. To get a resource struct, you then call the Extract
--method which is chained to the response.
--*/
--package gophercloud
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/http.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/http.go
-deleted file mode 100644
-index 1e108c8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/http.go
-+++ /dev/null
-@@ -1,64 +0,0 @@
--package pagination
--
--import (
--	"encoding/json"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"strings"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// PageResult stores the HTTP response that returned the current page of results.
--type PageResult struct {
--	gophercloud.Result
--	url.URL
--}
--
--// PageResultFrom parses an HTTP response and returns a PageResult containing the
--// results, interpreting the body as JSON when the Content-Type header indicates JSON.
--func PageResultFrom(resp http.Response) (PageResult, error) {
--	var parsedBody interface{}
--
--	defer resp.Body.Close()
--	rawBody, err := ioutil.ReadAll(resp.Body)
--	if err != nil {
--		return PageResult{}, err
--	}
--
--	if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") {
--		err = json.Unmarshal(rawBody, &parsedBody)
--		if err != nil {
--			return PageResult{}, err
--		}
--	} else {
--		parsedBody = rawBody
--	}
--
--	return PageResult{
--		Result: gophercloud.Result{
--			Body:   parsedBody,
--			Header: resp.Header,
--		},
--		URL: *resp.Request.URL,
--	}, err
--}
--
--// Request performs a Perigee request and extracts the http.Response from the result.
--func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (http.Response, error) {
--	h := client.AuthenticatedHeaders()
--	for key, value := range headers {
--		h[key] = value
--	}
--
--	resp, err := perigee.Request("GET", url, perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{200, 204},
--	})
--	if err != nil {
--		return http.Response{}, err
--	}
--	return resp.HttpResponse, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked.go
-deleted file mode 100644
-index 461fa49..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked.go
-+++ /dev/null
-@@ -1,61 +0,0 @@
--package pagination
--
--import "fmt"
--
--// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result.
--type LinkedPageBase struct {
--	PageResult
--
--	// LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer.
--	// If any link along the path is missing, an empty URL will be returned.
--	// If any link results in an unexpected value type, an error will be returned.
--	// When left as "nil", []string{"links", "next"} will be used as a default.
--	LinkPath []string
--}
--
--// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present.
--// It assumes that the links are available in a "links" element of the top-level response object.
--// If this is not the case, override NextPageURL on your result type.
--func (current LinkedPageBase) NextPageURL() (string, error) {
--	var path []string
--	var key string
--
--	if current.LinkPath == nil {
--		path = []string{"links", "next"}
--	} else {
--		path = current.LinkPath
--	}
--
--	submap, ok := current.Body.(map[string]interface{})
--	if !ok {
--		return "", fmt.Errorf("Expected an object, but was %#v", current.Body)
--	}
--
--	for {
--		key, path = path[0], path[1:len(path)]
--
--		value, ok := submap[key]
--		if !ok {
--			return "", nil
--		}
--
--		if len(path) > 0 {
--			submap, ok = value.(map[string]interface{})
--			if !ok {
--				return "", fmt.Errorf("Expected an object, but was %#v", value)
--			}
--		} else {
--			if value == nil {
--				// Actual null element.
--				return "", nil
--			}
--
--			url, ok := value.(string)
--			if !ok {
--				return "", fmt.Errorf("Expected a string, but was %#v", value)
--			}
--
--			return url, nil
--		}
--	}
--}
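
A minimal sketch of the LinkPath override described above, assuming a hypothetical API whose next link lives under pagination -> next rather than the default links -> next; the widget names are invented for illustration.

    package linkedexample

    import "github.com/rackspace/gophercloud/pagination"

    // widgetPage is a hypothetical result type whose API nests its "next"
    // link under pagination -> next, so it sets LinkPath when embedding
    // LinkedPageBase.
    type widgetPage struct {
        pagination.LinkedPageBase
    }

    // IsEmpty checks the hypothetical "widgets" array in the decoded body.
    func (p widgetPage) IsEmpty() (bool, error) {
        body, ok := p.Body.(map[string]interface{})
        if !ok {
            return true, nil
        }
        widgets, _ := body["widgets"].([]interface{})
        return len(widgets) == 0, nil
    }

    // newWidgetPage is the createPage function that would be handed to NewPager.
    func newWidgetPage(r pagination.PageResult) pagination.Page {
        return widgetPage{pagination.LinkedPageBase{
            PageResult: r,
            LinkPath:   []string{"pagination", "next"},
        }}
    }
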
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked_test.go
-deleted file mode 100644
-index 4d3248e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/linked_test.go
-+++ /dev/null
-@@ -1,107 +0,0 @@
--package pagination
--
--import (
--	"fmt"
--	"net/http"
--	"reflect"
--	"testing"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--// LinkedPager sample and test cases.
--
--type LinkedPageResult struct {
--	LinkedPageBase
--}
--
--func (r LinkedPageResult) IsEmpty() (bool, error) {
--	is, err := ExtractLinkedInts(r)
--	if err != nil {
--		return true, nil
--	}
--	return len(is) == 0, nil
--}
--
--func ExtractLinkedInts(page Page) ([]int, error) {
--	var response struct {
--		Ints []int `mapstructure:"ints"`
--	}
--
--	err := mapstructure.Decode(page.(LinkedPageResult).Body, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	return response.Ints, nil
--}
--
--func createLinked(t *testing.T) Pager {
--	testhelper.SetupHTTP()
--
--	testhelper.Mux.HandleFunc("/page1", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `{ "ints": [1, 2, 3], "links": { "next": "%s/page2" } }`, testhelper.Server.URL)
--	})
--
--	testhelper.Mux.HandleFunc("/page2", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `{ "ints": [4, 5, 6], "links": { "next": "%s/page3" } }`, testhelper.Server.URL)
--	})
--
--	testhelper.Mux.HandleFunc("/page3", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `{ "ints": [7, 8, 9], "links": { "next": null } }`)
--	})
--
--	client := createClient()
--
--	createPage := func(r PageResult) Page {
--		return LinkedPageResult{LinkedPageBase{PageResult: r}}
--	}
--
--	return NewPager(client, testhelper.Server.URL+"/page1", createPage)
--}
--
--func TestEnumerateLinked(t *testing.T) {
--	pager := createLinked(t)
--	defer testhelper.TeardownHTTP()
--
--	callCount := 0
--	err := pager.EachPage(func(page Page) (bool, error) {
--		actual, err := ExtractLinkedInts(page)
--		if err != nil {
--			return false, err
--		}
--
--		t.Logf("Handler invoked with %v", actual)
--
--		var expected []int
--		switch callCount {
--		case 0:
--			expected = []int{1, 2, 3}
--		case 1:
--			expected = []int{4, 5, 6}
--		case 2:
--			expected = []int{7, 8, 9}
--		default:
--			t.Fatalf("Unexpected call count: %d", callCount)
--			return false, nil
--		}
--
--		if !reflect.DeepEqual(expected, actual) {
--			t.Errorf("Call %d: Expected %#v, but was %#v", callCount, expected, actual)
--		}
--
--		callCount++
--		return true, nil
--	})
--	if err != nil {
--		t.Errorf("Unexpected error for page iteration: %v", err)
--	}
--
--	if callCount != 3 {
--		t.Errorf("Expected 3 calls, but was %d", callCount)
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker.go
-deleted file mode 100644
-index e7688c2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker.go
-+++ /dev/null
-@@ -1,34 +0,0 @@
--package pagination
--
--// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager.
--// For convenience, embed the MarkerPageBase struct.
--type MarkerPage interface {
--	Page
--
--	// LastMarker returns the last "marker" value on this page.
--	LastMarker() (string, error)
--}
--
--// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters.
--type MarkerPageBase struct {
--	PageResult
--
--	// Owner is a reference to the embedding struct.
--	Owner MarkerPage
--}
--
--// NextPageURL generates the URL for the page of results after this one.
--func (current MarkerPageBase) NextPageURL() (string, error) {
--	currentURL := current.URL
--
--	mark, err := current.Owner.LastMarker()
--	if err != nil {
--		return "", err
--	}
--
--	q := currentURL.Query()
--	q.Set("marker", mark)
--	currentURL.RawQuery = q.Encode()
--
--	return currentURL.String(), nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker_test.go
-deleted file mode 100644
-index 3b1df1d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/marker_test.go
-+++ /dev/null
-@@ -1,113 +0,0 @@
--package pagination
--
--import (
--	"fmt"
--	"net/http"
--	"strings"
--	"testing"
--
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--// MarkerPager sample and test cases.
--
--type MarkerPageResult struct {
--	MarkerPageBase
--}
--
--func (r MarkerPageResult) IsEmpty() (bool, error) {
--	results, err := ExtractMarkerStrings(r)
--	if err != nil {
--		return true, err
--	}
--	return len(results) == 0, err
--}
--
--func (r MarkerPageResult) LastMarker() (string, error) {
--	results, err := ExtractMarkerStrings(r)
--	if err != nil {
--		return "", err
--	}
--	if len(results) == 0 {
--		return "", nil
--	}
--	return results[len(results)-1], nil
--}
--
--func createMarkerPaged(t *testing.T) Pager {
--	testhelper.SetupHTTP()
--
--	testhelper.Mux.HandleFunc("/page", func(w http.ResponseWriter, r *http.Request) {
--		r.ParseForm()
--		ms := r.Form["marker"]
--		switch {
--		case len(ms) == 0:
--			fmt.Fprintf(w, "aaa\nbbb\nccc")
--		case len(ms) == 1 && ms[0] == "ccc":
--			fmt.Fprintf(w, "ddd\neee\nfff")
--		case len(ms) == 1 && ms[0] == "fff":
--			fmt.Fprintf(w, "ggg\nhhh\niii")
--		case len(ms) == 1 && ms[0] == "iii":
--			w.WriteHeader(http.StatusNoContent)
--		default:
--			t.Errorf("Request with unexpected marker: [%v]", ms)
--		}
--	})
--
--	client := createClient()
--
--	createPage := func(r PageResult) Page {
--		p := MarkerPageResult{MarkerPageBase{PageResult: r}}
--		p.MarkerPageBase.Owner = p
--		return p
--	}
--
--	return NewPager(client, testhelper.Server.URL+"/page", createPage)
--}
--
--func ExtractMarkerStrings(page Page) ([]string, error) {
--	content := page.(MarkerPageResult).Body.([]uint8)
--	parts := strings.Split(string(content), "\n")
--	results := make([]string, 0, len(parts))
--	for _, part := range parts {
--		if len(part) > 0 {
--			results = append(results, part)
--		}
--	}
--	return results, nil
--}
--
--func TestEnumerateMarker(t *testing.T) {
--	pager := createMarkerPaged(t)
--	defer testhelper.TeardownHTTP()
--
--	callCount := 0
--	err := pager.EachPage(func(page Page) (bool, error) {
--		actual, err := ExtractMarkerStrings(page)
--		if err != nil {
--			return false, err
--		}
--
--		t.Logf("Handler invoked with %v", actual)
--
--		var expected []string
--		switch callCount {
--		case 0:
--			expected = []string{"aaa", "bbb", "ccc"}
--		case 1:
--			expected = []string{"ddd", "eee", "fff"}
--		case 2:
--			expected = []string{"ggg", "hhh", "iii"}
--		default:
--			t.Fatalf("Unexpected call count: %d", callCount)
--			return false, nil
--		}
--
--		testhelper.CheckDeepEquals(t, expected, actual)
--
--		callCount++
--		return true, nil
--	})
--	testhelper.AssertNoErr(t, err)
--	testhelper.AssertEquals(t, callCount, 3)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/null.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/null.go
-deleted file mode 100644
-index ae57e18..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/null.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--package pagination
--
--// nullPage is an always-empty page that trivially satisfies all Page interfaces.
--// It's useful to be returned along with an error.
--type nullPage struct{}
--
--// NextPageURL always returns "" to indicate that there are no more pages to return.
--func (p nullPage) NextPageURL() (string, error) {
--	return "", nil
--}
--
--// IsEmpty always returns true to prevent iteration over nullPages.
--func (p nullPage) IsEmpty() (bool, error) {
--	return true, nil
--}
--
--// LastMark always returns "" because the nullPage contains no items to have a mark.
--func (p nullPage) LastMark() (string, error) {
--	return "", nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pager.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pager.go
-deleted file mode 100644
-index 5c20e16..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pager.go
-+++ /dev/null
-@@ -1,115 +0,0 @@
--package pagination
--
--import (
--	"errors"
--
--	"github.com/rackspace/gophercloud"
--)
--
--var (
--	// ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist.
--	ErrPageNotAvailable = errors.New("The requested page does not exist.")
--)
--
--// Page must be satisfied by the result type of any resource collection.
--// It allows clients to interact with the resource uniformly, regardless of whether or how it's paginated.
--// Generally, rather than implementing this interface directly, implementors should embed one of the concrete
--// PageBase structs instead.
--// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
--// will need to implement.
--type Page interface {
--
--	// NextPageURL generates the URL for the page of data that follows this collection.
--	// Return "" if no such page exists.
--	NextPageURL() (string, error)
--
--	// IsEmpty returns true if this Page has no items in it.
--	IsEmpty() (bool, error)
--}
--
--// Pager knows how to advance through a specific resource collection, one page at a time.
--type Pager struct {
--	client *gophercloud.ServiceClient
--
--	initialURL string
--
--	createPage func(r PageResult) Page
--
--	Err error
--
--	// Headers supplies additional HTTP headers to populate on each paged request.
--	Headers map[string]string
--}
--
--// NewPager constructs a manually-configured pager.
--// Supply the service client, the URL for the first page, and a function that converts a PageResult into a Page.
--func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager {
--	return Pager{
--		client:     client,
--		initialURL: initialURL,
--		createPage: createPage,
--	}
--}
--
--// WithPageCreator returns a new Pager that substitutes a different page creation function. This is
--// useful for overriding List functions in delegation.
--func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager {
--	return Pager{
--		client:     p.client,
--		initialURL: p.initialURL,
--		createPage: createPage,
--	}
--}
--
--func (p Pager) fetchNextPage(url string) (Page, error) {
--	resp, err := Request(p.client, p.Headers, url)
--	if err != nil {
--		return nil, err
--	}
--
--	remembered, err := PageResultFrom(resp)
--	if err != nil {
--		return nil, err
--	}
--
--	return p.createPage(remembered), nil
--}
--
--// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function.
--// Return "false" from the handler to prematurely stop iterating.
--func (p Pager) EachPage(handler func(Page) (bool, error)) error {
--	if p.Err != nil {
--		return p.Err
--	}
--	currentURL := p.initialURL
--	for {
--		currentPage, err := p.fetchNextPage(currentURL)
--		if err != nil {
--			return err
--		}
--
--		empty, err := currentPage.IsEmpty()
--		if err != nil {
--			return err
--		}
--		if empty {
--			return nil
--		}
--
--		ok, err := handler(currentPage)
--		if err != nil {
--			return err
--		}
--		if !ok {
--			return nil
--		}
--
--		currentURL, err = currentPage.NextPageURL()
--		if err != nil {
--			return err
--		}
--		if currentURL == "" {
--			return nil
--		}
--	}
--}
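
The early-stop behaviour documented on EachPage above (return false from the handler) is easy to overlook; here is a small sketch of a helper that keeps at most n pages, written against only the Pager and Page types in this file. The package and function names are arbitrary.

    package pagerexample

    import "github.com/rackspace/gophercloud/pagination"

    // firstN collects up to n pages from a pager (assumes n >= 1). Returning
    // false from the handler stops iteration early, per the EachPage contract.
    func firstN(pager pagination.Pager, n int) ([]pagination.Page, error) {
        var pages []pagination.Page
        err := pager.EachPage(func(p pagination.Page) (bool, error) {
            pages = append(pages, p)
            return len(pages) < n, nil
        })
        return pages, err
    }
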
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pagination_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pagination_test.go
-deleted file mode 100644
-index f3e4de1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pagination_test.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package pagination
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--func createClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{TokenID: "abc123"},
--		Endpoint:       testhelper.Endpoint(),
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pkg.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pkg.go
-deleted file mode 100644
-index 912daea..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/pkg.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--/*
--Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
--*/
--package pagination
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single.go
-deleted file mode 100644
-index 4dd3c5c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single.go
-+++ /dev/null
-@@ -1,9 +0,0 @@
--package pagination
--
--// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
--type SinglePageBase PageResult
--
--// NextPageURL always returns "" to indicate that there are no more pages to return.
--func (current SinglePageBase) NextPageURL() (string, error) {
--	return "", nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single_test.go
-deleted file mode 100644
-index 8817d57..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/pagination/single_test.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--package pagination
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--// SinglePage sample and test cases.
--
--type SinglePageResult struct {
--	SinglePageBase
--}
--
--func (r SinglePageResult) IsEmpty() (bool, error) {
--	is, err := ExtractSingleInts(r)
--	if err != nil {
--		return true, err
--	}
--	return len(is) == 0, nil
--}
--
--func ExtractSingleInts(page Page) ([]int, error) {
--	var response struct {
--		Ints []int `mapstructure:"ints"`
--	}
--
--	err := mapstructure.Decode(page.(SinglePageResult).Body, &response)
--	if err != nil {
--		return nil, err
--	}
--
--	return response.Ints, nil
--}
--
--func setupSinglePaged() Pager {
--	testhelper.SetupHTTP()
--	client := createClient()
--
--	testhelper.Mux.HandleFunc("/only", func(w http.ResponseWriter, r *http.Request) {
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, `{ "ints": [1, 2, 3] }`)
--	})
--
--	createPage := func(r PageResult) Page {
--		return SinglePageResult{SinglePageBase(r)}
--	}
--
--	return NewPager(client, testhelper.Server.URL+"/only", createPage)
--}
--
--func TestEnumerateSinglePaged(t *testing.T) {
--	callCount := 0
--	pager := setupSinglePaged()
--	defer testhelper.TeardownHTTP()
--
--	err := pager.EachPage(func(page Page) (bool, error) {
--		callCount++
--
--		expected := []int{1, 2, 3}
--		actual, err := ExtractSingleInts(page)
--		testhelper.AssertNoErr(t, err)
--		testhelper.CheckDeepEquals(t, expected, actual)
--		return true, nil
--	})
--	testhelper.CheckNoErr(t, err)
--	testhelper.CheckEquals(t, 1, callCount)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/params.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/params.go
-deleted file mode 100644
-index 5fe3c2c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/params.go
-+++ /dev/null
-@@ -1,184 +0,0 @@
--package gophercloud
--
--import (
--	"fmt"
--	"net/url"
--	"reflect"
--	"strconv"
--	"strings"
--	"time"
--)
--
--// MaybeString takes a string that might be a zero-value, and either returns a
--// pointer to it or a nil value (i.e. empty pointer). This is useful
--// for converting zero values in options structs when the end-user hasn't
--// defined values. Those zero values need to be nil in order for the JSON
--// serialization to ignore them.
--func MaybeString(original string) *string {
--	if original != "" {
--		return &original
--	}
--	return nil
--}
--
--// MaybeInt takes an int that might be a zero-value, and either returns a
--// pointer to it or a nil value (i.e. empty pointer).
--func MaybeInt(original int) *int {
--	if original != 0 {
--		return &original
--	}
--	return nil
--}
--
--var t time.Time
--
--func isZero(v reflect.Value) bool {
--	switch v.Kind() {
--	case reflect.Func, reflect.Map, reflect.Slice:
--		return v.IsNil()
--	case reflect.Array:
--		z := true
--		for i := 0; i < v.Len(); i++ {
--			z = z && isZero(v.Index(i))
--		}
--		return z
--	case reflect.Struct:
--		if v.Type() == reflect.TypeOf(t) {
--			if v.Interface().(time.Time).IsZero() {
--				return true
--			}
--			return false
--		}
--		z := true
--		for i := 0; i < v.NumField(); i++ {
--			z = z && isZero(v.Field(i))
--		}
--		return z
--	}
--	// Compare other types directly:
--	z := reflect.Zero(v.Type())
--	return v.Interface() == z.Interface()
--}
--
--/*
--BuildQueryString accepts a generic structure and converts it into a URL query string. It
--converts field names into query names based on "q" tags. So for example, this
--type:
--
--	struct {
--	   Bar string `q:"x_bar"`
--	   Baz int    `q:"lorem_ipsum"`
--	}{
--	   Bar: "XXX",
--	   Baz: "YYY",
--	}
--
--will be converted into ?x_bar=XXX&lorem_ipsum=YYYY
--*/
--func BuildQueryString(opts interface{}) (*url.URL, error) {
--	optsValue := reflect.ValueOf(opts)
--	if optsValue.Kind() == reflect.Ptr {
--		optsValue = optsValue.Elem()
--	}
--
--	optsType := reflect.TypeOf(opts)
--	if optsType.Kind() == reflect.Ptr {
--		optsType = optsType.Elem()
--	}
--
--	var optsSlice []string
--	if optsValue.Kind() == reflect.Struct {
--		for i := 0; i < optsValue.NumField(); i++ {
--			v := optsValue.Field(i)
--			f := optsType.Field(i)
--			qTag := f.Tag.Get("q")
--
--			// if the field has a 'q' tag, it goes in the query string
--			if qTag != "" {
--				tags := strings.Split(qTag, ",")
--
--				// if the field is set, add it to the slice of query pieces
--				if !isZero(v) {
--					switch v.Kind() {
--					case reflect.String:
--						optsSlice = append(optsSlice, tags[0]+"="+v.String())
--					case reflect.Int:
--						optsSlice = append(optsSlice, tags[0]+"="+strconv.FormatInt(v.Int(), 10))
--					case reflect.Bool:
--						optsSlice = append(optsSlice, tags[0]+"="+strconv.FormatBool(v.Bool()))
--					}
--				} else {
--					// Otherwise, the field is not set.
--					if len(tags) == 2 && tags[1] == "required" {
--						// And the field is required. Return an error.
--						return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name)
--					}
--				}
--			}
--
--		}
--		// URL encode the string for safety.
--		s := strings.Join(optsSlice, "&")
--		if s != "" {
--			s = "?" + s
--		}
--		u, err := url.Parse(s)
--		if err != nil {
--			return nil, err
--		}
--		return u, nil
--	}
--	// Return an error if the underlying type of 'opts' isn't a struct.
--	return nil, fmt.Errorf("Options type is not a struct.")
--}
--
--// BuildHeaders accepts a generic structure and parses it into a string map. It
--// converts field names into header names based on "h" tags, and field values
--// into header values by a simple one-to-one mapping.
--func BuildHeaders(opts interface{}) (map[string]string, error) {
--	optsValue := reflect.ValueOf(opts)
--	if optsValue.Kind() == reflect.Ptr {
--		optsValue = optsValue.Elem()
--	}
--
--	optsType := reflect.TypeOf(opts)
--	if optsType.Kind() == reflect.Ptr {
--		optsType = optsType.Elem()
--	}
--
--	optsMap := make(map[string]string)
--	if optsValue.Kind() == reflect.Struct {
--		for i := 0; i < optsValue.NumField(); i++ {
--			v := optsValue.Field(i)
--			f := optsType.Field(i)
--			hTag := f.Tag.Get("h")
--
--			// if the field has a 'h' tag, it goes in the header
--			if hTag != "" {
--				tags := strings.Split(hTag, ",")
--
--				// if the field is set, add it to the slice of query pieces
--				if !isZero(v) {
--					switch v.Kind() {
--					case reflect.String:
--						optsMap[tags[0]] = v.String()
--					case reflect.Int:
--						optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10)
--					case reflect.Bool:
--						optsMap[tags[0]] = strconv.FormatBool(v.Bool())
--					}
--				} else {
--					// Otherwise, the field is not set.
--					if len(tags) == 2 && tags[1] == "required" {
--						// And the field is required. Return an error.
--						return optsMap, fmt.Errorf("Required header not set.")
--					}
--				}
--			}
--
--		}
--		return optsMap, nil
--	}
--	// Return an error if the underlying type of 'opts' isn't a struct.
--	return optsMap, fmt.Errorf("Options type is not a struct.")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/params_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/params_test.go
-deleted file mode 100644
-index 9f1d3bd..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/params_test.go
-+++ /dev/null
-@@ -1,142 +0,0 @@
--package gophercloud
--
--import (
--	"net/url"
--	"reflect"
--	"testing"
--	"time"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestMaybeString(t *testing.T) {
--	testString := ""
--	var expected *string
--	actual := MaybeString(testString)
--	th.CheckDeepEquals(t, expected, actual)
--
--	testString = "carol"
--	expected = &testString
--	actual = MaybeString(testString)
--	th.CheckDeepEquals(t, expected, actual)
--}
--
--func TestMaybeInt(t *testing.T) {
--	testInt := 0
--	var expected *int
--	actual := MaybeInt(testInt)
--	th.CheckDeepEquals(t, expected, actual)
--
--	testInt = 4
--	expected = &testInt
--	actual = MaybeInt(testInt)
--	th.CheckDeepEquals(t, expected, actual)
--}
--
--func TestBuildQueryString(t *testing.T) {
--	opts := struct {
--		J int    `q:"j"`
--		R string `q:"r,required"`
--		C bool   `q:"c"`
--	}{
--		J: 2,
--		R: "red",
--		C: true,
--	}
--	expected := &url.URL{RawQuery: "j=2&r=red&c=true"}
--	actual, err := BuildQueryString(&opts)
--	if err != nil {
--		t.Errorf("Error building query string: %v", err)
--	}
--	th.CheckDeepEquals(t, expected, actual)
--
--	opts = struct {
--		J int    `q:"j"`
--		R string `q:"r,required"`
--		C bool   `q:"c"`
--	}{
--		J: 2,
--		C: true,
--	}
--	_, err = BuildQueryString(&opts)
--	if err == nil {
--		t.Errorf("Expected error: 'Required field not set'")
--	}
--	th.CheckDeepEquals(t, expected, actual)
--
--	_, err = BuildQueryString(map[string]interface{}{"Number": 4})
--	if err == nil {
--		t.Errorf("Expected error: 'Options type is not a struct'")
--	}
--}
--
--func TestBuildHeaders(t *testing.T) {
--	testStruct := struct {
--		Accept string `h:"Accept"`
--		Num    int    `h:"Number,required"`
--		Style  bool   `h:"Style"`
--	}{
--		Accept: "application/json",
--		Num:    4,
--		Style:  true,
--	}
--	expected := map[string]string{"Accept": "application/json", "Number": "4", "Style": "true"}
--	actual, err := BuildHeaders(&testStruct)
--	th.CheckNoErr(t, err)
--	th.CheckDeepEquals(t, expected, actual)
--
--	testStruct.Num = 0
--	_, err = BuildHeaders(&testStruct)
--	if err == nil {
--		t.Errorf("Expected error: 'Required header not set'")
--	}
--
--	_, err = BuildHeaders(map[string]interface{}{"Number": 4})
--	if err == nil {
--		t.Errorf("Expected error: 'Options type is not a struct'")
--	}
--}
--
--func TestIsZero(t *testing.T) {
--	var testMap map[string]interface{}
--	testMapValue := reflect.ValueOf(testMap)
--	expected := true
--	actual := isZero(testMapValue)
--	th.CheckEquals(t, expected, actual)
--	testMap = map[string]interface{}{"empty": false}
--	testMapValue = reflect.ValueOf(testMap)
--	expected = false
--	actual = isZero(testMapValue)
--	th.CheckEquals(t, expected, actual)
--
--	var testArray [2]string
--	testArrayValue := reflect.ValueOf(testArray)
--	expected = true
--	actual = isZero(testArrayValue)
--	th.CheckEquals(t, expected, actual)
--	testArray = [2]string{"one", "two"}
--	testArrayValue = reflect.ValueOf(testArray)
--	expected = false
--	actual = isZero(testArrayValue)
--	th.CheckEquals(t, expected, actual)
--
--	var testStruct struct {
--		A string
--		B time.Time
--	}
--	testStructValue := reflect.ValueOf(testStruct)
--	expected = true
--	actual = isZero(testStructValue)
--	th.CheckEquals(t, expected, actual)
--	testStruct = struct {
--		A string
--		B time.Time
--	}{
--		B: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
--	}
--	testStructValue = reflect.ValueOf(testStruct)
--	expected = false
--	actual = isZero(testStructValue)
--	th.CheckEquals(t, expected, actual)
--
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go
-deleted file mode 100644
-index 7754c20..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package gophercloud
--
--// ProviderClient stores details that are required to interact with any
--// services within a specific provider's API.
--//
--// Generally, you acquire a ProviderClient by calling the NewClient method in
--// the appropriate provider's child package, providing whatever authentication
--// credentials are required.
--type ProviderClient struct {
--	// IdentityBase is the base URL used for a particular provider's identity
--	// service - it will be used when issuing authentication requests. It
--	// should point to the root resource of the identity service, not a specific
--	// identity version.
--	IdentityBase string
--
--	// IdentityEndpoint is the identity endpoint. This may be a specific version
--	// of the identity service. If this is the case, this endpoint is used rather
--	// than querying versions first.
--	IdentityEndpoint string
--
--	// TokenID is the ID of the most recently issued valid token.
--	TokenID string
--
--	// EndpointLocator describes how this provider discovers the endpoints for
--	// its constituent services.
--	EndpointLocator EndpointLocator
--}
--
--// AuthenticatedHeaders returns a map of HTTP headers that are common for all
--// authenticated service requests.
--func (client *ProviderClient) AuthenticatedHeaders() map[string]string {
--	return map[string]string{"X-Auth-Token": client.TokenID}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go
-deleted file mode 100644
-index b260246..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go
-+++ /dev/null
-@@ -1,16 +0,0 @@
--package gophercloud
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAuthenticatedHeaders(t *testing.T) {
--	p := &ProviderClient{
--		TokenID: "1234",
--	}
--	expected := map[string]string{"X-Auth-Token": "1234"}
--	actual := p.AuthenticatedHeaders()
--	th.CheckDeepEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/auth_env.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/auth_env.go
-deleted file mode 100644
-index 5852c3c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/auth_env.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--package rackspace
--
--import (
--	"fmt"
--	"os"
--
--	"github.com/rackspace/gophercloud"
--)
--
--var nilOptions = gophercloud.AuthOptions{}
--
--// ErrNoAuthURL, ErrNoUsername, and ErrNoPassword errors indicate that one of the
--// required RS_AUTH_URL, RS_USERNAME, or RS_PASSWORD environment variables,
--// respectively, remains undefined. See the AuthOptionsFromEnv() function for more details.
--var (
--	ErrNoAuthURL  = fmt.Errorf("Environment variable RS_AUTH_URL or OS_AUTH_URL need to be set.")
--	ErrNoUsername = fmt.Errorf("Environment variable RS_USERNAME or OS_USERNAME need to be set.")
--	ErrNoPassword = fmt.Errorf("Environment variable RS_API_KEY or RS_PASSWORD needs to be set.")
--)
--
--func prefixedEnv(base string) string {
--	value := os.Getenv("RS_" + base)
--	if value == "" {
--		value = os.Getenv("OS_" + base)
--	}
--	return value
--}
--
--// AuthOptionsFromEnv fills out an identity.AuthOptions structure with the
--// settings found on the various Rackspace RS_* environment variables.
--func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
--	authURL := prefixedEnv("AUTH_URL")
--	username := prefixedEnv("USERNAME")
--	password := prefixedEnv("PASSWORD")
--	apiKey := prefixedEnv("API_KEY")
--
--	if authURL == "" {
--		return nilOptions, ErrNoAuthURL
--	}
--
--	if username == "" {
--		return nilOptions, ErrNoUsername
--	}
--
--	if password == "" && apiKey == "" {
--		return nilOptions, ErrNoPassword
--	}
--
--	ao := gophercloud.AuthOptions{
--		IdentityEndpoint: authURL,
--		Username:         username,
--		Password:         password,
--		APIKey:           apiKey,
--	}
--
--	return ao, nil
--}
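
A minimal sketch of AuthOptionsFromEnv in use, assuming the RS_*/OS_* variables handled above are already exported in the environment; everything else comes from the code in this hunk.

    package main

    import (
        "fmt"
        "log"

        "github.com/rackspace/gophercloud/rackspace"
    )

    func main() {
        // Reads RS_AUTH_URL/OS_AUTH_URL, RS_USERNAME/OS_USERNAME and
        // RS_PASSWORD or RS_API_KEY, as implemented above, and fails with
        // one of the ErrNo* errors if a required variable is missing.
        opts, err := rackspace.AuthOptionsFromEnv()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("will authenticate against", opts.IdentityEndpoint)
    }
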
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go
-deleted file mode 100644
-index b338c36..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate.go
-+++ /dev/null
-@@ -1,134 +0,0 @@
--package snapshots
--
--import (
--	"errors"
--
--	"github.com/racker/perigee"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
--)
--
--func updateURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("snapshots", id)
--}
--
--// CreateOptsBuilder allows extensions to add additional parameters to the
--// Create request.
--type CreateOptsBuilder interface {
--	ToSnapshotCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts contains options for creating a Snapshot. This object is passed to
--// the snapshots.Create function. For more information about these parameters,
--// see the Snapshot object.
--type CreateOpts struct {
--	// REQUIRED
--	VolumeID string
--	// OPTIONAL
--	Description string
--	// OPTIONAL
--	Force bool
--	// OPTIONAL
--	Name string
--}
--
--// ToSnapshotCreateMap assembles a request body based on the contents of a
--// CreateOpts.
--func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) {
--	s := make(map[string]interface{})
--
--	if opts.VolumeID == "" {
--		return nil, errors.New("Required CreateOpts field 'VolumeID' not set.")
--	}
--
--	s["volume_id"] = opts.VolumeID
--
--	if opts.Description != "" {
--		s["display_description"] = opts.Description
--	}
--	if opts.Name != "" {
--		s["display_name"] = opts.Name
--	}
--	if opts.Force {
--		s["force"] = opts.Force
--	}
--
--	return map[string]interface{}{"snapshot": s}, nil
--}
--
--// Create will create a new Snapshot based on the values in CreateOpts. To
--// extract the Snapshot object from the response, call the Extract method on the
--// CreateResult.
--func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	return CreateResult{os.Create(client, opts)}
--}
--
--// Delete will delete the existing Snapshot with the provided ID.
--func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult {
--	return os.Delete(client, id)
--}
--
--// Get retrieves the Snapshot with the provided ID. To extract the Snapshot
--// object from the response, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	return GetResult{os.Get(client, id)}
--}
--
--// List returns Snapshots.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	return os.List(client, os.ListOpts{})
--}
--
--// UpdateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Update operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type UpdateOptsBuilder interface {
--	ToSnapshotUpdateMap() (map[string]interface{}, error)
--}
--
--// UpdateOpts is the common options struct used in this package's Update
--// operation.
--type UpdateOpts struct {
--	Name        string
--	Description string
--}
--
--// ToSnapshotUpdateMap casts a UpdateOpts struct to a map.
--func (opts UpdateOpts) ToSnapshotUpdateMap() (map[string]interface{}, error) {
--	s := make(map[string]interface{})
--
--	if opts.Name != "" {
--		s["display_name"] = opts.Name
--	}
--	if opts.Description != "" {
--		s["display_description"] = opts.Description
--	}
--
--	return map[string]interface{}{"snapshot": s}, nil
--}
--
--// Update accepts a UpdateOpts struct and updates an existing snapshot using the
--// values provided.
--func Update(c *gophercloud.ServiceClient, snapshotID string, opts UpdateOptsBuilder) UpdateResult {
--	var res UpdateResult
--
--	reqBody, err := opts.ToSnapshotUpdateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	// Send request to API
--	_, res.Err = perigee.Request("PUT", updateURL(c, snapshotID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201},
--	})
--
--	return res
--}
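
Unlike Create, Get, List, and Delete, the Update call above has no accompanying test in this patch; the sketch below shows one plausible way it was meant to be used, relying on the UpdateResult.Extract defined in results.go further down. The helper name is arbitrary.

    package snapshotrename

    import (
        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots"
    )

    // rename changes a snapshot's display name via the Update call above and
    // extracts the updated Snapshot from the result. The client and ID are
    // assumed to be supplied by the caller.
    func rename(c *gophercloud.ServiceClient, id, newName string) (*snapshots.Snapshot, error) {
        return snapshots.Update(c, id, snapshots.UpdateOpts{Name: newName}).Extract()
    }
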
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go
-deleted file mode 100644
-index 1a02b46..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/delegate_test.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--package snapshots
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--const endpoint = "http://localhost:57909/v1/12345"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestUpdateURL(t *testing.T) {
--	actual := updateURL(endpointClient(), "foo")
--	expected := endpoint + "snapshots/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockListResponse(t)
--
--	count := 0
--
--	err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractSnapshots(page)
--		if err != nil {
--			t.Errorf("Failed to extract snapshots: %v", err)
--			return false, err
--		}
--
--		expected := []Snapshot{
--			Snapshot{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "snapshot-001",
--			},
--			Snapshot{
--				ID:   "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name: "snapshot-002",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	th.AssertEquals(t, 1, count)
--	th.AssertNoErr(t, err)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockGetResponse(t)
--
--	v, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, v.Name, "snapshot-001")
--	th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockCreateResponse(t)
--
--	options := &CreateOpts{VolumeID: "1234", Name: "snapshot-001"}
--	n, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.VolumeID, "1234")
--	th.AssertEquals(t, n.Name, "snapshot-001")
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockDeleteResponse(t)
--
--	res := Delete(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go
-deleted file mode 100644
-index ad6064f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package snapshots provides information and interaction with the snapshot
--// API resource for the Rackspace Block Storage service.
--package snapshots
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go
-deleted file mode 100644
-index 0fab282..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots/results.go
-+++ /dev/null
-@@ -1,149 +0,0 @@
--package snapshots
--
--import (
--	"github.com/racker/perigee"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Status is the type used to represent a snapshot's status
--type Status string
--
--// Constants to use for supported statuses
--const (
--	Creating    Status = "CREATING"
--	Available   Status = "AVAILABLE"
--	Deleting    Status = "DELETING"
--	Error       Status = "ERROR"
--	DeleteError Status = "ERROR_DELETING"
--)
--
--// Snapshot is the Rackspace representation of an external block storage device.
--type Snapshot struct {
--	// The timestamp when this snapshot was created.
--	CreatedAt string `mapstructure:"created_at"`
--
--	// The human-readable description for this snapshot.
--	Description string `mapstructure:"display_description"`
--
--	// The human-readable name for this snapshot.
--	Name string `mapstructure:"display_name"`
--
--	// The UUID for this snapshot.
--	ID string `mapstructure:"id"`
--
--	// The random metadata associated with this snapshot. Note: unlike standard
--	// OpenStack snapshots, this cannot actually be set.
--	Metadata map[string]string `mapstructure:"metadata"`
--
--	// Indicates the current progress of the snapshot's backup procedure.
--	Progress string `mapstructure:"os-extended-snapshot-attributes:progress"`
--
--	// The project ID.
--	ProjectID string `mapstructure:"os-extended-snapshot-attributes:project_id"`
--
--	// The size of the volume which this snapshot backs up.
--	Size int `mapstructure:"size"`
--
--	// The status of the snapshot.
--	Status Status `mapstructure:"status"`
--
--	// The ID of the volume which this snapshot seeks to back up.
--	VolumeID string `mapstructure:"volume_id"`
--}
--
--// CreateResult represents the result of a create operation
--type CreateResult struct {
--	os.CreateResult
--}
--
--// GetResult represents the result of a get operation
--type GetResult struct {
--	os.GetResult
--}
--
--// UpdateResult represents the result of an update operation
--type UpdateResult struct {
--	gophercloud.Result
--}
--
--func commonExtract(resp interface{}, err error) (*Snapshot, error) {
--	if err != nil {
--		return nil, err
--	}
--
--	var respStruct struct {
--		Snapshot *Snapshot `json:"snapshot"`
--	}
--
--	err = mapstructure.Decode(resp, &respStruct)
--
--	return respStruct.Snapshot, err
--}
--
--// Extract will get the Snapshot object out of the GetResult object.
--func (r GetResult) Extract() (*Snapshot, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// Extract will get the Snapshot object out of the CreateResult object.
--func (r CreateResult) Extract() (*Snapshot, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// Extract will get the Snapshot object out of the UpdateResult object.
--func (r UpdateResult) Extract() (*Snapshot, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call.
--func ExtractSnapshots(page pagination.Page) ([]Snapshot, error) {
--	var response struct {
--		Snapshots []Snapshot `json:"snapshots"`
--	}
--
--	err := mapstructure.Decode(page.(os.ListResult).Body, &response)
--	return response.Snapshots, err
--}
--
--// WaitUntilComplete will continually poll a snapshot until its backup
--// progress reaches 100%. It will do this for at most the number of
--// seconds specified.
--func (snapshot Snapshot) WaitUntilComplete(c *gophercloud.ServiceClient, timeout int) error {
--	return gophercloud.WaitFor(timeout, func() (bool, error) {
--		// Poll resource
--		current, err := Get(c, snapshot.ID).Extract()
--		if err != nil {
--			return false, err
--		}
--
--		// Has it been built yet?
--		if current.Progress == "100%" {
--			return true, nil
--		}
--
--		return false, nil
--	})
--}
--
--// WaitUntilDeleted will continually poll a snapshot until it has been
--// successfully deleted, i.e. returns a 404 status.
--func (snapshot Snapshot) WaitUntilDeleted(c *gophercloud.ServiceClient, timeout int) error {
--	return gophercloud.WaitFor(timeout, func() (bool, error) {
--		// Poll resource
--		_, err := Get(c, snapshot.ID).Extract()
--
--		// Check for a 404
--		if casted, ok := err.(*perigee.UnexpectedResponseCodeError); ok && casted.Actual == 404 {
--			return true, nil
--		} else if err != nil {
--			return false, err
--		}
--
--		return false, nil
--	})
--}
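
For context on what this removed delegate provided, here is a minimal usage sketch of the snapshot API above. It assumes an already-authenticated *gophercloud.ServiceClient; the helper name, snapshot name, and 600-second timeout are illustrative.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/snapshots"
    )

    // createAndWait creates a snapshot of the given volume and then uses the
    // WaitUntilComplete helper from the removed results.go to poll until the
    // backup reports 100% progress.
    func createAndWait(client *gophercloud.ServiceClient, volumeID string) error {
        s, err := snapshots.Create(client, &snapshots.CreateOpts{
            VolumeID: volumeID,
            Name:     "snapshot-001",
        }).Extract()
        if err != nil {
            return err
        }
        log.Printf("created snapshot %s", s.ID)

        // Poll for at most 600 seconds.
        return s.WaitUntilComplete(client, 600)
    }
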
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go
-deleted file mode 100644
-index 4383494..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go
-+++ /dev/null
-@@ -1,75 +0,0 @@
--package volumes
--
--import (
--	"fmt"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type CreateOpts struct {
--	os.CreateOpts
--}
--
--func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {
--	if opts.Size < 75 || opts.Size > 1024 {
--		return nil, fmt.Errorf("Size field must be between 75 and 1024")
--	}
--
--	return opts.CreateOpts.ToVolumeCreateMap()
--}
--
--// Create will create a new Volume based on the values in CreateOpts. To extract
--// the Volume object from the response, call the Extract method on the
--// CreateResult.
--func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) CreateResult {
--	return CreateResult{os.Create(client, opts)}
--}
--
--// Delete will delete the existing Volume with the provided ID.
--func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult {
--	return os.Delete(client, id)
--}
--
--// Get retrieves the Volume with the provided ID. To extract the Volume object
--// from the response, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	return GetResult{os.Get(client, id)}
--}
--
--// List returns all volumes. This delegate passes an empty ListOpts, so no filtering is applied.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	return os.List(client, os.ListOpts{})
--}
--
--// UpdateOpts contain options for updating an existing Volume. This object is passed
--// to the volumes.Update function. For more information about the parameters, see
--// the Volume object.
--type UpdateOpts struct {
--	// OPTIONAL
--	Name string
--	// OPTIONAL
--	Description string
--}
--
--// ToVolumeUpdateMap assembles a request body based on the contents of an
--// UpdateOpts.
--func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
--	v := make(map[string]interface{})
--
--	if opts.Description != "" {
--		v["display_description"] = opts.Description
--	}
--	if opts.Name != "" {
--		v["display_name"] = opts.Name
--	}
--
--	return map[string]interface{}{"volume": v}, nil
--}
--
--// Update will update the Volume with provided information. To extract the updated
--// Volume from the response, call the Extract method on the UpdateResult.
--func Update(client *gophercloud.ServiceClient, id string, opts os.UpdateOptsBuilder) UpdateResult {
--	return UpdateResult{os.Update(client, id, opts)}
--}
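
The volumes delegate above differs from the upstream OpenStack package mainly in its 75-1024 GB size check. A small sketch of how that plays out, assuming an authenticated service client; the helper name and sizes are illustrative.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        osVolumes "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
        "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes"
    )

    // createVolume shows the Rackspace-specific size check: values outside the
    // 75-1024 GB range are rejected by ToVolumeCreateMap before any request is
    // sent to the API.
    func createVolume(client *gophercloud.ServiceClient) error {
        // Fails locally: 10 GB is below the 75 GB minimum.
        if _, err := volumes.Create(client, volumes.CreateOpts{
            CreateOpts: osVolumes.CreateOpts{Size: 10},
        }).Extract(); err != nil {
            log.Println("expected validation error:", err)
        }

        // Passes validation and issues the create request.
        v, err := volumes.Create(client, volumes.CreateOpts{
            CreateOpts: osVolumes.CreateOpts{Size: 75},
        }).Extract()
        if err != nil {
            return err
        }
        log.Println("created volume", v.ID)
        return nil
    }
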
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go
-deleted file mode 100644
-index b44564c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate_test.go
-+++ /dev/null
-@@ -1,106 +0,0 @@
--package volumes
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockListResponse(t)
--
--	count := 0
--
--	err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVolumes(page)
--		if err != nil {
--			t.Errorf("Failed to extract volumes: %v", err)
--			return false, err
--		}
--
--		expected := []Volume{
--			Volume{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "vol-001",
--			},
--			Volume{
--				ID:   "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name: "vol-002",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	th.AssertEquals(t, 1, count)
--	th.AssertNoErr(t, err)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockGetResponse(t)
--
--	v, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, v.Name, "vol-001")
--	th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockCreateResponse(t)
--
--	n, err := Create(fake.ServiceClient(), CreateOpts{os.CreateOpts{Size: 75}}).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Size, 4)
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestSizeRange(t *testing.T) {
--	_, err := Create(fake.ServiceClient(), CreateOpts{os.CreateOpts{Size: 1}}).Extract()
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--
--	_, err = Create(fake.ServiceClient(), CreateOpts{os.CreateOpts{Size: 2000}}).Extract()
--	if err == nil {
--		t.Fatalf("Expected error, got none")
--	}
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockDeleteResponse(t)
--
--	res := Delete(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestUpdate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockUpdateResponse(t)
--
--	options := &UpdateOpts{Name: "vol-002"}
--	v, err := Update(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, "vol-002", v.Name)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go
-deleted file mode 100644
-index b2be25c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package volumes provides information and interaction with the volume
--// API resource for the Rackspace Block Storage service.
--package volumes
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go
-deleted file mode 100644
-index c7c2cc4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go
-+++ /dev/null
-@@ -1,66 +0,0 @@
--package volumes
--
--import (
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// Volume wraps an OpenStack volume.
--type Volume os.Volume
--
--// CreateResult represents the result of a create operation
--type CreateResult struct {
--	os.CreateResult
--}
--
--// GetResult represents the result of a get operation
--type GetResult struct {
--	os.GetResult
--}
--
--// UpdateResult represents the result of an update operation
--type UpdateResult struct {
--	os.UpdateResult
--}
--
--func commonExtract(resp interface{}, err error) (*Volume, error) {
--	if err != nil {
--		return nil, err
--	}
--
--	var respStruct struct {
--		Volume *Volume `json:"volume"`
--	}
--
--	err = mapstructure.Decode(resp, &respStruct)
--
--	return respStruct.Volume, err
--}
--
--// Extract will get the Volume object out of the GetResult object.
--func (r GetResult) Extract() (*Volume, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// Extract will get the Volume object out of the CreateResult object.
--func (r CreateResult) Extract() (*Volume, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// Extract will get the Volume object out of the UpdateResult object.
--func (r UpdateResult) Extract() (*Volume, error) {
--	return commonExtract(r.Body, r.Err)
--}
--
--// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.
--func ExtractVolumes(page pagination.Page) ([]Volume, error) {
--	var response struct {
--		Volumes []Volume `json:"volumes"`
--	}
--
--	err := mapstructure.Decode(page.(os.ListResult).Body, &response)
--
--	return response.Volumes, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go
-deleted file mode 100644
-index c96b3e4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate.go
-+++ /dev/null
-@@ -1,18 +0,0 @@
--package volumetypes
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// List returns all volume types.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	return os.List(client)
--}
--
--// Get will retrieve the volume type with the provided ID. To extract the volume
--// type from the result, call the Extract method on the GetResult.
--func Get(client *gophercloud.ServiceClient, id string) GetResult {
--	return GetResult{os.Get(client, id)}
--}
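
A brief sketch of listing volume types through the delegate above, assuming an authenticated service client; the helper name is illustrative.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/pagination"
        "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes"
    )

    // listVolumeTypes prints every volume type visible to the tenant.
    func listVolumeTypes(client *gophercloud.ServiceClient) error {
        return volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
            types, err := volumetypes.ExtractVolumeTypes(page)
            if err != nil {
                return false, err
            }
            for _, vt := range types {
                log.Printf("%s: %s", vt.ID, vt.Name)
            }
            return true, nil
        })
    }
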
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go
-deleted file mode 100644
-index 6e65c90..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/delegate_test.go
-+++ /dev/null
-@@ -1,64 +0,0 @@
--package volumetypes
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockListResponse(t)
--
--	count := 0
--
--	err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVolumeTypes(page)
--		if err != nil {
--			t.Errorf("Failed to extract volume types: %v", err)
--			return false, err
--		}
--
--		expected := []VolumeType{
--			VolumeType{
--				ID:   "289da7f8-6440-407c-9fb4-7db01ec49164",
--				Name: "vol-type-001",
--				ExtraSpecs: map[string]interface{}{
--					"capabilities": "gpu",
--				},
--			},
--			VolumeType{
--				ID:         "96c3bda7-c82a-4f50-be73-ca7621794835",
--				Name:       "vol-type-002",
--				ExtraSpecs: map[string]interface{}{},
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--
--	th.AssertEquals(t, 1, count)
--	th.AssertNoErr(t, err)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	os.MockGetResponse(t)
--
--	vt, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertDeepEquals(t, vt.ExtraSpecs, map[string]interface{}{"serverNumber": "2"})
--	th.AssertEquals(t, vt.Name, "vol-type-001")
--	th.AssertEquals(t, vt.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go
-deleted file mode 100644
-index 70122b7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package volumetypes provides information and interaction with the volume type
--// API resource for the Rackspace Block Storage service.
--package volumetypes
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go
-deleted file mode 100644
-index 39c8d6f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumetypes/results.go
-+++ /dev/null
-@@ -1,37 +0,0 @@
--package volumetypes
--
--import (
--	"github.com/mitchellh/mapstructure"
--	os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type VolumeType os.VolumeType
--
--type GetResult struct {
--	os.GetResult
--}
--
--// Extract will get the Volume Type struct out of the response.
--func (r GetResult) Extract() (*VolumeType, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		VolumeType *VolumeType `json:"volume_type" mapstructure:"volume_type"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.VolumeType, err
--}
--
--func ExtractVolumeTypes(page pagination.Page) ([]VolumeType, error) {
--	var response struct {
--		VolumeTypes []VolumeType `mapstructure:"volume_types"`
--	}
--
--	err := mapstructure.Decode(page.(os.ListResult).Body, &response)
--	return response.VolumeTypes, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go
-deleted file mode 100644
-index 5f739a8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go
-+++ /dev/null
-@@ -1,156 +0,0 @@
--package rackspace
--
--import (
--	"fmt"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack"
--	"github.com/rackspace/gophercloud/openstack/utils"
--	tokens2 "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens"
--)
--
--const (
--	// RackspaceUSIdentity is an identity endpoint located in the United States.
--	RackspaceUSIdentity = "https://identity.api.rackspacecloud.com/v2.0/"
--
--	// RackspaceUKIdentity is an identity endpoint located in the UK.
--	RackspaceUKIdentity = "https://lon.identity.api.rackspacecloud.com/v2.0/"
--)
--
--const (
--	v20 = "v2.0"
--)
--
--// NewClient creates a client that's prepared to communicate with the Rackspace API, but is not
--// yet authenticated. Most users will probably prefer using the AuthenticatedClient function
--// instead.
--//
--// Provide the base URL of the identity endpoint you wish to authenticate against as "endpoint".
--// Often, this will be either RackspaceUSIdentity or RackspaceUKIdentity.
--func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
--	if endpoint == "" {
--		return os.NewClient(RackspaceUSIdentity)
--	}
--	return os.NewClient(endpoint)
--}
--
--// AuthenticatedClient logs in to Rackspace with the provided credentials and constructs a
--// ProviderClient that's ready to operate.
--//
--// If the provided AuthOptions does not specify an explicit IdentityEndpoint, it will default to
--// the canonical, production Rackspace US identity endpoint.
--func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
--	client, err := NewClient(options.IdentityEndpoint)
--	if err != nil {
--		return nil, err
--	}
--
--	err = Authenticate(client, options)
--	if err != nil {
--		return nil, err
--	}
--	return client, nil
--}
--
--// Authenticate or re-authenticate against the most recent identity service supported at the
--// provided endpoint.
--func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
--	versions := []*utils.Version{
--		&utils.Version{ID: v20, Priority: 20, Suffix: "/v2.0/"},
--	}
--
--	chosen, endpoint, err := utils.ChooseVersion(client.IdentityBase, client.IdentityEndpoint, versions)
--	if err != nil {
--		return err
--	}
--
--	switch chosen.ID {
--	case v20:
--		return v2auth(client, endpoint, options)
--	default:
--		// The switch statement must be out of date from the versions list.
--		return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
--	}
--}
--
--// AuthenticateV2 explicitly authenticates with v2 of the identity service.
--func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
--	return v2auth(client, "", options)
--}
--
--func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error {
--	v2Client := NewIdentityV2(client)
--	if endpoint != "" {
--		v2Client.Endpoint = endpoint
--	}
--
--	result := tokens2.Create(v2Client, tokens2.WrapOptions(options))
--
--	token, err := result.ExtractToken()
--	if err != nil {
--		return err
--	}
--
--	catalog, err := result.ExtractServiceCatalog()
--	if err != nil {
--		return err
--	}
--
--	client.TokenID = token.ID
--	client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) {
--		return os.V2EndpointURL(catalog, opts)
--	}
--
--	return nil
--}
--
--// NewIdentityV2 creates a ServiceClient that may be used to access the v2 identity service.
--func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient {
--	v2Endpoint := client.IdentityBase + "v2.0/"
--
--	return &gophercloud.ServiceClient{
--		ProviderClient: client,
--		Endpoint:       v2Endpoint,
--	}
--}
--
--// NewComputeV2 creates a ServiceClient that may be used to access the v2 compute service.
--func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("compute")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--
--	return &gophercloud.ServiceClient{
--		ProviderClient: client,
--		Endpoint:       url,
--	}, nil
--}
--
--// NewObjectCDNV1 creates a ServiceClient that may be used with the Rackspace v1 CDN.
--func NewObjectCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("rax:object-cdn")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--	return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil
--}
--
--// NewObjectStorageV1 creates a ServiceClient that may be used with the Rackspace v1 object storage package.
--func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	return os.NewObjectStorageV1(client, eo)
--}
--
--// NewBlockStorageV1 creates a ServiceClient that can be used to access the
--// Rackspace Cloud Block Storage v1 API.
--func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
--	eo.ApplyDefaults("volume")
--	url, err := client.EndpointLocator(eo)
--	if err != nil {
--		return nil, err
--	}
--
--	return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil
--}
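
A minimal sketch of the authentication flow implemented by the client.go above, using placeholder credentials. Leaving IdentityEndpoint unset falls back to RackspaceUSIdentity, as shown in NewClient; EndpointOpts is left at its zero value here, and real callers typically set more of its fields to pick a specific endpoint.

    package main

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/rackspace"
    )

    func main() {
        // Placeholder credentials; API-key auth is what the removed
        // client_test.go below exercises as well.
        provider, err := rackspace.AuthenticatedClient(gophercloud.AuthOptions{
            Username: "me",
            APIKey:   "09876543210",
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("token:", provider.TokenID)

        // Service clients are then derived from the authenticated ProviderClient.
        compute, err := rackspace.NewComputeV2(provider, gophercloud.EndpointOpts{})
        if err != nil {
            log.Fatal(err)
        }
        _ = compute
    }
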
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client_test.go
-deleted file mode 100644
-index 73b1c88..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package rackspace
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestAuthenticatedClientV2(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/v2.0/tokens", func(w http.ResponseWriter, r *http.Request) {
--		fmt.Fprintf(w, `
--      {
--        "access": {
--          "token": {
--            "id": "01234567890",
--            "expires": "2014-10-01T10:00:00.000000Z"
--          },
--          "serviceCatalog": []
--        }
--      }
--    `)
--	})
--
--	options := gophercloud.AuthOptions{
--		Username:         "me",
--		APIKey:           "09876543210",
--		IdentityEndpoint: th.Endpoint() + "v2.0/",
--	}
--	client, err := AuthenticatedClient(options)
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, "01234567890", client.TokenID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go
-deleted file mode 100644
-index 2580459..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--package bootfromvolume
--
--import (
--	"github.com/rackspace/gophercloud"
--	osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
--	osServers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// Create requests the creation of a server from the given block device mapping.
--func Create(client *gophercloud.ServiceClient, opts osServers.CreateOptsBuilder) osServers.CreateResult {
--	return osBFV.Create(client, opts)
--}
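
A sketch of booting a server from a volume through the delegate above, mirroring the CreateOptsExt payload in the removed test below. It assumes the upstream servers.CreateResult exposes the usual Extract method returning a Server with an ID field.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
        osServers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
        "github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume"
    )

    // bootFromVolume builds the same CreateOptsExt wrapper that the removed
    // delegate_test.go exercises and submits it through the Rackspace delegate.
    func bootFromVolume(client *gophercloud.ServiceClient) error {
        opts := osBFV.CreateOptsExt{
            CreateOptsBuilder: osServers.CreateOpts{
                Name:      "createdserver",
                ImageRef:  "asdfasdfasdf",
                FlavorRef: "performance1-1",
            },
            BlockDevice: []osBFV.BlockDevice{{
                UUID:            "123456",
                SourceType:      osBFV.Image,
                DestinationType: "volume",
                VolumeSize:      10,
            }},
        }

        server, err := bootfromvolume.Create(client, opts).Extract()
        if err != nil {
            return err
        }
        log.Println("created server", server.ID)
        return nil
    }
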
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
-deleted file mode 100644
-index 0b53527..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/bootfromvolume/delegate_test.go
-+++ /dev/null
-@@ -1,52 +0,0 @@
--package bootfromvolume
--
--import (
--	"testing"
--
--	osBFV "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCreateOpts(t *testing.T) {
--	base := servers.CreateOpts{
--		Name:      "createdserver",
--		ImageRef:  "asdfasdfasdf",
--		FlavorRef: "performance1-1",
--	}
--
--	ext := osBFV.CreateOptsExt{
--		CreateOptsBuilder: base,
--		BlockDevice: []osBFV.BlockDevice{
--			osBFV.BlockDevice{
--				UUID:            "123456",
--				SourceType:      osBFV.Image,
--				DestinationType: "volume",
--				VolumeSize:      10,
--			},
--		},
--	}
--
--	expected := `
--    {
--      "server": {
--        "name": "createdserver",
--        "imageRef": "asdfasdfasdf",
--        "flavorRef": "performance1-1",
--        "block_device_mapping_v2":[
--          {
--            "uuid":"123456",
--            "source_type":"image",
--            "destination_type":"volume",
--            "boot_index": "0",
--            "delete_on_termination": "false",
--            "volume_size": "10"
--          }
--        ]
--      }
--    }
--  `
--	actual, err := ext.ToServerCreateMap()
--	th.AssertNoErr(t, err)
--	th.CheckJSONEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
-deleted file mode 100644
-index 6bfc20c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate.go
-+++ /dev/null
-@@ -1,46 +0,0 @@
--package flavors
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListOpts helps control the results returned by the ListDetail() function. For example, a flavor with a
--// minDisk field of 10 will not be returned if you specify MinDisk set to 20.
--type ListOpts struct {
--
--	// MinDisk and MinRAM, if provided, elide flavors that do not meet your criteria.
--	MinDisk int `q:"minDisk"`
--	MinRAM  int `q:"minRam"`
--
--	// Marker specifies the ID of the last flavor in the previous page.
--	Marker string `q:"marker"`
--
--	// Limit instructs List to refrain from sending excessively large lists of flavors.
--	Limit int `q:"limit"`
--}
--
--// ToFlavorListQuery formats a ListOpts into a query string.
--func (opts ListOpts) ToFlavorListQuery() (string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return "", err
--	}
--	return q.String(), nil
--}
--
--// ListDetail enumerates the flavors available to your account.
--func ListDetail(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
--	return os.ListDetail(client, opts)
--}
--
--// Get returns details about a single flavor, identified by ID.
--func Get(client *gophercloud.ServiceClient, id string) os.GetResult {
--	return os.Get(client, id)
--}
--
--// ExtractFlavors interprets a page of List results as Flavors.
--func ExtractFlavors(page pagination.Page) ([]os.Flavor, error) {
--	return os.ExtractFlavors(page)
--}
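
A short sketch of listing flavors with the filter fields defined above. It assumes the local ListOpts satisfies the upstream ListOptsBuilder interface through its ToFlavorListQuery method; the RAM threshold and page limit are illustrative.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/pagination"
        "github.com/rackspace/gophercloud/rackspace/compute/v2/flavors"
    )

    // listFlavors pages through flavors with at least 1024 MB of RAM.
    func listFlavors(client *gophercloud.ServiceClient) error {
        opts := flavors.ListOpts{MinRAM: 1024, Limit: 20}

        return flavors.ListDetail(client, opts).EachPage(func(page pagination.Page) (bool, error) {
            fs, err := flavors.ExtractFlavors(page)
            if err != nil {
                return false, err
            }
            for _, f := range fs {
                log.Printf("%s: %d MB RAM, %d vCPUs, %d GB disk", f.Name, f.RAM, f.VCPUs, f.Disk)
            }
            return true, nil
        })
    }
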
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go
-deleted file mode 100644
-index 204081d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/delegate_test.go
-+++ /dev/null
-@@ -1,62 +0,0 @@
--package flavors
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListFlavors(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, ListOutput)
--		case "performance1-2":
--			fmt.Fprintf(w, `{ "flavors": [] }`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--
--	count := 0
--	err := ListDetail(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		actual, err := ExtractFlavors(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, ExpectedFlavorSlice, actual)
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGetFlavor(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/flavors/performance1-1", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, GetOutput)
--	})
--
--	actual, err := Get(client.ServiceClient(), "performance1-1").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &Performance1Flavor, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go
-deleted file mode 100644
-index 278229a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package flavors provides information and interaction with the flavor
--// API resource for the Rackspace Cloud Servers service.
--package flavors
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go
-deleted file mode 100644
-index b6dca93..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/flavors/fixtures.go
-+++ /dev/null
-@@ -1,128 +0,0 @@
--// +build fixtures
--package flavors
--
--import (
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
--)
--
--// ListOutput is a sample response of a flavor List request.
--const ListOutput = `
--{
--  "flavors": [
--    {
--      "OS-FLV-EXT-DATA:ephemeral": 0,
--      "OS-FLV-WITH-EXT-SPECS:extra_specs": {
--        "class": "performance1",
--        "disk_io_index": "40",
--        "number_of_data_disks": "0",
--        "policy_class": "performance_flavor",
--        "resize_policy_class": "performance_flavor"
--      },
--      "disk": 20,
--      "id": "performance1-1",
--      "links": [
--        {
--          "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-1",
--          "rel": "self"
--        },
--        {
--          "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-1",
--          "rel": "bookmark"
--        }
--      ],
--      "name": "1 GB Performance",
--      "ram": 1024,
--      "rxtx_factor": 200,
--      "swap": "",
--      "vcpus": 1
--    },
--    {
--      "OS-FLV-EXT-DATA:ephemeral": 20,
--      "OS-FLV-WITH-EXT-SPECS:extra_specs": {
--        "class": "performance1",
--        "disk_io_index": "40",
--        "number_of_data_disks": "1",
--        "policy_class": "performance_flavor",
--        "resize_policy_class": "performance_flavor"
--      },
--      "disk": 40,
--      "id": "performance1-2",
--      "links": [
--        {
--          "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-2",
--          "rel": "self"
--        },
--        {
--          "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-2",
--          "rel": "bookmark"
--        }
--      ],
--      "name": "2 GB Performance",
--      "ram": 2048,
--      "rxtx_factor": 400,
--      "swap": "",
--      "vcpus": 2
--    }
--  ]
--}`
--
--// GetOutput is a sample response from a flavor Get request. Its contents correspond to the
--// Performance1Flavor struct.
--const GetOutput = `
--{
--  "flavor": {
--    "OS-FLV-EXT-DATA:ephemeral": 0,
--    "OS-FLV-WITH-EXT-SPECS:extra_specs": {
--      "class": "performance1",
--      "disk_io_index": "40",
--      "number_of_data_disks": "0",
--      "policy_class": "performance_flavor",
--      "resize_policy_class": "performance_flavor"
--    },
--    "disk": 20,
--    "id": "performance1-1",
--    "links": [
--      {
--        "href": "https://iad.servers.api.rackspacecloud.com/v2/864477/flavors/performance1-1",
--        "rel": "self"
--      },
--      {
--        "href": "https://iad.servers.api.rackspacecloud.com/864477/flavors/performance1-1",
--        "rel": "bookmark"
--      }
--    ],
--    "name": "1 GB Performance",
--    "ram": 1024,
--    "rxtx_factor": 200,
--    "swap": "",
--    "vcpus": 1
--  }
--}
--`
--
--// Performance1Flavor is the expected result of parsing GetOutput, or the first element of
--// ListOutput.
--var Performance1Flavor = os.Flavor{
--	ID:         "performance1-1",
--	Disk:       20,
--	RAM:        1024,
--	Name:       "1 GB Performance",
--	RxTxFactor: 200.0,
--	Swap:       0,
--	VCPUs:      1,
--}
--
--// Performance2Flavor is the second result expected from parsing ListOutput.
--var Performance2Flavor = os.Flavor{
--	ID:         "performance1-2",
--	Disk:       40,
--	RAM:        2048,
--	Name:       "2 GB Performance",
--	RxTxFactor: 400.0,
--	Swap:       0,
--	VCPUs:      2,
--}
--
--// ExpectedFlavorSlice is the slice of Flavor structs that are expected to be parsed from
--// ListOutput.
--var ExpectedFlavorSlice = []os.Flavor{Performance1Flavor, Performance2Flavor}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go
-deleted file mode 100644
-index 18e1f31..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate.go
-+++ /dev/null
-@@ -1,22 +0,0 @@
--package images
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/images"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ListDetail enumerates the available server images.
--func ListDetail(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
--	return os.ListDetail(client, opts)
--}
--
--// Get acquires additional detail about a specific image by ID.
--func Get(client *gophercloud.ServiceClient, id string) os.GetResult {
--	return os.Get(client, id)
--}
--
--// ExtractImages interprets a page as a collection of server images.
--func ExtractImages(page pagination.Page) ([]os.Image, error) {
--	return os.ExtractImages(page)
--}
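
A minimal sketch of fetching a single image via the delegate above; the image ID is the Ubuntu 14.04 fixture used by the removed tests and is purely illustrative.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/rackspace/compute/v2/images"
    )

    // getImage looks up one image by ID and prints its basic properties.
    func getImage(client *gophercloud.ServiceClient) error {
        img, err := images.Get(client, "e19a734c-c7e6-443a-830c-242209c4d65d").Extract()
        if err != nil {
            return err
        }
        log.Printf("%s (min disk %d GB, min RAM %d MB)", img.Name, img.MinDisk, img.MinRAM)
        return nil
    }
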
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go
-deleted file mode 100644
-index db0a6e3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/delegate_test.go
-+++ /dev/null
-@@ -1,62 +0,0 @@
--package images
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListImageDetails(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		r.ParseForm()
--		marker := r.Form.Get("marker")
--		switch marker {
--		case "":
--			fmt.Fprintf(w, ListOutput)
--		case "e19a734c-c7e6-443a-830c-242209c4d65d":
--			fmt.Fprintf(w, `{ "images": [] }`)
--		default:
--			t.Fatalf("Unexpected marker: [%s]", marker)
--		}
--	})
--
--	count := 0
--	err := ListDetail(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractImages(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, ExpectedImageSlice, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGetImageDetails(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/images/e19a734c-c7e6-443a-830c-242209c4d65d", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, GetOutput)
--	})
--
--	actual, err := Get(client.ServiceClient(), "e19a734c-c7e6-443a-830c-242209c4d65d").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &UbuntuImage, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go
-deleted file mode 100644
-index cfae806..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package images provides information and interaction with the image
--// API resource for the Rackspace Cloud Servers service.
--package images
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go
-deleted file mode 100644
-index c46d196..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/images/fixtures.go
-+++ /dev/null
-@@ -1,199 +0,0 @@
--// +build fixtures
--package images
--
--import (
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/images"
--)
--
--// ListOutput is an example response from an /images/detail request.
--const ListOutput = `
--{
--	"images": [
--		{
--			"OS-DCF:diskConfig": "MANUAL",
--			"OS-EXT-IMG-SIZE:size": 1.017415075e+09,
--			"created": "2014-10-01T15:49:02Z",
--			"id": "30aa010e-080e-4d4b-a7f9-09fc55b07d69",
--			"links": [
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69",
--					"rel": "self"
--				},
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69",
--					"rel": "bookmark"
--				},
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/111222/images/30aa010e-080e-4d4b-a7f9-09fc55b07d69",
--					"rel": "alternate",
--					"type": "application/vnd.openstack.image"
--				}
--			],
--			"metadata": {
--				"auto_disk_config": "disabled",
--				"cache_in_nova": "True",
--				"com.rackspace__1__build_core": "1",
--				"com.rackspace__1__build_managed": "1",
--				"com.rackspace__1__build_rackconnect": "1",
--				"com.rackspace__1__options": "0",
--				"com.rackspace__1__platform_target": "PublicCloud",
--				"com.rackspace__1__release_build_date": "2014-10-01_15-46-08",
--				"com.rackspace__1__release_id": "100",
--				"com.rackspace__1__release_version": "10",
--				"com.rackspace__1__source": "kickstart",
--				"com.rackspace__1__visible_core": "1",
--				"com.rackspace__1__visible_managed": "0",
--				"com.rackspace__1__visible_rackconnect": "0",
--				"image_type": "base",
--				"org.openstack__1__architecture": "x64",
--				"org.openstack__1__os_distro": "org.archlinux",
--				"org.openstack__1__os_version": "2014.8",
--				"os_distro": "arch",
--				"os_type": "linux",
--				"vm_mode": "hvm"
--			},
--			"minDisk": 20,
--			"minRam": 512,
--			"name": "Arch 2014.10 (PVHVM)",
--			"progress": 100,
--			"status": "ACTIVE",
--			"updated": "2014-10-01T19:37:58Z"
--		},
--		{
--			"OS-DCF:diskConfig": "AUTO",
--			"OS-EXT-IMG-SIZE:size": 1.060306463e+09,
--			"created": "2014-10-01T12:58:11Z",
--			"id": "e19a734c-c7e6-443a-830c-242209c4d65d",
--			"links": [
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--					"rel": "self"
--				},
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--					"rel": "bookmark"
--				},
--				{
--					"href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--					"rel": "alternate",
--					"type": "application/vnd.openstack.image"
--				}
--			],
--			"metadata": {
--				"auto_disk_config": "True",
--				"cache_in_nova": "True",
--				"com.rackspace__1__build_core": "1",
--				"com.rackspace__1__build_managed": "1",
--				"com.rackspace__1__build_rackconnect": "1",
--				"com.rackspace__1__options": "0",
--				"com.rackspace__1__platform_target": "PublicCloud",
--				"com.rackspace__1__release_build_date": "2014-10-01_12-31-03",
--				"com.rackspace__1__release_id": "1007",
--				"com.rackspace__1__release_version": "6",
--				"com.rackspace__1__source": "kickstart",
--				"com.rackspace__1__visible_core": "1",
--				"com.rackspace__1__visible_managed": "1",
--				"com.rackspace__1__visible_rackconnect": "1",
--				"image_type": "base",
--				"org.openstack__1__architecture": "x64",
--				"org.openstack__1__os_distro": "com.ubuntu",
--				"org.openstack__1__os_version": "14.04",
--				"os_distro": "ubuntu",
--				"os_type": "linux",
--				"vm_mode": "xen"
--			},
--			"minDisk": 20,
--			"minRam": 512,
--			"name": "Ubuntu 14.04 LTS (Trusty Tahr)",
--			"progress": 100,
--			"status": "ACTIVE",
--			"updated": "2014-10-01T15:51:44Z"
--		}
--	]
--}
--`
--
--// GetOutput is an example response from an /images request.
--const GetOutput = `
--{
--	"image": {
--		"OS-DCF:diskConfig": "AUTO",
--		"OS-EXT-IMG-SIZE:size": 1060306463,
--		"created": "2014-10-01T12:58:11Z",
--		"id": "e19a734c-c7e6-443a-830c-242209c4d65d",
--		"links": [
--			{
--				"href": "https://iad.servers.api.rackspacecloud.com/v2/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--				"rel": "self"
--			},
--			{
--				"href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--				"rel": "bookmark"
--			},
--			{
--				"href": "https://iad.servers.api.rackspacecloud.com/111222/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--				"rel": "alternate",
--				"type": "application/vnd.openstack.image"
--			}
--		],
--		"metadata": {
--			"auto_disk_config": "True",
--			"cache_in_nova": "True",
--			"com.rackspace__1__build_core": "1",
--			"com.rackspace__1__build_managed": "1",
--			"com.rackspace__1__build_rackconnect": "1",
--			"com.rackspace__1__options": "0",
--			"com.rackspace__1__platform_target": "PublicCloud",
--			"com.rackspace__1__release_build_date": "2014-10-01_12-31-03",
--			"com.rackspace__1__release_id": "1007",
--			"com.rackspace__1__release_version": "6",
--			"com.rackspace__1__source": "kickstart",
--			"com.rackspace__1__visible_core": "1",
--			"com.rackspace__1__visible_managed": "1",
--			"com.rackspace__1__visible_rackconnect": "1",
--			"image_type": "base",
--			"org.openstack__1__architecture": "x64",
--			"org.openstack__1__os_distro": "com.ubuntu",
--			"org.openstack__1__os_version": "14.04",
--			"os_distro": "ubuntu",
--			"os_type": "linux",
--			"vm_mode": "xen"
--		},
--		"minDisk": 20,
--		"minRam": 512,
--		"name": "Ubuntu 14.04 LTS (Trusty Tahr)",
--		"progress": 100,
--		"status": "ACTIVE",
--		"updated": "2014-10-01T15:51:44Z"
--	}
--}
--`
--
--// ArchImage is the first Image structure that should be parsed from ListOutput.
--var ArchImage = os.Image{
--	ID:       "30aa010e-080e-4d4b-a7f9-09fc55b07d69",
--	Name:     "Arch 2014.10 (PVHVM)",
--	Created:  "2014-10-01T15:49:02Z",
--	Updated:  "2014-10-01T19:37:58Z",
--	MinDisk:  20,
--	MinRAM:   512,
--	Progress: 100,
--	Status:   "ACTIVE",
--}
--
--// UbuntuImage is the second Image structure that should be parsed from ListOutput and
--// the only image that should be extracted from GetOutput.
--var UbuntuImage = os.Image{
--	ID:       "e19a734c-c7e6-443a-830c-242209c4d65d",
--	Name:     "Ubuntu 14.04 LTS (Trusty Tahr)",
--	Created:  "2014-10-01T12:58:11Z",
--	Updated:  "2014-10-01T15:51:44Z",
--	MinDisk:  20,
--	MinRAM:   512,
--	Progress: 100,
--	Status:   "ACTIVE",
--}
--
--// ExpectedImageSlice is the collection of images that should be parsed from ListOutput,
--// in order.
--var ExpectedImageSlice = []os.Image{ArchImage, UbuntuImage}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go
-deleted file mode 100644
-index 3e53525..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--package keypairs
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// List returns a Pager that allows you to iterate over a collection of KeyPairs.
--func List(client *gophercloud.ServiceClient) pagination.Pager {
--	return os.List(client)
--}
--
--// Create requests the creation of a new keypair on the server, or imports a
--// pre-existing keypair when a public key is supplied.
--func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult {
--	return os.Create(client, opts)
--}
--
--// Get returns public data about a previously uploaded KeyPair.
--func Get(client *gophercloud.ServiceClient, name string) os.GetResult {
--	return os.Get(client, name)
--}
--
--// Delete requests the deletion of a previously stored KeyPair from the server.
--func Delete(client *gophercloud.ServiceClient, name string) os.DeleteResult {
--	return os.Delete(client, name)
--}
--
--// ExtractKeyPairs interprets a page of results as a slice of KeyPairs.
--func ExtractKeyPairs(page pagination.Page) ([]os.KeyPair, error) {
--	return os.ExtractKeyPairs(page)
--}
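
A brief usage sketch of the keypair delegate above, assuming an authenticated service client; the key name is illustrative.

    package example

    import (
        "github.com/rackspace/gophercloud"
        oskeys "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
        "github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs"
    )

    // createAndDeleteKey asks the server to generate a keypair and then removes
    // it again. Supplying PublicKey in CreateOpts would import an existing key
    // instead, as the removed delegate_test.go below shows.
    func createAndDeleteKey(client *gophercloud.ServiceClient) error {
        if _, err := keypairs.Create(client, oskeys.CreateOpts{Name: "example-key"}).Extract(); err != nil {
            return err
        }

        // Delete removes the stored keypair by name.
        return keypairs.Delete(client, "example-key").ExtractErr()
    }
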
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
-deleted file mode 100644
-index 62e5df9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/delegate_test.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package keypairs
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListSuccessfully(t)
--
--	count := 0
--	err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractKeyPairs(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, os.ExpectedKeyPairSlice, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleCreateSuccessfully(t)
--
--	actual, err := Create(client.ServiceClient(), os.CreateOpts{
--		Name: "createdkey",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &os.CreatedKeyPair, actual)
--}
--
--func TestImport(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleImportSuccessfully(t)
--
--	actual, err := Create(client.ServiceClient(), os.CreateOpts{
--		Name:      "importedkey",
--		PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova",
--	}).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &os.ImportedKeyPair, actual)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleGetSuccessfully(t)
--
--	actual, err := Get(client.ServiceClient(), "firstkey").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &os.FirstKeyPair, actual)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleDeleteSuccessfully(t)
--
--	err := Delete(client.ServiceClient(), "deletedkey").ExtractErr()
--	th.AssertNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
-deleted file mode 100644
-index 3171375..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/keypairs/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package keypairs provides information and interaction with the keypair
--// API resource for the Rackspace Cloud Servers service.
--package keypairs
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
-deleted file mode 100644
-index 8e5c773..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package networks provides information and interaction with the network
--// API resource for the Rackspace Cloud Servers service.
--package networks
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
-deleted file mode 100644
-index d3c973e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go
-+++ /dev/null
-@@ -1,101 +0,0 @@
--package networks
--
--import (
--	"errors"
--
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// List returns a Pager which allows you to iterate over the collection of
--// networks available to the tenant. This endpoint accepts no filtering or
--// sorting options.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return NetworkPage{pagination.SinglePageBase(r)}
--	}
--
--	return pagination.NewPager(c, listURL(c), createPage)
--}
--
--// Get retrieves a specific network based on its unique ID.
--func Get(c *gophercloud.ServiceClient, id string) GetResult {
--	var res GetResult
--	_, res.Err = perigee.Request("GET", getURL(c, id), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		Results:     &res.Body,
--		OkCodes:     []int{200},
--	})
--	return res
--}
--
--// CreateOptsBuilder is the interface options structs have to satisfy in order
--// to be used in the main Create operation in this package. Since many
--// extensions decorate or modify the common logic, it is useful for them to
--// satisfy a basic interface in order for them to be used.
--type CreateOptsBuilder interface {
--	ToNetworkCreateMap() (map[string]interface{}, error)
--}
--
--// CreateOpts is the common options struct used in this package's Create
--// operation.
--type CreateOpts struct {
--	// REQUIRED. See Network object for more info.
--	CIDR string
--	// REQUIRED. See Network object for more info.
--	Label string
--}
--
--// ToNetworkCreateMap casts a CreateOpts struct to a map.
--func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) {
--	n := make(map[string]interface{})
--
--	if opts.CIDR == "" {
--		return nil, errors.New("Required field CIDR not set.")
--	}
--	if opts.Label == "" {
--		return nil, errors.New("Required field Label not set.")
--	}
--
--	n["label"] = opts.Label
--	n["cidr"] = opts.CIDR
--	return map[string]interface{}{"network": n}, nil
--}
--
--// Create accepts a CreateOpts struct and creates a new network using the
--// values provided. Both the Label and CIDR fields are required; if either is
--// empty, ToNetworkCreateMap returns an error and no request is sent to the
--// API.
--//
--// The network is created for the tenant identified in the request URI; this
--// package does not support creating a network on behalf of another tenant.
--func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult {
--	var res CreateResult
--
--	reqBody, err := opts.ToNetworkCreateMap()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	// Send request to API
--	_, res.Err = perigee.Request("POST", createURL(c), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201, 202},
--	})
--	return res
--}
--
--// Delete accepts a unique ID and deletes the network associated with it.
--func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, networkID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{204},
--	})
--	return res
--}
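
A short sketch of creating and listing networks with the requests above, assuming an authenticated service client; the label and CIDR mirror the values used in the removed tests below.

    package example

    import (
        "log"

        "github.com/rackspace/gophercloud"
        "github.com/rackspace/gophercloud/pagination"
        "github.com/rackspace/gophercloud/rackspace/compute/v2/networks"
    )

    // createAndListNetworks creates a network and then lists every network
    // visible to the tenant. Both Label and CIDR are required by
    // ToNetworkCreateMap in the removed requests.go.
    func createAndListNetworks(client *gophercloud.ServiceClient) error {
        n, err := networks.Create(client, networks.CreateOpts{
            Label: "test-network-1",
            CIDR:  "192.168.100.0/24",
        }).Extract()
        if err != nil {
            return err
        }
        log.Println("created network", n.ID)

        return networks.List(client).EachPage(func(page pagination.Page) (bool, error) {
            all, err := networks.ExtractNetworks(page)
            if err != nil {
                return false, err
            }
            for _, net := range all {
                log.Printf("%s %s (%s)", net.ID, net.Label, net.CIDR)
            }
            return true, nil
        })
    }
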
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go
-deleted file mode 100644
-index 6f44c1c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests_test.go
-+++ /dev/null
-@@ -1,156 +0,0 @@
--package networks
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/os-networksv2", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "networks": [
--        {
--            "label": "test-network-1",
--            "cidr": "192.168.100.0/24",
--            "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--        },
--        {
--            "label": "test-network-2",
--            "cidr": "192.30.250.00/18",
--            "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324"
--        }
--    ]
--}
--      `)
--	})
--
--	client := fake.ServiceClient()
--	count := 0
--
--	err := List(client).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNetworks(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		expected := []Network{
--			Network{
--				Label: "test-network-1",
--				CIDR:  "192.168.100.0/24",
--				ID:    "d32019d3-bc6e-4319-9c1d-6722fc136a22",
--			},
--			Network{
--				Label: "test-network-2",
--				CIDR:  "192.30.250.00/18",
--				ID:    "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/os-networksv2/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "label": "test-network-1",
--        "cidr": "192.168.100.0/24",
--        "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22"
--    }
--}
--      `)
--	})
--
--	n, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.CIDR, "192.168.100.0/24")
--	th.AssertEquals(t, n.Label, "test-network-1")
--	th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/os-networksv2", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "network": {
--        "label": "test-network-1",
--        "cidr": "192.168.100.0/24"
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `
--{
--    "network": {
--        "label": "test-network-1",
--        "cidr": "192.168.100.0/24",
--        "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
--    }
--}
--    `)
--	})
--
--	options := CreateOpts{Label: "test-network-1", CIDR: "192.168.100.0/24"}
--	n, err := Create(fake.ServiceClient(), options).Extract()
--	th.AssertNoErr(t, err)
--
--	th.AssertEquals(t, n.Label, "test-network-1")
--	th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/os-networksv2/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go
-deleted file mode 100644
-index eb6a76c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/results.go
-+++ /dev/null
-@@ -1,81 +0,0 @@
--package networks
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a network resource.
--func (r commonResult) Extract() (*Network, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		Network *Network `json:"network"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return res.Network, err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// GetResult represents the result of a get operation.
--type GetResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// Network represents, well, a network.
--type Network struct {
--	// UUID for the network
--	ID string `mapstructure:"id" json:"id"`
--
--	// Human-readable name for the network. Might not be unique.
--	Label string `mapstructure:"label" json:"label"`
--
--	// Classless Inter-Domain Routing
--	CIDR string `mapstructure:"cidr" json:"cidr"`
--}
--
--// NetworkPage is the page returned by a pager when traversing over a
--// collection of networks.
--type NetworkPage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty returns true if the NetworkPage contains no Networks.
--func (r NetworkPage) IsEmpty() (bool, error) {
--	networks, err := ExtractNetworks(r)
--	if err != nil {
--		return true, err
--	}
--	return len(networks) == 0, nil
--}
--
--// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct,
--// and extracts the elements into a slice of Network structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractNetworks(page pagination.Page) ([]Network, error) {
--	var resp struct {
--		Networks []Network `mapstructure:"networks" json:"networks"`
--	}
--
--	err := mapstructure.Decode(page.(NetworkPage).Body, &resp)
--
--	return resp.Networks, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
-deleted file mode 100644
-index 19a21aa..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls.go
-+++ /dev/null
-@@ -1,27 +0,0 @@
--package networks
--
--import "github.com/rackspace/gophercloud"
--
--func resourceURL(c *gophercloud.ServiceClient, id string) string {
--	return c.ServiceURL("os-networksv2", id)
--}
--
--func rootURL(c *gophercloud.ServiceClient) string {
--	return c.ServiceURL("os-networksv2")
--}
--
--func getURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
--
--func listURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func createURL(c *gophercloud.ServiceClient) string {
--	return rootURL(c)
--}
--
--func deleteURL(c *gophercloud.ServiceClient, id string) string {
--	return resourceURL(c, id)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
-deleted file mode 100644
-index 983992e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/urls_test.go
-+++ /dev/null
-@@ -1,38 +0,0 @@
--package networks
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestGetURL(t *testing.T) {
--	actual := getURL(endpointClient(), "foo")
--	expected := endpoint + "os-networksv2/foo"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient())
--	expected := endpoint + "os-networksv2"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient())
--	expected := endpoint + "os-networksv2"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "foo")
--	expected := endpoint + "os-networksv2/foo"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
-deleted file mode 100644
-index 4c7b249..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go
-+++ /dev/null
-@@ -1,61 +0,0 @@
--package servers
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// List makes a request against the API to list servers accessible to you.
--func List(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
--	return os.List(client, opts)
--}
--
--// Create requests a server to be provisioned to the user in the current tenant.
--func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult {
--	return os.Create(client, opts)
--}
--
--// Delete requests that a server previously provisioned be removed from your account.
--func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult {
--	return os.Delete(client, id)
--}
--
--// Get requests details on a single server, by ID.
--func Get(client *gophercloud.ServiceClient, id string) os.GetResult {
--	return os.Get(client, id)
--}
--
--// ChangeAdminPassword alters the administrator or root password for a specified server.
--func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) os.ActionResult {
--	return os.ChangeAdminPassword(client, id, newPassword)
--}
--
--// Reboot requests that a given server reboot. Two methods exist for rebooting a server:
--//
--// os.HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the
--// machine, or if a VM, terminating it at the hypervisor level. It's done. Kaput. Full stop. Then,
--// after a brief wait, power is restored or the VM instance restarted.
--//
--// os.SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures. E.g., in
--// Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine.
--func Reboot(client *gophercloud.ServiceClient, id string, how os.RebootMethod) os.ActionResult {
--	return os.Reboot(client, id, how)
--}
--
--// Rebuild will reprovision the server according to the configuration options provided in the
--// RebuildOpts struct.
--func Rebuild(client *gophercloud.ServiceClient, id string, opts os.RebuildOptsBuilder) os.RebuildResult {
--	return os.Rebuild(client, id, opts)
--}
--
--// WaitForStatus will continually poll a server until it successfully transitions to a specified
--// status. It will do this for at most the number of seconds specified.
--func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
--	return os.WaitForStatus(c, id, status, secs)
--}
--
--// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities.
--func ExtractServers(page pagination.Page) ([]os.Server, error) {
--	return os.ExtractServers(page)
--}
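
Likewise, a minimal sketch of how these delegate wrappers were normally driven through the pagination helpers, again assuming a ready *gophercloud.ServiceClient; a nil ListOptsBuilder requests the unfiltered listing, as in the test below.

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
	"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
)

// printServers walks every page of the server listing and prints each ID and name.
func printServers(client *gophercloud.ServiceClient) error {
	return servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
		list, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for _, s := range list {
			fmt.Printf("%s  %s\n", s.ID, s.Name)
		}
		return true, nil // keep going; returning false would stop the pager early
	})
}
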
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go
-deleted file mode 100644
-index 7f41404..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go
-+++ /dev/null
-@@ -1,112 +0,0 @@
--package servers
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListServers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, ListOutput)
--	})
--
--	count := 0
--	err := List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractServers(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, ExpectedServerSlice, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestCreateServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleServerCreationSuccessfully(t, CreateOutput)
--
--	actual, err := Create(client.ServiceClient(), os.CreateOpts{
--		Name:      "derp",
--		ImageRef:  "f90f6034-2570-4974-8351-6b49732ef2eb",
--		FlavorRef: "1",
--	}).Extract()
--	th.AssertNoErr(t, err)
--
--	th.CheckDeepEquals(t, &CreatedServer, actual)
--}
--
--func TestDeleteServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleServerDeletionSuccessfully(t)
--
--	res := Delete(client.ServiceClient(), "asdfasdfasdf")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestGetServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		fmt.Fprintf(w, GetOutput)
--	})
--
--	actual, err := Get(client.ServiceClient(), "8c65cb68-0681-4c30-bc88-6b83a8a26aee").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &GophercloudServer, actual)
--}
--
--func TestChangeAdminPassword(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleAdminPasswordChangeSuccessfully(t)
--
--	res := ChangeAdminPassword(client.ServiceClient(), "1234asdf", "new-password")
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestReboot(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleRebootSuccessfully(t)
--
--	res := Reboot(client.ServiceClient(), "1234asdf", os.SoftReboot)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestRebuildServer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleRebuildSuccessfully(t, GetOutput)
--
--	opts := os.RebuildOpts{
--		Name:       "new-name",
--		AdminPass:  "swordfish",
--		ImageID:    "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb",
--		AccessIPv4: "1.2.3.4",
--	}
--	actual, err := Rebuild(client.ServiceClient(), "1234asdf", opts).Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, &GophercloudServer, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go
-deleted file mode 100644
-index c9f77f6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package servers provides information and interaction with the server
--// API resource for the Rackspace Cloud Servers service.
--package servers
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go
-deleted file mode 100644
-index b22a289..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go
-+++ /dev/null
-@@ -1,439 +0,0 @@
--// +build fixtures
--
--package servers
--
--import (
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// ListOutput is the recorded output of a Rackspace servers.List request.
--const ListOutput = `
--{
--	"servers": [
--		{
--			"OS-DCF:diskConfig": "MANUAL",
--			"OS-EXT-STS:power_state": 1,
--			"OS-EXT-STS:task_state": null,
--			"OS-EXT-STS:vm_state": "active",
--			"accessIPv4": "1.2.3.4",
--			"accessIPv6": "1111:4822:7818:121:2000:9b5e:7438:a2d0",
--			"addresses": {
--				"private": [
--					{
--						"addr": "10.208.230.113",
--						"version": 4
--					}
--				],
--				"public": [
--					{
--						"addr": "2001:4800:7818:101:2000:9b5e:7428:a2d0",
--						"version": 6
--					},
--					{
--						"addr": "104.130.131.164",
--						"version": 4
--					}
--				]
--			},
--			"created": "2014-09-23T12:34:58Z",
--			"flavor": {
--				"id": "performance1-8",
--				"links": [
--					{
--						"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"hostId": "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475",
--			"id": "59818cee-bc8c-44eb-8073-673ee65105f7",
--			"image": {
--				"id": "255df5fb-e3d4-45a3-9a07-c976debf7c14",
--				"links": [
--					{
--						"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"key_name": "mykey",
--			"links": [
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7",
--					"rel": "self"
--				},
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7",
--					"rel": "bookmark"
--				}
--			],
--			"metadata": {},
--			"name": "devstack",
--			"progress": 100,
--			"status": "ACTIVE",
--			"tenant_id": "111111",
--			"updated": "2014-09-23T12:38:19Z",
--			"user_id": "14ae7bb21d81422694655f3cc30f2930"
--		},
--		{
--			"OS-DCF:diskConfig": "MANUAL",
--			"OS-EXT-STS:power_state": 1,
--			"OS-EXT-STS:task_state": null,
--			"OS-EXT-STS:vm_state": "active",
--			"accessIPv4": "1.1.2.3",
--			"accessIPv6": "2222:4444:7817:101:be76:4eff:f0e5:9e02",
--			"addresses": {
--				"private": [
--					{
--						"addr": "10.10.20.30",
--						"version": 4
--					}
--				],
--				"public": [
--					{
--						"addr": "1.1.2.3",
--						"version": 4
--					},
--					{
--						"addr": "2222:4444:7817:101:be76:4eff:f0e5:9e02",
--						"version": 6
--					}
--				]
--			},
--			"created": "2014-07-21T19:32:55Z",
--			"flavor": {
--				"id": "performance1-2",
--				"links": [
--					{
--						"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"hostId": "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c",
--			"id": "25f1c7f5-e00a-4715-b354-16e24b2f4630",
--			"image": {
--				"id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca",
--				"links": [
--					{
--						"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca",
--						"rel": "bookmark"
--					}
--				]
--			},
--			"key_name": "otherkey",
--			"links": [
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630",
--					"rel": "self"
--				},
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630",
--					"rel": "bookmark"
--				}
--			],
--			"metadata": {},
--			"name": "peril-dfw",
--			"progress": 100,
--			"status": "ACTIVE",
--			"tenant_id": "111111",
--			"updated": "2014-07-21T19:34:24Z",
--			"user_id": "14ae7bb21d81422694655f3cc30f2930"
--		}
--	]
--}
--`
--
--// GetOutput is the recorded output of a Rackspace servers.Get request.
--const GetOutput = `
--{
--	"server": {
--		"OS-DCF:diskConfig": "AUTO",
--		"OS-EXT-STS:power_state": 1,
--		"OS-EXT-STS:task_state": null,
--		"OS-EXT-STS:vm_state": "active",
--		"accessIPv4": "1.2.4.8",
--		"accessIPv6": "2001:4800:6666:105:2a0f:c056:f594:7777",
--		"addresses": {
--			"private": [
--				{
--					"addr": "10.20.40.80",
--					"version": 4
--				}
--			],
--			"public": [
--				{
--					"addr": "1.2.4.8",
--					"version": 4
--				},
--				{
--					"addr": "2001:4800:6666:105:2a0f:c056:f594:7777",
--					"version": 6
--				}
--			]
--		},
--		"created": "2014-10-21T14:42:16Z",
--		"flavor": {
--			"id": "performance1-1",
--			"links": [
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1",
--					"rel": "bookmark"
--				}
--			]
--		},
--		"hostId": "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7",
--		"id": "8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--		"image": {
--			"id": "e19a734c-c7e6-443a-830c-242209c4d65d",
--			"links": [
--				{
--					"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--					"rel": "bookmark"
--				}
--			]
--		},
--		"key_name": null,
--		"links": [
--			{
--				"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--				"rel": "self"
--			},
--			{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--				"rel": "bookmark"
--			}
--		],
--		"metadata": {},
--		"name": "Gophercloud-pxpGGuey",
--		"progress": 100,
--		"status": "ACTIVE",
--		"tenant_id": "111111",
--		"updated": "2014-10-21T14:42:57Z",
--		"user_id": "14ae7bb21d81423694655f4dd30f2930"
--	}
--}
--`
--
--// CreateOutput contains a sample of Rackspace's response to a Create call.
--const CreateOutput = `
--{
--	"server": {
--		"OS-DCF:diskConfig": "AUTO",
--		"adminPass": "v7tADqbE5pr9",
--		"id": "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e",
--		"links": [
--			{
--				"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e",
--				"rel": "self"
--			},
--			{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e",
--				"rel": "bookmark"
--			}
--		]
--	}
--}
--`
--
--// DevstackServer is the expected first result from parsing ListOutput.
--var DevstackServer = os.Server{
--	ID:         "59818cee-bc8c-44eb-8073-673ee65105f7",
--	Name:       "devstack",
--	TenantID:   "111111",
--	UserID:     "14ae7bb21d81422694655f3cc30f2930",
--	HostID:     "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475",
--	Updated:    "2014-09-23T12:38:19Z",
--	Created:    "2014-09-23T12:34:58Z",
--	AccessIPv4: "1.2.3.4",
--	AccessIPv6: "1111:4822:7818:121:2000:9b5e:7438:a2d0",
--	Progress:   100,
--	Status:     "ACTIVE",
--	Image: map[string]interface{}{
--		"id": "255df5fb-e3d4-45a3-9a07-c976debf7c14",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Flavor: map[string]interface{}{
--		"id": "performance1-8",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Addresses: map[string]interface{}{
--		"private": []interface{}{
--			map[string]interface{}{
--				"addr":    "10.20.30.40",
--				"version": float64(4.0),
--			},
--		},
--		"public": []interface{}{
--			map[string]interface{}{
--				"addr":    "1111:4822:7818:121:2000:9b5e:7438:a2d0",
--				"version": float64(6.0),
--			},
--			map[string]interface{}{
--				"addr":    "1.2.3.4",
--				"version": float64(4.0),
--			},
--		},
--	},
--	Metadata: map[string]interface{}{},
--	Links: []interface{}{
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59918cee-bd9d-44eb-8173-673ee75105f7",
--			"rel":  "self",
--		},
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7",
--			"rel":  "bookmark",
--		},
--	},
--	KeyName:   "mykey",
--	AdminPass: "",
--}
--
--// PerilServer is the expected second result from parsing ListOutput.
--var PerilServer = os.Server{
--	ID:         "25f1c7f5-e00a-4715-b354-16e24b2f4630",
--	Name:       "peril-dfw",
--	TenantID:   "111111",
--	UserID:     "14ae7bb21d81422694655f3cc30f2930",
--	HostID:     "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c",
--	Updated:    "2014-07-21T19:34:24Z",
--	Created:    "2014-07-21T19:32:55Z",
--	AccessIPv4: "1.1.2.3",
--	AccessIPv6: "2222:4444:7817:101:be76:4eff:f0e5:9e02",
--	Progress:   100,
--	Status:     "ACTIVE",
--	Image: map[string]interface{}{
--		"id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Flavor: map[string]interface{}{
--		"id": "performance1-2",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Addresses: map[string]interface{}{
--		"private": []interface{}{
--			map[string]interface{}{
--				"addr":    "10.10.20.30",
--				"version": float64(4.0),
--			},
--		},
--		"public": []interface{}{
--			map[string]interface{}{
--				"addr":    "2222:4444:7817:101:be76:4eff:f0e5:9e02",
--				"version": float64(6.0),
--			},
--			map[string]interface{}{
--				"addr":    "1.1.2.3",
--				"version": float64(4.0),
--			},
--		},
--	},
--	Metadata: map[string]interface{}{},
--	Links: []interface{}{
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630",
--			"rel":  "self",
--		},
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630",
--			"rel":  "bookmark",
--		},
--	},
--	KeyName:   "otherkey",
--	AdminPass: "",
--}
--
--// GophercloudServer is the expected result from parsing GetOutput.
--var GophercloudServer = os.Server{
--	ID:         "8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--	Name:       "Gophercloud-pxpGGuey",
--	TenantID:   "111111",
--	UserID:     "14ae7bb21d81423694655f4dd30f2930",
--	HostID:     "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7",
--	Updated:    "2014-10-21T14:42:57Z",
--	Created:    "2014-10-21T14:42:16Z",
--	AccessIPv4: "1.2.4.8",
--	AccessIPv6: "2001:4800:6666:105:2a0f:c056:f594:7777",
--	Progress:   100,
--	Status:     "ACTIVE",
--	Image: map[string]interface{}{
--		"id": "e19a734c-c7e6-443a-830c-242209c4d65d",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Flavor: map[string]interface{}{
--		"id": "performance1-1",
--		"links": []interface{}{
--			map[string]interface{}{
--				"href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1",
--				"rel":  "bookmark",
--			},
--		},
--	},
--	Addresses: map[string]interface{}{
--		"private": []interface{}{
--			map[string]interface{}{
--				"addr":    "10.20.40.80",
--				"version": float64(4.0),
--			},
--		},
--		"public": []interface{}{
--			map[string]interface{}{
--				"addr":    "2001:4800:6666:105:2a0f:c056:f594:7777",
--				"version": float64(6.0),
--			},
--			map[string]interface{}{
--				"addr":    "1.2.4.8",
--				"version": float64(4.0),
--			},
--		},
--	},
--	Metadata: map[string]interface{}{},
--	Links: []interface{}{
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--			"rel":  "self",
--		},
--		map[string]interface{}{
--			"href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee",
--			"rel":  "bookmark",
--		},
--	},
--	KeyName:   "",
--	AdminPass: "",
--}
--
--// CreatedServer is the partial Server struct that can be parsed from CreateOutput.
--var CreatedServer = os.Server{
--	ID:        "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e",
--	AdminPass: "v7tADqbE5pr9",
--	Links:     []interface{}{},
--}
--
--// ExpectedServerSlice is the collection of servers, in order, that should be parsed from ListOutput.
--var ExpectedServerSlice = []os.Server{DevstackServer, PerilServer}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go
-deleted file mode 100644
-index 884b9cb..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go
-+++ /dev/null
-@@ -1,158 +0,0 @@
--package servers
--
--import (
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
--	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
--)
--
--// CreateOpts specifies all of the options that Rackspace accepts in its Create request, including
--// the union of all extensions that Rackspace supports.
--type CreateOpts struct {
--	// Name [required] is the name to assign to the newly launched server.
--	Name string
--
--	// ImageRef [required] is the ID or full URL to the image that contains the server's OS and initial state.
--	// Optional if using the boot-from-volume extension.
--	ImageRef string
--
--	// FlavorRef [required] is the ID or full URL to the flavor that describes the server's specs.
--	FlavorRef string
--
--	// SecurityGroups [optional] lists the names of the security groups to which this server should belong.
--	SecurityGroups []string
--
--	// UserData [optional] contains configuration information or scripts to use upon launch.
--	// Create will base64-encode it for you.
--	UserData []byte
--
--	// AvailabilityZone [optional] in which to launch the server.
--	AvailabilityZone string
--
--	// Networks [optional] dictates how this server will be attached to available networks.
--	// By default, the server will be attached to all isolated networks for the tenant.
--	Networks []os.Network
--
--	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
--	Metadata map[string]string
--
--	// Personality [optional] includes the path and contents of a file to inject into the server at launch.
--	// The maximum size of the file is 255 bytes (decoded).
--	Personality []byte
--
--	// ConfigDrive [optional] enables metadata injection through a configuration drive.
--	ConfigDrive bool
--
--	// Rackspace-specific extensions begin here.
--
--	// KeyPair [optional] specifies the name of the SSH KeyPair to be injected into the newly launched
--	// server. See the "keypairs" extension in OpenStack compute v2.
--	KeyPair string
--
--	// DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig"
--	// extension in OpenStack compute v2.
--	DiskConfig diskconfig.DiskConfig
--
--	// BlockDevice [optional] will create the server from a volume, which is created from an image,
--	// a snapshot, or another volume.
--	BlockDevice []bootfromvolume.BlockDevice
--}
--
--// ToServerCreateMap constructs a request body using all of the OpenStack extensions that are
--// active on Rackspace.
--func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
--	base := os.CreateOpts{
--		Name:             opts.Name,
--		ImageRef:         opts.ImageRef,
--		FlavorRef:        opts.FlavorRef,
--		SecurityGroups:   opts.SecurityGroups,
--		UserData:         opts.UserData,
--		AvailabilityZone: opts.AvailabilityZone,
--		Networks:         opts.Networks,
--		Metadata:         opts.Metadata,
--		Personality:      opts.Personality,
--		ConfigDrive:      opts.ConfigDrive,
--	}
--
--	drive := diskconfig.CreateOptsExt{
--		CreateOptsBuilder: base,
--		DiskConfig:        opts.DiskConfig,
--	}
--
--	res, err := drive.ToServerCreateMap()
--	if err != nil {
--		return nil, err
--	}
--
--	if len(opts.BlockDevice) != 0 {
--		bfv := bootfromvolume.CreateOptsExt{
--			CreateOptsBuilder: drive,
--			BlockDevice:       opts.BlockDevice,
--		}
--
--		res, err = bfv.ToServerCreateMap()
--		if err != nil {
--			return nil, err
--		}
--	}
--
--	// key_name doesn't actually come from the extension (or at least isn't documented there) so
--	// we need to add it manually.
--	serverMap := res["server"].(map[string]interface{})
--	serverMap["key_name"] = opts.KeyPair
--
--	return res, nil
--}
--
--// RebuildOpts represents all of the configuration options used in a server rebuild operation that
--// are supported by Rackspace.
--type RebuildOpts struct {
--	// Required. The ID of the image you want your server to be provisioned on
--	ImageID string
--
--	// Name to set the server to
--	Name string
--
--	// Required. The server's admin password
--	AdminPass string
--
--	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
--	AccessIPv4 string
--
--	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
--	AccessIPv6 string
--
--	// Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server.
--	Metadata map[string]string
--
--	// Personality [optional] includes the path and contents of a file to inject into the server at launch.
--	// The maximum size of the file is 255 bytes (decoded).
--	Personality []byte
--
--	// Rackspace-specific stuff begins here.
--
--	// DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig"
--	// extension in OpenStack compute v2.
--	DiskConfig diskconfig.DiskConfig
--}
--
--// ToServerRebuildMap constructs a request body using all of the OpenStack extensions that are
--// active on Rackspace.
--func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
--	base := os.RebuildOpts{
--		ImageID:     opts.ImageID,
--		Name:        opts.Name,
--		AdminPass:   opts.AdminPass,
--		AccessIPv4:  opts.AccessIPv4,
--		AccessIPv6:  opts.AccessIPv6,
--		Metadata:    opts.Metadata,
--		Personality: opts.Personality,
--	}
--
--	drive := diskconfig.RebuildOptsExt{
--		RebuildOptsBuilder: base,
--		DiskConfig:         opts.DiskConfig,
--	}
--
--	return drive.ToServerRebuildMap()
--}
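
A sketch of how these Rackspace-specific options were intended to be fed into the delegate Create call shown earlier in this diff; the image and flavor IDs, the key-pair name, and the 300-second wait budget are placeholders, not values from the source.

package main

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
	os "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
	"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
)

// bootServer provisions a server with the Rackspace-specific options defined above
// and waits (up to 300 seconds) for it to reach ACTIVE.
func bootServer(client *gophercloud.ServiceClient) (*os.Server, error) {
	opts := servers.CreateOpts{
		Name:       "example-server",
		ImageRef:   "255df5fb-e3d4-45a3-9a07-c976debf7c14", // placeholder image ID
		FlavorRef:  "performance1-1",                       // placeholder flavor ID
		KeyPair:    "mykey",                                // SSH key injected via the keypairs extension
		DiskConfig: diskconfig.Auto,
	}

	server, err := servers.Create(client, opts).Extract()
	if err != nil {
		return nil, err
	}
	if err := servers.WaitForStatus(client, server.ID, "ACTIVE", 300); err != nil {
		return nil, err
	}
	return server, nil
}
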
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
-deleted file mode 100644
-index 3c0f806..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests_test.go
-+++ /dev/null
-@@ -1,57 +0,0 @@
--package servers
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestCreateOpts(t *testing.T) {
--	opts := CreateOpts{
--		Name:       "createdserver",
--		ImageRef:   "image-id",
--		FlavorRef:  "flavor-id",
--		KeyPair:    "mykey",
--		DiskConfig: diskconfig.Manual,
--	}
--
--	expected := `
--	{
--		"server": {
--			"name": "createdserver",
--			"imageRef": "image-id",
--			"flavorRef": "flavor-id",
--			"key_name": "mykey",
--			"OS-DCF:diskConfig": "MANUAL"
--		}
--	}
--	`
--	actual, err := opts.ToServerCreateMap()
--	th.AssertNoErr(t, err)
--	th.CheckJSONEquals(t, expected, actual)
--}
--
--func TestRebuildOpts(t *testing.T) {
--	opts := RebuildOpts{
--		Name:       "rebuiltserver",
--		AdminPass:  "swordfish",
--		ImageID:    "asdfasdfasdf",
--		DiskConfig: diskconfig.Auto,
--	}
--
--	actual, err := opts.ToServerRebuildMap()
--	th.AssertNoErr(t, err)
--
--	expected := `
--	{
--		"rebuild": {
--			"name": "rebuiltserver",
--			"imageRef": "asdfasdfasdf",
--			"adminPass": "swordfish",
--			"OS-DCF:diskConfig": "AUTO"
--		}
--	}
--	`
--	th.CheckJSONEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
-deleted file mode 100644
-index bfe3487..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package virtualinterfaces
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--
--	"github.com/racker/perigee"
--)
--
--// List returns a Pager which allows you to iterate over the collection of
--// virtual interfaces attached to the server identified by instanceID.
--func List(c *gophercloud.ServiceClient, instanceID string) pagination.Pager {
--	createPage := func(r pagination.PageResult) pagination.Page {
--		return VirtualInterfacePage{pagination.SinglePageBase(r)}
--	}
--
--	return pagination.NewPager(c, listURL(c, instanceID), createPage)
--}
--
--// Create creates a new virtual interface for a network and attaches the network
--// to the server instance.
--func Create(c *gophercloud.ServiceClient, instanceID, networkID string) CreateResult {
--	var res CreateResult
--
--	reqBody := map[string]map[string]string{
--		"virtual_interface": {
--			"network_id": networkID,
--		},
--	}
--
--	// Send request to API
--	_, res.Err = perigee.Request("POST", createURL(c, instanceID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		ReqBody:     &reqBody,
--		Results:     &res.Body,
--		OkCodes:     []int{200, 201, 202},
--	})
--	return res
--}
--
--// Delete deletes the interface with interfaceID attached to the instance with
--// instanceID.
--func Delete(c *gophercloud.ServiceClient, instanceID, interfaceID string) DeleteResult {
--	var res DeleteResult
--	_, res.Err = perigee.Request("DELETE", deleteURL(c, instanceID, interfaceID), perigee.Options{
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{200, 204},
--	})
--	return res
--}
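
For orientation, roughly how the three calls above fit together; instanceID and networkID are caller-supplied UUIDs and the client is assumed to be authenticated already.

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
	"github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces"
)

// attachAndList attaches the given network to a server and then lists every
// virtual interface (and its addresses) present on that server.
func attachAndList(client *gophercloud.ServiceClient, instanceID, networkID string) error {
	vi, err := virtualinterfaces.Create(client, instanceID, networkID).Extract()
	if err != nil {
		return err
	}
	fmt.Println("attached interface", vi.ID, "MAC", vi.MACAddress)

	return virtualinterfaces.List(client, instanceID).EachPage(func(page pagination.Page) (bool, error) {
		ifaces, err := virtualinterfaces.ExtractVirtualInterfaces(page)
		if err != nil {
			return false, err
		}
		for _, iface := range ifaces {
			for _, addr := range iface.IPAddresses {
				fmt.Printf("%s  %s (%s)\n", iface.ID, addr.Address, addr.NetworkLabel)
			}
		}
		return true, nil
	})
}
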
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
-deleted file mode 100644
-index d40af9c..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests_test.go
-+++ /dev/null
-@@ -1,165 +0,0 @@
--package virtualinterfaces
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "GET")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusOK)
--
--		fmt.Fprintf(w, `
--{
--    "virtual_interfaces": [
--        {
--            "id": "de7c6d53-b895-4b4a-963c-517ccb0f0775",
--            "ip_addresses": [
--                {
--                    "address": "192.168.0.2",
--                    "network_id": "f212726e-6321-4210-9bae-a13f5a33f83f",
--                    "network_label": "superprivate_xml"
--                }
--            ],
--            "mac_address": "BC:76:4E:04:85:20"
--        },
--        {
--            "id": "e14e789d-3b98-44a6-9c2d-c23eb1d1465c",
--            "ip_addresses": [
--                {
--                    "address": "10.181.1.30",
--                    "network_id": "3b324a1b-31b8-4db5-9fe5-4a2067f60297",
--                    "network_label": "private"
--                }
--            ],
--            "mac_address": "BC:76:4E:04:81:55"
--        }
--    ]
--}
--      `)
--	})
--
--	client := fake.ServiceClient()
--	count := 0
--
--	err := List(client, "12345").EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractVirtualInterfaces(page)
--		if err != nil {
--			t.Errorf("Failed to extract networks: %v", err)
--			return false, err
--		}
--
--		expected := []VirtualInterface{
--			VirtualInterface{
--				MACAddress: "BC:76:4E:04:85:20",
--				IPAddresses: []IPAddress{
--					IPAddress{
--						Address:      "192.168.0.2",
--						NetworkID:    "f212726e-6321-4210-9bae-a13f5a33f83f",
--						NetworkLabel: "superprivate_xml",
--					},
--				},
--				ID: "de7c6d53-b895-4b4a-963c-517ccb0f0775",
--			},
--			VirtualInterface{
--				MACAddress: "BC:76:4E:04:81:55",
--				IPAddresses: []IPAddress{
--					IPAddress{
--						Address:      "10.181.1.30",
--						NetworkID:    "3b324a1b-31b8-4db5-9fe5-4a2067f60297",
--						NetworkLabel: "private",
--					},
--				},
--				ID: "e14e789d-3b98-44a6-9c2d-c23eb1d1465c",
--			},
--		}
--
--		th.CheckDeepEquals(t, expected, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestCreate(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "POST")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Content-Type", "application/json")
--		th.TestHeader(t, r, "Accept", "application/json")
--		th.TestJSONRequest(t, r, `
--{
--    "virtual_interface": {
--        "network_id": "6789"
--    }
--}
--      `)
--
--		w.Header().Add("Content-Type", "application/json")
--		w.WriteHeader(http.StatusCreated)
--
--		fmt.Fprintf(w, `{
--      "virtual_interfaces": [
--        {
--          "id": "de7c6d53-b895-4b4a-963c-517ccb0f0775",
--          "ip_addresses": [
--            {
--              "address": "192.168.0.2",
--              "network_id": "f212726e-6321-4210-9bae-a13f5a33f83f",
--              "network_label": "superprivate_xml"
--            }
--          ],
--          "mac_address": "BC:76:4E:04:85:20"
--        }
--      ]
--    }`)
--	})
--
--	expected := &VirtualInterface{
--		MACAddress: "BC:76:4E:04:85:20",
--		IPAddresses: []IPAddress{
--			IPAddress{
--				Address:      "192.168.0.2",
--				NetworkID:    "f212726e-6321-4210-9bae-a13f5a33f83f",
--				NetworkLabel: "superprivate_xml",
--			},
--		},
--		ID: "de7c6d53-b895-4b4a-963c-517ccb0f0775",
--	}
--
--	actual, err := Create(fake.ServiceClient(), "12345", "6789").Extract()
--	th.AssertNoErr(t, err)
--
--	th.CheckDeepEquals(t, expected, actual)
--}
--
--func TestDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--
--	th.Mux.HandleFunc("/servers/12345/os-virtual-interfacesv2/6789", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	res := Delete(fake.ServiceClient(), "12345", "6789")
--	th.AssertNoErr(t, res.Err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go
-deleted file mode 100644
-index 26fa7f3..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/results.go
-+++ /dev/null
-@@ -1,81 +0,0 @@
--package virtualinterfaces
--
--import (
--	"github.com/mitchellh/mapstructure"
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--type commonResult struct {
--	gophercloud.Result
--}
--
--// Extract is a function that accepts a result and extracts a virtual interface resource.
--func (r commonResult) Extract() (*VirtualInterface, error) {
--	if r.Err != nil {
--		return nil, r.Err
--	}
--
--	var res struct {
--		VirtualInterfaces []VirtualInterface `mapstructure:"virtual_interfaces" json:"virtual_interfaces"`
--	}
--
--	err := mapstructure.Decode(r.Body, &res)
--
--	return &res.VirtualInterfaces[0], err
--}
--
--// CreateResult represents the result of a create operation.
--type CreateResult struct {
--	commonResult
--}
--
--// DeleteResult represents the result of a delete operation.
--type DeleteResult struct {
--	gophercloud.ErrResult
--}
--
--// IPAddress represents a virtual address attached to a VirtualInterface.
--type IPAddress struct {
--	Address      string `mapstructure:"address" json:"address"`
--	NetworkID    string `mapstructure:"network_id" json:"network_id"`
--	NetworkLabel string `mapstructure:"network_label" json:"network_label"`
--}
--
--// VirtualInterface represents a virtual interface.
--type VirtualInterface struct {
--	// UUID for the virtual interface
--	ID string `mapstructure:"id" json:"id"`
--
--	MACAddress string `mapstructure:"mac_address" json:"mac_address"`
--
--	IPAddresses []IPAddress `mapstructure:"ip_addresses" json:"ip_addresses"`
--}
--
--// VirtualInterfacePage is the page returned by a pager when traversing over a
--// collection of virtual interfaces.
--type VirtualInterfacePage struct {
--	pagination.SinglePageBase
--}
--
--// IsEmpty returns true if the VirtualInterfacePage contains no VirtualInterfaces.
--func (r VirtualInterfacePage) IsEmpty() (bool, error) {
--	networks, err := ExtractVirtualInterfaces(r)
--	if err != nil {
--		return true, err
--	}
--	return len(networks) == 0, nil
--}
--
--// ExtractVirtualInterfaces accepts a Page struct, specifically a VirtualInterfacePage struct,
--// and extracts the elements into a slice of VirtualInterface structs. In other words,
--// a generic collection is mapped into a relevant slice.
--func ExtractVirtualInterfaces(page pagination.Page) ([]VirtualInterface, error) {
--	var resp struct {
--		VirtualInterfaces []VirtualInterface `mapstructure:"virtual_interfaces" json:"virtual_interfaces"`
--	}
--
--	err := mapstructure.Decode(page.(VirtualInterfacePage).Body, &resp)
--
--	return resp.VirtualInterfaces, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
-deleted file mode 100644
-index 9e5693e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--package virtualinterfaces
--
--import "github.com/rackspace/gophercloud"
--
--func listURL(c *gophercloud.ServiceClient, instanceID string) string {
--	return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2")
--}
--
--func createURL(c *gophercloud.ServiceClient, instanceID string) string {
--	return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2")
--}
--
--func deleteURL(c *gophercloud.ServiceClient, instanceID, interfaceID string) string {
--	return c.ServiceURL("servers", instanceID, "os-virtual-interfacesv2", interfaceID)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
-deleted file mode 100644
-index 6732e4e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/urls_test.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--package virtualinterfaces
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestCreateURL(t *testing.T) {
--	actual := createURL(endpointClient(), "12345")
--	expected := endpoint + "servers/12345/os-virtual-interfacesv2"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestListURL(t *testing.T) {
--	actual := listURL(endpointClient(), "12345")
--	expected := endpoint + "servers/12345/os-virtual-interfacesv2"
--	th.AssertEquals(t, expected, actual)
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient(), "12345", "6789")
--	expected := endpoint + "servers/12345/os-virtual-interfacesv2/6789"
--	th.AssertEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go
-deleted file mode 100644
-index fc547cd..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate.go
-+++ /dev/null
-@@ -1,24 +0,0 @@
--package extensions
--
--import (
--	"github.com/rackspace/gophercloud"
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the
--// elements into a slice of common.Extension structs.
--func ExtractExtensions(page pagination.Page) ([]common.Extension, error) {
--	return common.ExtractExtensions(page)
--}
--
--// Get retrieves information for a specific extension using its alias.
--func Get(c *gophercloud.ServiceClient, alias string) common.GetResult {
--	return common.Get(c, alias)
--}
--
--// List returns a Pager which allows you to iterate over the full collection of extensions.
--// It does not accept query parameters.
--func List(c *gophercloud.ServiceClient) pagination.Pager {
--	return common.List(c)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
-deleted file mode 100644
-index e30f794..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/delegate_test.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package extensions
--
--import (
--	"testing"
--
--	common "github.com/rackspace/gophercloud/openstack/common/extensions"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestList(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	common.HandleListExtensionsSuccessfully(t)
--
--	count := 0
--
--	err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractExtensions(page)
--		th.AssertNoErr(t, err)
--		th.AssertDeepEquals(t, common.ExpectedExtensions, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
--
--func TestGet(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	common.HandleGetExtensionSuccessfully(t)
--
--	actual, err := Get(fake.ServiceClient(), "agent").Extract()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, common.SingleExtension, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
-deleted file mode 100644
-index b02a95b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/extensions/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package extensions provides information and interaction with the all the
--// extensions available for the Rackspace Identity service.
--package extensions
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go
-deleted file mode 100644
-index 6cdd0cf..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--package tenants
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractTenants interprets a page of List results as a more usable slice of Tenant structs.
--func ExtractTenants(page pagination.Page) ([]os.Tenant, error) {
--	return os.ExtractTenants(page)
--}
--
--// List enumerates the tenants to which the current token grants access.
--func List(client *gophercloud.ServiceClient, opts *os.ListOpts) pagination.Pager {
--	return os.List(client, opts)
--}
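
A short sketch of the intended use of these two delegates; nil ListOpts asks for the default, unfiltered listing (as the test below does), and the Tenant fields themselves are defined in the wrapped openstack package, not here.

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
	"github.com/rackspace/gophercloud/rackspace/identity/v2/tenants"
)

// printTenants walks every page of tenants visible to the current token.
func printTenants(client *gophercloud.ServiceClient) error {
	return tenants.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
		all, err := tenants.ExtractTenants(page)
		if err != nil {
			return false, err
		}
		for _, tenant := range all {
			fmt.Printf("%+v\n", tenant) // os.Tenant; field names live in the openstack package
		}
		return true, nil
	})
}
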
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go
-deleted file mode 100644
-index eccbfe2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/delegate_test.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--package tenants
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/identity/v2/tenants"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListTenants(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListTenantsSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		actual, err := ExtractTenants(page)
--		th.AssertNoErr(t, err)
--		th.CheckDeepEquals(t, os.ExpectedTenantSlice, actual)
--
--		count++
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, 1, count)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go
-deleted file mode 100644
-index c1825c2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tenants/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package tenants provides information and interaction with the tenant
--// API resource for the Rackspace Identity service.
--package tenants
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
-deleted file mode 100644
-index 4f9885a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go
-+++ /dev/null
-@@ -1,60 +0,0 @@
--package tokens
--
--import (
--	"errors"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--)
--
--var (
--	// ErrPasswordProvided is returned if both a password and an API key are provided to Create.
--	ErrPasswordProvided = errors.New("Please provide either a password or an API key.")
--)
--
--// AuthOptions wraps the OpenStack AuthOptions struct to be able to customize the request body
--// when API key authentication is used.
--type AuthOptions struct {
--	os.AuthOptions
--}
--
--// WrapOptions embeds a root AuthOptions struct in a package-specific one.
--func WrapOptions(original gophercloud.AuthOptions) AuthOptions {
--	return AuthOptions{AuthOptions: os.WrapOptions(original)}
--}
--
--// ToTokenCreateMap serializes an AuthOptions into a request body. If an API key is provided, it
--// will be used; otherwise the embedded OpenStack AuthOptions (username and password) are serialized instead.
--func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) {
--	if auth.APIKey == "" {
--		return auth.AuthOptions.ToTokenCreateMap()
--	}
--
--	// Verify that other required attributes are present.
--	if auth.Username == "" {
--		return nil, os.ErrUsernameRequired
--	}
--
--	authMap := make(map[string]interface{})
--
--	authMap["RAX-KSKEY:apiKeyCredentials"] = map[string]interface{}{
--		"username": auth.Username,
--		"apiKey":   auth.APIKey,
--	}
--
--	if auth.TenantID != "" {
--		authMap["tenantId"] = auth.TenantID
--	}
--	if auth.TenantName != "" {
--		authMap["tenantName"] = auth.TenantName
--	}
--
--	return map[string]interface{}{"auth": authMap}, nil
--}
--
--// Create authenticates to Rackspace's identity service and attempts to acquire a Token. Rather
--// than interact with this service directly, users should generally call
--// rackspace.AuthenticatedClient().
--func Create(client *gophercloud.ServiceClient, auth AuthOptions) os.CreateResult {
--	return os.Create(client, auth)
--}
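
To illustrate what the API-key branch above produces, a sketch that wraps plain credentials and dumps the request body it would send; the key value is a placeholder, and for real authentication the comment above points at rackspace.AuthenticatedClient instead.

package main

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/rackspace/identity/v2/tokens"
)

func main() {
	// When APIKey is set, ToTokenCreateMap builds RAX-KSKEY:apiKeyCredentials
	// instead of the password-based body from the embedded OpenStack options.
	opts := tokens.WrapOptions(gophercloud.AuthOptions{
		Username: "me",
		APIKey:   "1234567890abcdef", // placeholder
	})

	body, err := opts.ToTokenCreateMap()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", body)
}
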
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go
-deleted file mode 100644
-index 6678ff4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate_test.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--package tokens
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/identity/v2/tokens"
--	th "github.com/rackspace/gophercloud/testhelper"
--	"github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func tokenPost(t *testing.T, options gophercloud.AuthOptions, requestJSON string) os.CreateResult {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleTokenPost(t, requestJSON)
--
--	return Create(client.ServiceClient(), WrapOptions(options))
--}
--
--func TestCreateTokenWithAPIKey(t *testing.T) {
--	options := gophercloud.AuthOptions{
--		Username: "me",
--		APIKey:   "1234567890abcdef",
--	}
--
--	os.IsSuccessful(t, tokenPost(t, options, `
--    {
--      "auth": {
--        "RAX-KSKEY:apiKeyCredentials": {
--          "username": "me",
--          "apiKey": "1234567890abcdef"
--        }
--      }
--    }
--  `))
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go
-deleted file mode 100644
-index 44043e5..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package tokens provides information and interaction with the token
--// API resource for the Rackspace Identity service.
--package tokens
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go
-deleted file mode 100644
-index 9473930..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package accounts
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
--)
--
--// Get is a function that retrieves an account's metadata. To extract just the
--// custom metadata, call the ExtractMetadata method on the GetResult. To extract
--// all the headers that are returned (including the metadata), call the
--// ExtractHeader method on the GetResult.
--func Get(c *gophercloud.ServiceClient) os.GetResult {
--	return os.Get(c, nil)
--}
--
--// UpdateOpts is a structure that contains parameters for updating, creating, or
--// deleting an account's metadata.
--type UpdateOpts struct {
--	Metadata    map[string]string
--	TempURLKey  string `h:"X-Account-Meta-Temp-URL-Key"`
--	TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"`
--}
--
--// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers.
--func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) {
--	headers, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		headers["X-Account-Meta-"+k] = v
--	}
--	return headers, err
--}
--
--// Update will update an account's metadata with the Metadata in the UpdateOptsBuilder.
--func Update(c *gophercloud.ServiceClient, opts os.UpdateOptsBuilder) os.UpdateResult {
--	return os.Update(c, opts)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go
-deleted file mode 100644
-index c568bd6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/delegate_test.go
-+++ /dev/null
-@@ -1,30 +0,0 @@
--package accounts
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestGetAccounts(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleGetAccountSuccessfully(t)
--
--	options := &UpdateOpts{Metadata: map[string]string{"gophercloud-test": "accounts"}}
--	res := Update(fake.ServiceClient(), options)
--	th.CheckNoErr(t, res.Err)
--}
--
--func TestUpdateAccounts(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleUpdateAccountSuccessfully(t)
--
--	expected := map[string]string{"Foo": "bar"}
--	actual, err := Get(fake.ServiceClient()).ExtractMetadata()
--	th.CheckNoErr(t, err)
--	th.CheckDeepEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go
-deleted file mode 100644
-index 293a930..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/accounts/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package accounts provides information and interaction with the account
--// API resource for the Rackspace Cloud Files service.
--package accounts
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go
-deleted file mode 100644
-index 9c89e22..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package bulk provides functionality for working with bulk operations in the
--// Rackspace Cloud Files service.
--package bulk
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go
-deleted file mode 100644
-index d252609..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests.go
-+++ /dev/null
-@@ -1,51 +0,0 @@
--package bulk
--
--import (
--	"net/url"
--	"strings"
--
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// DeleteOptsBuilder allows extensions to add additional parameters to the
--// Delete request.
--type DeleteOptsBuilder interface {
--	ToBulkDeleteBody() (string, error)
--}
--
--// DeleteOpts is a structure that holds parameters for deleting an object.
--type DeleteOpts []string
--
--// ToBulkDeleteBody formats a DeleteOpts into a request body.
--func (opts DeleteOpts) ToBulkDeleteBody() (string, error) {
--	return url.QueryEscape(strings.Join(opts, "\n")), nil
--}
--
--// Delete will delete objects or containers in bulk.
--func Delete(c *gophercloud.ServiceClient, opts DeleteOptsBuilder) DeleteResult {
--	var res DeleteResult
--
--	if opts == nil {
--		return res
--	}
--
--	reqString, err := opts.ToBulkDeleteBody()
--	if err != nil {
--		res.Err = err
--		return res
--	}
--
--	reqBody := strings.NewReader(reqString)
--
--	resp, err := perigee.Request("DELETE", deleteURL(c), perigee.Options{
--		ContentType: "text/plain",
--		MoreHeaders: c.AuthenticatedHeaders(),
--		OkCodes:     []int{200},
--		ReqBody:     reqBody,
--		Results:     &res.Body,
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go
-deleted file mode 100644
-index 8b5578e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/requests_test.go
-+++ /dev/null
-@@ -1,36 +0,0 @@
--package bulk
--
--import (
--	"fmt"
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestBulkDelete(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "DELETE")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.AssertEquals(t, r.URL.RawQuery, "bulk-delete")
--
--		w.WriteHeader(http.StatusOK)
--		fmt.Fprintf(w, `
--      {
--        "Number Not Found": 1,
--        "Response Status": "200 OK",
--        "Errors": [],
--        "Number Deleted": 1,
--        "Response Body": ""
--      }
--    `)
--	})
--
--	options := DeleteOpts{"gophercloud-testcontainer1", "gophercloud-testcontainer2"}
--	actual, err := Delete(fake.ServiceClient(), options).ExtractBody()
--	th.AssertNoErr(t, err)
--	th.AssertEquals(t, actual.NumberDeleted, 1)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go
-deleted file mode 100644
-index fddc125..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/results.go
-+++ /dev/null
-@@ -1,28 +0,0 @@
--package bulk
--
--import (
--	"github.com/rackspace/gophercloud"
--
--	"github.com/mitchellh/mapstructure"
--)
--
--// DeleteResult represents the result of a bulk delete operation.
--type DeleteResult struct {
--	gophercloud.Result
--}
--
--// DeleteRespBody is the form of the response body returned by a bulk delete request.
--type DeleteRespBody struct {
--	NumberNotFound int      `mapstructure:"Number Not Found"`
--	ResponseStatus string   `mapstructure:"Response Status"`
--	Errors         []string `mapstructure:"Errors"`
--	NumberDeleted  int      `mapstructure:"Number Deleted"`
--	ResponseBody   string   `mapstructure:"Response Body"`
--}
--
--// ExtractBody will extract the body returned by the bulk extract request.
--func (dr DeleteResult) ExtractBody() (DeleteRespBody, error) {
--	var resp DeleteRespBody
--	err := mapstructure.Decode(dr.Body, &resp)
--	return resp, err
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
-deleted file mode 100644
-index 2e11203..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package bulk
--
--import "github.com/rackspace/gophercloud"
--
--func deleteURL(c *gophercloud.ServiceClient) string {
--	return c.Endpoint + "?bulk-delete"
--}
--
--func extractURL(c *gophercloud.ServiceClient, ext string) string {
--	return c.Endpoint + "?extract-archive=" + ext
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
-deleted file mode 100644
-index 9169e52..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/bulk/urls_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--package bulk
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestDeleteURL(t *testing.T) {
--	actual := deleteURL(endpointClient())
--	expected := endpoint + "?bulk-delete"
--	th.CheckEquals(t, expected, actual)
--}
--
--func TestExtractURL(t *testing.T) {
--	actual := extractURL(endpointClient(), "tar")
--	expected := endpoint + "?extract-archive=tar"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
-deleted file mode 100644
-index d7eef20..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--package cdncontainers
--
--import (
--	"strconv"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractNames interprets a page of List results when just the container
--// names are requested.
--func ExtractNames(page pagination.Page) ([]string, error) {
--	return os.ExtractNames(page)
--}
--
--// ListOpts are options for listing Rackspace CDN containers.
--type ListOpts struct {
--	EndMarker string `q:"end_marker"`
--	Format    string `q:"format"`
--	Limit     int    `q:"limit"`
--	Marker    string `q:"marker"`
--}
--
--// ToContainerListParams formats a ListOpts into a query string and boolean
--// representing whether to list complete information for each container.
--func (opts ListOpts) ToContainerListParams() (bool, string, error) {
--	q, err := gophercloud.BuildQueryString(opts)
--	if err != nil {
--		return false, "", err
--	}
--	return false, q.String(), nil
--}
--
--// List is a function that retrieves containers associated with the account as
--// well as account metadata. It returns a pager which can be iterated with the
--// EachPage function.
--func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
--	return os.List(c, opts)
--}
--
--// Get is a function that retrieves the metadata of a container. To extract just
--// the custom metadata, pass the GetResult response to the ExtractMetadata
--// function.
--func Get(c *gophercloud.ServiceClient, containerName string) os.GetResult {
--	return os.Get(c, containerName)
--}
--
--// UpdateOpts is a structure that holds parameters for updating, creating, or
--// deleting a container's metadata.
--type UpdateOpts struct {
--	CDNEnabled   bool `h:"X-Cdn-Enabled"`
--	LogRetention bool `h:"X-Log-Retention"`
--	TTL          int  `h:"X-Ttl"`
--}
--
--// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
--func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	h["X-Cdn-Enabled"] = strconv.FormatBool(opts.CDNEnabled)
--	return h, nil
--}
--
--// Update is a function that creates, updates, or deletes a container's
--// metadata.
--func Update(c *gophercloud.ServiceClient, containerName string, opts os.UpdateOptsBuilder) os.UpdateResult {
--	return os.Update(c, containerName, opts)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go
-deleted file mode 100644
-index 02c3c5e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/delegate_test.go
-+++ /dev/null
-@@ -1,50 +0,0 @@
--package cdncontainers
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListCDNContainers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListContainerNamesSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNames(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, os.ExpectedListNames, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestGetCDNContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleGetContainerSuccessfully(t)
--
--	_, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata()
--	th.CheckNoErr(t, err)
--
--}
--
--func TestUpdateCDNContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleUpdateContainerSuccessfully(t)
--
--	options := &UpdateOpts{TTL: 3600}
--	res := Update(fake.ServiceClient(), "testContainer", options)
--	th.CheckNoErr(t, res.Err)
--
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go
-deleted file mode 100644
-index 7b0930e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package cdncontainers provides information and interaction with the CDN
--// Container API resource for the Rackspace Cloud Files service.
--package cdncontainers
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go
-deleted file mode 100644
-index 0567833..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests.go
-+++ /dev/null
-@@ -1,58 +0,0 @@
--package cdncontainers
--
--import (
--	"github.com/racker/perigee"
--	"github.com/rackspace/gophercloud"
--)
--
--// EnableOptsBuilder allows extensions to add additional parameters to the Enable
--// request.
--type EnableOptsBuilder interface {
--	ToCDNContainerEnableMap() (map[string]string, error)
--}
--
--// EnableOpts is a structure that holds options for enabling a CDN container.
--type EnableOpts struct {
--	// CDNEnabled indicates whether or not the container is CDN enabled. Set to
--	// `true` to enable the container. Note that changing this setting from true
--	// to false will disable the container in the CDN but only after the TTL has
--	// expired.
--	CDNEnabled bool `h:"X-Cdn-Enabled"`
--	// TTL is the time-to-live for the container (in seconds).
--	TTL int `h:"X-Ttl"`
--}
--
--// ToCDNContainerEnableMap formats an EnableOpts into a map of headers.
--func (opts EnableOpts) ToCDNContainerEnableMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	return h, nil
--}
--
--// Enable is a function that enables/disables a CDN container.
--func Enable(c *gophercloud.ServiceClient, containerName string, opts EnableOptsBuilder) EnableResult {
--	var res EnableResult
--	h := c.AuthenticatedHeaders()
--
--	if opts != nil {
--		headers, err := opts.ToCDNContainerEnableMap()
--		if err != nil {
--			res.Err = err
--			return res
--		}
--
--		for k, v := range headers {
--			h[k] = v
--		}
--	}
--
--	resp, err := perigee.Request("PUT", enableURL(c, containerName), perigee.Options{
--		MoreHeaders: h,
--		OkCodes:     []int{201, 202, 204},
--	})
--	res.Header = resp.HttpResponse.Header
--	res.Err = err
--	return res
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
-deleted file mode 100644
-index 28b963d..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/requests_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--package cdncontainers
--
--import (
--	"net/http"
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestEnableCDNContainer(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) {
--		th.TestMethod(t, r, "PUT")
--		th.TestHeader(t, r, "X-Auth-Token", fake.TokenID)
--		th.TestHeader(t, r, "Accept", "application/json")
--
--		w.Header().Add("X-Ttl", "259200")
--		w.Header().Add("X-Cdn-Enabled", "True")
--		w.WriteHeader(http.StatusNoContent)
--	})
--
--	options := &EnableOpts{CDNEnabled: true, TTL: 259200}
--	actual := Enable(fake.ServiceClient(), "testContainer", options)
--	th.AssertNoErr(t, actual.Err)
--	th.CheckEquals(t, actual.Header["X-Ttl"][0], "259200")
--	th.CheckEquals(t, actual.Header["X-Cdn-Enabled"][0], "True")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
-deleted file mode 100644
-index a5097ca..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/results.go
-+++ /dev/null
-@@ -1,8 +0,0 @@
--package cdncontainers
--
--import "github.com/rackspace/gophercloud"
--
--// EnableResult represents the result of an Enable operation.
--type EnableResult struct {
--	gophercloud.HeaderResult
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go
-deleted file mode 100644
-index 80653f2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls.go
-+++ /dev/null
-@@ -1,7 +0,0 @@
--package cdncontainers
--
--import "github.com/rackspace/gophercloud"
--
--func enableURL(c *gophercloud.ServiceClient, containerName string) string {
--	return c.ServiceURL(containerName)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go
-deleted file mode 100644
-index aa5bfe6..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdncontainers/urls_test.go
-+++ /dev/null
-@@ -1,20 +0,0 @@
--package cdncontainers
--
--import (
--	"testing"
--
--	"github.com/rackspace/gophercloud"
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--const endpoint = "http://localhost:57909/"
--
--func endpointClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{Endpoint: endpoint}
--}
--
--func TestEnableURL(t *testing.T) {
--	actual := enableURL(endpointClient(), "foo")
--	expected := endpoint + "foo"
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go
-deleted file mode 100644
-index e9d2ff1..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate.go
-+++ /dev/null
-@@ -1,11 +0,0 @@
--package cdnobjects
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--)
--
--// Delete is a function that deletes an object from the CDN.
--func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DeleteOptsBuilder) os.DeleteResult {
--	return os.Delete(c, containerName, objectName, opts)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
-deleted file mode 100644
-index b5e04a9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/delegate_test.go
-+++ /dev/null
-@@ -1,19 +0,0 @@
--package cdnobjects
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestDeleteCDNObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleDeleteObjectSuccessfully(t)
--
--	res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil)
--	th.AssertNoErr(t, res.Err)
--
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
-deleted file mode 100644
-index 90cd5c9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/cdnobjects/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package cdnobjects provides information and interaction with the CDN
--// Object API resource for the Rackspace Cloud Files service.
--package cdnobjects
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
-deleted file mode 100644
-index 77ed002..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate.go
-+++ /dev/null
-@@ -1,93 +0,0 @@
--package containers
--
--import (
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractInfo interprets a page of List results when full container info
--// is requested.
--func ExtractInfo(page pagination.Page) ([]os.Container, error) {
--	return os.ExtractInfo(page)
--}
--
--// ExtractNames interprets a page of List results when just the container
--// names are requested.
--func ExtractNames(page pagination.Page) ([]string, error) {
--	return os.ExtractNames(page)
--}
--
--// List is a function that retrieves containers associated with the account as
--// well as account metadata. It returns a pager which can be iterated with the
--// EachPage function.
--func List(c *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager {
--	return os.List(c, opts)
--}
--
--// CreateOpts is a structure that holds parameters for creating a container.
--type CreateOpts struct {
--	Metadata         map[string]string
--	ContainerRead    string `h:"X-Container-Read"`
--	ContainerWrite   string `h:"X-Container-Write"`
--	VersionsLocation string `h:"X-Versions-Location"`
--}
--
--// ToContainerCreateMap formats a CreateOpts into a map of headers.
--func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Container-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Create is a function that creates a new container.
--func Create(c *gophercloud.ServiceClient, containerName string, opts os.CreateOptsBuilder) os.CreateResult {
--	return os.Create(c, containerName, opts)
--}
--
--// Delete is a function that deletes a container.
--func Delete(c *gophercloud.ServiceClient, containerName string) os.DeleteResult {
--	return os.Delete(c, containerName)
--}
--
--// UpdateOpts is a structure that holds parameters for updating or creating a
--// container's metadata.
--type UpdateOpts struct {
--	Metadata               map[string]string
--	ContainerRead          string `h:"X-Container-Read"`
--	ContainerWrite         string `h:"X-Container-Write"`
--	ContentType            string `h:"Content-Type"`
--	DetectContentType      bool   `h:"X-Detect-Content-Type"`
--	RemoveVersionsLocation string `h:"X-Remove-Versions-Location"`
--	VersionsLocation       string `h:"X-Versions-Location"`
--}
--
--// ToContainerUpdateMap formats an UpdateOpts into a map of headers.
--func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Container-Meta-"+k] = v
--	}
--	return h, nil
--}
--
--// Update is a function that creates, updates, or deletes a container's
--// metadata.
--func Update(c *gophercloud.ServiceClient, containerName string, opts os.UpdateOptsBuilder) os.UpdateResult {
--	return os.Update(c, containerName, opts)
--}
--
--// Get is a function that retrieves the metadata of a container. To extract just
--// the custom metadata, pass the GetResult response to the ExtractMetadata
--// function.
--func Get(c *gophercloud.ServiceClient, containerName string) os.GetResult {
--	return os.Get(c, containerName)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go
-deleted file mode 100644
-index 7ba4eb2..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/delegate_test.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--package containers
--
--import (
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestListContainerInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListContainerInfoSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), &os.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, os.ExpectedListInfo, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestListContainerNames(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListContainerNamesSuccessfully(t)
--
--	count := 0
--	err := List(fake.ServiceClient(), &os.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNames(page)
--		if err != nil {
--			t.Errorf("Failed to extract container names: %v", err)
--			return false, err
--		}
--
--		th.CheckDeepEquals(t, os.ExpectedListNames, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestCreateContainers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleCreateContainerSuccessfully(t)
--
--	options := os.CreateOpts{ContentType: "application/json", Metadata: map[string]string{"foo": "bar"}}
--	res := Create(fake.ServiceClient(), "testContainer", options)
--	th.CheckNoErr(t, res.Err)
--	th.CheckEquals(t, "bar", res.Header["X-Container-Meta-Foo"][0])
--
--}
--
--func TestDeleteContainers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleDeleteContainerSuccessfully(t)
--
--	res := Delete(fake.ServiceClient(), "testContainer")
--	th.CheckNoErr(t, res.Err)
--}
--
--func TestUpdateContainers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleUpdateContainerSuccessfully(t)
--
--	options := &os.UpdateOpts{Metadata: map[string]string{"foo": "bar"}}
--	res := Update(fake.ServiceClient(), "testContainer", options)
--	th.CheckNoErr(t, res.Err)
--}
--
--func TestGetContainers(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleGetContainerSuccessfully(t)
--
--	_, err := Get(fake.ServiceClient(), "testContainer").ExtractMetadata()
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go
-deleted file mode 100644
-index d132a07..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/containers/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package containers provides information and interaction with the Container
--// API resource for the Rackspace Cloud Files service.
--package containers
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
-deleted file mode 100644
-index bd4a4f0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate.go
-+++ /dev/null
-@@ -1,90 +0,0 @@
--package objects
--
--import (
--	"io"
--
--	"github.com/rackspace/gophercloud"
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--	"github.com/rackspace/gophercloud/pagination"
--)
--
--// ExtractInfo is a function that takes a page of objects and returns their full information.
--func ExtractInfo(page pagination.Page) ([]os.Object, error) {
--	return os.ExtractInfo(page)
--}
--
--// ExtractNames is a function that takes a page of objects and returns only their names.
--func ExtractNames(page pagination.Page) ([]string, error) {
--	return os.ExtractNames(page)
--}
--
--// List is a function that retrieves objects in the container as
--// well as container metadata. It returns a pager which can be iterated with the
--// EachPage function.
--func List(c *gophercloud.ServiceClient, containerName string, opts os.ListOptsBuilder) pagination.Pager {
--	return os.List(c, containerName, opts)
--}
--
--// Download is a function that retrieves the content and metadata for an object.
--// To extract just the content, pass the DownloadResult response to the
--// ExtractContent function.
--func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DownloadOptsBuilder) os.DownloadResult {
--	return os.Download(c, containerName, objectName, opts)
--}
--
--// Create is a function that creates a new object or replaces an existing object.
--func Create(c *gophercloud.ServiceClient, containerName, objectName string, content io.Reader, opts os.CreateOptsBuilder) os.CreateResult {
--	return os.Create(c, containerName, objectName, content, opts)
--}
--
--// CopyOpts is a structure that holds parameters for copying one object to
--// another.
--type CopyOpts struct {
--	Metadata           map[string]string
--	ContentDisposition string `h:"Content-Disposition"`
--	ContentEncoding    string `h:"Content-Encoding"`
--	ContentLength      int    `h:"Content-Length"`
--	ContentType        string `h:"Content-Type"`
--	CopyFrom           string `h:"X-Copy_From"`
--	Destination        string `h:"Destination"`
--	DetectContentType  bool   `h:"X-Detect-Content-Type"`
--}
--
--// ToObjectCopyMap formats a CopyOpts into a map of headers.
--func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) {
--	h, err := gophercloud.BuildHeaders(opts)
--	if err != nil {
--		return nil, err
--	}
--	for k, v := range opts.Metadata {
--		h["X-Object-Meta-"+k] = v
--	}
--	// `Content-Length` is required and a value of "0" is acceptable, but calling `gophercloud.BuildHeaders`
--	// will remove the `Content-Length` header if it's set to 0 (or equivalently not set). This will add
--	// the header if it's not already set.
--	if _, ok := h["Content-Length"]; !ok {
--		h["Content-Length"] = "0"
--	}
--	return h, nil
--}
--
--// Copy is a function that copies one object to another.
--func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts os.CopyOptsBuilder) os.CopyResult {
--	return os.Copy(c, containerName, objectName, opts)
--}
--
--// Delete is a function that deletes an object.
--func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts os.DeleteOptsBuilder) os.DeleteResult {
--	return os.Delete(c, containerName, objectName, opts)
--}
--
--// Get is a function that retrieves the metadata of an object. To extract just the custom
--// metadata, pass the GetResult response to the ExtractMetadata function.
--func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts os.GetOptsBuilder) os.GetResult {
--	return os.Get(c, containerName, objectName, opts)
--}
--
--// Update is a function that creates, updates, or deletes an object's metadata.
--func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts os.UpdateOptsBuilder) os.UpdateResult {
--	return os.Update(c, containerName, objectName, opts)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
-deleted file mode 100644
-index 08831ec..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/delegate_test.go
-+++ /dev/null
-@@ -1,115 +0,0 @@
--package objects
--
--import (
--	"bytes"
--	"testing"
--
--	os "github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects"
--	"github.com/rackspace/gophercloud/pagination"
--	th "github.com/rackspace/gophercloud/testhelper"
--	fake "github.com/rackspace/gophercloud/testhelper/client"
--)
--
--func TestDownloadObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleDownloadObjectSuccessfully(t)
--
--	content, err := Download(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractContent()
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, string(content), "Successful download with Gophercloud")
--}
--
--func TestListObjectsInfo(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListObjectsInfoSuccessfully(t)
--
--	count := 0
--	options := &os.ListOpts{Full: true}
--	err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractInfo(page)
--		th.AssertNoErr(t, err)
--
--		th.CheckDeepEquals(t, os.ExpectedListInfo, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestListObjectNames(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleListObjectNamesSuccessfully(t)
--
--	count := 0
--	options := &os.ListOpts{Full: false}
--	err := List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) {
--		count++
--		actual, err := ExtractNames(page)
--		if err != nil {
--			t.Errorf("Failed to extract container names: %v", err)
--			return false, err
--		}
--
--		th.CheckDeepEquals(t, os.ExpectedListNames, actual)
--
--		return true, nil
--	})
--	th.AssertNoErr(t, err)
--	th.CheckEquals(t, count, 1)
--}
--
--func TestCreateObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleCreateObjectSuccessfully(t)
--
--	content := bytes.NewBufferString("Did gyre and gimble in the wabe")
--	options := &os.CreateOpts{ContentType: "application/json"}
--	res := Create(fake.ServiceClient(), "testContainer", "testObject", content, options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestCopyObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleCopyObjectSuccessfully(t)
--
--	options := &CopyOpts{Destination: "/newTestContainer/newTestObject"}
--	res := Copy(fake.ServiceClient(), "testContainer", "testObject", options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestDeleteObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleDeleteObjectSuccessfully(t)
--
--	res := Delete(fake.ServiceClient(), "testContainer", "testObject", nil)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestUpdateObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleUpdateObjectSuccessfully(t)
--
--	options := &os.UpdateOpts{Metadata: map[string]string{"Gophercloud-Test": "objects"}}
--	res := Update(fake.ServiceClient(), "testContainer", "testObject", options)
--	th.AssertNoErr(t, res.Err)
--}
--
--func TestGetObject(t *testing.T) {
--	th.SetupHTTP()
--	defer th.TeardownHTTP()
--	os.HandleGetObjectSuccessfully(t)
--
--	expected := map[string]string{"Gophercloud-Test": "objects"}
--	actual, err := Get(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractMetadata()
--	th.AssertNoErr(t, err)
--	th.CheckDeepEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go
-deleted file mode 100644
-index 781984b..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/objectstorage/v1/objects/doc.go
-+++ /dev/null
-@@ -1,3 +0,0 @@
--// Package objects provides information and interaction with the Object
--// API resource for the Rackspace Cloud Files service.
--package objects
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/results.go
-deleted file mode 100644
-index f480bc7..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/results.go
-+++ /dev/null
-@@ -1,83 +0,0 @@
--package gophercloud
--
--import (
--	"encoding/json"
--	"net/http"
--)
--
--// Result acts as a base struct that other results can embed.
--type Result struct {
--	// Body is the payload of the HTTP response from the server. In most cases, this will be the
--	// deserialized JSON structure.
--	Body interface{}
--
--	// Header contains the HTTP header structure from the original response.
--	Header http.Header
--
--	// Err is an error that occurred during the operation. It's deferred until extraction to make
--	// it easier to chain operations.
--	Err error
--}
--
--// PrettyPrintJSON creates a string containing the full response body as pretty-printed JSON.
--func (r Result) PrettyPrintJSON() string {
--	pretty, err := json.MarshalIndent(r.Body, "", "  ")
--	if err != nil {
--		panic(err.Error())
--	}
--	return string(pretty)
--}
--
--// ErrResult represents results that only contain a potential error and
--// nothing else. Usually if the operation executed successfully, the Err field
--// will be nil; otherwise it will be stocked with a relevant error.
--type ErrResult struct {
--	Result
--}
--
--// ExtractErr is a function that extracts error information from a result.
--func (r ErrResult) ExtractErr() error {
--	return r.Err
--}
--
--// HeaderResult represents a result that only contains an `error` (possibly nil)
--// and an http.Header. This is used, for example, by the `objectstorage` packages
--// in `openstack`, because most of the operations don't return response bodies.
--type HeaderResult struct {
--	Result
--}
--
--// ExtractHeader will return the http.Header and error from the HeaderResult.
--// Usage: header, err := objects.Create(client, "my_container", objects.CreateOpts{}).ExtractHeader()
--func (hr HeaderResult) ExtractHeader() (http.Header, error) {
--	return hr.Header, hr.Err
--}
--
--// RFC3339Milli describes a time format used by API responses.
--const RFC3339Milli = "2006-01-02T15:04:05.999999Z"
--
--// Link represents a structure that describes how a paginated collection can
--// traverse backward or forward. The "Rel" field is usually "next".
--type Link struct {
--	Href string `mapstructure:"href"`
--	Rel  string `mapstructure:"rel"`
--}
--
--// ExtractNextURL attempts to extract the next URL from a JSON structure. It
--// follows the common convention of nesting back and next URLs in a "links"
--// JSON array.
--func ExtractNextURL(links []Link) (string, error) {
--	var url string
--
--	for _, l := range links {
--		if l.Rel == "next" {
--			url = l.Href
--		}
--	}
--
--	if url == "" {
--		return "", nil
--	}
--
--	return url, nil
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest
-deleted file mode 100644
-index f9c89f4..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/acceptancetest
-+++ /dev/null
-@@ -1,5 +0,0 @@
--#!/bin/bash
--#
--# Run the acceptance tests.
--
--exec go test -p=1 -tags 'acceptance fixtures' github.com/rackspace/gophercloud/acceptance/... $@
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap
-deleted file mode 100644
-index 6bae6e8..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/bootstrap
-+++ /dev/null
-@@ -1,26 +0,0 @@
--#!/bin/bash
--#
--# This script helps new contributors set up their local workstation for
--# gophercloud development and contributions.
--
--# Create the environment
--export GOPATH=$HOME/go/gophercloud
--mkdir -p $GOPATH
--
--# Download gophercloud into that environment
--go get github.com/rackspace/gophercloud
--cd $GOPATH/src/github.com/rackspace/gophercloud
--git checkout master
--
--# Write out the env.sh convenience file.
--cd $GOPATH
--cat <<EOF >env.sh
--#!/bin/bash
--export GOPATH=$(pwd)
--export GOPHERCLOUD=$GOPATH/src/github.com/rackspace/gophercloud
--EOF
--chmod a+x env.sh
--
--# Make changes immediately available as a convenience.
--. ./env.sh
--
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild
-deleted file mode 100644
-index 1cb389e..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/cibuild
-+++ /dev/null
-@@ -1,5 +0,0 @@
--#!/bin/bash
--#
--# Test script to be invoked by Travis.
--
--exec script/unittest -v
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test
-deleted file mode 100644
-index 1e03dff..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/test
-+++ /dev/null
-@@ -1,5 +0,0 @@
--#!/bin/bash
--#
--# Run all the tests.
--
--exec go test -tags 'acceptance fixtures' ./... $@
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest b/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest
-deleted file mode 100644
-index d3440a9..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/script/unittest
-+++ /dev/null
-@@ -1,5 +0,0 @@
--#!/bin/bash
--#
--# Run the unit tests.
--
--exec go test -tags fixtures ./... $@
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client.go
-deleted file mode 100644
-index 3490da0..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client.go
-+++ /dev/null
-@@ -1,32 +0,0 @@
--package gophercloud
--
--import "strings"
--
--// ServiceClient stores details required to interact with a specific service API implemented by a provider.
--// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient.
--type ServiceClient struct {
--	// ProviderClient is a reference to the provider that implements this service.
--	*ProviderClient
--
--	// Endpoint is the base URL of the service's API, acquired from a service catalog.
--	// It MUST end with a /.
--	Endpoint string
--
--	// ResourceBase is the base URL shared by the resources within a service's API. It should include
--	// the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used
--	// as-is, instead.
--	ResourceBase string
--}
--
--// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /.
--func (client *ServiceClient) ResourceBaseURL() string {
--	if client.ResourceBase != "" {
--		return client.ResourceBase
--	}
--	return client.Endpoint
--}
--
--// ServiceURL constructs a URL for a resource belonging to this provider.
--func (client *ServiceClient) ServiceURL(parts ...string) string {
--	return client.ResourceBaseURL() + strings.Join(parts, "/")
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client_test.go
-deleted file mode 100644
-index 84beb3f..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/service_client_test.go
-+++ /dev/null
-@@ -1,14 +0,0 @@
--package gophercloud
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestServiceURL(t *testing.T) {
--	c := &ServiceClient{Endpoint: "http://123.45.67.8/"}
--	expected := "http://123.45.67.8/more/parts/here"
--	actual := c.ServiceURL("more", "parts", "here")
--	th.CheckEquals(t, expected, actual)
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/client/fake.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/client/fake.go
-deleted file mode 100644
-index 5b69b05..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/client/fake.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--package client
--
--import (
--	"github.com/rackspace/gophercloud"
--	"github.com/rackspace/gophercloud/testhelper"
--)
--
--// Fake token to use.
--const TokenID = "cbc36478b0bd8e67e89469c7749d4127"
--
--// ServiceClient returns a generic service client for use in tests.
--func ServiceClient() *gophercloud.ServiceClient {
--	return &gophercloud.ServiceClient{
--		ProviderClient: &gophercloud.ProviderClient{TokenID: TokenID},
--		Endpoint:       testhelper.Endpoint(),
--	}
--}
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/convenience.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/convenience.go
-deleted file mode 100644
-index cf33e1a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/convenience.go
-+++ /dev/null
-@@ -1,329 +0,0 @@
--package testhelper
--
--import (
--	"encoding/json"
--	"fmt"
--	"path/filepath"
--	"reflect"
--	"runtime"
--	"strings"
--	"testing"
--)
--
--const (
--	logBodyFmt = "\033[1;31m%s %s\033[0m"
--	greenCode  = "\033[0m\033[1;32m"
--	yellowCode = "\033[0m\033[1;33m"
--	resetCode  = "\033[0m\033[1;31m"
--)
--
--func prefix(depth int) string {
--	_, file, line, _ := runtime.Caller(depth)
--	return fmt.Sprintf("Failure in %s, line %d:", filepath.Base(file), line)
--}
--
--func green(str interface{}) string {
--	return fmt.Sprintf("%s%#v%s", greenCode, str, resetCode)
--}
--
--func yellow(str interface{}) string {
--	return fmt.Sprintf("%s%#v%s", yellowCode, str, resetCode)
--}
--
--func logFatal(t *testing.T, str string) {
--	t.Fatalf(logBodyFmt, prefix(3), str)
--}
--
--func logError(t *testing.T, str string) {
--	t.Errorf(logBodyFmt, prefix(3), str)
--}
--
--type diffLogger func([]string, interface{}, interface{})
--
--type visit struct {
--	a1  uintptr
--	a2  uintptr
--	typ reflect.Type
--}
--
--// Recursively visits the structures of "expected" and "actual". The diffLogger function will be
--// invoked with each different value encountered, including the reference path that was followed
--// to get there.
--func deepDiffEqual(expected, actual reflect.Value, visited map[visit]bool, path []string, logDifference diffLogger) {
--	defer func() {
--		// Fall back to the regular reflect.DeepEquals function.
--		if r := recover(); r != nil {
--			var e, a interface{}
--			if expected.IsValid() {
--				e = expected.Interface()
--			}
--			if actual.IsValid() {
--				a = actual.Interface()
--			}
--
--			if !reflect.DeepEqual(e, a) {
--				logDifference(path, e, a)
--			}
--		}
--	}()
--
--	if !expected.IsValid() && actual.IsValid() {
--		logDifference(path, nil, actual.Interface())
--		return
--	}
--	if expected.IsValid() && !actual.IsValid() {
--		logDifference(path, expected.Interface(), nil)
--		return
--	}
--	if !expected.IsValid() && !actual.IsValid() {
--		return
--	}
--
--	hard := func(k reflect.Kind) bool {
--		switch k {
--		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
--			return true
--		}
--		return false
--	}
--
--	if expected.CanAddr() && actual.CanAddr() && hard(expected.Kind()) {
--		addr1 := expected.UnsafeAddr()
--		addr2 := actual.UnsafeAddr()
--
--		if addr1 > addr2 {
--			addr1, addr2 = addr2, addr1
--		}
--
--		if addr1 == addr2 {
--			// References are identical. We can short-circuit
--			return
--		}
--
--		typ := expected.Type()
--		v := visit{addr1, addr2, typ}
--		if visited[v] {
--			// Already visited.
--			return
--		}
--
--		// Remember this visit for later.
--		visited[v] = true
--	}
--
--	switch expected.Kind() {
--	case reflect.Array:
--		for i := 0; i < expected.Len(); i++ {
--			hop := append(path, fmt.Sprintf("[%d]", i))
--			deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference)
--		}
--		return
--	case reflect.Slice:
--		if expected.IsNil() != actual.IsNil() {
--			logDifference(path, expected.Interface(), actual.Interface())
--			return
--		}
--		if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() {
--			return
--		}
--		for i := 0; i < expected.Len(); i++ {
--			hop := append(path, fmt.Sprintf("[%d]", i))
--			deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference)
--		}
--		return
--	case reflect.Interface:
--		if expected.IsNil() != actual.IsNil() {
--			logDifference(path, expected.Interface(), actual.Interface())
--			return
--		}
--		deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference)
--		return
--	case reflect.Ptr:
--		deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference)
--		return
--	case reflect.Struct:
--		for i, n := 0, expected.NumField(); i < n; i++ {
--			field := expected.Type().Field(i)
--			hop := append(path, "."+field.Name)
--			deepDiffEqual(expected.Field(i), actual.Field(i), visited, hop, logDifference)
--		}
--		return
--	case reflect.Map:
--		if expected.IsNil() != actual.IsNil() {
--			logDifference(path, expected.Interface(), actual.Interface())
--			return
--		}
--		if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() {
--			return
--		}
--
--		var keys []reflect.Value
--		if expected.Len() >= actual.Len() {
--			keys = expected.MapKeys()
--		} else {
--			keys = actual.MapKeys()
--		}
--
--		for _, k := range keys {
--			expectedValue := expected.MapIndex(k)
--			actualValue := actual.MapIndex(k)
--
--			if !expectedValue.IsValid() {
--				logDifference(path, nil, actual.Interface())
--				return
--			}
--			if !actualValue.IsValid() {
--				logDifference(path, expected.Interface(), nil)
--				return
--			}
--
--			hop := append(path, fmt.Sprintf("[%v]", k))
--			deepDiffEqual(expectedValue, actualValue, visited, hop, logDifference)
--		}
--		return
--	case reflect.Func:
--		if expected.IsNil() != actual.IsNil() {
--			logDifference(path, expected.Interface(), actual.Interface())
--		}
--		return
--	default:
--		if expected.Interface() != actual.Interface() {
--			logDifference(path, expected.Interface(), actual.Interface())
--		}
--	}
--}
--
--func deepDiff(expected, actual interface{}, logDifference diffLogger) {
--	if expected == nil || actual == nil {
--		logDifference([]string{}, expected, actual)
--		return
--	}
--
--	expectedValue := reflect.ValueOf(expected)
--	actualValue := reflect.ValueOf(actual)
--
--	if expectedValue.Type() != actualValue.Type() {
--		logDifference([]string{}, expected, actual)
--		return
--	}
--	deepDiffEqual(expectedValue, actualValue, map[visit]bool{}, []string{}, logDifference)
--}
--
--// AssertEquals compares two arbitrary values for equality. If the
--// comparison fails, a fatal error is raised that will fail the test.
--func AssertEquals(t *testing.T, expected, actual interface{}) {
--	if expected != actual {
--		logFatal(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual)))
--	}
--}
--
--// CheckEquals is similar to AssertEquals, except with a non-fatal error
--func CheckEquals(t *testing.T, expected, actual interface{}) {
--	if expected != actual {
--		logError(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual)))
--	}
--}
--
--// AssertDeepEquals, like AssertEquals, performs a comparison, but on more complex
--// structures that require deeper inspection.
--func AssertDeepEquals(t *testing.T, expected, actual interface{}) {
--	pre := prefix(2)
--
--	differed := false
--	deepDiff(expected, actual, func(path []string, expected, actual interface{}) {
--		differed = true
--		t.Errorf("\033[1;31m%sat %s expected %s, but got %s\033[0m",
--			pre,
--			strings.Join(path, ""),
--			green(expected),
--			yellow(actual))
--	})
--	if differed {
--		logFatal(t, "The structures were different.")
--	}
--}
--
--// CheckDeepEquals is similar to AssertDeepEquals, except with a non-fatal error
--func CheckDeepEquals(t *testing.T, expected, actual interface{}) {
--	pre := prefix(2)
--
--	deepDiff(expected, actual, func(path []string, expected, actual interface{}) {
--		t.Errorf("\033[1;31m%s at %s expected %s, but got %s\033[0m",
--			pre,
--			strings.Join(path, ""),
--			green(expected),
--			yellow(actual))
--	})
--}
--
--// isJSONEquals is a utility function that implements JSON comparison for AssertJSONEquals and
--// CheckJSONEquals.
--func isJSONEquals(t *testing.T, expectedJSON string, actual interface{}) bool {
--	var parsedExpected, parsedActual interface{}
--	err := json.Unmarshal([]byte(expectedJSON), &parsedExpected)
--	if err != nil {
--		t.Errorf("Unable to parse expected value as JSON: %v", err)
--		return false
--	}
--
--	jsonActual, err := json.Marshal(actual)
--	AssertNoErr(t, err)
--	err = json.Unmarshal(jsonActual, &parsedActual)
--	AssertNoErr(t, err)
--
--	if !reflect.DeepEqual(parsedExpected, parsedActual) {
--		prettyExpected, err := json.MarshalIndent(parsedExpected, "", "  ")
--		if err != nil {
--			t.Logf("Unable to pretty-print expected JSON: %v\n%s", err, expectedJSON)
--		} else {
--			// We can't use green() here because %#v prints prettyExpected as a byte array literal, which
--			// is... unhelpful. Converting it to a string first leaves "\n" uninterpreted for some reason.
--			t.Logf("Expected JSON:\n%s%s%s", greenCode, prettyExpected, resetCode)
--		}
--
--		prettyActual, err := json.MarshalIndent(actual, "", "  ")
--		if err != nil {
--			t.Logf("Unable to pretty-print actual JSON: %v\n%#v", err, actual)
--		} else {
--			// We can't use yellow() for the same reason.
--			t.Logf("Actual JSON:\n%s%s%s", yellowCode, prettyActual, resetCode)
--		}
--
--		return false
--	}
--	return true
--}
--
--// AssertJSONEquals serializes a value as JSON, parses an expected string as JSON, and ensures that
--// both are consistent. If they aren't, the expected and actual structures are pretty-printed and
--// shown for comparison.
--//
--// This is useful for comparing structures that are built as nested map[string]interface{} values,
--// which are a pain to construct as literals.
--func AssertJSONEquals(t *testing.T, expectedJSON string, actual interface{}) {
--	if !isJSONEquals(t, expectedJSON, actual) {
--		logFatal(t, "The generated JSON structure differed.")
--	}
--}
--
--// CheckJSONEquals is similar to AssertJSONEquals, but nonfatal.
--func CheckJSONEquals(t *testing.T, expectedJSON string, actual interface{}) {
--	if !isJSONEquals(t, expectedJSON, actual) {
--		logError(t, "The generated JSON structure differed.")
--	}
--}
--
--// AssertNoErr is a convenience function for checking whether an error value is
--// an actual error
--func AssertNoErr(t *testing.T, e error) {
--	if e != nil {
--		logFatal(t, fmt.Sprintf("unexpected error %s", yellow(e.Error())))
--	}
--}
--
--// CheckNoErr is similar to AssertNoErr, except with a non-fatal error
--func CheckNoErr(t *testing.T, e error) {
--	if e != nil {
--		logError(t, fmt.Sprintf("unexpected error %s", yellow(e.Error())))
--	}
--}
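
For orientation, here is a brief, hypothetical sketch of how the assertion helpers above are meant to be used from a test; the Widget type and its values are invented for illustration and are not part of this patch.

    package widget

    import (
        "testing"

        th "github.com/rackspace/gophercloud/testhelper"
    )

    // Widget is a made-up type used only to exercise the helpers.
    type Widget struct {
        Name  string `json:"name"`
        Count int    `json:"count"`
    }

    func TestWidget(t *testing.T) {
        w := Widget{Name: "example", Count: 2}

        // Simple comparison; fails the test fatally on mismatch.
        th.AssertEquals(t, "example", w.Name)

        // Deep comparison with per-field difference reporting.
        th.AssertDeepEquals(t, Widget{Name: "example", Count: 2}, w)

        // JSON comparison that ignores key ordering and whitespace.
        th.AssertJSONEquals(t, `{"name": "example", "count": 2}`, w)
    }
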
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/doc.go
-deleted file mode 100644
-index 25b4dfe..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/doc.go
-+++ /dev/null
-@@ -1,4 +0,0 @@
--/*
--Package testhelper contains methods that are useful for writing unit tests.
--*/
--package testhelper
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/http_responses.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/http_responses.go
-deleted file mode 100644
-index e1f1f9a..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/testhelper/http_responses.go
-+++ /dev/null
-@@ -1,91 +0,0 @@
--package testhelper
--
--import (
--	"encoding/json"
--	"io/ioutil"
--	"net/http"
--	"net/http/httptest"
--	"net/url"
--	"reflect"
--	"testing"
--)
--
--var (
--	// Mux is a multiplexer that can be used to register handlers.
--	Mux *http.ServeMux
--
--	// Server is an in-memory HTTP server for testing.
--	Server *httptest.Server
--)
--
--// SetupHTTP prepares the Mux and Server.
--func SetupHTTP() {
--	Mux = http.NewServeMux()
--	Server = httptest.NewServer(Mux)
--}
--
--// TeardownHTTP releases HTTP-related resources.
--func TeardownHTTP() {
--	Server.Close()
--}
--
--// Endpoint returns a fake endpoint that will actually target the Mux.
--func Endpoint() string {
--	return Server.URL + "/"
--}
--
--// TestFormValues ensures that all the URL parameters given to the http.Request are the same as values.
--func TestFormValues(t *testing.T, r *http.Request, values map[string]string) {
--	want := url.Values{}
--	for k, v := range values {
--		want.Add(k, v)
--	}
--
--	r.ParseForm()
--	if !reflect.DeepEqual(want, r.Form) {
--		t.Errorf("Request parameters = %v, want %v", r.Form, want)
--	}
--}
--
--// TestMethod checks that the Request has the expected method (e.g. GET, POST).
--func TestMethod(t *testing.T, r *http.Request, expected string) {
--	if expected != r.Method {
--		t.Errorf("Request method = %v, expected %v", r.Method, expected)
--	}
--}
--
--// TestHeader checks that the header on the http.Request matches the expected value.
--func TestHeader(t *testing.T, r *http.Request, header string, expected string) {
--	if actual := r.Header.Get(header); expected != actual {
--		t.Errorf("Header %s = %s, expected %s", header, actual, expected)
--	}
--}
--
--// TestBody verifies that the request body matches an expected body.
--func TestBody(t *testing.T, r *http.Request, expected string) {
--	b, err := ioutil.ReadAll(r.Body)
--	if err != nil {
--		t.Errorf("Unable to read body: %v", err)
--	}
--	str := string(b)
--	if expected != str {
--		t.Errorf("Body = %s, expected %s", str, expected)
--	}
--}
--
--// TestJSONRequest verifies that the JSON payload of a request matches an expected structure, without asserting things about
--// whitespace or ordering.
--func TestJSONRequest(t *testing.T, r *http.Request, expected string) {
--	b, err := ioutil.ReadAll(r.Body)
--	if err != nil {
--		t.Errorf("Unable to read request body: %v", err)
--	}
--
--	var actualJSON interface{}
--	err = json.Unmarshal(b, &actualJSON)
--	if err != nil {
--		t.Errorf("Unable to parse request body as JSON: %v", err)
--	}
--
--	CheckJSONEquals(t, expected, actualJSON)
--}
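
As a rough sketch (again not part of the patch), the HTTP helpers above are typically combined as follows; the /widgets route and payload are illustrative assumptions.

    package widget

    import (
        "fmt"
        "net/http"
        "strings"
        "testing"

        th "github.com/rackspace/gophercloud/testhelper"
    )

    func TestCreateWidget(t *testing.T) {
        // Start the in-memory server and tear it down when the test ends.
        th.SetupHTTP()
        defer th.TeardownHTTP()

        // Register a handler that validates the incoming request.
        th.Mux.HandleFunc("/widgets", func(w http.ResponseWriter, r *http.Request) {
            th.TestMethod(t, r, "POST")
            th.TestHeader(t, r, "Content-Type", "application/json")
            th.TestJSONRequest(t, r, `{"name": "example"}`)
            fmt.Fprint(w, `{"id": 1}`)
        })

        // Exercise the fake endpoint exactly as production code would.
        resp, err := http.Post(th.Endpoint()+"widgets", "application/json",
            strings.NewReader(`{"name": "example"}`))
        th.AssertNoErr(t, err)
        resp.Body.Close()
    }
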
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/util.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/util.go
-deleted file mode 100644
-index 101fd39..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/util.go
-+++ /dev/null
-@@ -1,39 +0,0 @@
--package gophercloud
--
--import (
--	"errors"
--	"strings"
--	"time"
--)
--
--// WaitFor polls a predicate function, once per second, up to a timeout limit.
--// It usually does this to wait for the resource to transition to a certain state.
--func WaitFor(timeout int, predicate func() (bool, error)) error {
--	start := time.Now().Second()
--	for {
--		// Force a 1s sleep
--		time.Sleep(1 * time.Second)
--
--		// If a timeout is set, and that's been exceeded, shut it down
--		if timeout >= 0 && time.Now().Second()-start >= timeout {
--			return errors.New("A timeout occurred")
--		}
--
--		// Execute the function
--		satisfied, err := predicate()
--		if err != nil {
--			return err
--		}
--		if satisfied {
--			return nil
--		}
--	}
--}
--
--// NormalizeURL ensures that each endpoint URL has a closing `/`, as expected by ServiceClient.
--func NormalizeURL(url string) string {
--	if !strings.HasSuffix(url, "/") {
--		return url + "/"
--	}
--	return url
--}
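
A minimal, self-contained sketch of how WaitFor is intended to be called; the readiness condition below is a stand-in, not anything from this patch.

    package main

    import (
        "log"
        "time"

        "github.com/rackspace/gophercloud"
    )

    func main() {
        start := time.Now()

        // Poll once per second, giving up after 10 seconds, until the
        // (made-up) condition below reports that it is satisfied.
        err := gophercloud.WaitFor(10, func() (bool, error) {
            return time.Since(start) > 3*time.Second, nil
        })
        if err != nil {
            log.Fatalf("condition was never satisfied: %v", err)
        }
        log.Println("condition satisfied")
    }

Note that WaitFor measures elapsed time with time.Now().Second(), so the effective timeout can shift when polling crosses a minute boundary; treat the limit as approximate.
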
-diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/util_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/util_test.go
-deleted file mode 100644
-index 5a15a00..0000000
---- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/util_test.go
-+++ /dev/null
-@@ -1,14 +0,0 @@
--package gophercloud
--
--import (
--	"testing"
--
--	th "github.com/rackspace/gophercloud/testhelper"
--)
--
--func TestWaitFor(t *testing.T) {
--	err := WaitFor(5, func() (bool, error) {
--		return true, nil
--	})
--	th.CheckNoErr(t, err)
--}
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/LICENSE-MIT b/Godeps/_workspace/src/github.com/skratchdot/LICENSE-MIT
-deleted file mode 100644
-index afd04c8..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/LICENSE-MIT
-+++ /dev/null
-@@ -1,22 +0,0 @@
--Copyright (c) 2013 skratchdot
--
--Permission is hereby granted, free of charge, to any person
--obtaining a copy of this software and associated documentation
--files (the "Software"), to deal in the Software without
--restriction, including without limitation the rights to use,
--copy, modify, merge, publish, distribute, sublicense, and/or sell
--copies of the Software, and to permit persons to whom the
--Software is furnished to do so, subject to the following
--conditions:
--
--The above copyright notice and this permission notice shall be
--included in all copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
--EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
--OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
--NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
--HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
--WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
--FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
--OTHER DEALINGS IN THE SOFTWARE.
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec.go b/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec.go
-deleted file mode 100644
-index cc37339..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--// +build !windows,!darwin
--
--package open
--
--import (
--	"os/exec"
--)
--
--func open(input string) *exec.Cmd {
--	return exec.Command("xdg-open", input)
--}
--
--func openWith(input string, appName string) *exec.Cmd {
--	return exec.Command(appName, input)
--}
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_darwin.go b/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_darwin.go
-deleted file mode 100644
-index 16160e6..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_darwin.go
-+++ /dev/null
-@@ -1,15 +0,0 @@
--// +build darwin
--
--package open
--
--import (
--	"os/exec"
--)
--
--func open(input string) *exec.Cmd {
--	return exec.Command("open", input)
--}
--
--func openWith(input string, appName string) *exec.Cmd {
--	return exec.Command("open", "-a", appName, input)
--}
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_windows.go b/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_windows.go
-deleted file mode 100644
-index ac44cfa..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/exec_windows.go
-+++ /dev/null
-@@ -1,21 +0,0 @@
--// +build windows
--
--package open
--
--import (
--	"os/exec"
--	"strings"
--)
--
--func cleaninput(input string) string {
--	r := strings.NewReplacer("&", "^&")
--	return r.Replace(input)
--}
--
--func open(input string) *exec.Cmd {
--	return exec.Command("cmd", "/C", "start", "", cleaninput(input))
--}
--
--func openWith(input string, appName string) *exec.Cmd {
--	return exec.Command("cmd", "/C", "start", "", appName, cleaninput(input))
--}
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open.go b/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open.go
-deleted file mode 100644
-index b1f648f..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open.go
-+++ /dev/null
-@@ -1,50 +0,0 @@
--/*
--
--	Open a file, directory, or URI using the OS's default
--	application for that object type.  Optionally, you can
--	specify an application to use.
--
--	This is a proxy for the following commands:
--
--	         OSX: "open"
--	     Windows: "start"
--	 Linux/Other: "xdg-open"
--
--	This is a golang port of the node.js module: https://github.com/pwnall/node-open
--
--*/
--package open
--
--/*
--	Open a file, directory, or URI using the OS's default
--	application for that object type. Wait for the open
--	command to complete.
--*/
--func Run(input string) error {
--	return open(input).Run()
--}
--
--/*
--	Open a file, directory, or URI using the OS's default
--	application for that object type. Don't wait for the
--	open command to complete.
--*/
--func Start(input string) error {
--	return open(input).Start()
--}
--
--/*
--	Open a file, directory, or URI using the specified application.
--	Wait for the open command to complete.
--*/
--func RunWith(input string, appName string) error {
--	return openWith(input, appName).Run()
--}
--
--/*
--	Open a file, directory, or URI using the specified application.
--	Don't wait for the open command to complete.
--*/
--func StartWith(input string, appName string) error {
--	return openWith(input, appName).Start()
--}
-diff --git a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open_test.go b/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open_test.go
-deleted file mode 100644
-index 5db2da2..0000000
---- a/Godeps/_workspace/src/github.com/skratchdot/open-golang/open/open_test.go
-+++ /dev/null
-@@ -1,70 +0,0 @@
--package open
--
--import "testing"
--
--func TestRun(t *testing.T) {
--	// shouldn't error
--	input := "https://google.com/"
--	err := Run(input)
--	if err != nil {
--		t.Errorf("open.Run(\"%s\") threw an error: %s", input, err)
--	}
--
--	// should error
--	input = "xxxxxxxxxxxxxxx"
--	err = Run(input)
--	if err == nil {
--		t.Errorf("Run(\"%s\") did not throw an error as expected", input)
--	}
--}
--
--func TestStart(t *testing.T) {
--	// shouldn't error
--	input := "https://google.com/"
--	err := Start(input)
--	if err != nil {
--		t.Errorf("open.Start(\"%s\") threw an error: %s", input, err)
--	}
--
--	// shouldn't error
--	input = "xxxxxxxxxxxxxxx"
--	err = Start(input)
--	if err != nil {
--		t.Errorf("open.Start(\"%s\") shouldn't even fail on invalid input: %s", input, err)
--	}
--}
--
--func TestRunWith(t *testing.T) {
--	// shouldn't error
--	input := "https://google.com/"
--	app := "firefox"
--	err := RunWith(input, app)
--	if err != nil {
--		t.Errorf("open.RunWith(\"%s\", \"%s\") threw an error: %s", input, app, err)
--	}
--
--	// should error
--	app = "xxxxxxxxxxxxxxx"
--	err = RunWith(input, app)
--	if err == nil {
--		t.Errorf("RunWith(\"%s\", \"%s\") did not throw an error as expected", input, app)
--	}
--}
--
--func TestStartWith(t *testing.T) {
--	// shouldn't error
--	input := "https://google.com/"
--	app := "firefox"
--	err := StartWith(input, app)
--	if err != nil {
--		t.Errorf("open.StartWith(\"%s\", \"%s\") threw an error: %s", input, app, err)
--	}
--
--	// shouldn't error
--	input = "[<Invalid URL>]"
--	err = StartWith(input, app)
--	if err != nil {
--		t.Errorf("StartWith(\"%s\", \"%s\") shouldn't even fail on invalid input: %s", input, app, err)
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/skynetservices/skydns/msg/service.go b/Godeps/_workspace/src/github.com/skynetservices/skydns/msg/service.go
-deleted file mode 100644
-index 564363a..0000000
---- a/Godeps/_workspace/src/github.com/skynetservices/skydns/msg/service.go
-+++ /dev/null
-@@ -1,95 +0,0 @@
--// Copyright (c) 2014 The SkyDNS Authors. All rights reserved.
--// Use of this source code is governed by The MIT License (MIT) that can be
--// found in the LICENSE file.
--
--package msg
--
--import (
--	"net"
--	"path"
--	"strings"
--
--	"github.com/miekg/dns"
--)
--
--// This *is* the rdata from a SRV record, but with a twist.
--// Host (Target in SRV) must be a domain name, but if it looks like an IP
--// address (4/6), we will treat it like an IP address.
--type Service struct {
--	Host     string `json:"host,omitempty"`
--	Port     int    `json:"port,omitempty"`
--	Priority int    `json:"priority,omitempty"`
--	Weight   int    `json:"weight,omitempty"`
--	Ttl      uint32 `json:"ttl,omitempty"`
--	// etcd key where we found this service; ignored for JSON (un)marshalling
--	Key string `json:"-"`
--}
--
--// NewSRV returns a new SRV record based on the Service.
--func (s *Service) NewSRV(name string, weight uint16) *dns.SRV {
--	return &dns.SRV{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: s.Ttl},
--		Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: dns.Fqdn(s.Host)}
--}
--
--// NewA returns a new A record based on the Service.
--func (s *Service) NewA(name string, ip net.IP) *dns.A {
--	return &dns.A{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: s.Ttl}, A: ip}
--}
--
--// NewAAAA returns a new AAAA record based on the Service.
--func (s *Service) NewAAAA(name string, ip net.IP) *dns.AAAA {
--	return &dns.AAAA{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: s.Ttl}, AAAA: ip}
--}
--
--// NewCNAME returns a new CNAME record based on the Service.
--func (s *Service) NewCNAME(name string, target string) *dns.CNAME {
--	return &dns.CNAME{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: s.Ttl}, Target: target}
--}
--
--// NewNS returns a new NS record based on the Service.
--func (s *Service) NewNS(name string, target string) *dns.NS {
--	return &dns.NS{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: s.Ttl}, Ns: target}
--}
--
--// NewPTR returns a new PTR record based on the Service.
--func (s *Service) NewPTR(name string, ttl uint32) *dns.PTR {
--	return &dns.PTR{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: ttl}, Ptr: dns.Fqdn(s.Host)}
--}
--
--// As Path, but
--// if a name contains wildcards (*), the name will be chopped off before the (first) wildcard, and
--// we do a higher-level search and later find the matching names.
--// So service.*.skydns.local will look for all services under skydns.local and will later check
--// for names that match service.*.skydns.local.  If a wildcard is found the returned bool is true.
--func PathWithWildcard(s string) (string, bool) {
--	l := dns.SplitDomainName(s)
--	for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
--		l[i], l[j] = l[j], l[i]
--	}
--	for i, k := range l {
--		if k == "*" {
--			return path.Join(append([]string{"/skydns/"}, l[:i]...)...), true
--		}
--	}
--	return path.Join(append([]string{"/skydns/"}, l...)...), false
--}
--
--// Path converts a domainname to an etcd path. If s looks like service.staging.skydns.local.,
--// the resulting key will be /skydns/local/skydns/staging/service .
--func Path(s string) string {
--	l := dns.SplitDomainName(s)
--	for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
--		l[i], l[j] = l[j], l[i]
--	}
--	return path.Join(append([]string{"/skydns/"}, l...)...)
--}
--
--// Domain is the opposite of Path.
--func Domain(s string) string {
--	l := strings.Split(s, "/")
--	// start with 1, to strip /skydns
--	for i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {
--		l[i], l[j] = l[j], l[i]
--	}
--	return dns.Fqdn(strings.Join(l[1:len(l)-1], "."))
--}
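
To make the key mapping concrete, a small sketch of the round trip between domain names and etcd keys; the expected output in the comments follows directly from the doc comments above.

    package main

    import (
        "fmt"

        "github.com/skynetservices/skydns/msg"
    )

    func main() {
        // Reverse the labels of a domain name into an etcd key.
        fmt.Println(msg.Path("service.staging.skydns.local."))
        // /skydns/local/skydns/staging/service

        // With a wildcard, the key is truncated at the first "*" and the
        // boolean reports that a wildcard was present.
        key, wildcard := msg.PathWithWildcard("service.*.skydns.local.")
        fmt.Println(key, wildcard)
        // /skydns/local/skydns true

        // Domain is the inverse of Path.
        fmt.Println(msg.Domain("/skydns/local/skydns/staging/service"))
        // service.staging.skydns.local.
    }
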
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/.gitignore b/Godeps/_workspace/src/github.com/spf13/cobra/.gitignore
-deleted file mode 100644
-index 36d1a84..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/.gitignore
-+++ /dev/null
-@@ -1,24 +0,0 @@
--# Compiled Object files, Static and Dynamic libs (Shared Objects)
--*.o
--*.a
--*.so
--
--# Folders
--_obj
--_test
--
--# Architecture specific extensions/prefixes
--*.[568vq]
--[568vq].out
--
--*.cgo1.go
--*.cgo2.c
--_cgo_defun.c
--_cgo_gotypes.go
--_cgo_export.*
--
--_testmain.go
--
--*.exe
--
--cobra.test
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml b/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml
-deleted file mode 100644
-index 6de9ac2..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml
-+++ /dev/null
-@@ -1,6 +0,0 @@
--language: go
--go:
--  - 1.1
--script:
--  - go test ./...
--  - go build
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt b/Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt
-deleted file mode 100644
-index 298f0e2..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt
-+++ /dev/null
-@@ -1,174 +0,0 @@
--                                Apache License
--                           Version 2.0, January 2004
--                        http://www.apache.org/licenses/
--
--   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
--
--   1. Definitions.
--
--      "License" shall mean the terms and conditions for use, reproduction,
--      and distribution as defined by Sections 1 through 9 of this document.
--
--      "Licensor" shall mean the copyright owner or entity authorized by
--      the copyright owner that is granting the License.
--
--      "Legal Entity" shall mean the union of the acting entity and all
--      other entities that control, are controlled by, or are under common
--      control with that entity. For the purposes of this definition,
--      "control" means (i) the power, direct or indirect, to cause the
--      direction or management of such entity, whether by contract or
--      otherwise, or (ii) ownership of fifty percent (50%) or more of the
--      outstanding shares, or (iii) beneficial ownership of such entity.
--
--      "You" (or "Your") shall mean an individual or Legal Entity
--      exercising permissions granted by this License.
--
--      "Source" form shall mean the preferred form for making modifications,
--      including but not limited to software source code, documentation
--      source, and configuration files.
--
--      "Object" form shall mean any form resulting from mechanical
--      transformation or translation of a Source form, including but
--      not limited to compiled object code, generated documentation,
--      and conversions to other media types.
--
--      "Work" shall mean the work of authorship, whether in Source or
--      Object form, made available under the License, as indicated by a
--      copyright notice that is included in or attached to the work
--      (an example is provided in the Appendix below).
--
--      "Derivative Works" shall mean any work, whether in Source or Object
--      form, that is based on (or derived from) the Work and for which the
--      editorial revisions, annotations, elaborations, or other modifications
--      represent, as a whole, an original work of authorship. For the purposes
--      of this License, Derivative Works shall not include works that remain
--      separable from, or merely link (or bind by name) to the interfaces of,
--      the Work and Derivative Works thereof.
--
--      "Contribution" shall mean any work of authorship, including
--      the original version of the Work and any modifications or additions
--      to that Work or Derivative Works thereof, that is intentionally
--      submitted to Licensor for inclusion in the Work by the copyright owner
--      or by an individual or Legal Entity authorized to submit on behalf of
--      the copyright owner. For the purposes of this definition, "submitted"
--      means any form of electronic, verbal, or written communication sent
--      to the Licensor or its representatives, including but not limited to
--      communication on electronic mailing lists, source code control systems,
--      and issue tracking systems that are managed by, or on behalf of, the
--      Licensor for the purpose of discussing and improving the Work, but
--      excluding communication that is conspicuously marked or otherwise
--      designated in writing by the copyright owner as "Not a Contribution."
--
--      "Contributor" shall mean Licensor and any individual or Legal Entity
--      on behalf of whom a Contribution has been received by Licensor and
--      subsequently incorporated within the Work.
--
--   2. Grant of Copyright License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      copyright license to reproduce, prepare Derivative Works of,
--      publicly display, publicly perform, sublicense, and distribute the
--      Work and such Derivative Works in Source or Object form.
--
--   3. Grant of Patent License. Subject to the terms and conditions of
--      this License, each Contributor hereby grants to You a perpetual,
--      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
--      (except as stated in this section) patent license to make, have made,
--      use, offer to sell, sell, import, and otherwise transfer the Work,
--      where such license applies only to those patent claims licensable
--      by such Contributor that are necessarily infringed by their
--      Contribution(s) alone or by combination of their Contribution(s)
--      with the Work to which such Contribution(s) was submitted. If You
--      institute patent litigation against any entity (including a
--      cross-claim or counterclaim in a lawsuit) alleging that the Work
--      or a Contribution incorporated within the Work constitutes direct
--      or contributory patent infringement, then any patent licenses
--      granted to You under this License for that Work shall terminate
--      as of the date such litigation is filed.
--
--   4. Redistribution. You may reproduce and distribute copies of the
--      Work or Derivative Works thereof in any medium, with or without
--      modifications, and in Source or Object form, provided that You
--      meet the following conditions:
--
--      (a) You must give any other recipients of the Work or
--          Derivative Works a copy of this License; and
--
--      (b) You must cause any modified files to carry prominent notices
--          stating that You changed the files; and
--
--      (c) You must retain, in the Source form of any Derivative Works
--          that You distribute, all copyright, patent, trademark, and
--          attribution notices from the Source form of the Work,
--          excluding those notices that do not pertain to any part of
--          the Derivative Works; and
--
--      (d) If the Work includes a "NOTICE" text file as part of its
--          distribution, then any Derivative Works that You distribute must
--          include a readable copy of the attribution notices contained
--          within such NOTICE file, excluding those notices that do not
--          pertain to any part of the Derivative Works, in at least one
--          of the following places: within a NOTICE text file distributed
--          as part of the Derivative Works; within the Source form or
--          documentation, if provided along with the Derivative Works; or,
--          within a display generated by the Derivative Works, if and
--          wherever such third-party notices normally appear. The contents
--          of the NOTICE file are for informational purposes only and
--          do not modify the License. You may add Your own attribution
--          notices within Derivative Works that You distribute, alongside
--          or as an addendum to the NOTICE text from the Work, provided
--          that such additional attribution notices cannot be construed
--          as modifying the License.
--
--      You may add Your own copyright statement to Your modifications and
--      may provide additional or different license terms and conditions
--      for use, reproduction, or distribution of Your modifications, or
--      for any such Derivative Works as a whole, provided Your use,
--      reproduction, and distribution of the Work otherwise complies with
--      the conditions stated in this License.
--
--   5. Submission of Contributions. Unless You explicitly state otherwise,
--      any Contribution intentionally submitted for inclusion in the Work
--      by You to the Licensor shall be under the terms and conditions of
--      this License, without any additional terms or conditions.
--      Notwithstanding the above, nothing herein shall supersede or modify
--      the terms of any separate license agreement you may have executed
--      with Licensor regarding such Contributions.
--
--   6. Trademarks. This License does not grant permission to use the trade
--      names, trademarks, service marks, or product names of the Licensor,
--      except as required for reasonable and customary use in describing the
--      origin of the Work and reproducing the content of the NOTICE file.
--
--   7. Disclaimer of Warranty. Unless required by applicable law or
--      agreed to in writing, Licensor provides the Work (and each
--      Contributor provides its Contributions) on an "AS IS" BASIS,
--      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
--      implied, including, without limitation, any warranties or conditions
--      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
--      PARTICULAR PURPOSE. You are solely responsible for determining the
--      appropriateness of using or redistributing the Work and assume any
--      risks associated with Your exercise of permissions under this License.
--
--   8. Limitation of Liability. In no event and under no legal theory,
--      whether in tort (including negligence), contract, or otherwise,
--      unless required by applicable law (such as deliberate and grossly
--      negligent acts) or agreed to in writing, shall any Contributor be
--      liable to You for damages, including any direct, indirect, special,
--      incidental, or consequential damages of any character arising as a
--      result of this License or out of the use or inability to use the
--      Work (including but not limited to damages for loss of goodwill,
--      work stoppage, computer failure or malfunction, or any and all
--      other commercial damages or losses), even if such Contributor
--      has been advised of the possibility of such damages.
--
--   9. Accepting Warranty or Additional Liability. While redistributing
--      the Work or Derivative Works thereof, You may choose to offer,
--      and charge a fee for, acceptance of support, warranty, indemnity,
--      or other liability obligations and/or rights consistent with this
--      License. However, in accepting such obligations, You may act only
--      on Your own behalf and on Your sole responsibility, not on behalf
--      of any other Contributor, and only if You agree to indemnify,
--      defend, and hold each Contributor harmless for any liability
--      incurred by, or claims asserted against, such Contributor by reason
--      of your accepting any such warranty or additional liability.
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/README.md b/Godeps/_workspace/src/github.com/spf13/cobra/README.md
-deleted file mode 100644
-index 528233d..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/README.md
-+++ /dev/null
-@@ -1,399 +0,0 @@
--# Cobra
--
--A Commander for modern go CLI interactions
--
--[![Build Status](https://travis-ci.org/spf13/cobra.png)](https://travis-ci.org/spf13/cobra)
--
--## Overview
--
--Cobra is a commander providing a simple interface to create powerful modern CLI
--interfaces similar to git & go tools. In addition to providing an interface, Cobra
--simultaneously provides a controller to organize your application code.
--
--Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by
--providing **fully posix compliant flags** (including short & long versions),
--**nesting commands**, and the ability to **define your own help and usage** for any or
--all commands.
--
--Cobra has an exceptionally clean interface and simple design without needless
--constructors or initialization methods.
--
--Applications built with Cobra commands are designed to be as user friendly as
--possible. Flags can be placed before or after the command (as long as a
--confusing space isn’t provided). Both short and long flags can be used. A
--command need not even be fully typed. The shortest unambiguous string will
--suffice. Help is automatically generated and available for the application or
--for a specific command using either the help command or the --help flag.
--
--## Concepts
--
--Cobra is built on a structure of commands & flags.
--
--**Commands** represent actions and **Flags** are modifiers for those actions.
--
--In the following example 'server' is a command and 'port' is a flag.
--
--    hugo server --port=1313
--
--### Commands
--
--Command is the central point of the application. Each interaction that
--the application supports will be contained in a Command. A command can
--have children commands and optionally run an action.
--
--In the example above 'server' is the command
--
--A Command has the following structure:
--
--    type Command struct {
--        Use string // The one-line usage message.
--        Short string // The short description shown in the 'help' output.
--        Long string // The long message shown in the 'help <this-command>' output.
--        Run func(cmd *Command, args []string) // Run runs the command.
--    }
--
--### Flags
--
--A Flag is a way to modify the behavior of a command. Cobra supports
--fully posix compliant flags as well as the go flag package. 
--A Cobra command can define flags that persist through to children commands
--and flags that are only available to that command.
--
--In the example above 'port' is the flag.
--
--Flag functionality is provided by the [pflag
--library](https://github.com/ogier/pflag), a fork of the flag standard library
--which maintains the same interface while adding posix compliance.
--
--## Usage
--
--Cobra works by creating a set of commands and then organizing them into a tree.
--The tree defines the structure of the application.
--
--Once each command is defined with its corresponding flags, then the
--tree is assigned to the commander which is finally executed.
--
--### Installing
--Using Cobra is easy. First use go get to install the latest version
--of the library.
--
--    $ go get github.com/spf13/cobra
--
--Next include cobra in your application.
--
--    import "github.com/spf13/cobra"
--
--### Create the root command
--
--The root command represents your binary itself.
--
--Cobra doesn't require any special constructors. Simply create your commands.
--
--    var HugoCmd = &cobra.Command{
--        Use:   "hugo",
--        Short: "Hugo is a very fast static site generator",
--        Long: `A Fast and Flexible Static Site Generator built with
--                love by spf13 and friends in Go.
--                Complete documentation is available at http://hugo.spf13.com`,
--        Run: func(cmd *cobra.Command, args []string) {
--            // Do Stuff Here
--        },
--    }
--
--### Create additional commands
--
--Additional commands can be defined.
--
--    var versionCmd = &cobra.Command{
--        Use:   "version",
--        Short: "Print the version number of Hugo",
--        Long:  `All software has versions. This is Hugo's`,
--        Run: func(cmd *cobra.Command, args []string) {
--            fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
--        },
--    }
--
--### Attach command to its parent
--In this example we are attaching it to the root, but commands can be attached at any level.
--
--	HugoCmd.AddCommand(versionCmd)
--
--### Assign flags to a command
--
--Since the flags are defined and used in different locations, we need to
--define a variable outside with the correct scope to assign the flag to
--work with.
--
--    var Verbose bool
--    var Source string
--
--There are two different approaches to assign a flag.
--
--#### Persistent Flags
--
--A flag can be 'persistent' meaning that this flag will be available to the
--command it's assigned to as well as every command under that command. For
--global flags assign a flag as a persistent flag on the root.
--
--	HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
--
--#### Local Flags
--
--A flag can also be assigned locally which will only apply to that specific command.
--
--	HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
--
--### Once all commands and flags are defined, Execute the commands
--
--Execute should be run on the root for clarity, though it can be called on any command.
--
--    HugoCmd.Execute()
--
--## Example
--
--In the example below we have defined three commands. Two are at the top level
--and one (cmdTimes) is a child of one of the top commands. In this case the root
--is not executable meaning that a subcommand is required. This is accomplished
--by not providing a 'Run' for the 'rootCmd'.
--
--We have only defined one flag for a single command.
--
--More documentation about flags is available at https://github.com/spf13/pflag
--
--    import(
--        "github.com/spf13/cobra"
--        "fmt"
--        "strings"
--    )
--
--    func main() {
--
--        var echoTimes int
--
--        var cmdPrint = &cobra.Command{
--            Use:   "print [string to print]",
--            Short: "Print anything to the screen",
--            Long:  `print is for printing anything back to the screen.
--            For many years people have printed back to the screen.
--            `,
--            Run: func(cmd *cobra.Command, args []string) {
--                fmt.Println("Print: " + strings.Join(args, " "))
--            },
--        }
--
--        var cmdEcho = &cobra.Command{
--            Use:   "echo [string to echo]",
--            Short: "Echo anything to the screen",
--            Long:  `echo is for echoing anything back.
--            Echo works a lot like print, except it has a child command.
--            `,
--            Run: func(cmd *cobra.Command, args []string) {
--                fmt.Println("Print: " + strings.Join(args, " "))
--            },
--        }
--
--        var cmdTimes = &cobra.Command{
--            Use:   "times [# times] [string to echo]",
--            Short: "Echo anything to the screen more times",
--            Long:  `echo things multiple times back to the user by providing
--            a count and a string.`,
--            Run: func(cmd *cobra.Command, args []string) {
--                for i:=0; i < echoTimes; i++ {
--                    fmt.Println("Echo: " + strings.Join(args, " "))
--                }
--            },
--        }
--
--        cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
--
--        var rootCmd = &cobra.Command{Use: "app"}
--        rootCmd.AddCommand(cmdPrint, cmdEcho)
--        cmdEcho.AddCommand(cmdTimes)
--        rootCmd.Execute()
--    }
--
--For a more complete example of a larger application, please check out [Hugo](http://hugo.spf13.com)
--
--## The Help Command
--
--Cobra automatically adds a help command to your application.
--This will be called when a user runs 'app help'. Additionally help will also
--support all other commands as input. Say, for instance, you have a command
--called 'create'; without any additional configuration Cobra will work when
--'app help create' is called.
--
--### Example
--
--The following output is automatically generated by cobra. Nothing beyond the
--command and flag definitions is needed.
--
--    > hugo help
--
--    A Fast and Flexible Static Site Generator built with
--    love by spf13 and friends in Go.
--
--    Complete documentation is available at http://hugo.spf13.com
--
--    Usage:
--      hugo [flags]
--      hugo [command]
--
--    Available Commands:
--      server          :: Hugo runs it's own a webserver to render the files
--      version         :: Print the version number of Hugo
--      check           :: Check content in the source directory
--      benchmark       :: Benchmark hugo by building a site a number of times
--      help [command]  :: Help about any command
--
--     Available Flags:
--      -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
--      -D, --build-drafts=false: include content marked as draft
--          --config="": config file (default is path/config.yaml|json|toml)
--      -d, --destination="": filesystem path to write files to
--      -s, --source="": filesystem path to read files relative from
--          --stepAnalysis=false: display memory and timing of different steps of the program
--          --uglyurls=false: if true, use /filename.html instead of /filename/
--      -v, --verbose=false: verbose output
--      -w, --watch=false: watch filesystem for changes and recreate as needed
--
--    Use "hugo help [command]" for more information about that command.
--
--
--
--Help is just a command like any other. There is no special logic or behavior
--around it. In fact you can provide your own if you want.
--
--### Defining your own help
--
--You can provide your own Help command or your own template for the default command to use.
--
--The default help command is 
--
--    func (c *Command) initHelp() {
--        if c.helpCommand == nil {
--            c.helpCommand = &Command{
--                Use:   "help [command]",
--                Short: "Help about any command",
--                Long: `Help provides help for any command in the application.
--        Simply type ` + c.Name() + ` help [path to command] for full details.`,
--                Run: c.HelpFunc(),
--            }
--        }
--        c.AddCommand(c.helpCommand)
--    }
--
--You can provide your own command, function or template through the following methods.
--
--    command.SetHelpCommand(cmd *Command)
--
--    command.SetHelpFunc(f func(*Command, []string))
--
--    command.SetHelpTemplate(s string)
--
--The latter two will also apply to any children commands.
--
--## Usage
--
--When the user provides an invalid flag or invalid command Cobra responds by
--showing the user the 'usage'
--
--### Example
--You may recognize this from the help above. That's because the default help
--embeds the usage as part of its output.
--
--    Usage:
--      hugo [flags]
--      hugo [command]
--
--    Available Commands:
--      server          Hugo runs it's own a webserver to render the files
--      version         Print the version number of Hugo
--      check           Check content in the source directory
--      benchmark       Benchmark hugo by building a site a number of times
--      help [command]  Help about any command
--
--     Available Flags:
--      -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
--      -D, --build-drafts=false: include content marked as draft
--          --config="": config file (default is path/config.yaml|json|toml)
--      -d, --destination="": filesystem path to write files to
--      -s, --source="": filesystem path to read files relative from
--          --stepAnalysis=false: display memory and timing of different steps of the program
--          --uglyurls=false: if true, use /filename.html instead of /filename/
--      -v, --verbose=false: verbose output
--      -w, --watch=false: watch filesystem for changes and recreate as needed
--
--### Defining your own usage
--You can provide your own usage function or template for cobra to use.
--
--The default usage function is
--
--		return func(c *Command) error {
--			err := tmpl(c.Out(), c.UsageTemplate(), c)
--			return err
--		}
--
--Like help, the function and template are overridable through public methods.
--
--    command.SetUsageFunc(f func(*Command) error)
--
--    command.SetUsageTemplate(s string)
--
--
--## Debugging
--
--Cobra provides a ‘DebugFlags’ method on a command which, when called, will print
--out everything Cobra knows about the flags for each command.
--
--### Example
--
--    command.DebugFlags()
--
--## Release Notes
--* **0.9.0** June 17, 2014
--  * flags can appear anywhere in the args (provided they are unambiguous)
--  * --help prints usage screen for app or command
--  * Prefix matching for commands
--  * Cleaner looking help and usage output
--  * Extensive test suite
--* **0.8.0** Nov 5, 2013
--  * Reworked interface to remove commander completely
--  * Command now primary structure
--  * No initialization needed
--  * Usage & Help templates & functions definable at any level
--  * Updated Readme
--* **0.7.0** Sept 24, 2013
--  * Needs more eyes
--  * Test suite
--  * Support for automatic error messages
--  * Support for help command
--  * Support for printing to any io.Writer instead of os.Stderr
--  * Support for persistent flags which cascade down tree
--  * Ready for integration into Hugo
--* **0.1.0** Sept 3, 2013
--  * Implement first draft
--
--## ToDo
--* Launch proper documentation site
--
--## Contributing
--
--1. Fork it
--2. Create your feature branch (`git checkout -b my-new-feature`)
--3. Commit your changes (`git commit -am 'Add some feature'`)
--4. Push to the branch (`git push origin my-new-feature`)
--5. Create new Pull Request
--
--## Contributors
--
--Names in no particular order:
--
--* [spf13](https://github.com/spf13)
--
--## License
--
--Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
--
--
--[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
--
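
As a complement to the README, a small hedged sketch of overriding the help and usage machinery it describes; the command name and template are placeholders only.

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        rootCmd := &cobra.Command{
            Use:   "app",
            Short: "Demonstrates custom help and usage",
            Run: func(cmd *cobra.Command, args []string) {
                fmt.Println("app ran")
            },
        }

        // Replace the default help output for this command and its children.
        rootCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
            fmt.Println("custom help for", cmd.Name())
        })

        // Replace the usage template shown on invalid flags or commands.
        rootCmd.SetUsageTemplate("Usage: {{.Use}} [flags] [command]\n")

        if err := rootCmd.Execute(); err != nil {
            fmt.Println(err)
        }
    }
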
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go
-deleted file mode 100644
-index 1b5ca36..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go
-+++ /dev/null
-@@ -1,104 +0,0 @@
--// Copyright © 2013 Steve Francia <spf at spf13.com>.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--// http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--// Commands similar to git, go tools and other modern CLI tools
--// inspired by go, go-Commander, gh and subcommand
--
--package cobra
--
--import (
--	"fmt"
--	"io"
--	"reflect"
--	"strconv"
--	"strings"
--	"text/template"
--)
--
--var initializers []func()
--
--// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
--// Set this to true to enable it
--var EnablePrefixMatching bool = false
--
--//OnInitialize takes a series of func() arguments and appends them to a slice of func().
--func OnInitialize(y ...func()) {
--	for _, x := range y {
--		initializers = append(initializers, x)
--	}
--}
--
--//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
--//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
--//ints and then compared.
--func Gt(a interface{}, b interface{}) bool {
--	var left, right int64
--	av := reflect.ValueOf(a)
--
--	switch av.Kind() {
--	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
--		left = int64(av.Len())
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		left = av.Int()
--	case reflect.String:
--		left, _ = strconv.ParseInt(av.String(), 10, 64)
--	}
--
--	bv := reflect.ValueOf(b)
--
--	switch bv.Kind() {
--	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
--		right = int64(bv.Len())
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		right = bv.Int()
--	case reflect.String:
--		right, _ = strconv.ParseInt(bv.String(), 10, 64)
--	}
--
--	return left > right
--}
--
--//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
--func Eq(a interface{}, b interface{}) bool {
--	av := reflect.ValueOf(a)
--	bv := reflect.ValueOf(b)
--
--	switch av.Kind() {
--	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
--		panic("Eq called on unsupported type")
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		return av.Int() == bv.Int()
--	case reflect.String:
--		return av.String() == bv.String()
--	}
--	return false
--}
--
--//rpad adds padding to the right of a string
--func rpad(s string, padding int) string {
--	template := fmt.Sprintf("%%-%ds", padding)
--	return fmt.Sprintf(template, s)
--}
--
--// tmpl executes the given template text on data, writing the result to w.
--func tmpl(w io.Writer, text string, data interface{}) error {
--	t := template.New("top")
--	t.Funcs(template.FuncMap{
--		"trim": strings.TrimSpace,
--		"rpad": rpad,
--		"gt":   Gt,
--		"eq":   Eq,
--	})
--	template.Must(t.Parse(text))
--	return t.Execute(w, data)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go
-deleted file mode 100644
-index dd44812..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go
-+++ /dev/null
-@@ -1,553 +0,0 @@
--package cobra
--
--import (
--	"bytes"
--	"fmt"
--	"strings"
--	"testing"
--)
--
--var _ = fmt.Println
--
--var tp, te, tt, t1 []string
--var flagb1, flagb2, flagb3, flagbr bool
--var flags1, flags2, flags3 string
--var flagi1, flagi2, flagi3, flagir int
--var globalFlag1 bool
--var flagEcho, rootcalled bool
--
--var cmdPrint = &Command{
--	Use:   "print [string to print]",
--	Short: "Print anything to the screen",
--	Long:  `an utterly useless command for testing.`,
--	Run: func(cmd *Command, args []string) {
--		tp = args
--	},
--}
--
--var cmdEcho = &Command{
--	Use:     "echo [string to echo]",
--	Aliases: []string{"say"},
--	Short:   "Echo anything to the screen",
--	Long:    `an utterly useless command for testing.`,
--	Run: func(cmd *Command, args []string) {
--		te = args
--	},
--}
--
--var cmdTimes = &Command{
--	Use:   "times [# times] [string to echo]",
--	Short: "Echo anything to the screen more times",
--	Long:  `a slightly useless command for testing.`,
--	Run: func(cmd *Command, args []string) {
--		tt = args
--	},
--}
--
--var cmdRootNoRun = &Command{
--	Use:   "cobra-test",
--	Short: "The root can run its own function",
--	Long:  "The root description for help",
--}
--
--var cmdRootSameName = &Command{
--	Use:   "print",
--	Short: "Root with the same name as a subcommand",
--	Long:  "The root description for help",
--}
--
--var cmdRootWithRun = &Command{
--	Use:   "cobra-test",
--	Short: "The root can run its own function",
--	Long:  "The root description for help",
--	Run: func(cmd *Command, args []string) {
--		rootcalled = true
--	},
--}
--
--func flagInit() {
--	cmdEcho.ResetFlags()
--	cmdPrint.ResetFlags()
--	cmdTimes.ResetFlags()
--	cmdRootNoRun.ResetFlags()
--	cmdRootSameName.ResetFlags()
--	cmdRootWithRun.ResetFlags()
--	cmdEcho.Flags().IntVarP(&flagi1, "intone", "i", 123, "help message for flag intone")
--	cmdTimes.Flags().IntVarP(&flagi2, "inttwo", "j", 234, "help message for flag inttwo")
--	cmdPrint.Flags().IntVarP(&flagi3, "intthree", "i", 345, "help message for flag intthree")
--	cmdEcho.PersistentFlags().StringVarP(&flags1, "strone", "s", "one", "help message for flag strone")
--	cmdTimes.PersistentFlags().StringVarP(&flags2, "strtwo", "t", "two", "help message for flag strtwo")
--	cmdPrint.PersistentFlags().StringVarP(&flags3, "strthree", "s", "three", "help message for flag strthree")
--	cmdEcho.Flags().BoolVarP(&flagb1, "boolone", "b", true, "help message for flag boolone")
--	cmdTimes.Flags().BoolVarP(&flagb2, "booltwo", "c", false, "help message for flag booltwo")
--	cmdPrint.Flags().BoolVarP(&flagb3, "boolthree", "b", true, "help message for flag boolthree")
--}
--
--func commandInit() {
--	cmdEcho.ResetCommands()
--	cmdPrint.ResetCommands()
--	cmdTimes.ResetCommands()
--	cmdRootNoRun.ResetCommands()
--	cmdRootSameName.ResetCommands()
--	cmdRootWithRun.ResetCommands()
--}
--
--func initialize() *Command {
--	tt, tp, te = nil, nil, nil
--	var c = cmdRootNoRun
--	flagInit()
--	commandInit()
--	return c
--}
--
--func initializeWithSameName() *Command {
--	tt, tp, te = nil, nil, nil
--	var c = cmdRootSameName
--	flagInit()
--	commandInit()
--	return c
--}
--
--func initializeWithRootCmd() *Command {
--	cmdRootWithRun.ResetCommands()
--	tt, tp, te, rootcalled = nil, nil, nil, false
--	flagInit()
--	cmdRootWithRun.Flags().BoolVarP(&flagbr, "boolroot", "b", false, "help message for flag boolroot")
--	cmdRootWithRun.Flags().IntVarP(&flagir, "introot", "i", 321, "help message for flag introot")
--	commandInit()
--	return cmdRootWithRun
--}
--
--type resulter struct {
--	Error   error
--	Output  string
--	Command *Command
--}
--
--func fullSetupTest(input string) resulter {
--	c := initializeWithRootCmd()
--
--	return fullTester(c, input)
--}
--
--func noRRSetupTest(input string) resulter {
--	c := initialize()
--
--	return fullTester(c, input)
--}
--
--func rootOnlySetupTest(input string) resulter {
--	c := initializeWithRootCmd()
--
--	return simpleTester(c, input)
--}
--
--func simpleTester(c *Command, input string) resulter {
--	buf := new(bytes.Buffer)
--	// Testing flag with invalid input
--	c.SetOutput(buf)
--	c.SetArgs(strings.Split(input, " "))
--
--	err := c.Execute()
--	output := buf.String()
--
--	return resulter{err, output, c}
--}
--
--func fullTester(c *Command, input string) resulter {
--	buf := new(bytes.Buffer)
--	// Testing flag with invalid input
--	c.SetOutput(buf)
--	cmdEcho.AddCommand(cmdTimes)
--	c.AddCommand(cmdPrint, cmdEcho)
--	c.SetArgs(strings.Split(input, " "))
--
--	err := c.Execute()
--	output := buf.String()
--
--	return resulter{err, output, c}
--}
--
--func checkResultContains(t *testing.T, x resulter, check string) {
--	if !strings.Contains(x.Output, check) {
--		t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", check, x.Output)
--	}
--}
--
--func checkResultOmits(t *testing.T, x resulter, check string) {
--	if strings.Contains(x.Output, check) {
--		t.Errorf("Unexpected response.\nExpecting to omit: \n %q\nGot:\n %q\n", check, x.Output)
--	}
--}
--
--func checkOutputContains(t *testing.T, c *Command, check string) {
--	buf := new(bytes.Buffer)
--	c.SetOutput(buf)
--	c.Execute()
--
--	if !strings.Contains(buf.String(), check) {
--		t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", check, buf.String())
--	}
--}
--
--func TestSingleCommand(t *testing.T) {
--	noRRSetupTest("print one two")
--
--	if te != nil || tt != nil {
--		t.Error("Wrong command called")
--	}
--	if tp == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tp, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--}
--
--func TestChildCommand(t *testing.T) {
--	noRRSetupTest("echo times one two")
--
--	if te != nil || tp != nil {
--		t.Error("Wrong command called")
--	}
--	if tt == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tt, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--}
--
--func TestCommandAlias(t *testing.T) {
--	noRRSetupTest("say times one two")
--
--	if te != nil || tp != nil {
--		t.Error("Wrong command called")
--	}
--	if tt == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tt, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--}
--
--func TestPrefixMatching(t *testing.T) {
--	EnablePrefixMatching = true
--	noRRSetupTest("ech times one two")
--
--	if te != nil || tp != nil {
--		t.Error("Wrong command called")
--	}
--	if tt == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tt, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--
--	EnablePrefixMatching = false
--}
--
--func TestNoPrefixMatching(t *testing.T) {
--	EnablePrefixMatching = false
--
--	noRRSetupTest("ech times one two")
--
--	if !(tt == nil && te == nil && tp == nil) {
--		t.Error("Wrong command called")
--	}
--}
--
--func TestAliasPrefixMatching(t *testing.T) {
--	EnablePrefixMatching = true
--	noRRSetupTest("sa times one two")
--
--	if te != nil || tp != nil {
--		t.Error("Wrong command called")
--	}
--	if tt == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tt, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--	EnablePrefixMatching = false
--}
--
--func TestChildSameName(t *testing.T) {
--	c := initializeWithSameName()
--	c.AddCommand(cmdPrint, cmdEcho)
--	c.SetArgs(strings.Split("print one two", " "))
--	c.Execute()
--
--	if te != nil || tt != nil {
--		t.Error("Wrong command called")
--	}
--	if tp == nil {
--		t.Error("Wrong command called")
--	}
--	if strings.Join(tp, " ") != "one two" {
--		t.Error("Command didn't parse correctly")
--	}
--}
--
--func TestFlagLong(t *testing.T) {
--	noRRSetupTest("echo --intone=13 something here")
--
--	if strings.Join(te, " ") != "something here" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", te)
--	}
--	if flagi1 != 13 {
--		t.Errorf("int flag didn't get correct value, had %d", flagi1)
--	}
--	if flagi2 != 234 {
--		t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
--	}
--}
--
--func TestFlagShort(t *testing.T) {
--	noRRSetupTest("echo -i13 something here")
--
--	if strings.Join(te, " ") != "something here" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", te)
--	}
--	if flagi1 != 13 {
--		t.Errorf("int flag didn't get correct value, had %d", flagi1)
--	}
--	if flagi2 != 234 {
--		t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
--	}
--
--	noRRSetupTest("echo -i 13 something here")
--
--	if strings.Join(te, " ") != "something here" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", te)
--	}
--	if flagi1 != 13 {
--		t.Errorf("int flag didn't get correct value, had %d", flagi1)
--	}
--	if flagi2 != 234 {
--		t.Errorf("default flag value changed, 234 expected, %d given", flagi2)
--	}
--
--	noRRSetupTest("print -i99 one two")
--
--	if strings.Join(tp, " ") != "one two" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", tp)
--	}
--	if flagi3 != 99 {
--		t.Errorf("int flag didn't get correct value, had %d", flagi3)
--	}
--	if flagi1 != 123 {
--		t.Errorf("default flag value changed on different command with same shortname, 123 expected, %d given", flagi1)
--	}
--}
--
--func TestChildCommandFlags(t *testing.T) {
--	noRRSetupTest("echo times -j 99 one two")
--
--	if strings.Join(tt, " ") != "one two" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", tt)
--	}
--
--	// Testing with flag that shouldn't be persistent
--	r := noRRSetupTest("echo times -j 99 -i77 one two")
--
--	if r.Error == nil {
--		t.Errorf("invalid flag should generate error")
--	}
--
--	if !strings.Contains(r.Output, "unknown shorthand") {
--		t.Errorf("Wrong error message displayed, \n %s", r.Output)
--	}
--
--	if flagi2 != 99 {
--		t.Errorf("flag value should be 99, %d given", flagi2)
--	}
--
--	if flagi1 != 123 {
--		t.Errorf("unset flag should have default value, expecting 123, given %d", flagi1)
--	}
--
--	// Testing with flag only existing on child
--	r = noRRSetupTest("echo -j 99 -i77 one two")
--
--	if r.Error == nil {
--		t.Errorf("invalid flag should generate error")
--	}
--
--	if !strings.Contains(r.Output, "intone=123") {
--		t.Errorf("Wrong error message displayed, \n %s", r.Output)
--	}
--
--	// Testing flag with invalid input
--	r = noRRSetupTest("echo -i10E")
--
--	if r.Error == nil {
--		t.Errorf("invalid input should generate error")
--	}
--
--	if !strings.Contains(r.Output, "invalid argument \"10E\" for -i10E") {
--		t.Errorf("Wrong error message displayed, \n %s", r.Output)
--	}
--}
--
--func TestTrailingCommandFlags(t *testing.T) {
--	x := fullSetupTest("echo two -x")
--
--	if x.Error == nil {
--		t.Errorf("invalid flag should generate error")
--	}
--}
--
--func TestPersistentFlags(t *testing.T) {
--	fullSetupTest("echo -s something more here")
--
--	// persistentFlag should act like a normal flag on its own command
--	if strings.Join(te, " ") != "more here" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", te)
--	}
--
--	// and it should pick up the value passed on its own command line
--	if flags1 != "something" {
--		t.Errorf("string flag didn't get correct value, had %v", flags1)
--	}
--
--	fullSetupTest("echo times -s again -c test here")
--
--	if strings.Join(tt, " ") != "test here" {
--		t.Errorf("flags didn't leave proper args remaining..%s given", tt)
--	}
--
--	if flags1 != "again" {
--		t.Errorf("string flag didn't get correct value, had %v", flags1)
--	}
--
--	if flagb2 != true {
--		t.Errorf("local flag not parsed correctly. Expected false, had %v", flagb2)
--	}
--}
--
--func TestHelpCommand(t *testing.T) {
--	c := fullSetupTest("help echo")
--	checkResultContains(t, c, cmdEcho.Long)
--
--	r := fullSetupTest("help echo times")
--	checkResultContains(t, r, cmdTimes.Long)
--}
--
--func TestRunnableRootCommand(t *testing.T) {
--	fullSetupTest("")
--
--	if rootcalled != true {
--		t.Errorf("Root Function was not called")
--	}
--}
--
--func TestRootFlags(t *testing.T) {
--	fullSetupTest("-i 17 -b")
--
--	if flagbr != true {
--		t.Errorf("flag value should be true, %v given", flagbr)
--	}
--
--	if flagir != 17 {
--		t.Errorf("flag value should be 17, %d given", flagir)
--	}
--}
--
--func TestRootHelp(t *testing.T) {
--	x := fullSetupTest("--help")
--
--	checkResultContains(t, x, "Available Commands:")
--	checkResultContains(t, x, "for more information about that command")
--
--	if strings.Contains(x.Output, "unknown flag: --help") {
--		t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output)
--	}
--
--	x = fullSetupTest("echo --help")
--
--	checkResultContains(t, x, "Available Commands:")
--	checkResultContains(t, x, "for more information about that command")
--
--	if strings.Contains(x.Output, "unknown flag: --help") {
--		t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output)
--	}
--
--}
--
--func TestRootNoCommandHelp(t *testing.T) {
--	x := rootOnlySetupTest("--help")
--
--	checkResultOmits(t, x, "Available Commands:")
--	checkResultOmits(t, x, "for more information about that command")
--
--	if strings.Contains(x.Output, "unknown flag: --help") {
--		t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output)
--	}
--
--	x = rootOnlySetupTest("echo --help")
--
--	checkResultOmits(t, x, "Available Commands:")
--	checkResultOmits(t, x, "for more information about that command")
--
--	if strings.Contains(x.Output, "unknown flag: --help") {
--		t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output)
--	}
--}
--
--func TestFlagsBeforeCommand(t *testing.T) {
--	// short without space
--	x := fullSetupTest("-i10 echo")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error)
--	}
--
--	// short (int) with equals
--	// It appears that pflag doesn't support this...
--	// Commenting out until support can be added
--
--	//x = noRRSetupTest("echo -i=10")
--	//if x.Error != nil {
--	//t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error)
--	//}
--
--	// long with equals
--	x = noRRSetupTest("--intone=123 echo one two")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error)
--	}
--
--	// With parsing error properly reported
--	x = fullSetupTest("-i10E echo")
--	if !strings.Contains(x.Output, "invalid argument \"10E\" for -i10E") {
--		t.Errorf("Wrong error message displayed, \n %s", x.Output)
--	}
--
--	//With quotes
--	x = fullSetupTest("-s=\"walking\" echo")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error)
--	}
--
--	//With quotes and space
--	x = fullSetupTest("-s=\"walking fast\" echo")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error)
--	}
--
--	//With inner quote
--	x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error)
--	}
--
--	//With quotes and space
--	x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo")
--	if x.Error != nil {
--		t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error)
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/command.go b/Godeps/_workspace/src/github.com/spf13/cobra/command.go
-deleted file mode 100644
-index a3a01ad..0000000
---- a/Godeps/_workspace/src/github.com/spf13/cobra/command.go
-+++ /dev/null
-@@ -1,790 +0,0 @@
--// Copyright © 2013 Steve Francia <spf at spf13.com>.
--//
--// Licensed under the Apache License, Version 2.0 (the "License");
--// you may not use this file except in compliance with the License.
--// You may obtain a copy of the License at
--// http://www.apache.org/licenses/LICENSE-2.0
--//
--// Unless required by applicable law or agreed to in writing, software
--// distributed under the License is distributed on an "AS IS" BASIS,
--// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--// See the License for the specific language governing permissions and
--// limitations under the License.
--
--//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
--//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
--package cobra
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"os"
--	"strings"
--
--	flag "github.com/spf13/pflag"
--)
--
--// Command is just that, a command for your application.
--// eg.  'go run' ... 'run' is the command. Cobra requires
--// you to define the usage and description as part of your command
--// definition to ensure usability.
--type Command struct {
--	// Name is the command name, usually the executable's name.
--	name string
--	// The one-line usage message.
--	Use string
--	// An array of aliases that can be used instead of the first word in Use.
--	Aliases []string
--	// The short description shown in the 'help' output.
--	Short string
--	// The long message shown in the 'help <this-command>' output.
--	Long string
--	// Set of flags specific to this command.
--	flags *flag.FlagSet
--	// Set of flags children commands will inherit
--	pflags *flag.FlagSet
--	// Run runs the command.
--	// The args are the arguments after the command name.
--	Run func(cmd *Command, args []string)
--	// Commands is the list of commands supported by this program.
--	commands []*Command
--	// Parent Command for this command
--	parent *Command
--	// max lengths of commands' string lengths for use in padding
--	commandsMaxUseLen         int
--	commandsMaxCommandPathLen int
--
--	flagErrorBuf *bytes.Buffer
--	cmdErrorBuf  *bytes.Buffer
--
--	args          []string                 // actual args parsed from flags
--	output        *io.Writer               // nil means stderr; use Out() method instead
--	usageFunc     func(*Command) error     // Usage can be defined by application
--	usageTemplate string                   // Can be defined by Application
--	helpTemplate  string                   // Can be defined by Application
--	helpFunc      func(*Command, []string) // Help can be defined by application
--	helpCommand   *Command                 // The help command
--	helpFlagVal   bool
--}
--
--// SetArgs sets the arguments to parse (os.Args[1:] by default);
--// overriding them is particularly useful when testing.
--func (c *Command) SetArgs(a []string) {
--	c.args = a
--}
--
--func (c *Command) Out() io.Writer {
--	if c.output != nil {
--		return *c.output
--	}
--
--	if c.HasParent() {
--		return c.parent.Out()
--	} else {
--		return os.Stderr
--	}
--}
--
--// SetOutput sets the destination for usage and error messages.
--// If output is nil, os.Stderr is used.
--func (c *Command) SetOutput(output io.Writer) {
--	c.output = &output
--}
--
--// Usage can be defined by application
--func (c *Command) SetUsageFunc(f func(*Command) error) {
--	c.usageFunc = f
--}
--
--// Can be defined by Application
--func (c *Command) SetUsageTemplate(s string) {
--	c.usageTemplate = s
--}
--
--// Can be defined by Application
--func (c *Command) SetHelpFunc(f func(*Command, []string)) {
--	c.helpFunc = f
--}
--
--func (c *Command) SetHelpCommand(cmd *Command) {
--	c.helpCommand = cmd
--}
--
--// Can be defined by Application
--func (c *Command) SetHelpTemplate(s string) {
--	c.helpTemplate = s
--}
--
--func (c *Command) UsageFunc() (f func(*Command) error) {
--	if c.usageFunc != nil {
--		return c.usageFunc
--	}
--
--	if c.HasParent() {
--		return c.parent.UsageFunc()
--	} else {
--		return func(c *Command) error {
--			err := tmpl(c.Out(), c.UsageTemplate(), c)
--			return err
--		}
--	}
--}
--func (c *Command) HelpFunc() func(*Command, []string) {
--	if c.helpFunc != nil {
--		return c.helpFunc
--	}
--
--	if c.HasParent() {
--		return c.parent.HelpFunc()
--	} else {
--		return func(c *Command, args []string) {
--			if len(args) == 0 {
--				// Help called without any topic, calling on root
--				c.Root().Help()
--				return
--			}
--
--			cmd, _, e := c.Root().Find(args)
--			if cmd == nil || e != nil {
--				c.Printf("Unknown help topic %#q.", args)
--
--				c.Root().Usage()
--			} else {
--				err := cmd.Help()
--				if err != nil {
--					c.Println(err)
--				}
--			}
--		}
--	}
--}
--
--var minUsagePadding int = 25
--
--func (c *Command) UsagePadding() int {
--	if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
--		return minUsagePadding
--	} else {
--		return c.parent.commandsMaxUseLen
--	}
--}
--
--var minCommandPathPadding int = 11
--
--//
--func (c *Command) CommandPathPadding() int {
--	if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
--		return minCommandPathPadding
--	} else {
--		return c.parent.commandsMaxCommandPathLen
--	}
--}
--
--func (c *Command) UsageTemplate() string {
--	if c.usageTemplate != "" {
--		return c.usageTemplate
--	}
--
--	if c.HasParent() {
--		return c.parent.UsageTemplate()
--	} else {
--		return `{{ $cmd := . }}
--Usage: {{if .Runnable}}
--  {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}}
--  {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}}
--
--Aliases:
--  {{.NameAndAliases}}{{end}}
--{{ if .HasSubCommands}}
--Available Commands: {{range .Commands}}{{if .Runnable}}
--  {{rpad .Use .UsagePadding }} {{.Short}}{{end}}{{end}}
--{{end}}
--{{ if .HasFlags}} Available Flags:
--{{.Flags.FlagUsages}}{{end}}{{if .HasParent}}{{if and (gt .Commands 0) (gt .Parent.Commands 1) }}
--Additional help topics: {{if gt .Commands 0 }}{{range .Commands}}{{if not .Runnable}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if gt .Parent.Commands 1 }}{{range .Parent.Commands}}{{if .Runnable}}{{if not (eq .Name $cmd.Name) }}{{end}}
--  {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{end}}
--{{end}}{{ if .HasSubCommands }}
--Use "{{.Root.Name}} help [command]" for more information about that command.
--{{end}}`
--	}
--}
--
--func (c *Command) HelpTemplate() string {
--	if c.helpTemplate != "" {
--		return c.helpTemplate
--	}
--
--	if c.HasParent() {
--		return c.parent.HelpTemplate()
--	} else {
--		return `{{.Long | trim}}
--{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}
--`
--	}
--}
--
--// Really only used when casting a command to a commander
--func (c *Command) resetChildrensParents() {
--	for _, x := range c.commands {
--		x.parent = c
--	}
--}
--
--func stripFlags(args []string) []string {
--	if len(args) < 1 {
--		return args
--	}
--
--	commands := []string{}
--
--	inQuote := false
--	for _, y := range args {
--		if !inQuote {
--			switch {
--			case strings.HasPrefix(y, "\""):
--				inQuote = true
--			case strings.Contains(y, "=\""):
--				inQuote = true
--			case !strings.HasPrefix(y, "-"):
--				commands = append(commands, y)
--			}
--		}
--
--		if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") {
--			inQuote = false
--		}
--	}
--
--	return commands
--}
--
--func argsMinusX(args []string, x string) []string {
--	newargs := []string{}
--
--	for _, y := range args {
--		if x != y {
--			newargs = append(newargs, y)
--		}
--	}
--	return newargs
--}
--
--// find the target command given the args and command tree
--// Meant to be run on the highest node. Only searches down.
--func (c *Command) Find(arrs []string) (*Command, []string, error) {
--	if c == nil {
--		return nil, nil, fmt.Errorf("Called find() on a nil Command")
--	}
--
--	if len(arrs) == 0 {
--		return c.Root(), arrs, nil
--	}
--
--	var innerfind func(*Command, []string) (*Command, []string)
--
--	innerfind = func(c *Command, args []string) (*Command, []string) {
--		if len(args) > 0 && c.HasSubCommands() {
--			argsWOflags := stripFlags(args)
--			if len(argsWOflags) > 0 {
--				matches := make([]*Command, 0)
--				for _, cmd := range c.commands {
--					if cmd.Name() == argsWOflags[0] || cmd.HasAlias(argsWOflags[0]) { // exact name or alias match
--						return innerfind(cmd, argsMinusX(args, argsWOflags[0]))
--					} else if EnablePrefixMatching {
--						if strings.HasPrefix(cmd.Name(), argsWOflags[0]) { // prefix match
--							matches = append(matches, cmd)
--						}
--						for _, x := range cmd.Aliases {
--							if strings.HasPrefix(x, argsWOflags[0]) {
--								matches = append(matches, cmd)
--							}
--						}
--					}
--				}
--
--				// only accept a single prefix match - multiple matches would be ambiguous
--				if len(matches) == 1 {
--					return innerfind(matches[0], argsMinusX(args, argsWOflags[0]))
--				}
--			}
--		}
--
--		return c, args
--	}
--
--	commandFound, a := innerfind(c, arrs)
--
--	// if commander returned and the first argument (if it exists) doesn't
--	// match the command name, return nil & error
--	if commandFound.Name() == c.Name() && len(arrs[0]) > 0 && commandFound.Name() != arrs[0] {
--		return nil, a, fmt.Errorf("unknown command %q\nRun 'help' for usage.\n", a[0])
--	}
--
--	return commandFound, a, nil
--}
--
--func (c *Command) Root() *Command {
--	var findRoot func(*Command) *Command
--
--	findRoot = func(x *Command) *Command {
--		if x.HasParent() {
--			return findRoot(x.parent)
--		} else {
--			return x
--		}
--	}
--
--	return findRoot(c)
--}
--
--// execute the command determined by args and the command tree
--func (c *Command) findAndExecute(args []string) (err error) {
--
--	cmd, a, e := c.Find(args)
--	if e != nil {
--		return e
--	}
--	return cmd.execute(a)
--}
--
--func (c *Command) execute(a []string) (err error) {
--	if c == nil {
--		return fmt.Errorf("Called Execute() on a nil Command")
--	}
--
--	err = c.ParseFlags(a)
--
--	if err != nil {
--		// We're writing subcommand usage to root command's error buffer to have it displayed to the user
--		r := c.Root()
--		if r.cmdErrorBuf == nil {
--			r.cmdErrorBuf = new(bytes.Buffer)
--		}
--		// for writing the usage to the buffer we need to switch the output temporarily
--		// since Out() returns root output, you also need to revert that on root
--		out := r.Out()
--		r.SetOutput(r.cmdErrorBuf)
--		c.Usage()
--		r.SetOutput(out)
--		return err
--	} else {
--		// If help is called, regardless of other flags, we print that.
--		// Print help also if c.Run is nil.
--		if c.helpFlagVal || !c.Runnable() {
--			c.Help()
--			return nil
--		}
--
--		c.preRun()
--		argWoFlags := c.Flags().Args()
--		c.Run(c, argWoFlags)
--		return nil
--	}
--}
--
--func (c *Command) preRun() {
--	for _, x := range initializers {
--		x()
--	}
--}
--
--func (c *Command) errorMsgFromParse() string {
--	s := c.flagErrorBuf.String()
--
--	x := strings.Split(s, "\n")
--
--	if len(x) > 0 {
--		return x[0]
--	} else {
--		return ""
--	}
--}
--
--// Call execute to use the args (os.Args[1:] by default)
--// and run through the command tree finding appropriate matches
--// for commands and then corresponding flags.
--func (c *Command) Execute() (err error) {
--
--	// Regardless of what command execute is called on, run on Root only
--	if c.HasParent() {
--		return c.Root().Execute()
--	}
--
--	// initialize help as the last point possible to allow for user
--	// overriding
--	c.initHelp()
--
--	var args []string
--
--	if len(c.args) == 0 {
--		args = os.Args[1:]
--	} else {
--		args = c.args
--	}
--
--	if len(args) == 0 {
--		// Only the executable is called and the root is runnable, run it
--		if c.Runnable() {
--			err = c.execute([]string(nil))
--		} else {
--			c.Help()
--		}
--	} else {
--		err = c.findAndExecute(args)
--	}
--
--	// Now handle the case where the root is runnable and only flags are provided
--	if err != nil && c.Runnable() {
--		// This is pretty much a custom version of the *Command.execute method
--		// with a few differences because it's the final command (no fall back)
--		e := c.ParseFlags(args)
--		if e != nil {
--			// Flags parsing had an error.
--			// If an error happens here, we have to report it to the user
--			c.Println(c.errorMsgFromParse())
--			// If an error happens search also for subcommand info about that
--			if c.cmdErrorBuf != nil && c.cmdErrorBuf.Len() > 0 {
--				c.Println(c.cmdErrorBuf.String())
--			} else {
--				c.Usage()
--			}
--			return e
--		} else {
--			// If help is called, regardless of other flags, we print that
--			if c.helpFlagVal {
--				c.Help()
--				return nil
--			}
--
--			argWoFlags := c.Flags().Args()
--			if len(argWoFlags) > 0 {
--				// If there are arguments (not flags) one of the earlier
--				// cases should have caught it.. It means invalid usage
--				// print the usage
--				c.Usage()
--			} else {
--				// Only flags left... Call root.Run
--				c.preRun()
--				c.Run(c, argWoFlags)
--				err = nil
--			}
--		}
--	}
--
--	if err != nil {
--		c.Println("Error:", err.Error())
--		c.Printf("%v: invalid command %#q\n", c.Root().Name(), os.Args[1:])
--		c.Printf("Run '%v help' for usage\n", c.Root().Name())
--	}
--
--	return
--}
--
--func (c *Command) initHelp() {
--	if c.helpCommand == nil {
--		if !c.HasSubCommands() {
--			return
--		}
--
--		c.helpCommand = &Command{
--			Use:   "help [command]",
--			Short: "Help about any command",
--			Long: `Help provides help for any command in the application.
--    Simply type ` + c.Name() + ` help [path to command] for full details.`,
--			Run: c.HelpFunc(),
--		}
--	}
--	c.AddCommand(c.helpCommand)
--}
--
--// Used for testing
--func (c *Command) ResetCommands() {
--	c.commands = nil
--	c.helpCommand = nil
--	c.cmdErrorBuf = new(bytes.Buffer)
--	c.cmdErrorBuf.Reset()
--}
--
--//Commands returns a slice of child commands.
--func (c *Command) Commands() []*Command {
--	return c.commands
--}
--
--// AddCommand adds one or more commands to this parent command.
--func (c *Command) AddCommand(cmds ...*Command) {
--	for i, x := range cmds {
--		if cmds[i] == c {
--			panic("Command can't be a child of itself")
--		}
--		cmds[i].parent = c
--		// update max lengths
--		usageLen := len(x.Use)
--		if usageLen > c.commandsMaxUseLen {
--			c.commandsMaxUseLen = usageLen
--		}
--		commandPathLen := len(x.CommandPath())
--		if commandPathLen > c.commandsMaxCommandPathLen {
--			c.commandsMaxCommandPathLen = commandPathLen
--		}
--		c.commands = append(c.commands, x)
--	}
--}
--
--// Convenience method to Print to the defined output
--func (c *Command) Print(i ...interface{}) {
--	fmt.Fprint(c.Out(), i...)
--}
--
--// Convenience method to Println to the defined output
--func (c *Command) Println(i ...interface{}) {
--	str := fmt.Sprintln(i...)
--	c.Print(str)
--}
--
--// Convenience method to Printf to the defined output
--func (c *Command) Printf(format string, i ...interface{}) {
--	str := fmt.Sprintf(format, i...)
--	c.Print(str)
--}
--
--// Output the usage for the command
--// Used when a user provides invalid input
--// Can be defined by user by overriding UsageFunc
--func (c *Command) Usage() error {
--	c.mergePersistentFlags()
--	err := c.UsageFunc()(c)
--	return err
--}
--
--// Output the help for the command
--// Used when a user calls help [command]
--// by the default HelpFunc in the commander
--func (c *Command) Help() error {
--	c.mergePersistentFlags()
--	err := tmpl(c.Out(), c.HelpTemplate(), c)
--	return err
--}
--
--func (c *Command) UsageString() string {
--	tmpOutput := c.output
--	bb := new(bytes.Buffer)
--	c.SetOutput(bb)
--	c.Usage()
--	c.output = tmpOutput
--	return bb.String()
--}
--
--// CommandPath returns the full path to this command.
--func (c *Command) CommandPath() string {
--	str := c.Name()
--	x := c
--	for x.HasParent() {
--		str = x.parent.Name() + " " + str
--		x = x.parent
--	}
--	return str
--}
--
--//The full usage for a given command (including parents)
--func (c *Command) UseLine() string {
--	str := ""
--	if c.HasParent() {
--		str = c.parent.CommandPath() + " "
--	}
--	return str + c.Use
--}
--
--// For use in determining which flags have been assigned to which commands
--// and which persist
--func (c *Command) DebugFlags() {
--	c.Println("DebugFlags called on", c.Name())
--	var debugflags func(*Command)
--
--	debugflags = func(x *Command) {
--		if x.HasFlags() || x.HasPersistentFlags() {
--			c.Println(x.Name())
--		}
--		if x.HasFlags() {
--			x.flags.VisitAll(func(f *flag.Flag) {
--				if x.HasPersistentFlags() {
--					if x.persistentFlag(f.Name) == nil {
--						c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [L]")
--					} else {
--						c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [LP]")
--					}
--				} else {
--					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [L]")
--				}
--			})
--		}
--		if x.HasPersistentFlags() {
--			x.pflags.VisitAll(func(f *flag.Flag) {
--				if x.HasFlags() {
--					if x.flags.Lookup(f.Name) == nil {
--						c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
--					}
--				} else {
--					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
--				}
--			})
--		}
--		c.Println(x.flagErrorBuf)
--		if x.HasSubCommands() {
--			for _, y := range x.commands {
--				debugflags(y)
--			}
--		}
--	}
--
--	debugflags(c)
--}
--
--// Name returns the command's name: the first word in the use line.
--func (c *Command) Name() string {
--	if c.name != "" {
--		return c.name
--	}
--	name := c.Use
--	i := strings.Index(name, " ")
--	if i >= 0 {
--		name = name[:i]
--	}
--	return name
--}
--
--// Determine if a given string is an alias of the command.
--func (c *Command) HasAlias(s string) bool {
--	for _, a := range c.Aliases {
--		if a == s {
--			return true
--		}
--	}
--	return false
--}
--
--func (c *Command) NameAndAliases() string {
--	return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
--}
--
--// Determine if the command is itself runnable
--func (c *Command) Runnable() bool {
--	return c.Run != nil
--}
--
--// Determine if the command has children commands
--func (c *Command) HasSubCommands() bool {
--	return len(c.commands) > 0
--}
--
--// Determine if the command is a child command
--func (c *Command) HasParent() bool {
--	return c.parent != nil
--}
--
--// Get the Command's FlagSet
--func (c *Command) Flags() *flag.FlagSet {
--	if c.flags == nil {
--		c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
--		if c.flagErrorBuf == nil {
--			c.flagErrorBuf = new(bytes.Buffer)
--		}
--		c.flags.SetOutput(c.flagErrorBuf)
--		c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name())
--	}
--	return c.flags
--}
--
--// Get the Command's persistent FlagSet
--func (c *Command) PersistentFlags() *flag.FlagSet {
--	if c.pflags == nil {
--		c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
--		if c.flagErrorBuf == nil {
--			c.flagErrorBuf = new(bytes.Buffer)
--		}
--		c.pflags.SetOutput(c.flagErrorBuf)
--	}
--	return c.pflags
--}
--
--// For use in testing
--func (c *Command) ResetFlags() {
--	c.flagErrorBuf = new(bytes.Buffer)
--	c.flagErrorBuf.Reset()
--	c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
--	c.flags.SetOutput(c.flagErrorBuf)
--	c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
--	c.pflags.SetOutput(c.flagErrorBuf)
--}
--
--// Does the command contain flags (local not persistent)
--func (c *Command) HasFlags() bool {
--	return c.Flags().HasFlags()
--}
--
--// Does the command contain persistent flags
--func (c *Command) HasPersistentFlags() bool {
--	return c.PersistentFlags().HasFlags()
--}
--
--// Climbs up the command tree looking for matching flag
--func (c *Command) Flag(name string) (flag *flag.Flag) {
--	flag = c.Flags().Lookup(name)
--
--	if flag == nil {
--		flag = c.persistentFlag(name)
--	}
--
--	return
--}
--
--// recursively find matching persistent flag
--func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
--	if c.HasPersistentFlags() {
--		flag = c.PersistentFlags().Lookup(name)
--	}
--
--	if flag == nil && c.HasParent() {
--		flag = c.parent.persistentFlag(name)
--	}
--	return
--}
--
--// Parses persistent flag tree & local flags
--func (c *Command) ParseFlags(args []string) (err error) {
--	c.mergePersistentFlags()
--	err = c.Flags().Parse(args)
--
--	// The upstream library adds spaces to the error
--	// response regardless of success.
--	// Handling it here until fixing upstream
--	if len(strings.TrimSpace(c.flagErrorBuf.String())) > 1 {
--		return fmt.Errorf("%s", c.flagErrorBuf.String())
--	}
--
--	//always return nil because upstream library is inconsistent & we always check the error buffer anyway
--	return nil
--}
--
--func (c *Command) mergePersistentFlags() {
--	var rmerge func(x *Command)
--
--	rmerge = func(x *Command) {
--		if x.HasPersistentFlags() {
--			x.PersistentFlags().VisitAll(func(f *flag.Flag) {
--				if c.Flags().Lookup(f.Name) == nil {
--					c.Flags().AddFlag(f)
--				}
--			})
--		}
--		if x.HasParent() {
--			rmerge(x.parent)
--		}
--	}
--
--	rmerge(c)
--}
--
--func (c *Command) Parent() *Command {
--	return c.parent
--}
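
For readers skimming the removed vendor tree: the cobra API deleted above (Command with Use/Short/Run, AddCommand, SetArgs, Execute, and a pflag-backed Flags set) is typically wired up roughly as in the sketch below. This is only an illustration: the command and flag names are made up, and the upstream import path github.com/spf13/cobra is assumed rather than the Godeps/_workspace copy being removed here.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	var verbose bool

	echoCmd := &cobra.Command{
		Use:   "echo [strings to echo]",
		Short: "Echo anything to the screen",
		Run: func(cmd *cobra.Command, args []string) {
			// args holds everything left over after flag parsing
			fmt.Println(strings.Join(args, " "))
		},
	}
	// BoolVarP registers both a long (--verbose) and a shorthand (-v) form
	// on the pflag FlagSet returned by Flags().
	echoCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "print extra output")

	root := &cobra.Command{Use: "app"}
	root.AddCommand(echoCmd)

	// SetArgs is optional; without it Execute() parses os.Args[1:].
	root.SetArgs(os.Args[1:])
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}
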
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE b/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE
-deleted file mode 100644
-index 63ed1cf..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE
-+++ /dev/null
-@@ -1,28 +0,0 @@
--Copyright (c) 2012 Alex Ogier. All rights reserved.
--Copyright (c) 2012 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/README.md b/Godeps/_workspace/src/github.com/spf13/pflag/README.md
-deleted file mode 100644
-index a12d94d..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/README.md
-+++ /dev/null
-@@ -1,155 +0,0 @@
--## Description
--
--pflag is a drop-in replacement for Go's flag package, implementing
--POSIX/GNU-style --flags.
--
--pflag is compatible with the [GNU extensions to the POSIX recommendations
--for command-line options][1]. For a more precise description, see the
--"Command-line flag syntax" section below.
--
--[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
--
--pflag is available under the same style of BSD license as the Go language,
--which can be found in the LICENSE file.
--
--## Installation
--
--pflag is available using the standard `go get` command.
--
--Install by running:
--
--    go get github.com/ogier/pflag
--
--Run tests by running:
--
--    go test github.com/ogier/pflag
--
--## Usage
--
--pflag is a drop-in replacement of Go's native flag package. If you import
--pflag under the name "flag" then all code should continue to function
--with no changes.
--
--``` go
--import flag "github.com/ogier/pflag"
--```
--
--There is one exception to this: if you directly instantiate the Flag struct
--there is one more field "Shorthand" that you will need to set.
--Most code never instantiates this struct directly, and instead uses
--functions such as String(), BoolVar(), and Var(), and is therefore
--unaffected.
--
--Define flags using flag.String(), Bool(), Int(), etc.
--
--This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
--
--``` go
--var ip *int = flag.Int("flagname", 1234, "help message for flagname")
--```
--
--If you like, you can bind the flag to a variable using the Var() functions.
--
--``` go
--var flagvar int
--func init() {
--    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
--}
--```
--
--Or you can create custom flags that satisfy the Value interface (with
--pointer receivers) and couple them to flag parsing by
--
--``` go
--flag.Var(&flagVal, "name", "help message for flagname")
--```
--
--For such flags, the default value is just the initial value of the variable.
--
--After all flags are defined, call
--
--``` go
--flag.Parse()
--```
--
--to parse the command line into the defined flags.
--
--Flags may then be used directly. If you're using the flags themselves,
--they are all pointers; if you bind to variables, they're values.
--
--``` go
--fmt.Println("ip has value ", *ip)
--fmt.Println("flagvar has value ", flagvar)
--```
--
--After parsing, the arguments after the flag are available as the
--slice flag.Args() or individually as flag.Arg(i).
--The arguments are indexed from 0 through flag.NArg()-1.
--
--The pflag package also defines some new functions that are not in flag,
--that give one-letter shorthands for flags. You can use these by appending
--'P' to the name of any function that defines a flag.
--
--``` go
--var ip = flag.IntP("flagname", "f", 1234, "help message")
--var flagvar bool
--func init() {
--    flag.BoolVarP("boolname", "b", true, "help message")
--}
--flag.VarP(&flagVar, "varname", "v", 1234, "help message")
--```
--
--Shorthand letters can be used with single dashes on the command line.
--Boolean shorthand flags can be combined with other shorthand flags.
--
--The default set of command-line flags is controlled by
--top-level functions.  The FlagSet type allows one to define
--independent sets of flags, such as to implement subcommands
--in a command-line interface. The methods of FlagSet are
--analogous to the top-level functions for the command-line
--flag set.
--
--## Command line flag syntax
--
--```
----flag    // boolean flags only
----flag=x
--```
--
--Unlike the flag package, a single dash before an option means something
--different than a double dash. Single dashes signify a series of shorthand
--letters for flags. All but the last shorthand letter must be boolean flags.
--
--```
--// boolean flags
---f
---abc
--
--// non-boolean flags
---n 1234
---Ifile
--
--// mixed
---abcs "hello"
---abcn1234
--```
--
--Flag parsing stops after the terminator "--". Unlike the flag package,
--flags can be interspersed with arguments anywhere on the command line
--before this terminator.
--
--Integer flags accept 1234, 0664, 0x1234 and may be negative.
--Boolean flags (in their long form) accept 1, 0, t, f, true, false,
--TRUE, FALSE, True, False.
--Duration flags accept any input valid for time.ParseDuration.
--
--## More info
--
--You can see the full reference documentation of the pflag package
--[at godoc.org][3], or through go's standard documentation system by
--running `godoc -http=:6060` and browsing to
--[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
--installation.
--
--[2]: http://localhost:6060/pkg/github.com/ogier/pflag
--[3]: http://godoc.org/github.com/ogier/pflag
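
Taken together, the README snippets above correspond to a small self-contained program along these lines. The sketch assumes the github.com/ogier/pflag import path that the README itself documents (not the spf13 fork vendored here), and the flag names are illustrative only.

package main

import (
	"fmt"

	flag "github.com/ogier/pflag"
)

func main() {
	// Long and shorthand forms are both accepted, e.g. --count=3 or -c3.
	count := flag.IntP("count", "c", 1, "how many times to greet")
	name := flag.StringP("name", "n", "world", "who to greet")
	flag.Parse()

	for i := 0; i < *count; i++ {
		fmt.Println("hello", *name)
	}
	// Anything left over after flag parsing is available via flag.Args().
	fmt.Println("remaining args:", flag.Args())
}
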
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool.go
-deleted file mode 100644
-index 010576c..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- bool Value
--type boolValue bool
--
--func newBoolValue(val bool, p *bool) *boolValue {
--	*p = val
--	return (*boolValue)(p)
--}
--
--func (b *boolValue) Set(s string) error {
--	v, err := strconv.ParseBool(s)
--	*b = boolValue(v)
--	return err
--}
--
--func (b *boolValue) Type() string {
--	return "bool"
--}
--
--func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
--
--// BoolVar defines a bool flag with specified name, default value, and usage string.
--// The argument p points to a bool variable in which to store the value of the flag.
--func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
--	f.VarP(newBoolValue(value, p), name, "", usage)
--}
--
--// Like BoolVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
--	f.VarP(newBoolValue(value, p), name, shorthand, usage)
--}
--
--// BoolVar defines a bool flag with specified name, default value, and usage string.
--// The argument p points to a bool variable in which to store the value of the flag.
--func BoolVar(p *bool, name string, value bool, usage string) {
--	CommandLine.VarP(newBoolValue(value, p), name, "", usage)
--}
--
--// Like BoolVar, but accepts a shorthand letter that can be used after a single dash.
--func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
--	CommandLine.VarP(newBoolValue(value, p), name, shorthand, usage)
--}
--
--// Bool defines a bool flag with specified name, default value, and usage string.
--// The return value is the address of a bool variable that stores the value of the flag.
--func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
--	p := new(bool)
--	f.BoolVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Bool, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
--	p := new(bool)
--	f.BoolVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Bool defines a bool flag with specified name, default value, and usage string.
--// The return value is the address of a bool variable that stores the value of the flag.
--func Bool(name string, value bool, usage string) *bool {
--	return CommandLine.BoolP(name, "", value, usage)
--}
--
--// Like Bool, but accepts a shorthand letter that can be used after a single dash.
--func BoolP(name, shorthand string, value bool, usage string) *bool {
--	return CommandLine.BoolP(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go b/Godeps/_workspace/src/github.com/spf13/pflag/duration.go
-deleted file mode 100644
-index 66ed7ac..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go
-+++ /dev/null
-@@ -1,71 +0,0 @@
--package pflag
--
--import "time"
--
--// -- time.Duration Value
--type durationValue time.Duration
--
--func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
--	*p = val
--	return (*durationValue)(p)
--}
--
--func (d *durationValue) Set(s string) error {
--	v, err := time.ParseDuration(s)
--	*d = durationValue(v)
--	return err
--}
--
--func (d *durationValue) Type() string {
--	return "duration"
--}
--
--func (d *durationValue) String() string { return (*time.Duration)(d).String() }
--
--// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
--// The argument p points to a time.Duration variable in which to store the value of the flag.
--func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
--	f.VarP(newDurationValue(value, p), name, "", usage)
--}
--
--// Like DurationVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
--	f.VarP(newDurationValue(value, p), name, shorthand, usage)
--}
--
--// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
--// The argument p points to a time.Duration variable in which to store the value of the flag.
--func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
--	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
--}
--
--// Like DurationVar, but accepts a shorthand letter that can be used after a single dash.
--func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
--	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
--}
--
--// Duration defines a time.Duration flag with specified name, default value, and usage string.
--// The return value is the address of a time.Duration variable that stores the value of the flag.
--func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
--	p := new(time.Duration)
--	f.DurationVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Duration, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
--	p := new(time.Duration)
--	f.DurationVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Duration defines a time.Duration flag with specified name, default value, and usage string.
--// The return value is the address of a time.Duration variable that stores the value of the flag.
--func Duration(name string, value time.Duration, usage string) *time.Duration {
--	return CommandLine.DurationP(name, "", value, usage)
--}
--
--// Like Duration, but accepts a shorthand letter that can be used after a single dash.
--func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
--	return CommandLine.DurationP(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
-deleted file mode 100644
-index 03ebeaa..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
-+++ /dev/null
-@@ -1,73 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// These examples demonstrate more intricate uses of the flag package.
--package pflag_test
--
--import (
--	"errors"
--	"fmt"
--	"strings"
--	"time"
--
--	flag "github.com/ogier/pflag"
--)
--
--// Example 1: A single string flag called "species" with default value "gopher".
--var species = flag.String("species", "gopher", "the species we are studying")
--
--// Example 2: A flag with a shorthand letter.
--var gopherType = flag.StringP("gopher_type", "g", "pocket", "the variety of gopher")
--
--// Example 3: A user-defined flag type, a slice of durations.
--type interval []time.Duration
--
--// String is the method to format the flag's value, part of the flag.Value interface.
--// The String method's output will be used in diagnostics.
--func (i *interval) String() string {
--	return fmt.Sprint(*i)
--}
--
--// Set is the method to set the flag value, part of the flag.Value interface.
--// Set's argument is a string to be parsed to set the flag.
--// It's a comma-separated list, so we split it.
--func (i *interval) Set(value string) error {
--	// If we wanted to allow the flag to be set multiple times,
--	// accumulating values, we would delete this if statement.
--	// That would permit usages such as
--	//	-deltaT 10s -deltaT 15s
--	// and other combinations.
--	if len(*i) > 0 {
--		return errors.New("interval flag already set")
--	}
--	for _, dt := range strings.Split(value, ",") {
--		duration, err := time.ParseDuration(dt)
--		if err != nil {
--			return err
--		}
--		*i = append(*i, duration)
--	}
--	return nil
--}
--
--// Define a flag to accumulate durations. Because it has a special type,
--// we need to use the Var function and therefore create the flag during
--// init.
--
--var intervalFlag interval
--
--func init() {
--	// Tie the command-line flag to the intervalFlag variable and
--	// set a usage message.
--	flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events")
--}
--
--func Example() {
--	// All the interesting pieces are with the variables declared above, but
--	// to enable the flag package to see the flags defined there, one must
--	// execute, typically at the start of main (not init!):
--	//	flag.Parse()
--	// We don't run it here because this is not a main function and
--	// the testing suite has already parsed the flags.
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go
-deleted file mode 100644
-index 9318fee..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--// Copyright 2010 The Go Authors.  All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package pflag
--
--import (
--	"io/ioutil"
--	"os"
--)
--
--// Additional routines compiled into the package only during testing.
--
--// ResetForTesting clears all flag state and sets the usage function as directed.
--// After calling ResetForTesting, parse errors in flag handling will not
--// exit the program.
--func ResetForTesting(usage func()) {
--	CommandLine = &FlagSet{
--		name:          os.Args[0],
--		errorHandling: ContinueOnError,
--		output:        ioutil.Discard,
--	}
--	Usage = usage
--}
--
--// GetCommandLine returns the default FlagSet.
--func GetCommandLine() *FlagSet {
--	return CommandLine
--}
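
A typical use of these test-only helpers, sketched under the assumption of a test living in the same pflag package; the flag name and value are illustrative.

package pflag

import "testing"

func TestShorthandParsing(t *testing.T) {
	// Reset the global CommandLine so definitions from other tests don't leak in;
	// after ResetForTesting, parse errors are returned instead of exiting.
	ResetForTesting(func() {})

	count := IntP("count", "c", 0, "illustrative counter flag")
	if err := GetCommandLine().Parse([]string{"-c7"}); err != nil {
		t.Fatalf("unexpected parse error: %v", err)
	}
	if *count != 7 {
		t.Fatalf("expected 7, got %d", *count)
	}
}
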
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go
-deleted file mode 100644
-index 47761a0..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go
-+++ /dev/null
-@@ -1,621 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--/*
--	pflag is a drop-in replacement for Go's flag package, implementing
--	POSIX/GNU-style --flags.
--
--	pflag is compatible with the GNU extensions to the POSIX recommendations
--	for command-line options. See
--	http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
--
--	Usage:
--
--	pflag is a drop-in replacement of Go's native flag package. If you import
--	pflag under the name "flag" then all code should continue to function
--	with no changes.
--
--		import flag "github.com/ogier/pflag"
--
--	There is one exception to this: if you directly instantiate the Flag struct
--	there is one more field "Shorthand" that you will need to set.
--	Most code never instantiates this struct directly, and instead uses
--	functions such as String(), BoolVar(), and Var(), and is therefore
--	unaffected.
--
--	Define flags using flag.String(), Bool(), Int(), etc.
--
--	This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
--		var ip = flag.Int("flagname", 1234, "help message for flagname")
--	If you like, you can bind the flag to a variable using the Var() functions.
--		var flagvar int
--		func init() {
--			flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
--		}
--	Or you can create custom flags that satisfy the Value interface (with
--	pointer receivers) and couple them to flag parsing by
--		flag.Var(&flagVal, "name", "help message for flagname")
--	For such flags, the default value is just the initial value of the variable.
--
--	After all flags are defined, call
--		flag.Parse()
--	to parse the command line into the defined flags.
--
--	Flags may then be used directly. If you're using the flags themselves,
--	they are all pointers; if you bind to variables, they're values.
--		fmt.Println("ip has value ", *ip)
--		fmt.Println("flagvar has value ", flagvar)
--
--	After parsing, the arguments after the flag are available as the
--	slice flag.Args() or individually as flag.Arg(i).
--	The arguments are indexed from 0 through flag.NArg()-1.
--
--	The pflag package also defines some new functions that are not in flag,
--	that give one-letter shorthands for flags. You can use these by appending
--	'P' to the name of any function that defines a flag.
--		var ip = flag.IntP("flagname", "f", 1234, "help message")
--		var flagvar bool
--		func init() {
--			flag.BoolVarP("boolname", "b", true, "help message")
--		}
--		flag.VarP(&flagVar, "varname", "v", 1234, "help message")
--	Shorthand letters can be used with single dashes on the command line.
--	Boolean shorthand flags can be combined with other shorthand flags.
--
--	Command line flag syntax:
--		--flag    // boolean flags only
--		--flag=x
--
--	Unlike the flag package, a single dash before an option means something
--	different than a double dash. Single dashes signify a series of shorthand
--	letters for flags. All but the last shorthand letter must be boolean flags.
--		// boolean flags
--		-f
--		-abc
--		// non-boolean flags
--		-n 1234
--		-Ifile
--		// mixed
--		-abcs "hello"
--		-abcn1234
--
--	Flag parsing stops after the terminator "--". Unlike the flag package,
--	flags can be interspersed with arguments anywhere on the command line
--	before this terminator.
--
--	Integer flags accept 1234, 0664, 0x1234 and may be negative.
--	Boolean flags (in their long form) accept 1, 0, t, f, true, false,
--	TRUE, FALSE, True, False.
--	Duration flags accept any input valid for time.ParseDuration.
--
--	The default set of command-line flags is controlled by
--	top-level functions.  The FlagSet type allows one to define
--	independent sets of flags, such as to implement subcommands
--	in a command-line interface. The methods of FlagSet are
--	analogous to the top-level functions for the command-line
--	flag set.
--*/
--package pflag
--
--import (
--	"bytes"
--	"errors"
--	"fmt"
--	"io"
--	"os"
--	"sort"
--	"strings"
--)
--
--// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
--var ErrHelp = errors.New("pflag: help requested")
--
--// ErrorHandling defines how to handle flag parsing errors.
--type ErrorHandling int
--
--const (
--	ContinueOnError ErrorHandling = iota
--	ExitOnError
--	PanicOnError
--)
--
--// A FlagSet represents a set of defined flags.
--type FlagSet struct {
--	// Usage is the function called when an error occurs while parsing flags.
--	// The field is a function (not a method) that may be changed to point to
--	// a custom error handler.
--	Usage func()
--
--	name          string
--	parsed        bool
--	actual        map[string]*Flag
--	formal        map[string]*Flag
--	shorthands    map[byte]*Flag
--	args          []string // arguments after flags
--	exitOnError   bool     // does the program exit if there's an error?
--	errorHandling ErrorHandling
--	output        io.Writer // nil means stderr; use out() accessor
--	interspersed  bool      // allow interspersed option/non-option args
--}
--
--// A Flag represents the state of a flag.
--type Flag struct {
--	Name      string // name as it appears on command line
--	Shorthand string // one-letter abbreviated flag
--	Usage     string // help message
--	Value     Value  // value as set
--	DefValue  string // default value (as text); for usage message
--	Changed   bool   // If the user set the value (or if left to default)
--}
--
--// Value is the interface to the dynamic value stored in a flag.
--// (The default value is represented as a string.)
--type Value interface {
--	String() string
--	Set(string) error
--	Type() string
--}
--
--// sortFlags returns the flags as a slice in lexicographical sorted order.
--func sortFlags(flags map[string]*Flag) []*Flag {
--	list := make(sort.StringSlice, len(flags))
--	i := 0
--	for _, f := range flags {
--		list[i] = f.Name
--		i++
--	}
--	list.Sort()
--	result := make([]*Flag, len(list))
--	for i, name := range list {
--		result[i] = flags[name]
--	}
--	return result
--}
--
--func (f *FlagSet) out() io.Writer {
--	if f.output == nil {
--		return os.Stderr
--	}
--	return f.output
--}
--
--// SetOutput sets the destination for usage and error messages.
--// If output is nil, os.Stderr is used.
--func (f *FlagSet) SetOutput(output io.Writer) {
--	f.output = output
--}
--
--// VisitAll visits the flags in lexicographical order, calling fn for each.
--// It visits all flags, even those not set.
--func (f *FlagSet) VisitAll(fn func(*Flag)) {
--	for _, flag := range sortFlags(f.formal) {
--		fn(flag)
--	}
--}
--
--func (f *FlagSet) HasFlags() bool {
--	return len(f.formal) > 0
--}
--
--// VisitAll visits the command-line flags in lexicographical order, calling
--// fn for each.  It visits all flags, even those not set.
--func VisitAll(fn func(*Flag)) {
--	CommandLine.VisitAll(fn)
--}
--
--// Visit visits the flags in lexicographical order, calling fn for each.
--// It visits only those flags that have been set.
--func (f *FlagSet) Visit(fn func(*Flag)) {
--	for _, flag := range sortFlags(f.actual) {
--		fn(flag)
--	}
--}
--
--// Visit visits the command-line flags in lexicographical order, calling fn
--// for each.  It visits only those flags that have been set.
--func Visit(fn func(*Flag)) {
--	CommandLine.Visit(fn)
--}
--
--// Lookup returns the Flag structure of the named flag, returning nil if none exists.
--func (f *FlagSet) Lookup(name string) *Flag {
--	return f.formal[name]
--}
--
--// Lookup returns the Flag structure of the named command-line flag,
--// returning nil if none exists.
--func Lookup(name string) *Flag {
--	return CommandLine.formal[name]
--}
--
--// Set sets the value of the named flag.
--func (f *FlagSet) Set(name, value string) error {
--	flag, ok := f.formal[name]
--	if !ok {
--		return fmt.Errorf("no such flag -%v", name)
--	}
--	err := flag.Value.Set(value)
--	if err != nil {
--		return err
--	}
--	if f.actual == nil {
--		f.actual = make(map[string]*Flag)
--	}
--	f.actual[name] = flag
--	f.Lookup(name).Changed = true
--	return nil
--}
--
--// Set sets the value of the named command-line flag.
--func Set(name, value string) error {
--	return CommandLine.Set(name, value)
--}
--
--// PrintDefaults prints, to standard error unless configured
--// otherwise, the default values of all defined flags in the set.
--func (f *FlagSet) PrintDefaults() {
--	f.VisitAll(func(flag *Flag) {
--		format := "--%s=%s: %s\n"
--		if _, ok := flag.Value.(*stringValue); ok {
--			// put quotes on the value
--			format = "--%s=%q: %s\n"
--		}
--		if len(flag.Shorthand) > 0 {
--			format = "  -%s, " + format
--		} else {
--			format = "   %s   " + format
--		}
--		fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
--	})
--}
--
--func (f *FlagSet) FlagUsages() string {
--	x := new(bytes.Buffer)
--
--	f.VisitAll(func(flag *Flag) {
--		format := "--%s=%s: %s\n"
--		if _, ok := flag.Value.(*stringValue); ok {
--			// put quotes on the value
--			format = "--%s=%q: %s\n"
--		}
--		if len(flag.Shorthand) > 0 {
--			format = "  -%s, " + format
--		} else {
--			format = "   %s   " + format
--		}
--		fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
--	})
--
--	return x.String()
--}
--
--// PrintDefaults prints to standard error the default values of all defined command-line flags.
--func PrintDefaults() {
--	CommandLine.PrintDefaults()
--}
--
--// defaultUsage is the default function to print a usage message.
--func defaultUsage(f *FlagSet) {
--	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
--	f.PrintDefaults()
--}
--
--// NOTE: Usage is not just defaultUsage(CommandLine)
--// because it serves (via godoc flag Usage) as the example
--// for how to write your own usage function.
--
--// Usage prints to standard error a usage message documenting all defined command-line flags.
--// The function is a variable that may be changed to point to a custom function.
--var Usage = func() {
--	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
--	PrintDefaults()
--}
--
--// NFlag returns the number of flags that have been set.
--func (f *FlagSet) NFlag() int { return len(f.actual) }
--
--// NFlag returns the number of command-line flags that have been set.
--func NFlag() int { return len(CommandLine.actual) }
--
--// Arg returns the i'th argument.  Arg(0) is the first remaining argument
--// after flags have been processed.
--func (f *FlagSet) Arg(i int) string {
--	if i < 0 || i >= len(f.args) {
--		return ""
--	}
--	return f.args[i]
--}
--
--// Arg returns the i'th command-line argument.  Arg(0) is the first remaining argument
--// after flags have been processed.
--func Arg(i int) string {
--	return CommandLine.Arg(i)
--}
--
--// NArg is the number of arguments remaining after flags have been processed.
--func (f *FlagSet) NArg() int { return len(f.args) }
--
--// NArg is the number of arguments remaining after flags have been processed.
--func NArg() int { return len(CommandLine.args) }
--
--// Args returns the non-flag arguments.
--func (f *FlagSet) Args() []string { return f.args }
--
--// Args returns the non-flag command-line arguments.
--func Args() []string { return CommandLine.args }
--
--// Var defines a flag with the specified name and usage string. The type and
--// value of the flag are represented by the first argument, of type Value, which
--// typically holds a user-defined implementation of Value. For instance, the
--// caller could create a flag that turns a comma-separated string into a slice
--// of strings by giving the slice the methods of Value; in particular, Set would
--// decompose the comma-separated string into the slice.
--func (f *FlagSet) Var(value Value, name string, usage string) {
--	f.VarP(value, name, "", usage)
--}
--
--// Like Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
--	// Remember the default value as a string; it won't change.
--	flag := &Flag{name, shorthand, usage, value, value.String(), false}
--	f.AddFlag(flag)
--}
--
--func (f *FlagSet) AddFlag(flag *Flag) {
--	_, alreadythere := f.formal[flag.Name]
--	if alreadythere {
--		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
--		fmt.Fprintln(f.out(), msg)
--		panic(msg) // Happens only if flags are declared with identical names
--	}
--	if f.formal == nil {
--		f.formal = make(map[string]*Flag)
--	}
--	f.formal[flag.Name] = flag
--
--	if len(flag.Shorthand) == 0 {
--		return
--	}
--	if len(flag.Shorthand) > 1 {
--		fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
--		panic("shorthand is more than one character")
--	}
--	if f.shorthands == nil {
--		f.shorthands = make(map[byte]*Flag)
--	}
--	c := flag.Shorthand[0]
--	old, alreadythere := f.shorthands[c]
--	if alreadythere {
--		fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
--		panic("shorthand redefinition")
--	}
--	f.shorthands[c] = flag
--}
--
--// Var defines a flag with the specified name and usage string. The type and
--// value of the flag are represented by the first argument, of type Value, which
--// typically holds a user-defined implementation of Value. For instance, the
--// caller could create a flag that turns a comma-separated string into a slice
--// of strings by giving the slice the methods of Value; in particular, Set would
--// decompose the comma-separated string into the slice.
--func Var(value Value, name string, usage string) {
--	CommandLine.VarP(value, name, "", usage)
--}
--
--// Like Var, but accepts a shorthand letter that can be used after a single dash.
--func VarP(value Value, name, shorthand, usage string) {
--	CommandLine.VarP(value, name, shorthand, usage)
--}
--
--// failf prints to standard error a formatted error and usage message and
--// returns the error.
--func (f *FlagSet) failf(format string, a ...interface{}) error {
--	err := fmt.Errorf(format, a...)
--	fmt.Fprintln(f.out(), err)
--	f.usage()
--	return err
--}
--
--// usage calls the Usage method for the flag set, or the usage function if
--// the flag set is CommandLine.
--func (f *FlagSet) usage() {
--	if f == CommandLine {
--		Usage()
--	} else if f.Usage == nil {
--		defaultUsage(f)
--	} else {
--		f.Usage()
--	}
--}
--
--func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
--	if err := flag.Value.Set(value); err != nil {
--		return f.failf("invalid argument %q for %s: %v", value, origArg, err)
--	}
--	// mark as visited for Visit()
--	if f.actual == nil {
--		f.actual = make(map[string]*Flag)
--	}
--	f.actual[flag.Name] = flag
--	flag.Changed = true
--	return nil
--}
--
--func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
--	a = args
--	if len(s) == 2 { // "--" terminates the flags
--		f.args = append(f.args, args...)
--		return
--	}
--	name := s[2:]
--	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
--		err = f.failf("bad flag syntax: %s", s)
--		return
--	}
--	split := strings.SplitN(name, "=", 2)
--	name = split[0]
--	m := f.formal
--	flag, alreadythere := m[name] // BUG
--	if !alreadythere {
--		if name == "help" { // special case for nice help message.
--			f.usage()
--			return args, ErrHelp
--		}
--		err = f.failf("unknown flag: --%s", name)
--		return
--	}
--	if len(split) == 1 {
--		if _, ok := flag.Value.(*boolValue); !ok {
--			err = f.failf("flag needs an argument: %s", s)
--			return
--		}
--		f.setFlag(flag, "true", s)
--	} else {
--		if e := f.setFlag(flag, split[1], s); e != nil {
--			err = e
--			return
--		}
--	}
--	return args, nil
--}
--
--func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
--	a = args
--	shorthands := s[1:]
--
--	for i := 0; i < len(shorthands); i++ {
--		c := shorthands[i]
--		flag, alreadythere := f.shorthands[c]
--		if !alreadythere {
--			if c == 'h' { // special case for nice help message.
--				f.usage()
--				err = ErrHelp
--				return
--			}
--			//TODO continue on error
--			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
--			if len(args) == 0 {
--				return
--			}
--		}
--		if alreadythere {
--			if _, ok := flag.Value.(*boolValue); ok {
--				f.setFlag(flag, "true", s)
--				continue
--			}
--			if i < len(shorthands)-1 {
--				if e := f.setFlag(flag, shorthands[i+1:], s); e != nil {
--					err = e
--					return
--				}
--				break
--			}
--			if len(args) == 0 {
--				err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
--				return
--			}
--			if e := f.setFlag(flag, args[0], s); e != nil {
--				err = e
--				return
--			}
--		}
--		a = args[1:]
--		break // should be unnecessary
--	}
--
--	return
--}
--
--func (f *FlagSet) parseArgs(args []string) (err error) {
--	for len(args) > 0 {
--		s := args[0]
--		args = args[1:]
--		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
--			if !f.interspersed {
--				f.args = append(f.args, s)
--				f.args = append(f.args, args...)
--				return nil
--			}
--			f.args = append(f.args, s)
--			continue
--		}
--
--		if s[1] == '-' {
--			args, err = f.parseLongArg(s, args)
--		} else {
--			args, err = f.parseShortArg(s, args)
--		}
--	}
--	return
--}
--
--// Parse parses flag definitions from the argument list, which should not
--// include the command name.  Must be called after all flags in the FlagSet
--// are defined and before flags are accessed by the program.
--// The return value will be ErrHelp if -help was set but not defined.
--func (f *FlagSet) Parse(arguments []string) error {
--	f.parsed = true
--	f.args = make([]string, 0, len(arguments))
--	err := f.parseArgs(arguments)
--	if err != nil {
--		switch f.errorHandling {
--		case ContinueOnError:
--			return err
--		case ExitOnError:
--			os.Exit(2)
--		case PanicOnError:
--			panic(err)
--		}
--	}
--	return nil
--}
--
--// Parsed reports whether f.Parse has been called.
--func (f *FlagSet) Parsed() bool {
--	return f.parsed
--}
--
--// Parse parses the command-line flags from os.Args[1:].  Must be called
--// after all flags are defined and before flags are accessed by the program.
--func Parse() {
--	// Ignore errors; CommandLine is set for ExitOnError.
--	CommandLine.Parse(os.Args[1:])
--}
--
--// Whether to support interspersed option/non-option arguments.
--func SetInterspersed(interspersed bool) {
--	CommandLine.SetInterspersed(interspersed)
--}
--
--// Parsed returns true if the command-line flags have been parsed.
--func Parsed() bool {
--	return CommandLine.Parsed()
--}
--
--// The default set of command-line flags, parsed from os.Args.
--var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
--
--// NewFlagSet returns a new, empty flag set with the specified name and
--// error handling property.
--func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
--	f := &FlagSet{
--		name:          name,
--		errorHandling: errorHandling,
--		interspersed:  true,
--	}
--	return f
--}
--
--// Whether to support interspersed option/non-option arguments.
--func (f *FlagSet) SetInterspersed(interspersed bool) {
--	f.interspersed = interspersed
--}
--
--// Init sets the name and error handling property for a flag set.
--// By default, the zero FlagSet uses an empty name and the
--// ContinueOnError error handling policy.
--func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
--	f.name = name
--	f.errorHandling = errorHandling
--}
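
For orientation, a minimal sketch of how the FlagSet API deleted above is typically driven from a main package; it relies only on functions and fields whose signatures appear in the removed code (NewFlagSet, StringP, IntP, BoolP, Parse, Args, Lookup, Flag.Changed), and the program and flag names are illustrative:

    package main

    import (
        "fmt"
        "os"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // ContinueOnError makes Parse return the error instead of exiting.
        fs := flag.NewFlagSet("example", flag.ContinueOnError)
        name := fs.StringP("name", "n", "world", "who to greet")
        count := fs.IntP("count", "c", 1, "number of greetings")
        verbose := fs.BoolP("verbose", "v", false, "print leftover arguments")

        if err := fs.Parse(os.Args[1:]); err != nil {
            os.Exit(2) // an error or usage message has already been printed
        }

        // Changed records whether the flag was set on the command line.
        if fs.Lookup("name").Changed {
            fmt.Println("--name was given explicitly")
        }
        if *verbose {
            fmt.Println("remaining args:", fs.Args())
        }
        for i := 0; i < *count; i++ {
            fmt.Println("hello,", *name)
        }
    }
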
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
-deleted file mode 100644
-index 4d95b15..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
-+++ /dev/null
-@@ -1,354 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package pflag_test
--
--import (
--	"bytes"
--	"fmt"
--	"io/ioutil"
--	"os"
--	"sort"
--	"strings"
--	"testing"
--	"time"
--
--	. "github.com/spf13/pflag"
--)
--
--var (
--	test_bool     = Bool("test_bool", false, "bool value")
--	test_int      = Int("test_int", 0, "int value")
--	test_int64    = Int64("test_int64", 0, "int64 value")
--	test_uint     = Uint("test_uint", 0, "uint value")
--	test_uint64   = Uint64("test_uint64", 0, "uint64 value")
--	test_string   = String("test_string", "0", "string value")
--	test_float64  = Float64("test_float64", 0, "float64 value")
--	test_duration = Duration("test_duration", 0, "time.Duration value")
--)
--
--func boolString(s string) string {
--	if s == "0" {
--		return "false"
--	}
--	return "true"
--}
--
--func TestEverything(t *testing.T) {
--	m := make(map[string]*Flag)
--	desired := "0"
--	visitor := func(f *Flag) {
--		if len(f.Name) > 5 && f.Name[0:5] == "test_" {
--			m[f.Name] = f
--			ok := false
--			switch {
--			case f.Value.String() == desired:
--				ok = true
--			case f.Name == "test_bool" && f.Value.String() == boolString(desired):
--				ok = true
--			case f.Name == "test_duration" && f.Value.String() == desired+"s":
--				ok = true
--			}
--			if !ok {
--				t.Error("Visit: bad value", f.Value.String(), "for", f.Name)
--			}
--		}
--	}
--	VisitAll(visitor)
--	if len(m) != 8 {
--		t.Error("VisitAll misses some flags")
--		for k, v := range m {
--			t.Log(k, *v)
--		}
--	}
--	m = make(map[string]*Flag)
--	Visit(visitor)
--	if len(m) != 0 {
--		t.Errorf("Visit sees unset flags")
--		for k, v := range m {
--			t.Log(k, *v)
--		}
--	}
--	// Now set all flags
--	Set("test_bool", "true")
--	Set("test_int", "1")
--	Set("test_int64", "1")
--	Set("test_uint", "1")
--	Set("test_uint64", "1")
--	Set("test_string", "1")
--	Set("test_float64", "1")
--	Set("test_duration", "1s")
--	desired = "1"
--	Visit(visitor)
--	if len(m) != 8 {
--		t.Error("Visit fails after set")
--		for k, v := range m {
--			t.Log(k, *v)
--		}
--	}
--	// Now test they're visited in sort order.
--	var flagNames []string
--	Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) })
--	if !sort.StringsAreSorted(flagNames) {
--		t.Errorf("flag names not sorted: %v", flagNames)
--	}
--}
--
--func TestUsage(t *testing.T) {
--	called := false
--	ResetForTesting(func() { called = true })
--	if GetCommandLine().Parse([]string{"--x"}) == nil {
--		t.Error("parse did not fail for unknown flag")
--	}
--	if !called {
--		t.Error("did not call Usage for unknown flag")
--	}
--}
--
--func testParse(f *FlagSet, t *testing.T) {
--	if f.Parsed() {
--		t.Error("f.Parse() = true before Parse")
--	}
--	boolFlag := f.Bool("bool", false, "bool value")
--	bool2Flag := f.Bool("bool2", false, "bool2 value")
--	bool3Flag := f.Bool("bool3", false, "bool3 value")
--	intFlag := f.Int("int", 0, "int value")
--	int64Flag := f.Int64("int64", 0, "int64 value")
--	uintFlag := f.Uint("uint", 0, "uint value")
--	uint64Flag := f.Uint64("uint64", 0, "uint64 value")
--	stringFlag := f.String("string", "0", "string value")
--	float64Flag := f.Float64("float64", 0, "float64 value")
--	durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value")
--	extra := "one-extra-argument"
--	args := []string{
--		"--bool",
--		"--bool2=true",
--		"--bool3=false",
--		"--int=22",
--		"--int64=0x23",
--		"--uint=24",
--		"--uint64=25",
--		"--string=hello",
--		"--float64=2718e28",
--		"--duration=2m",
--		extra,
--	}
--	if err := f.Parse(args); err != nil {
--		t.Fatal(err)
--	}
--	if !f.Parsed() {
--		t.Error("f.Parse() = false after Parse")
--	}
--	if *boolFlag != true {
--		t.Error("bool flag should be true, is ", *boolFlag)
--	}
--	if *bool2Flag != true {
--		t.Error("bool2 flag should be true, is ", *bool2Flag)
--	}
--	if *bool3Flag != false {
--		t.Error("bool3 flag should be false, is ", *bool3Flag)
--	}
--	if *intFlag != 22 {
--		t.Error("int flag should be 22, is ", *intFlag)
--	}
--	if *int64Flag != 0x23 {
--		t.Error("int64 flag should be 0x23, is ", *int64Flag)
--	}
--	if *uintFlag != 24 {
--		t.Error("uint flag should be 24, is ", *uintFlag)
--	}
--	if *uint64Flag != 25 {
--		t.Error("uint64 flag should be 25, is ", *uint64Flag)
--	}
--	if *stringFlag != "hello" {
--		t.Error("string flag should be `hello`, is ", *stringFlag)
--	}
--	if *float64Flag != 2718e28 {
--		t.Error("float64 flag should be 2718e28, is ", *float64Flag)
--	}
--	if *durationFlag != 2*time.Minute {
--		t.Error("duration flag should be 2m, is ", *durationFlag)
--	}
--	if len(f.Args()) != 1 {
--		t.Error("expected one argument, got", len(f.Args()))
--	} else if f.Args()[0] != extra {
--		t.Errorf("expected argument %q got %q", extra, f.Args()[0])
--	}
--}
--
--func TestShorthand(t *testing.T) {
--	f := NewFlagSet("shorthand", ContinueOnError)
--	if f.Parsed() {
--		t.Error("f.Parse() = true before Parse")
--	}
--	boolaFlag := f.BoolP("boola", "a", false, "bool value")
--	boolbFlag := f.BoolP("boolb", "b", false, "bool2 value")
--	boolcFlag := f.BoolP("boolc", "c", false, "bool3 value")
--	stringFlag := f.StringP("string", "s", "0", "string value")
--	extra := "interspersed-argument"
--	notaflag := "--i-look-like-a-flag"
--	args := []string{
--		"-ab",
--		extra,
--		"-cs",
--		"hello",
--		"--",
--		notaflag,
--	}
--	f.SetOutput(ioutil.Discard)
--	if err := f.Parse(args); err == nil {
--		t.Error("--i-look-like-a-flag should throw an error")
--	}
--	if !f.Parsed() {
--		t.Error("f.Parse() = false after Parse")
--	}
--	if *boolaFlag != true {
--		t.Error("boola flag should be true, is ", *boolaFlag)
--	}
--	if *boolbFlag != true {
--		t.Error("boolb flag should be true, is ", *boolbFlag)
--	}
--	if *boolcFlag != true {
--		t.Error("boolc flag should be true, is ", *boolcFlag)
--	}
--	if *stringFlag != "hello" {
--		t.Error("string flag should be `hello`, is ", *stringFlag)
--	}
--	if len(f.Args()) != 2 {
--		t.Error("expected two arguments, got", len(f.Args()))
--	} else if f.Args()[0] != extra {
--		t.Errorf("expected argument %q got %q", extra, f.Args()[0])
--	} else if f.Args()[1] != notaflag {
--		t.Errorf("expected argument %q got %q", notaflag, f.Args()[1])
--	}
--}
--
--func TestParse(t *testing.T) {
--	ResetForTesting(func() { t.Error("bad parse") })
--	testParse(GetCommandLine(), t)
--}
--
--func TestFlagSetParse(t *testing.T) {
--	testParse(NewFlagSet("test", ContinueOnError), t)
--}
--
--// Declare a user-defined flag type.
--type flagVar []string
--
--func (f *flagVar) String() string {
--	return fmt.Sprint([]string(*f))
--}
--
--func (f *flagVar) Set(value string) error {
--	*f = append(*f, value)
--	return nil
--}
--
--func TestUserDefined(t *testing.T) {
--	var flags FlagSet
--	flags.Init("test", ContinueOnError)
--	var v flagVar
--	flags.VarP(&v, "v", "v", "usage")
--	if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil {
--		t.Error(err)
--	}
--	if len(v) != 3 {
--		t.Fatal("expected 3 args; got ", len(v))
--	}
--	expect := "[1 2 3]"
--	if v.String() != expect {
--		t.Errorf("expected value %q got %q", expect, v.String())
--	}
--}
--
--func TestSetOutput(t *testing.T) {
--	var flags FlagSet
--	var buf bytes.Buffer
--	flags.SetOutput(&buf)
--	flags.Init("test", ContinueOnError)
--	flags.Parse([]string{"--unknown"})
--	if out := buf.String(); !strings.Contains(out, "--unknown") {
--		t.Logf("expected output mentioning unknown; got %q", out)
--	}
--}
--
--// This tests that one can reset the flags. This still works but not well, and is
--// superseded by FlagSet.
--func TestChangingArgs(t *testing.T) {
--	ResetForTesting(func() { t.Fatal("bad parse") })
--	oldArgs := os.Args
--	defer func() { os.Args = oldArgs }()
--	os.Args = []string{"cmd", "--before", "subcmd"}
--	before := Bool("before", false, "")
--	if err := GetCommandLine().Parse(os.Args[1:]); err != nil {
--		t.Fatal(err)
--	}
--	cmd := Arg(0)
--	os.Args = []string{"subcmd", "--after", "args"}
--	after := Bool("after", false, "")
--	Parse()
--	args := Args()
--
--	if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
--		t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
--	}
--}
--
--// Test that -help invokes the usage message and returns ErrHelp.
--func TestHelp(t *testing.T) {
--	var helpCalled = false
--	fs := NewFlagSet("help test", ContinueOnError)
--	fs.Usage = func() { helpCalled = true }
--	var flag bool
--	fs.BoolVar(&flag, "flag", false, "regular flag")
--	// Regular flag invocation should work
--	err := fs.Parse([]string{"--flag=true"})
--	if err != nil {
--		t.Fatal("expected no error; got ", err)
--	}
--	if !flag {
--		t.Error("flag was not set by --flag")
--	}
--	if helpCalled {
--		t.Error("help called for regular flag")
--		helpCalled = false // reset for next test
--	}
--	// Help flag should work as expected.
--	err = fs.Parse([]string{"--help"})
--	if err == nil {
--		t.Fatal("error expected")
--	}
--	if err != ErrHelp {
--		t.Fatal("expected ErrHelp; got ", err)
--	}
--	if !helpCalled {
--		t.Fatal("help was not called")
--	}
--	// If we define a help flag, that should override.
--	var help bool
--	fs.BoolVar(&help, "help", false, "help flag")
--	helpCalled = false
--	err = fs.Parse([]string{"--help"})
--	if err != nil {
--		t.Fatal("expected no error for defined --help; got ", err)
--	}
--	if helpCalled {
--		t.Fatal("help was called; should not have been for defined help flag")
--	}
--}
--
--func TestNoInterspersed(t *testing.T) {
--	f := NewFlagSet("test", ContinueOnError)
--	f.SetInterspersed(false)
--	f.Bool("true", true, "always true")
--	f.Bool("false", false, "always false")
--	err := f.Parse([]string{"--true", "break", "--false"})
--	if err != nil {
--		t.Fatal("expected no error; got ", err)
--	}
--	args := f.Args()
--	if len(args) != 2 || args[0] != "break" || args[1] != "--false" {
--		t.Fatal("expected interspersed options/non-options to fail")
--	}
--}
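
The Var/VarP documentation above mentions turning a comma-separated string into a slice of strings; a minimal sketch of such a Value, mirroring the flagVar type in the deleted test (only String and Set are required by VarP here; the listFlag name is illustrative):

    package main

    import (
        "fmt"
        "os"
        "strings"

        flag "github.com/spf13/pflag"
    )

    // listFlag collects values from --items=a,b,c into a string slice.
    type listFlag []string

    func (l *listFlag) String() string { return strings.Join(*l, ",") }

    func (l *listFlag) Set(value string) error {
        *l = append(*l, strings.Split(value, ",")...)
        return nil
    }

    func main() {
        var items listFlag
        fs := flag.NewFlagSet("example", flag.ContinueOnError)
        fs.VarP(&items, "items", "i", "comma-separated list of items")
        if err := fs.Parse(os.Args[1:]); err != nil {
            os.Exit(2)
        }
        fmt.Println("items:", items)
    }
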
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go b/Godeps/_workspace/src/github.com/spf13/pflag/float32.go
-deleted file mode 100644
-index b7ad67d..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- float32 Value
--type float32Value float32
--
--func newFloat32Value(val float32, p *float32) *float32Value {
--	*p = val
--	return (*float32Value)(p)
--}
--
--func (f *float32Value) Set(s string) error {
--	v, err := strconv.ParseFloat(s, 32)
--	*f = float32Value(v)
--	return err
--}
--
--func (f *float32Value) Type() string {
--	return "float32"
--}
--
--func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
--
--// Float32Var defines a float32 flag with specified name, default value, and usage string.
--// The argument p points to a float32 variable in which to store the value of the flag.
--func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
--	f.VarP(newFloat32Value(value, p), name, "", usage)
--}
--
--// Like Float32Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
--	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
--}
--
--// Float32Var defines a float32 flag with specified name, default value, and usage string.
--// The argument p points to a float32 variable in which to store the value of the flag.
--func Float32Var(p *float32, name string, value float32, usage string) {
--	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
--}
--
--// Like Float32Var, but accepts a shorthand letter that can be used after a single dash.
--func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
--	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
--}
--
--// Float32 defines a float32 flag with specified name, default value, and usage string.
--// The return value is the address of a float32 variable that stores the value of the flag.
--func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
--	p := new(float32)
--	f.Float32VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Float32, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
--	p := new(float32)
--	f.Float32VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Float32 defines a float32 flag with specified name, default value, and usage string.
--// The return value is the address of a float32 variable that stores the value of the flag.
--func Float32(name string, value float32, usage string) *float32 {
--	return CommandLine.Float32P(name, "", value, usage)
--}
--
--// Like Float32, but accepts a shorthand letter that can be used after a single dash.
--func Float32P(name, shorthand string, value float32, usage string) *float32 {
--	return CommandLine.Float32P(name, shorthand, value, usage)
--}
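
Each of the scalar flag files in this removed package repeats the same four entry points (XVar, XVarP, X, XP) for a FlagSet and for the package-level CommandLine; a short sketch using the float32 variants defined above (flag names are illustrative, and the same pattern applies to float64, the int and uint sizes, and string below):

    package main

    import (
        "fmt"

        flag "github.com/spf13/pflag"
    )

    func main() {
        // Long form only, and long form plus a one-letter shorthand.
        ratio := flag.Float32("ratio", 0.5, "blend ratio")
        scale := flag.Float32P("scale", "s", 1.0, "scale factor")

        // Bind into an existing variable instead of receiving a pointer.
        var limit float32
        flag.Float32VarP(&limit, "limit", "l", 10, "upper limit")

        // Package-level Parse reads os.Args[1:] into CommandLine.
        flag.Parse()

        fmt.Println(*ratio, *scale, limit, flag.Args())
    }
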
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go b/Godeps/_workspace/src/github.com/spf13/pflag/float64.go
-deleted file mode 100644
-index 0315512..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- float64 Value
--type float64Value float64
--
--func newFloat64Value(val float64, p *float64) *float64Value {
--	*p = val
--	return (*float64Value)(p)
--}
--
--func (f *float64Value) Set(s string) error {
--	v, err := strconv.ParseFloat(s, 64)
--	*f = float64Value(v)
--	return err
--}
--
--func (f *float64Value) Type() string {
--	return "float64"
--}
--
--func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
--
--// Float64Var defines a float64 flag with specified name, default value, and usage string.
--// The argument p points to a float64 variable in which to store the value of the flag.
--func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
--	f.VarP(newFloat64Value(value, p), name, "", usage)
--}
--
--// Like Float64Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
--	f.VarP(newFloat64Value(value, p), name, shorthand, usage)
--}
--
--// Float64Var defines a float64 flag with specified name, default value, and usage string.
--// The argument p points to a float64 variable in which to store the value of the flag.
--func Float64Var(p *float64, name string, value float64, usage string) {
--	CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
--}
--
--// Like Float64Var, but accepts a shorthand letter that can be used after a single dash.
--func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
--	CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
--}
--
--// Float64 defines a float64 flag with specified name, default value, and usage string.
--// The return value is the address of a float64 variable that stores the value of the flag.
--func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
--	p := new(float64)
--	f.Float64VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Float64, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
--	p := new(float64)
--	f.Float64VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Float64 defines a float64 flag with specified name, default value, and usage string.
--// The return value is the address of a float64 variable that stores the value of the flag.
--func Float64(name string, value float64, usage string) *float64 {
--	return CommandLine.Float64P(name, "", value, usage)
--}
--
--// Like Float64, but accepts a shorthand letter that can be used after a single dash.
--func Float64P(name, shorthand string, value float64, usage string) *float64 {
--	return CommandLine.Float64P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int.go b/Godeps/_workspace/src/github.com/spf13/pflag/int.go
-deleted file mode 100644
-index dca9da6..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/int.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- int Value
--type intValue int
--
--func newIntValue(val int, p *int) *intValue {
--	*p = val
--	return (*intValue)(p)
--}
--
--func (i *intValue) Set(s string) error {
--	v, err := strconv.ParseInt(s, 0, 64)
--	*i = intValue(v)
--	return err
--}
--
--func (i *intValue) Type() string {
--	return "int"
--}
--
--func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
--
--// IntVar defines an int flag with specified name, default value, and usage string.
--// The argument p points to an int variable in which to store the value of the flag.
--func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
--	f.VarP(newIntValue(value, p), name, "", usage)
--}
--
--// Like IntVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
--	f.VarP(newIntValue(value, p), name, shorthand, usage)
--}
--
--// IntVar defines an int flag with specified name, default value, and usage string.
--// The argument p points to an int variable in which to store the value of the flag.
--func IntVar(p *int, name string, value int, usage string) {
--	CommandLine.VarP(newIntValue(value, p), name, "", usage)
--}
--
--// Like IntVar, but accepts a shorthand letter that can be used after a single dash.
--func IntVarP(p *int, name, shorthand string, value int, usage string) {
--	CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
--}
--
--// Int defines an int flag with specified name, default value, and usage string.
--// The return value is the address of an int variable that stores the value of the flag.
--func (f *FlagSet) Int(name string, value int, usage string) *int {
--	p := new(int)
--	f.IntVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Int, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
--	p := new(int)
--	f.IntVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Int defines an int flag with specified name, default value, and usage string.
--// The return value is the address of an int variable that stores the value of the flag.
--func Int(name string, value int, usage string) *int {
--	return CommandLine.IntP(name, "", value, usage)
--}
--
--// Like Int, but accepts a shorthand letter that can be used after a single dash.
--func IntP(name, shorthand string, value int, usage string) *int {
--	return CommandLine.IntP(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go b/Godeps/_workspace/src/github.com/spf13/pflag/int32.go
-deleted file mode 100644
-index 18eaacd..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- int32 Value
--type int32Value int32
--
--func newInt32Value(val int32, p *int32) *int32Value {
--	*p = val
--	return (*int32Value)(p)
--}
--
--func (i *int32Value) Set(s string) error {
--	v, err := strconv.ParseInt(s, 0, 32)
--	*i = int32Value(v)
--	return err
--}
--
--func (i *int32Value) Type() string {
--	return "int32"
--}
--
--func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
--
--// Int32Var defines an int32 flag with specified name, default value, and usage string.
--// The argument p points to an int32 variable in which to store the value of the flag.
--func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
--	f.VarP(newInt32Value(value, p), name, "", usage)
--}
--
--// Like Int32Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
--	f.VarP(newInt32Value(value, p), name, shorthand, usage)
--}
--
--// Int32Var defines an int32 flag with specified name, default value, and usage string.
--// The argument p points to an int32 variable in which to store the value of the flag.
--func Int32Var(p *int32, name string, value int32, usage string) {
--	CommandLine.VarP(newInt32Value(value, p), name, "", usage)
--}
--
--// Like Int32Var, but accepts a shorthand letter that can be used after a single dash.
--func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
--	CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
--}
--
--// Int32 defines an int32 flag with specified name, default value, and usage string.
--// The return value is the address of an int32 variable that stores the value of the flag.
--func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
--	p := new(int32)
--	f.Int32VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Int32, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
--	p := new(int32)
--	f.Int32VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Int32 defines an int32 flag with specified name, default value, and usage string.
--// The return value is the address of an int32 variable that stores the value of the flag.
--func Int32(name string, value int32, usage string) *int32 {
--	return CommandLine.Int32P(name, "", value, usage)
--}
--
--// Like Int32, but accepts a shorthand letter that can be used after a single dash.
--func Int32P(name, shorthand string, value int32, usage string) *int32 {
--	return CommandLine.Int32P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go b/Godeps/_workspace/src/github.com/spf13/pflag/int64.go
-deleted file mode 100644
-index 0114aaa..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- int64 Value
--type int64Value int64
--
--func newInt64Value(val int64, p *int64) *int64Value {
--	*p = val
--	return (*int64Value)(p)
--}
--
--func (i *int64Value) Set(s string) error {
--	v, err := strconv.ParseInt(s, 0, 64)
--	*i = int64Value(v)
--	return err
--}
--
--func (i *int64Value) Type() string {
--	return "int64"
--}
--
--func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
--
--// Int64Var defines an int64 flag with specified name, default value, and usage string.
--// The argument p points to an int64 variable in which to store the value of the flag.
--func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
--	f.VarP(newInt64Value(value, p), name, "", usage)
--}
--
--// Like Int64Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
--	f.VarP(newInt64Value(value, p), name, shorthand, usage)
--}
--
--// Int64Var defines an int64 flag with specified name, default value, and usage string.
--// The argument p points to an int64 variable in which to store the value of the flag.
--func Int64Var(p *int64, name string, value int64, usage string) {
--	CommandLine.VarP(newInt64Value(value, p), name, "", usage)
--}
--
--// Like Int64Var, but accepts a shorthand letter that can be used after a single dash.
--func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
--	CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
--}
--
--// Int64 defines an int64 flag with specified name, default value, and usage string.
--// The return value is the address of an int64 variable that stores the value of the flag.
--func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
--	p := new(int64)
--	f.Int64VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Int64, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
--	p := new(int64)
--	f.Int64VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Int64 defines an int64 flag with specified name, default value, and usage string.
--// The return value is the address of an int64 variable that stores the value of the flag.
--func Int64(name string, value int64, usage string) *int64 {
--	return CommandLine.Int64P(name, "", value, usage)
--}
--
--// Like Int64, but accepts a shorthand letter that can be used after a single dash.
--func Int64P(name, shorthand string, value int64, usage string) *int64 {
--	return CommandLine.Int64P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go b/Godeps/_workspace/src/github.com/spf13/pflag/int8.go
-deleted file mode 100644
-index aab1022..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- int8 Value
--type int8Value int8
--
--func newInt8Value(val int8, p *int8) *int8Value {
--	*p = val
--	return (*int8Value)(p)
--}
--
--func (i *int8Value) Set(s string) error {
--	v, err := strconv.ParseInt(s, 0, 8)
--	*i = int8Value(v)
--	return err
--}
--
--func (i *int8Value) Type() string {
--	return "int8"
--}
--
--func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) }
--
--// Int8Var defines an int8 flag with specified name, default value, and usage string.
--// The argument p points to an int8 variable in which to store the value of the flag.
--func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
--	f.VarP(newInt8Value(value, p), name, "", usage)
--}
--
--// Like Int8Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
--	f.VarP(newInt8Value(value, p), name, shorthand, usage)
--}
--
--// Int8Var defines an int8 flag with specified name, default value, and usage string.
--// The argument p points to an int8 variable in which to store the value of the flag.
--func Int8Var(p *int8, name string, value int8, usage string) {
--	CommandLine.VarP(newInt8Value(value, p), name, "", usage)
--}
--
--// Like Int8Var, but accepts a shorthand letter that can be used after a single dash.
--func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
--	CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
--}
--
--// Int8 defines an int8 flag with specified name, default value, and usage string.
--// The return value is the address of an int8 variable that stores the value of the flag.
--func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
--	p := new(int8)
--	f.Int8VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Int8, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
--	p := new(int8)
--	f.Int8VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Int8 defines an int8 flag with specified name, default value, and usage string.
--// The return value is the address of an int8 variable that stores the value of the flag.
--func Int8(name string, value int8, usage string) *int8 {
--	return CommandLine.Int8P(name, "", value, usage)
--}
--
--// Like Int8, but accepts a shorthand letter that can be used after a single dash.
--func Int8P(name, shorthand string, value int8, usage string) *int8 {
--	return CommandLine.Int8P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go b/Godeps/_workspace/src/github.com/spf13/pflag/ip.go
-deleted file mode 100644
-index 9d53bd3..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go
-+++ /dev/null
-@@ -1,79 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"net"
--)
--
--// -- net.IP value
--type ipValue net.IP
--
--func newIPValue(val net.IP, p *net.IP) *ipValue {
--	*p = val
--	return (*ipValue)(p)
--}
--
--func (i *ipValue) String() string { return net.IP(*i).String() }
--func (i *ipValue) Set(s string) error {
--	ip := net.ParseIP(s)
--	if ip == nil {
--		return fmt.Errorf("failed to parse IP: %q", s)
--	}
--	*i = ipValue(ip)
--	return nil
--}
--func (i *ipValue) Get() interface{} {
--	return net.IP(*i)
--}
--
--func (i *ipValue) Type() string {
--	return "ip"
--}
--
--// IPVar defines an net.IP flag with specified name, default value, and usage string.
--// The argument p points to an net.IP variable in which to store the value of the flag.
--func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
--	f.VarP(newIPValue(value, p), name, "", usage)
--}
--
--// Like IPVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
--	f.VarP(newIPValue(value, p), name, shorthand, usage)
--}
--
--// IPVar defines an net.IP flag with specified name, default value, and usage string.
--// The argument p points to an net.IP variable in which to store the value of the flag.
--func IPVar(p *net.IP, name string, value net.IP, usage string) {
--	CommandLine.VarP(newIPValue(value, p), name, "", usage)
--}
--
--// Like IPVar, but accepts a shorthand letter that can be used after a single dash.
--func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
--	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
--}
--
--// IP defines an net.IP flag with specified name, default value, and usage string.
--// The return value is the address of an net.IP variable that stores the value of the flag.
--func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
--	p := new(net.IP)
--	f.IPVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like IP, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
--	p := new(net.IP)
--	f.IPVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// IP defines an net.IP flag with specified name, default value, and usage string.
--// The return value is the address of an net.IP variable that stores the value of the flag.
--func IP(name string, value net.IP, usage string) *net.IP {
--	return CommandLine.IPP(name, "", value, usage)
--}
--
--// Like IP, but accepts a shorthand letter that can be used after a single dash.
--func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
--	return CommandLine.IPP(name, shorthand, value, usage)
--}
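
A minimal sketch of the net.IP flag defined above; the 127.0.0.1 default and the flag name are illustrative:

    package main

    import (
        "fmt"
        "net"
        "os"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("example", flag.ContinueOnError)
        // The stored value is a net.IP; Set rejects anything net.ParseIP cannot parse.
        bind := fs.IPP("bind", "b", net.ParseIP("127.0.0.1"), "address to listen on")
        if err := fs.Parse(os.Args[1:]); err != nil {
            os.Exit(2)
        }
        fmt.Println("listening on", bind.String())
    }
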
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go b/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go
-deleted file mode 100644
-index 6f85be9..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"net"
--)
--
--// -- net.IPMask value
--type ipMaskValue net.IPMask
--
--func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
--	*p = val
--	return (*ipMaskValue)(p)
--}
--
--func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
--func (i *ipMaskValue) Set(s string) error {
--	ip := ParseIPv4Mask(s)
--	if ip == nil {
--		return fmt.Errorf("failed to parse IP mask: %q", s)
--	}
--	*i = ipMaskValue(ip)
--	return nil
--}
--func (i *ipMaskValue) Get() interface{} {
--	return net.IPMask(*i)
--}
--
--func (i *ipMaskValue) Type() string {
--	return "ipMask"
--}
--
--// Parse IPv4 netmask written in IP form (e.g. 255.255.255.0).
--// This function should really belong to the net package.
--func ParseIPv4Mask(s string) net.IPMask {
--	mask := net.ParseIP(s)
--	if mask == nil {
--		return nil
--	}
--	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
--}
--
--// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
--// The argument p points to an net.IPMask variable in which to store the value of the flag.
--func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
--	f.VarP(newIPMaskValue(value, p), name, "", usage)
--}
--
--// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
--	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
--}
--
--// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string.
--// The argument p points to an net.IPMask variable in which to store the value of the flag.
--func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
--	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
--}
--
--// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
--func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
--	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
--}
--
--// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
--// The return value is the address of an net.IPMask variable that stores the value of the flag.
--func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
--	p := new(net.IPMask)
--	f.IPMaskVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like IPMask, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
--	p := new(net.IPMask)
--	f.IPMaskVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// IPMask defines an net.IPMask flag with specified name, default value, and usage string.
--// The return value is the address of an net.IPMask variable that stores the value of the flag.
--func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
--	return CommandLine.IPMaskP(name, "", value, usage)
--}
--
--// Like IP, but accepts a shorthand letter that can be used after a single dash.
--func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
--	return CommandLine.IPMaskP(name, shorthand, value, usage)
--}
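
And a corresponding sketch for the net.IPMask flag and the exported ParseIPv4Mask helper above; the dotted-quad default is illustrative:

    package main

    import (
        "fmt"
        "os"

        flag "github.com/spf13/pflag"
    )

    func main() {
        fs := flag.NewFlagSet("example", flag.ContinueOnError)
        // ParseIPv4Mask accepts the dotted-quad form, e.g. 255.255.255.0.
        mask := fs.IPMaskP("netmask", "m", flag.ParseIPv4Mask("255.255.255.0"), "IPv4 netmask")
        if err := fs.Parse(os.Args[1:]); err != nil {
            os.Exit(2)
        }
        ones, bits := mask.Size()
        fmt.Printf("netmask %v (%d/%d bits)\n", *mask, ones, bits)
    }
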
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/string.go b/Godeps/_workspace/src/github.com/spf13/pflag/string.go
-deleted file mode 100644
-index 362fbf8..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/string.go
-+++ /dev/null
-@@ -1,69 +0,0 @@
--package pflag
--
--import "fmt"
--
--// -- string Value
--type stringValue string
--
--func newStringValue(val string, p *string) *stringValue {
--	*p = val
--	return (*stringValue)(p)
--}
--
--func (s *stringValue) Set(val string) error {
--	*s = stringValue(val)
--	return nil
--}
--func (s *stringValue) Type() string {
--	return "string"
--}
--
--func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
--
--// StringVar defines a string flag with specified name, default value, and usage string.
--// The argument p points to a string variable in which to store the value of the flag.
--func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
--	f.VarP(newStringValue(value, p), name, "", usage)
--}
--
--// Like StringVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
--	f.VarP(newStringValue(value, p), name, shorthand, usage)
--}
--
--// StringVar defines a string flag with specified name, default value, and usage string.
--// The argument p points to a string variable in which to store the value of the flag.
--func StringVar(p *string, name string, value string, usage string) {
--	CommandLine.VarP(newStringValue(value, p), name, "", usage)
--}
--
--// Like StringVar, but accepts a shorthand letter that can be used after a single dash.
--func StringVarP(p *string, name, shorthand string, value string, usage string) {
--	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
--}
--
--// String defines a string flag with specified name, default value, and usage string.
--// The return value is the address of a string variable that stores the value of the flag.
--func (f *FlagSet) String(name string, value string, usage string) *string {
--	p := new(string)
--	f.StringVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like String, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
--	p := new(string)
--	f.StringVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// String defines a string flag with specified name, default value, and usage string.
--// The return value is the address of a string variable that stores the value of the flag.
--func String(name string, value string, usage string) *string {
--	return CommandLine.StringP(name, "", value, usage)
--}
--
--// Like String, but accepts a shorthand letter that can be used after a single dash.
--func StringP(name, shorthand string, value string, usage string) *string {
--	return CommandLine.StringP(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint.go
-deleted file mode 100644
-index c063fe7..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- uint Value
--type uintValue uint
--
--func newUintValue(val uint, p *uint) *uintValue {
--	*p = val
--	return (*uintValue)(p)
--}
--
--func (i *uintValue) Set(s string) error {
--	v, err := strconv.ParseUint(s, 0, 64)
--	*i = uintValue(v)
--	return err
--}
--
--func (i *uintValue) Type() string {
--	return "uint"
--}
--
--func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
--
--// UintVar defines a uint flag with specified name, default value, and usage string.
--// The argument p points to a uint variable in which to store the value of the flag.
--func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
--	f.VarP(newUintValue(value, p), name, "", usage)
--}
--
--// Like UintVar, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
--	f.VarP(newUintValue(value, p), name, shorthand, usage)
--}
--
--// UintVar defines a uint flag with specified name, default value, and usage string.
--// The argument p points to a uint  variable in which to store the value of the flag.
--func UintVar(p *uint, name string, value uint, usage string) {
--	CommandLine.VarP(newUintValue(value, p), name, "", usage)
--}
--
--// Like UintVar, but accepts a shorthand letter that can be used after a single dash.
--func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
--	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
--}
--
--// Uint defines a uint flag with specified name, default value, and usage string.
--// The return value is the address of a uint  variable that stores the value of the flag.
--func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
--	p := new(uint)
--	f.UintVarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Uint, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
--	p := new(uint)
--	f.UintVarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Uint defines a uint flag with specified name, default value, and usage string.
--// The return value is the address of a uint  variable that stores the value of the flag.
--func Uint(name string, value uint, usage string) *uint {
--	return CommandLine.UintP(name, "", value, usage)
--}
--
--// Like Uint, but accepts a shorthand letter that can be used after a single dash.
--func UintP(name, shorthand string, value uint, usage string) *uint {
--	return CommandLine.UintP(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go
-deleted file mode 100644
-index ec14ab0..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go
-+++ /dev/null
-@@ -1,76 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- uint16 value
--type uint16Value uint16
--
--func newUint16Value(val uint16, p *uint16) *uint16Value {
--	*p = val
--	return (*uint16Value)(p)
--}
--func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) }
--func (i *uint16Value) Set(s string) error {
--	v, err := strconv.ParseUint(s, 0, 16)
--	*i = uint16Value(v)
--	return err
--}
--
--func (i *uint16Value) Get() interface{} {
--	return uint16(*i)
--}
--
--func (i *uint16Value) Type() string {
--	return "uint16"
--}
--
--// Uint16Var defines a uint flag with specified name, default value, and usage string.
--// The argument p points to a uint variable in which to store the value of the flag.
--func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
--	f.VarP(newUint16Value(value, p), name, "", usage)
--}
--
--// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
--	f.VarP(newUint16Value(value, p), name, shorthand, usage)
--}
--
--// Uint16Var defines a uint flag with specified name, default value, and usage string.
--// The argument p points to a uint  variable in which to store the value of the flag.
--func Uint16Var(p *uint16, name string, value uint16, usage string) {
--	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
--}
--
--// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
--func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
--	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
--}
--
--// Uint16 defines a uint flag with specified name, default value, and usage string.
--// The return value is the address of a uint  variable that stores the value of the flag.
--func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
--	p := new(uint16)
--	f.Uint16VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Uint16, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
--	p := new(uint16)
--	f.Uint16VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Uint16 defines a uint flag with specified name, default value, and usage string.
--// The return value is the address of a uint  variable that stores the value of the flag.
--func Uint16(name string, value uint16, usage string) *uint16 {
--	return CommandLine.Uint16P(name, "", value, usage)
--}
--
--// Like Uint16, but accepts a shorthand letter that can be used after a single dash.
--func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
--	return CommandLine.Uint16P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go
-deleted file mode 100644
-index 05bc3bd..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go
-+++ /dev/null
-@@ -1,75 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- uint32 value
--type uint32Value uint32
--
--func newUint32Value(val uint32, p *uint32) *uint32Value {
--	*p = val
--	return (*uint32Value)(p)
--}
--func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) }
--func (i *uint32Value) Set(s string) error {
--	v, err := strconv.ParseUint(s, 0, 32)
--	*i = uint32Value(v)
--	return err
--}
--func (i *uint32Value) Get() interface{} {
--	return uint32(*i)
--}
--
--func (i *uint32Value) Type() string {
--	return "uint32"
--}
--
--// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
--// The argument p points to a uint32 variable in which to store the value of the flag.
--func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
--	f.VarP(newUint32Value(value, p), name, "", usage)
--}
--
--// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
--	f.VarP(newUint32Value(value, p), name, shorthand, usage)
--}
--
--// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
--// The argument p points to a uint32  variable in which to store the value of the flag.
--func Uint32Var(p *uint32, name string, value uint32, usage string) {
--	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
--}
--
--// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
--func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
--	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
--}
--
--// Uint32 defines a uint32 flag with specified name, default value, and usage string.
--// The return value is the address of a uint32  variable that stores the value of the flag.
--func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
--	p := new(uint32)
--	f.Uint32VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Uint32, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
--	p := new(uint32)
--	f.Uint32VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Uint32 defines a uint32 flag with specified name, default value, and usage string.
--// The return value is the address of a uint32  variable that stores the value of the flag.
--func Uint32(name string, value uint32, usage string) *uint32 {
--	return CommandLine.Uint32P(name, "", value, usage)
--}
--
--// Like Uint32, but accepts a shorthand letter that can be used after a single dash.
--func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
--	return CommandLine.Uint32P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go
-deleted file mode 100644
-index 99c7e80..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- uint64 Value
--type uint64Value uint64
--
--func newUint64Value(val uint64, p *uint64) *uint64Value {
--	*p = val
--	return (*uint64Value)(p)
--}
--
--func (i *uint64Value) Set(s string) error {
--	v, err := strconv.ParseUint(s, 0, 64)
--	*i = uint64Value(v)
--	return err
--}
--
--func (i *uint64Value) Type() string {
--	return "uint64"
--}
--
--func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
--
--// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
--// The argument p points to a uint64 variable in which to store the value of the flag.
--func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
--	f.VarP(newUint64Value(value, p), name, "", usage)
--}
--
--// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
--	f.VarP(newUint64Value(value, p), name, shorthand, usage)
--}
--
--// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
--// The argument p points to a uint64 variable in which to store the value of the flag.
--func Uint64Var(p *uint64, name string, value uint64, usage string) {
--	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
--}
--
--// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
--func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
--	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
--}
--
--// Uint64 defines a uint64 flag with specified name, default value, and usage string.
--// The return value is the address of a uint64 variable that stores the value of the flag.
--func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
--	p := new(uint64)
--	f.Uint64VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Uint64, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
--	p := new(uint64)
--	f.Uint64VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Uint64 defines a uint64 flag with specified name, default value, and usage string.
--// The return value is the address of a uint64 variable that stores the value of the flag.
--func Uint64(name string, value uint64, usage string) *uint64 {
--	return CommandLine.Uint64P(name, "", value, usage)
--}
--
--// Like Uint64, but accepts a shorthand letter that can be used after a single dash.
--func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
--	return CommandLine.Uint64P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go
-deleted file mode 100644
-index 6fef508..0000000
---- a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--package pflag
--
--import (
--	"fmt"
--	"strconv"
--)
--
--// -- uint8 Value
--type uint8Value uint8
--
--func newUint8Value(val uint8, p *uint8) *uint8Value {
--	*p = val
--	return (*uint8Value)(p)
--}
--
--func (i *uint8Value) Set(s string) error {
--	v, err := strconv.ParseUint(s, 0, 8)
--	*i = uint8Value(v)
--	return err
--}
--
--func (i *uint8Value) Type() string {
--	return "uint8"
--}
--
--func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) }
--
--// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
--// The argument p points to a uint8 variable in which to store the value of the flag.
--func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
--	f.VarP(newUint8Value(value, p), name, "", usage)
--}
--
--// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
--	f.VarP(newUint8Value(value, p), name, shorthand, usage)
--}
--
--// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
--// The argument p points to a uint8 variable in which to store the value of the flag.
--func Uint8Var(p *uint8, name string, value uint8, usage string) {
--	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
--}
--
--// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
--func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
--	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
--}
--
--// Uint8 defines a uint8 flag with specified name, default value, and usage string.
--// The return value is the address of a uint8 variable that stores the value of the flag.
--func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
--	p := new(uint8)
--	f.Uint8VarP(p, name, "", value, usage)
--	return p
--}
--
--// Like Uint8, but accepts a shorthand letter that can be used after a single dash.
--func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
--	p := new(uint8)
--	f.Uint8VarP(p, name, shorthand, value, usage)
--	return p
--}
--
--// Uint8 defines a uint8 flag with specified name, default value, and usage string.
--// The return value is the address of a uint8 variable that stores the value of the flag.
--func Uint8(name string, value uint8, usage string) *uint8 {
--	return CommandLine.Uint8P(name, "", value, usage)
--}
--
--// Like Uint8, but accepts a shorthand letter that can be used after a single dash.
--func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
--	return CommandLine.Uint8P(name, shorthand, value, usage)
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore b/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore
-deleted file mode 100644
-index 0026861..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/.gitignore
-+++ /dev/null
-@@ -1,22 +0,0 @@
--# Compiled Object files, Static and Dynamic libs (Shared Objects)
--*.o
--*.a
--*.so
--
--# Folders
--_obj
--_test
--
--# Architecture specific extensions/prefixes
--*.[568vq]
--[568vq].out
--
--*.cgo1.go
--*.cgo2.c
--_cgo_defun.c
--_cgo_gotypes.go
--_cgo_export.*
--
--_testmain.go
--
--*.exe
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/README.md b/Godeps/_workspace/src/github.com/stretchr/objx/README.md
-deleted file mode 100644
-index 4aa1806..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/README.md
-+++ /dev/null
-@@ -1,3 +0,0 @@
--# objx
--
--  * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx)
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go
-deleted file mode 100644
-index 721bcac..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/accessors.go
-+++ /dev/null
-@@ -1,179 +0,0 @@
--package objx
--
--import (
--	"fmt"
--	"regexp"
--	"strconv"
--	"strings"
--)
--
--// arrayAccesRegexString is the regex used to extract the array number
--// from the access path
--const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
--
--// arrayAccesRegex is the compiled arrayAccesRegexString
--var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
--
--// Get gets the value using the specified selector and
--// returns it inside a new Obj object.
--//
--// If it cannot find the value, Get will return a nil
--// value inside an instance of Obj.
--//
--// Get can only operate directly on map[string]interface{} and []interface{}.
--//
--// Example
--//
--// To access the title of the third chapter of the second book, do:
--//
--//    o.Get("books[1].chapters[2].title")
--func (m Map) Get(selector string) *Value {
--	rawObj := access(m, selector, nil, false, false)
--	return &Value{data: rawObj}
--}
--
--// Set sets the value using the specified selector and
--// returns the object on which Set was called.
--//
--// Set can only operate directly on map[string]interface{} and []interface{}
--//
--// Example
--//
--// To set the title of the third chapter of the second book, do:
--//
--//    o.Set("books[1].chapters[2].title","Time to Go")
--func (m Map) Set(selector string, value interface{}) Map {
--	access(m, selector, value, true, false)
--	return m
--}
--
--// access accesses the object using the selector and performs the
--// appropriate action.
--func access(current, selector, value interface{}, isSet, panics bool) interface{} {
--
--	switch selector.(type) {
--	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
--
--		if array, ok := current.([]interface{}); ok {
--			index := intFromInterface(selector)
--
--			if index >= len(array) {
--				if panics {
--					panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
--				}
--				return nil
--			}
--
--			return array[index]
--		}
--
--		return nil
--
--	case string:
--
--		selStr := selector.(string)
--		selSegs := strings.SplitN(selStr, PathSeparator, 2)
--		thisSel := selSegs[0]
--		index := -1
--		var err error
--
--		// https://github.com/stretchr/objx/issues/12
--		if strings.Contains(thisSel, "[") {
--
--			arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)
--
--			if len(arrayMatches) > 0 {
--
--				// Get the key into the map
--				thisSel = arrayMatches[1]
--
--				// Get the index into the array at the key
--				index, err = strconv.Atoi(arrayMatches[2])
--
--				if err != nil {
--					// This should never happen. If it does, something has gone
--					// seriously wrong. Panic.
--					panic("objx: Array index is not an integer.  Must use array[int].")
--				}
--
--			}
--		}
--
--		if curMap, ok := current.(Map); ok {
--			current = map[string]interface{}(curMap)
--		}
--
--		// get the object in question
--		switch current.(type) {
--		case map[string]interface{}:
--			curMSI := current.(map[string]interface{})
--			if len(selSegs) <= 1 && isSet {
--				curMSI[thisSel] = value
--				return nil
--			} else {
--				current = curMSI[thisSel]
--			}
--		default:
--			current = nil
--		}
--
--		if current == nil && panics {
--			panic(fmt.Sprintf("objx: '%v' invalid on object.", selector))
--		}
--
--		// do we need to access the item of an array?
--		if index > -1 {
--			if array, ok := current.([]interface{}); ok {
--				if index < len(array) {
--					current = array[index]
--				} else {
--					if panics {
--						panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
--					}
--					current = nil
--				}
--			}
--		}
--
--		if len(selSegs) > 1 {
--			current = access(current, selSegs[1], value, isSet, panics)
--		}
--
--	}
--
--	return current
--
--}
--
--// intFromInterface converts an interface object to the largest
--// representation of an unsigned integer using a type switch and
--// assertions
--func intFromInterface(selector interface{}) int {
--	var value int
--	switch selector.(type) {
--	case int:
--		value = selector.(int)
--	case int8:
--		value = int(selector.(int8))
--	case int16:
--		value = int(selector.(int16))
--	case int32:
--		value = int(selector.(int32))
--	case int64:
--		value = int(selector.(int64))
--	case uint:
--		value = int(selector.(uint))
--	case uint8:
--		value = int(selector.(uint8))
--	case uint16:
--		value = int(selector.(uint16))
--	case uint32:
--		value = int(selector.(uint32))
--	case uint64:
--		value = int(selector.(uint64))
--	default:
--		panic("objx: array access argument is not an integer type (this should never happen)")
--	}
--
--	return value
--}
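Since the Get/Set selector syntax documented above (dot plus [index] notation) is the heart of this file, here is a short sketch of how the removed accessors are used from calling code; the book data is made up for illustration:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    m := objx.New(map[string]interface{}{
        "books": []interface{}{
            map[string]interface{}{"title": "First"},
            map[string]interface{}{
                "title": "Second",
                "chapters": []interface{}{
                    map[string]interface{}{"title": "One"},
                    map[string]interface{}{"title": "Two"},
                    map[string]interface{}{"title": "Three"},
                },
            },
        },
    })

    // Dot plus array notation, exactly as documented on Get above.
    fmt.Println(m.Get("books[1].chapters[2].title").Str()) // Three

    // Set walks the same selector and returns the Map for chaining.
    m.Set("books[1].chapters[2].title", "Time to Go")
    fmt.Println(m.Get("books[1].chapters[2].title").Str()) // Time to Go
}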
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go
-deleted file mode 100644
-index ce5d8e4..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/accessors_test.go
-+++ /dev/null
-@@ -1,145 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestAccessorsAccessGetSingleField(t *testing.T) {
--
--	current := map[string]interface{}{"name": "Tyler"}
--	assert.Equal(t, "Tyler", access(current, "name", nil, false, true))
--
--}
--func TestAccessorsAccessGetDeep(t *testing.T) {
--
--	current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
--	assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true))
--	assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true))
--
--}
--func TestAccessorsAccessGetDeepDeep(t *testing.T) {
--
--	current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
--	assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true))
--
--}
--func TestAccessorsAccessGetInsideArray(t *testing.T) {
--
--	current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
--	assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true))
--	assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true))
--	assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true))
--	assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true))
--
--	assert.Panics(t, func() {
--		access(current, "names[2]", nil, false, true)
--	})
--	assert.Nil(t, access(current, "names[2]", nil, false, false))
--
--}
--
--func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) {
--
--	current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
--	one := access(current, 0, nil, false, false)
--	two := access(current, 1, nil, false, false)
--	three := access(current, 2, nil, false, false)
--
--	assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
--	assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
--	assert.Nil(t, three)
--
--}
--
--func TestAccessorsGet(t *testing.T) {
--
--	current := New(map[string]interface{}{"name": "Tyler"})
--	assert.Equal(t, "Tyler", current.Get("name").data)
--
--}
--
--func TestAccessorsAccessSetSingleField(t *testing.T) {
--
--	current := map[string]interface{}{"name": "Tyler"}
--	access(current, "name", "Mat", true, false)
--	assert.Equal(t, current["name"], "Mat")
--
--	access(current, "age", 29, true, true)
--	assert.Equal(t, current["age"], 29)
--
--}
--
--func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) {
--
--	current := map[string]interface{}{}
--	access(current, "name", "Mat", true, false)
--	assert.Equal(t, current["name"], "Mat")
--
--}
--
--func TestAccessorsAccessSetDeep(t *testing.T) {
--
--	current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}}
--
--	access(current, "name.first", "Mat", true, true)
--	access(current, "name.last", "Ryer", true, true)
--
--	assert.Equal(t, "Mat", access(current, "name.first", nil, false, true))
--	assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true))
--
--}
--func TestAccessorsAccessSetDeepDeep(t *testing.T) {
--
--	current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}}
--
--	access(current, "one.two.three.four", 5, true, true)
--
--	assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true))
--
--}
--func TestAccessorsAccessSetArray(t *testing.T) {
--
--	current := map[string]interface{}{"names": []interface{}{"Tyler"}}
--
--	access(current, "names[0]", "Mat", true, true)
--
--	assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true))
--
--}
--func TestAccessorsAccessSetInsideArray(t *testing.T) {
--
--	current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}}
--
--	access(current, "names[0].first", "Mat", true, true)
--	access(current, "names[0].last", "Ryer", true, true)
--	access(current, "names[1].first", "Captain", true, true)
--	access(current, "names[1].last", "Underpants", true, true)
--
--	assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true))
--	assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true))
--	assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true))
--	assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true))
--
--}
--
--func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) {
--
--	current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}
--	one := access(current, 0, nil, false, false)
--	two := access(current, 1, nil, false, false)
--	three := access(current, 2, nil, false, false)
--
--	assert.Equal(t, "Tyler", one.(map[string]interface{})["first"])
--	assert.Equal(t, "Capitol", two.(map[string]interface{})["first"])
--	assert.Nil(t, three)
--
--}
--
--func TestAccessorsSet(t *testing.T) {
--
--	current := New(map[string]interface{}{"name": "Tyler"})
--	current.Set("name", "Mat")
--	assert.Equal(t, "Mat", current.Get("name").data)
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt
-deleted file mode 100644
-index 3060234..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/array-access.txt
-+++ /dev/null
-@@ -1,14 +0,0 @@
--  case []{1}:
--    a := object.([]{1})
--    if isSet {
--      a[index] = value.({1})
--    } else {
--      if index >= len(a) {
--        if panics {
--          panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a)))
--        }
--        return nil
--      } else {
--        return a[index]
--      }
--    }
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html
-deleted file mode 100644
-index 379ffc3..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/index.html
-+++ /dev/null
-@@ -1,86 +0,0 @@
--<html>
--	<head>
--	<title>
--		Codegen
--	</title>
--	<style>
--		body {
--			width: 800px;
--			margin: auto;
--		}
--		textarea {
--			width: 100%;
--			min-height: 100px;
--			font-family: Courier;
--		}
--	</style>
--	</head>
--	<body>
--
--		<h2>
--			Template
--		</h2>
--		<p>
--			Use <code>{x}</code> as a placeholder for each argument.
--		</p>
--		<textarea id="template"></textarea>
--
--		<h2>
--			Arguments (comma separated)
--		</h2>
--		<p>
--			One block per line
--		</p>
--		<textarea id="args"></textarea>
--
--		<h2>
--			Output
--		</h2>
--		<input id="go" type="button" value="Generate code" />
--
--		<textarea id="output"></textarea>
--
--		<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
--		<script>
--
--			$(function(){
--
--				$("#go").click(function(){
--
--					var output = ""
--					var template = $("#template").val()
--					var args = $("#args").val()
--
--					// collect the args
--					var argLines = args.split("\n")
--					for (var line in argLines) {
--
--						var argLine = argLines[line];
--						var thisTemp = template
--
--						// get individual args
--						var args = argLine.split(",")
--
--						for (var argI in args) {
--							var argText = args[argI];
--							var argPlaceholder = "{" + argI + "}";
--
--							while (thisTemp.indexOf(argPlaceholder) > -1) {
--								thisTemp = thisTemp.replace(argPlaceholder, argText);
--							}
--
--						}
--
--						output += thisTemp
--
--					}
--
--					$("#output").val(output);
--
--				});
--
--			});
--
--		</script>
--	</body>
--</html>
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt
-deleted file mode 100644
-index b396900..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/template.txt
-+++ /dev/null
-@@ -1,286 +0,0 @@
--/*
--	{4} ({1} and []{1})
--	--------------------------------------------------
--*/
--
--// {4} gets the value as a {1}, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) {4}(optionalDefault ...{1}) {1} {
--	if s, ok := v.data.({1}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return {3}
--}
--
--// Must{4} gets the value as a {1}.
--//
--// Panics if the object is not a {1}.
--func (v *Value) Must{4}() {1} {
--	return v.data.({1})
--}
--
--// {4}Slice gets the value as a []{1}, returns the optionalDefault
--// value or nil if the value is not a []{1}.
--func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} {
--	if s, ok := v.data.([]{1}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// Must{4}Slice gets the value as a []{1}.
--//
--// Panics if the object is not a []{1}.
--func (v *Value) Must{4}Slice() []{1} {
--	return v.data.([]{1})
--}
--
--// Is{4} gets whether the object contained is a {1} or not.
--func (v *Value) Is{4}() bool {
--  _, ok := v.data.({1})
--  return ok
--}
--
--// Is{4}Slice gets whether the object contained is a []{1} or not.
--func (v *Value) Is{4}Slice() bool {
--	_, ok := v.data.([]{1})
--	return ok
--}
--
--// Each{4} calls the specified callback for each object
--// in the []{1}.
--//
--// Panics if the object is the wrong type.
--func (v *Value) Each{4}(callback func(int, {1}) bool) *Value {
--
--	for index, val := range v.Must{4}Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// Where{4} uses the specified decider function to select items
--// from the []{1}.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) Where{4}(decider func(int, {1}) bool) *Value {
--
--	var selected []{1}
--
--	v.Each{4}(func(index int, val {1}) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data:selected}
--
--}
--
--// Group{4} uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]{1}.
--func (v *Value) Group{4}(grouper func(int, {1}) string) *Value {
--
--	groups := make(map[string][]{1})
--
--	v.Each{4}(func(index int, val {1}) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]{1}, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data:groups}
--
--}
--
--// Replace{4} uses the specified function to replace each {1}s
--// by iterating each item.  The data in the returned result will be a
--// []{1} containing the replaced items.
--func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value {
--
--	arr := v.Must{4}Slice()
--	replaced := make([]{1}, len(arr))
--
--	v.Each{4}(func(index int, val {1}) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data:replaced}
--
--}
--
--// Collect{4} uses the specified collector function to collect a value
--// for each of the {1}s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value {
--
--	arr := v.Must{4}Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.Each{4}(func(index int, val {1}) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data:collected}
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func Test{4}(t *testing.T) {
--
--  val := {1}( {2} )
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").{4}())
--	assert.Equal(t, val, New(m).Get("value").Must{4}())
--	assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}())
--	assert.Equal(t, val, New(m).Get("nothing").{4}({2}))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").Must{4}()
--	})
--
--}
--
--func Test{4}Slice(t *testing.T) {
--
--  val := {1}( {2} )
--	m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").{4}Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0])
--	assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice())
--	assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").Must{4}Slice()
--	})
--
--}
--
--func TestIs{4}(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: {1}({2})}
--	assert.True(t, v.Is{4}())
--
--	v = &Value{data: []{1}{ {1}({2}) }}
--	assert.True(t, v.Is{4}Slice())
--
--}
--
--func TestEach{4}(t *testing.T) {
--
--	v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
--	count := 0
--	replacedVals := make([]{1}, 0)
--	assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0])
--	assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1])
--	assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2])
--
--}
--
--func TestWhere{4}(t *testing.T) {
--
--	v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
--
--	selected := v.Where{4}(func(i int, val {1}) bool {
--		return i%2==0
--	}).Must{4}Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroup{4}(t *testing.T) {
--
--	v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
--
--	grouped := v.Group{4}(func(i int, val {1}) string {
--		return fmt.Sprintf("%v", i%2==0)
--	}).data.(map[string][]{1})
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplace{4}(t *testing.T) {
--
--	v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
--
--	rawArr := v.Must{4}Slice()
--
--	replaced := v.Replace{4}(func(index int, val {1}) {1} {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.Must{4}Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollect{4}(t *testing.T) {
--
--	v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }}
--
--	collected := v.Collect{4}(func(index int, val {1}) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt b/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt
-deleted file mode 100644
-index 069d43d..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/codegen/types_list.txt
-+++ /dev/null
-@@ -1,20 +0,0 @@
--Interface,interface{},"something",nil,Inter
--Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI
--ObjxMap,(Map),New(1),New(nil),ObjxMap
--Bool,bool,true,false,Bool
--String,string,"hello","",Str
--Int,int,1,0,Int
--Int8,int8,1,0,Int8
--Int16,int16,1,0,Int16
--Int32,int32,1,0,Int32
--Int64,int64,1,0,Int64
--Uint,uint,1,0,Uint
--Uint8,uint8,1,0,Uint8
--Uint16,uint16,1,0,Uint16
--Uint32,uint32,1,0,Uint32
--Uint64,uint64,1,0,Uint64
--Uintptr,uintptr,1,0,Uintptr
--Float32,float32,1,0,Float32
--Float64,float64,1,0,Float64
--Complex64,complex64,1,0,Complex64
--Complex128,complex128,1,0,Complex128
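The codegen files removed above are a tiny generator: each line of types_list.txt supplies comma-separated arguments, and template.txt references them as {1} (the Go type), {2} (a sample value), {3} (the zero value) and {4} (the method name); index.html performs the substitution in the browser with jQuery. Purely as an illustration, a rough Go equivalent of that substitution step (my own reconstruction, not code from the repository, and it inherits the naive comma split used by index.html):

package main

import (
    "fmt"
    "log"
    "os"
    "strings"
)

// Substitute {N} placeholders in template.txt with the N-th
// comma-separated field of each line in types_list.txt, the same
// thing codegen/index.html does with jQuery.
func main() {
    tmpl, err := os.ReadFile("template.txt")
    if err != nil {
        log.Fatal(err)
    }
    types, err := os.ReadFile("types_list.txt")
    if err != nil {
        log.Fatal(err)
    }

    var out strings.Builder
    for _, line := range strings.Split(strings.TrimSpace(string(types)), "\n") {
        args := strings.Split(line, ",") // naive split, as in index.html
        block := string(tmpl)
        for i, arg := range args {
            block = strings.ReplaceAll(block, fmt.Sprintf("{%d}", i), arg)
        }
        out.WriteString(block)
    }
    fmt.Print(out.String())
}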
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go b/Godeps/_workspace/src/github.com/stretchr/objx/constants.go
-deleted file mode 100644
-index f9eb42a..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/constants.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package objx
--
--const (
--	// PathSeparator is the character used to separate the elements
--	// of the keypath.
--	//
--	// For example, `location.address.city`
--	PathSeparator string = "."
--
--	// SignatureSeparator is the character that is used to
--	// separate the Base64 string from the security signature.
--	SignatureSeparator = "_"
--)
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go
-deleted file mode 100644
-index 9cdfa9f..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/conversions.go
-+++ /dev/null
-@@ -1,117 +0,0 @@
--package objx
--
--import (
--	"bytes"
--	"encoding/base64"
--	"encoding/json"
--	"errors"
--	"fmt"
--	"net/url"
--)
--
--// JSON converts the contained object to a JSON string
--// representation
--func (m Map) JSON() (string, error) {
--
--	result, err := json.Marshal(m)
--
--	if err != nil {
--		err = errors.New("objx: JSON encode failed with: " + err.Error())
--	}
--
--	return string(result), err
--
--}
--
--// MustJSON converts the contained object to a JSON string
--// representation and panics if there is an error
--func (m Map) MustJSON() string {
--	result, err := m.JSON()
--	if err != nil {
--		panic(err.Error())
--	}
--	return result
--}
--
--// Base64 converts the contained object to a Base64 string
--// representation of the JSON string representation
--func (m Map) Base64() (string, error) {
--
--	var buf bytes.Buffer
--
--	jsonData, err := m.JSON()
--	if err != nil {
--		return "", err
--	}
--
--	encoder := base64.NewEncoder(base64.StdEncoding, &buf)
--	encoder.Write([]byte(jsonData))
--	encoder.Close()
--
--	return buf.String(), nil
--
--}
--
--// MustBase64 converts the contained object to a Base64 string
--// representation of the JSON string representation and panics
--// if there is an error
--func (m Map) MustBase64() string {
--	result, err := m.Base64()
--	if err != nil {
--		panic(err.Error())
--	}
--	return result
--}
--
--// SignedBase64 converts the contained object to a Base64 string
--// representation of the JSON string representation and signs it
--// using the provided key.
--func (m Map) SignedBase64(key string) (string, error) {
--
--	base64, err := m.Base64()
--	if err != nil {
--		return "", err
--	}
--
--	sig := HashWithKey(base64, key)
--
--	return base64 + SignatureSeparator + sig, nil
--
--}
--
--// MustSignedBase64 converts the contained object to a Base64 string
--// representation of the JSON string representation and signs it
--// using the provided key and panics if there is an error
--func (m Map) MustSignedBase64(key string) string {
--	result, err := m.SignedBase64(key)
--	if err != nil {
--		panic(err.Error())
--	}
--	return result
--}
--
--/*
--	URL Query
--	------------------------------------------------
--*/
--
--// URLValues creates a url.Values object from an Obj. This
--// function requires that the wrapped object be a map[string]interface{}
--func (m Map) URLValues() url.Values {
--
--	vals := make(url.Values)
--
--	for k, v := range m {
--		//TODO: can this be done without sprintf?
--		vals.Set(k, fmt.Sprintf("%v", v))
--	}
--
--	return vals
--}
--
--// URLQuery gets an encoded URL query representing the given
--// Obj. This function requires that the wrapped object be a
--// map[string]interface{}
--func (m Map) URLQuery() (string, error) {
--	return m.URLValues().Encode(), nil
--}
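The conversion helpers above chain naturally: JSON, then Base64, then a signed form whose suffix is HashWithKey over the Base64 payload (see security.go further down in this diff). A short round-trip sketch; the key "key" and the data are only illustrative:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    m := objx.New(map[string]interface{}{"name": "Mat"})

    // Plain JSON and its Base64 encoding.
    fmt.Println(m.MustJSON())   // {"name":"Mat"}
    fmt.Println(m.MustBase64()) // eyJuYW1lIjoiTWF0In0=

    // SignedBase64 appends "_" plus HashWithKey(base64, key).
    signed := m.MustSignedBase64("key")
    fmt.Println(signed)

    // FromSignedBase64 verifies the signature before decoding.
    back, err := objx.FromSignedBase64(signed, "key")
    if err != nil {
        panic(err)
    }
    fmt.Println(back.Get("name").Str()) // Mat
}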
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go
-deleted file mode 100644
-index e9ccd29..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/conversions_test.go
-+++ /dev/null
-@@ -1,94 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestConversionJSON(t *testing.T) {
--
--	jsonString := `{"name":"Mat"}`
--	o := MustFromJSON(jsonString)
--
--	result, err := o.JSON()
--
--	if assert.NoError(t, err) {
--		assert.Equal(t, jsonString, result)
--	}
--
--	assert.Equal(t, jsonString, o.MustJSON())
--
--}
--
--func TestConversionJSONWithError(t *testing.T) {
--
--	o := MSI()
--	o["test"] = func() {}
--
--	assert.Panics(t, func() {
--		o.MustJSON()
--	})
--
--	_, err := o.JSON()
--
--	assert.Error(t, err)
--
--}
--
--func TestConversionBase64(t *testing.T) {
--
--	o := New(map[string]interface{}{"name": "Mat"})
--
--	result, err := o.Base64()
--
--	if assert.NoError(t, err) {
--		assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result)
--	}
--
--	assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64())
--
--}
--
--func TestConversionBase64WithError(t *testing.T) {
--
--	o := MSI()
--	o["test"] = func() {}
--
--	assert.Panics(t, func() {
--		o.MustBase64()
--	})
--
--	_, err := o.Base64()
--
--	assert.Error(t, err)
--
--}
--
--func TestConversionSignedBase64(t *testing.T) {
--
--	o := New(map[string]interface{}{"name": "Mat"})
--
--	result, err := o.SignedBase64("key")
--
--	if assert.NoError(t, err) {
--		assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result)
--	}
--
--	assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key"))
--
--}
--
--func TestConversionSignedBase64WithError(t *testing.T) {
--
--	o := MSI()
--	o["test"] = func() {}
--
--	assert.Panics(t, func() {
--		o.MustSignedBase64("key")
--	})
--
--	_, err := o.SignedBase64("key")
--
--	assert.Error(t, err)
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go b/Godeps/_workspace/src/github.com/stretchr/objx/doc.go
-deleted file mode 100644
-index 47bf85e..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/doc.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--// objx - Go package for dealing with maps, slices, JSON and other data.
--//
--// Overview
--//
--// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
--// a powerful `Get` method (among others) that allows you to easily and quickly get
--// access to data within the map, without having to worry too much about type assertions,
--// missing data, default values etc.
--//
--// Pattern
--//
--// Objx uses a predictable pattern to make accessing data from within `map[string]interface{}`'s
--// easy.
--//
--// Call one of the `objx.` functions to create your `objx.Map` to get going:
--//
--//     m, err := objx.FromJSON(json)
--//
--// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
--// the rest will be optimistic and try to figure things out without panicking.
--//
--// Use `Get` to access the value you're interested in.  You can use dot and array
--// notation too:
--//
--//     m.Get("places[0].latlng")
--//
--// Once you have sought the `Value` you're interested in, you can use the `Is*` methods
--// to determine its type.
--//
--//     if m.Get("code").IsStr() { /* ... */ }
--//
--// Or you can just assume the type, and use one of the strong type methods to
--// extract the real value:
--//
--//     m.Get("code").Int()
--//
--// If there's no value there (or if it's the wrong type) then a default value
--// will be returned, or you can be explicit about the default value.
--//
--//     Get("code").Int(-1)
--//
--// If you're dealing with a slice of data as a value, Objx provides many useful
--// methods for iterating, manipulating and selecting that data.  You can find out more
--// by exploring the index below.
--//
--// Reading data
--//
--// A simple example of how to use Objx:
--//
--//     // use MustFromJSON to make an objx.Map from some JSON
--//     m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
--//
--//     // get the details
--//     name := m.Get("name").Str()
--//     age := m.Get("age").Int()
--//
--//     // get their nickname (or use their name if they
--//     // don't have one)
--//     nickname := m.Get("nickname").Str(name)
--//
--// Ranging
--//
--// Since `objx.Map` is a `map[string]interface{}` you can treat it as such.  For
--// example, to `range` the data, do what you would expect:
--//
--//     m := objx.MustFromJSON(json)
--//     for key, value := range m {
--//
--//       /* ... do your magic ... */
--//
--//     }
--package objx
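Tying the documented pattern together, a small self-contained sketch; the JSON document is invented, and note that encoding/json decodes numbers into float64, so the strict Int assertion falls back to the supplied default:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    m := objx.MustFromJSON(`{"name": "Mat", "age": 30, "places": ["Boulder"]}`)

    name := m.Get("name").Str()
    nickname := m.Get("nickname").Str(name) // missing key: fall back to name
    age := m.Get("age").Int(-1)             // data is float64 here, so -1 is returned

    // Check the type before assuming anything about slices.
    if m.Get("places").IsInterSlice() {
        fmt.Println(m.Get("places[0]").Str()) // Boulder
    }

    fmt.Println(name, nickname, age) // Mat Mat -1
}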
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go
-deleted file mode 100644
-index 27f7d90..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/fixture_test.go
-+++ /dev/null
-@@ -1,98 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--var fixtures = []struct {
--	// name is the name of the fixture (used for reporting
--	// failures)
--	name string
--	// data is the JSON data to be worked on
--	data string
--	// get is the argument(s) to pass to Get
--	get interface{}
--	// output is the expected output
--	output interface{}
--}{
--	{
--		name:   "Simple get",
--		data:   `{"name": "Mat"}`,
--		get:    "name",
--		output: "Mat",
--	},
--	{
--		name:   "Get with dot notation",
--		data:   `{"address": {"city": "Boulder"}}`,
--		get:    "address.city",
--		output: "Boulder",
--	},
--	{
--		name:   "Deep get with dot notation",
--		data:   `{"one": {"two": {"three": {"four": "hello"}}}}`,
--		get:    "one.two.three.four",
--		output: "hello",
--	},
--	{
--		name:   "Get missing with dot notation",
--		data:   `{"one": {"two": {"three": {"four": "hello"}}}}`,
--		get:    "one.ten",
--		output: nil,
--	},
--	{
--		name:   "Get with array notation",
--		data:   `{"tags": ["one", "two", "three"]}`,
--		get:    "tags[1]",
--		output: "two",
--	},
--	{
--		name:   "Get with array and dot notation",
--		data:   `{"types": { "tags": ["one", "two", "three"]}}`,
--		get:    "types.tags[1]",
--		output: "two",
--	},
--	{
--		name:   "Get with array and dot notation - field after array",
--		data:   `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`,
--		get:    "tags[1].name",
--		output: "two",
--	},
--	{
--		name:   "Complex get with array and dot notation",
--		data:   `{"tags": [{"list": [{"one":"pizza"}]}]}`,
--		get:    "tags[0].list[0].one",
--		output: "pizza",
--	},
--	{
--		name:   "Get field from within string should be nil",
--		data:   `{"name":"Tyler"}`,
--		get:    "name.something",
--		output: nil,
--	},
--	{
--		name:   "Get field from within string (using array accessor) should be nil",
--		data:   `{"numbers":["one", "two", "three"]}`,
--		get:    "numbers[0].nope",
--		output: nil,
--	},
--}
--
--func TestFixtures(t *testing.T) {
--
--	for _, fixture := range fixtures {
--
--		m := MustFromJSON(fixture.data)
--
--		// get the value
--		t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture)
--		value := m.Get(fixture.get.(string))
--
--		// make sure it matches
--		assert.Equal(t, fixture.output, value.data,
--			"Get fixture \"%s\" failed: %v", fixture.name, fixture,
--		)
--
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map.go b/Godeps/_workspace/src/github.com/stretchr/objx/map.go
-deleted file mode 100644
-index eb6ed8e..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/map.go
-+++ /dev/null
-@@ -1,222 +0,0 @@
--package objx
--
--import (
--	"encoding/base64"
--	"encoding/json"
--	"errors"
--	"io/ioutil"
--	"net/url"
--	"strings"
--)
--
--// MSIConvertable is an interface that defines methods for converting your
--// custom types to a map[string]interface{} representation.
--type MSIConvertable interface {
--	// MSI gets a map[string]interface{} (msi) representing the
--	// object.
--	MSI() map[string]interface{}
--}
--
--// Map provides extended functionality for working with
--// untyped data, in particular map[string]interface (msi).
--type Map map[string]interface{}
--
--// Value returns the internal value instance
--func (m Map) Value() *Value {
--	return &Value{data: m}
--}
--
--// Nil represents a nil Map.
--var Nil Map = New(nil)
--
--// New creates a new Map containing the map[string]interface{} in the data argument.
--// If the data argument is not a map[string]interface{}, New attempts to call the
--// MSI() method on the MSIConvertable interface to create one.
--func New(data interface{}) Map {
--	if _, ok := data.(map[string]interface{}); !ok {
--		if converter, ok := data.(MSIConvertable); ok {
--			data = converter.MSI()
--		} else {
--			return nil
--		}
--	}
--	return Map(data.(map[string]interface{}))
--}
--
--// MSI creates a map[string]interface{} and puts it inside a new Map.
--//
--// The arguments follow a key, value pattern.
--//
--// Panics
--//
--// Panics if any key argument is non-string or if there is an odd number of arguments.
--//
--// Example
--//
--// To easily create Maps:
--//
--//     m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
--//
--//     // creates an Map equivalent to
--//     m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}})
--func MSI(keyAndValuePairs ...interface{}) Map {
--
--	newMap := make(map[string]interface{})
--	keyAndValuePairsLen := len(keyAndValuePairs)
--
--	if keyAndValuePairsLen%2 != 0 {
--		panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.")
--	}
--
--	for i := 0; i < keyAndValuePairsLen; i = i + 2 {
--
--		key := keyAndValuePairs[i]
--		value := keyAndValuePairs[i+1]
--
--		// make sure the key is a string
--		keyString, keyStringOK := key.(string)
--		if !keyStringOK {
--			panic("objx: MSI must follow 'string, interface{}' pattern.  " + keyString + " is not a valid key.")
--		}
--
--		newMap[keyString] = value
--
--	}
--
--	return New(newMap)
--}
--
--// ****** Conversion Constructors
--
--// MustFromJSON creates a new Map containing the data specified in the
--// jsonString.
--//
--// Panics if the JSON is invalid.
--func MustFromJSON(jsonString string) Map {
--	o, err := FromJSON(jsonString)
--
--	if err != nil {
--		panic("objx: MustFromJSON failed with error: " + err.Error())
--	}
--
--	return o
--}
--
--// FromJSON creates a new Map containing the data specified in the
--// jsonString.
--//
--// Returns an error if the JSON is invalid.
--func FromJSON(jsonString string) (Map, error) {
--
--	var data interface{}
--	err := json.Unmarshal([]byte(jsonString), &data)
--
--	if err != nil {
--		return Nil, err
--	}
--
--	return New(data), nil
--
--}
--
--// FromBase64 creates a new Obj containing the data specified
--// in the Base64 string.
--//
--// The string is an encoded JSON string returned by Base64
--func FromBase64(base64String string) (Map, error) {
--
--	decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
--
--	decoded, err := ioutil.ReadAll(decoder)
--	if err != nil {
--		return nil, err
--	}
--
--	return FromJSON(string(decoded))
--}
--
--// MustFromBase64 creates a new Obj containing the data specified
--// in the Base64 string and panics if there is an error.
--//
--// The string is an encoded JSON string returned by Base64
--func MustFromBase64(base64String string) Map {
--
--	result, err := FromBase64(base64String)
--
--	if err != nil {
--		panic("objx: MustFromBase64 failed with error: " + err.Error())
--	}
--
--	return result
--}
--
--// FromSignedBase64 creates a new Obj containing the data specified
--// in the Base64 string.
--//
--// The string is an encoded JSON string returned by SignedBase64
--func FromSignedBase64(base64String, key string) (Map, error) {
--	parts := strings.Split(base64String, SignatureSeparator)
--	if len(parts) != 2 {
--		return nil, errors.New("objx: Signed base64 string is malformed.")
--	}
--
--	sig := HashWithKey(parts[0], key)
--	if parts[1] != sig {
--		return nil, errors.New("objx: Signature for base64 data does not match.")
--	}
--
--	return FromBase64(parts[0])
--}
--
--// MustFromSignedBase64 creates a new Obj containing the data specified
--// in the Base64 string and panics if there is an error.
--//
--// The string is an encoded JSON string returned by Base64
--func MustFromSignedBase64(base64String, key string) Map {
--
--	result, err := FromSignedBase64(base64String, key)
--
--	if err != nil {
--		panic("objx: MustFromSignedBase64 failed with error: " + err.Error())
--	}
--
--	return result
--}
--
--// FromURLQuery generates a new Obj by parsing the specified
--// query.
--//
--// For queries with multiple values, the first value is selected.
--func FromURLQuery(query string) (Map, error) {
--
--	vals, err := url.ParseQuery(query)
--
--	if err != nil {
--		return nil, err
--	}
--
--	m := make(map[string]interface{})
--	for k, vals := range vals {
--		m[k] = vals[0]
--	}
--
--	return New(m), nil
--}
--
--// MustFromURLQuery generates a new Obj by parsing the specified
--// query.
--//
--// For queries with multiple values, the first value is selected.
--//
--// Panics if it encounters an error
--func MustFromURLQuery(query string) Map {
--
--	o, err := FromURLQuery(query)
--
--	if err != nil {
--		panic("objx: MustFromURLQuery failed with error: " + err.Error())
--	}
--
--	return o
--
--}
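Beyond the JSON and Base64 constructors, the file above also provides MSI (alternating key/value arguments) and FromURLQuery. A brief sketch with invented data:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    // MSI takes alternating key/value arguments and panics on a
    // non-string key or an odd argument count.
    m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
    fmt.Println(m.Get("subobj.active").Bool()) // true

    // FromURLQuery keeps the first value for each key.
    q, err := objx.FromURLQuery("name=tyler&state=UT&state=CO")
    if err != nil {
        panic(err)
    }
    fmt.Println(q.Get("state").Str()) // UT
}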
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go
-deleted file mode 100644
-index 6beb506..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/map_for_test.go
-+++ /dev/null
-@@ -1,10 +0,0 @@
--package objx
--
--var TestMap map[string]interface{} = map[string]interface{}{
--	"name": "Tyler",
--	"address": map[string]interface{}{
--		"city":  "Salt Lake City",
--		"state": "UT",
--	},
--	"numbers": []interface{}{"one", "two", "three", "four", "five"},
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go
-deleted file mode 100644
-index 1f8b45c..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/map_test.go
-+++ /dev/null
-@@ -1,147 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--type Convertable struct {
--	name string
--}
--
--func (c *Convertable) MSI() map[string]interface{} {
--	return map[string]interface{}{"name": c.name}
--}
--
--type Unconvertable struct {
--	name string
--}
--
--func TestMapCreation(t *testing.T) {
--
--	o := New(nil)
--	assert.Nil(t, o)
--
--	o = New("Tyler")
--	assert.Nil(t, o)
--
--	unconvertable := &Unconvertable{name: "Tyler"}
--	o = New(unconvertable)
--	assert.Nil(t, o)
--
--	convertable := &Convertable{name: "Tyler"}
--	o = New(convertable)
--	if assert.NotNil(t, convertable) {
--		assert.Equal(t, "Tyler", o["name"], "Tyler")
--	}
--
--	o = MSI()
--	if assert.NotNil(t, o) {
--		assert.NotNil(t, o)
--	}
--
--	o = MSI("name", "Tyler")
--	if assert.NotNil(t, o) {
--		if assert.NotNil(t, o) {
--			assert.Equal(t, o["name"], "Tyler")
--		}
--	}
--
--}
--
--func TestMapMustFromJSONWithError(t *testing.T) {
--
--	_, err := FromJSON(`"name":"Mat"}`)
--	assert.Error(t, err)
--
--}
--
--func TestMapFromJSON(t *testing.T) {
--
--	o := MustFromJSON(`{"name":"Mat"}`)
--
--	if assert.NotNil(t, o) {
--		if assert.NotNil(t, o) {
--			assert.Equal(t, "Mat", o["name"])
--		}
--	}
--
--}
--
--func TestMapFromJSONWithError(t *testing.T) {
--
--	var m Map
--
--	assert.Panics(t, func() {
--		m = MustFromJSON(`"name":"Mat"}`)
--	})
--
--	assert.Nil(t, m)
--
--}
--
--func TestMapFromBase64String(t *testing.T) {
--
--	base64String := "eyJuYW1lIjoiTWF0In0="
--
--	o, err := FromBase64(base64String)
--
--	if assert.NoError(t, err) {
--		assert.Equal(t, o.Get("name").Str(), "Mat")
--	}
--
--	assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat")
--
--}
--
--func TestMapFromBase64StringWithError(t *testing.T) {
--
--	base64String := "eyJuYW1lIjoiTWFasd0In0="
--
--	_, err := FromBase64(base64String)
--
--	assert.Error(t, err)
--
--	assert.Panics(t, func() {
--		MustFromBase64(base64String)
--	})
--
--}
--
--func TestMapFromSignedBase64String(t *testing.T) {
--
--	base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
--
--	o, err := FromSignedBase64(base64String, "key")
--
--	if assert.NoError(t, err) {
--		assert.Equal(t, o.Get("name").Str(), "Mat")
--	}
--
--	assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat")
--
--}
--
--func TestMapFromSignedBase64StringWithError(t *testing.T) {
--
--	base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6"
--
--	_, err := FromSignedBase64(base64String, "key")
--
--	assert.Error(t, err)
--
--	assert.Panics(t, func() {
--		MustFromSignedBase64(base64String, "key")
--	})
--
--}
--
--func TestMapFromURLQuery(t *testing.T) {
--
--	m, err := FromURLQuery("name=tyler&state=UT")
--	if assert.NoError(t, err) && assert.NotNil(t, m) {
--		assert.Equal(t, "tyler", m.Get("name").Str())
--		assert.Equal(t, "UT", m.Get("state").Str())
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go
-deleted file mode 100644
-index b35c863..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/mutations.go
-+++ /dev/null
-@@ -1,81 +0,0 @@
--package objx
--
--// Exclude returns a new Map with the keys in the specified []string
--// excluded.
--func (d Map) Exclude(exclude []string) Map {
--
--	excluded := make(Map)
--	for k, v := range d {
--		var shouldInclude bool = true
--		for _, toExclude := range exclude {
--			if k == toExclude {
--				shouldInclude = false
--				break
--			}
--		}
--		if shouldInclude {
--			excluded[k] = v
--		}
--	}
--
--	return excluded
--}
--
--// Copy creates a shallow copy of the Obj.
--func (m Map) Copy() Map {
--	copied := make(map[string]interface{})
--	for k, v := range m {
--		copied[k] = v
--	}
--	return New(copied)
--}
--
--// Merge blends the specified map with a copy of this map and returns the result.
--//
--// Keys that appear in both will be selected from the specified map.
--// This method requires that the wrapped object be a map[string]interface{}
--func (m Map) Merge(merge Map) Map {
--	return m.Copy().MergeHere(merge)
--}
--
--// MergeHere blends the specified map with this map and returns the current map.
--//
--// Keys that appear in both will be selected from the specified map.  The original map
--// will be modified. This method requires that
--// the wrapped object be a map[string]interface{}
--func (m Map) MergeHere(merge Map) Map {
--
--	for k, v := range merge {
--		m[k] = v
--	}
--
--	return m
--
--}
--
--// Transform builds a new Obj giving the transformer a chance
--// to change the keys and values as it goes. This method requires that
--// the wrapped object be a map[string]interface{}
--func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
--	newMap := make(map[string]interface{})
--	for k, v := range m {
--		modifiedKey, modifiedVal := transformer(k, v)
--		newMap[modifiedKey] = modifiedVal
--	}
--	return New(newMap)
--}
--
--// TransformKeys builds a new map using the specified key mapping.
--//
--// Unspecified keys will be unaltered.
--// This method requires that the wrapped object be a map[string]interface{}
--func (m Map) TransformKeys(mapping map[string]string) Map {
--	return m.Transform(func(key string, value interface{}) (string, interface{}) {
--
--		if newKey, ok := mapping[key]; ok {
--			return newKey, value
--		}
--
--		return key, value
--	})
--}
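The only difference between Merge and MergeHere above is whether the receiver is copied first; a small sketch with invented data:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    base := objx.MSI("name", "Mat")
    extra := objx.MSI("name", "Tyler", "location", "UT")

    // Merge works on a copy; keys present in both come from the argument.
    merged := base.Merge(extra)
    fmt.Println(merged.Get("name").Str())   // Tyler
    fmt.Println(base.Get("location").Str()) // "" (base is untouched)

    // MergeHere mutates and returns the receiver itself.
    base.MergeHere(extra)
    fmt.Println(base.Get("location").Str()) // UT
}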
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go
-deleted file mode 100644
-index e20ee23..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/mutations_test.go
-+++ /dev/null
-@@ -1,77 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestExclude(t *testing.T) {
--
--	d := make(Map)
--	d["name"] = "Mat"
--	d["age"] = 29
--	d["secret"] = "ABC"
--
--	excluded := d.Exclude([]string{"secret"})
--
--	assert.Equal(t, d["name"], excluded["name"])
--	assert.Equal(t, d["age"], excluded["age"])
--	assert.False(t, excluded.Has("secret"), "secret should be excluded")
--
--}
--
--func TestCopy(t *testing.T) {
--
--	d1 := make(map[string]interface{})
--	d1["name"] = "Tyler"
--	d1["location"] = "UT"
--
--	d1Obj := New(d1)
--	d2Obj := d1Obj.Copy()
--
--	d2Obj["name"] = "Mat"
--
--	assert.Equal(t, d1Obj.Get("name").Str(), "Tyler")
--	assert.Equal(t, d2Obj.Get("name").Str(), "Mat")
--
--}
--
--func TestMerge(t *testing.T) {
--
--	d := make(map[string]interface{})
--	d["name"] = "Mat"
--
--	d1 := make(map[string]interface{})
--	d1["name"] = "Tyler"
--	d1["location"] = "UT"
--
--	dObj := New(d)
--	d1Obj := New(d1)
--
--	merged := dObj.Merge(d1Obj)
--
--	assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
--	assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
--	assert.Empty(t, dObj.Get("location").Str())
--
--}
--
--func TestMergeHere(t *testing.T) {
--
--	d := make(map[string]interface{})
--	d["name"] = "Mat"
--
--	d1 := make(map[string]interface{})
--	d1["name"] = "Tyler"
--	d1["location"] = "UT"
--
--	dObj := New(d)
--	d1Obj := New(d1)
--
--	merged := dObj.MergeHere(d1Obj)
--
--	assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map")
--	assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str())
--	assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str())
--	assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str())
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security.go b/Godeps/_workspace/src/github.com/stretchr/objx/security.go
-deleted file mode 100644
-index fdd6be9..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/security.go
-+++ /dev/null
-@@ -1,14 +0,0 @@
--package objx
--
--import (
--	"crypto/sha1"
--	"encoding/hex"
--)
--
--// HashWithKey hashes the specified string using the security
--// key.
--func HashWithKey(data, key string) string {
--	hash := sha1.New()
--	hash.Write([]byte(data + ":" + key))
--	return hex.EncodeToString(hash.Sum(nil))
--}
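For reference, this is the whole signature scheme behind SignedBase64: SHA-1 over "data:key", hex encoded, appended after the "_" separator from constants.go. A standalone re-implementation sketch (my own code, shown only to make the construction concrete):

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
)

// hashWithKey mirrors objx.HashWithKey above: SHA-1 over "data:key",
// hex encoded.
func hashWithKey(data, key string) string {
    h := sha1.New()
    h.Write([]byte(data + ":" + key))
    return hex.EncodeToString(h.Sum(nil))
}

func main() {
    payload := "eyJuYW1lIjoiTWF0In0=" // base64 of {"name":"Mat"}
    sig := hashWithKey(payload, "key")
    // This is exactly the SignedBase64 form: payload, "_", signature.
    fmt.Println(payload + "_" + sig)
}

The resulting string matches the signed value asserted in conversions_test.go earlier in this diff.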
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go
-deleted file mode 100644
-index 8f0898f..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/security_test.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestHashWithKey(t *testing.T) {
--
--	assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def"))
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go
-deleted file mode 100644
-index 5408c7f..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/simple_example_test.go
-+++ /dev/null
-@@ -1,41 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestSimpleExample(t *testing.T) {
--
--	// build a map from a JSON object
--	o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`)
--
--	// Map can be used as a straight map[string]interface{}
--	assert.Equal(t, o["name"], "Mat")
--
--	// Get an Value object
--	v := o.Get("name")
--	assert.Equal(t, v, &Value{data: "Mat"})
--
--	// Test the contained value
--	assert.False(t, v.IsInt())
--	assert.False(t, v.IsBool())
--	assert.True(t, v.IsStr())
--
--	// Get the contained value
--	assert.Equal(t, v.Str(), "Mat")
--
--	// Get a default value if the contained value is not of the expected type or does not exist
--	assert.Equal(t, 1, v.Int(1))
--
--	// Get a value by using array notation
--	assert.Equal(t, "indian", o.Get("foods[0]").Data())
--
--	// Set a value by using array notation
--	o.Set("foods[0]", "italian")
--	assert.Equal(t, "italian", o.Get("foods[0]").Str())
--
--	// Get a value by using dot notation
--	assert.Equal(t, "hobbiton", o.Get("location.county").Str())
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests.go
-deleted file mode 100644
-index d9e0b47..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/tests.go
-+++ /dev/null
-@@ -1,17 +0,0 @@
--package objx
--
--// Has gets whether there is something at the specified selector
--// or not.
--//
--// If m is nil, Has will always return false.
--func (m Map) Has(selector string) bool {
--	if m == nil {
--		return false
--	}
--	return !m.Get(selector).IsNil()
--}
--
--// IsNil gets whether the data is nil or not.
--func (v *Value) IsNil() bool {
--	return v == nil || v.data == nil
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go
-deleted file mode 100644
-index bcc1eb0..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/tests_test.go
-+++ /dev/null
-@@ -1,24 +0,0 @@
--package objx
--
--import (
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--func TestHas(t *testing.T) {
--
--	m := New(TestMap)
--
--	assert.True(t, m.Has("name"))
--	assert.True(t, m.Has("address.state"))
--	assert.True(t, m.Has("numbers[4]"))
--
--	assert.False(t, m.Has("address.state.nope"))
--	assert.False(t, m.Has("address.nope"))
--	assert.False(t, m.Has("nope"))
--	assert.False(t, m.Has("numbers[5]"))
--
--	m = nil
--	assert.False(t, m.Has("nothing"))
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go
-deleted file mode 100644
-index f3ecb29..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen.go
-+++ /dev/null
-@@ -1,2881 +0,0 @@
--package objx
--
--/*
--	Inter (interface{} and []interface{})
--	--------------------------------------------------
--*/
--
--// Inter gets the value as a interface{}, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Inter(optionalDefault ...interface{}) interface{} {
--	if s, ok := v.data.(interface{}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInter gets the value as a interface{}.
--//
--// Panics if the object is not a interface{}.
--func (v *Value) MustInter() interface{} {
--	return v.data.(interface{})
--}
--
--// InterSlice gets the value as a []interface{}, returns the optionalDefault
--// value or nil if the value is not a []interface{}.
--func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} {
--	if s, ok := v.data.([]interface{}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInterSlice gets the value as a []interface{}.
--//
--// Panics if the object is not a []interface{}.
--func (v *Value) MustInterSlice() []interface{} {
--	return v.data.([]interface{})
--}
--
--// IsInter gets whether the object contained is a interface{} or not.
--func (v *Value) IsInter() bool {
--	_, ok := v.data.(interface{})
--	return ok
--}
--
--// IsInterSlice gets whether the object contained is a []interface{} or not.
--func (v *Value) IsInterSlice() bool {
--	_, ok := v.data.([]interface{})
--	return ok
--}
--
--// EachInter calls the specified callback for each object
--// in the []interface{}.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInter(callback func(int, interface{}) bool) *Value {
--
--	for index, val := range v.MustInterSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInter uses the specified decider function to select items
--// from the []interface{}.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value {
--
--	var selected []interface{}
--
--	v.EachInter(func(index int, val interface{}) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInter uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]interface{}.
--func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value {
--
--	groups := make(map[string][]interface{})
--
--	v.EachInter(func(index int, val interface{}) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]interface{}, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInter uses the specified function to replace each interface{}s
--// by iterating each item.  The data in the returned result will be a
--// []interface{} containing the replaced items.
--func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value {
--
--	arr := v.MustInterSlice()
--	replaced := make([]interface{}, len(arr))
--
--	v.EachInter(func(index int, val interface{}) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInter uses the specified collector function to collect a value
--// for each of the interface{}s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value {
--
--	arr := v.MustInterSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInter(func(index int, val interface{}) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	MSI (map[string]interface{} and []map[string]interface{})
--	--------------------------------------------------
--*/
--
--// MSI gets the value as a map[string]interface{}, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} {
--	if s, ok := v.data.(map[string]interface{}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustMSI gets the value as a map[string]interface{}.
--//
--// Panics if the object is not a map[string]interface{}.
--func (v *Value) MustMSI() map[string]interface{} {
--	return v.data.(map[string]interface{})
--}
--
--// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
--// value or nil if the value is not a []map[string]interface{}.
--func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
--	if s, ok := v.data.([]map[string]interface{}); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustMSISlice gets the value as a []map[string]interface{}.
--//
--// Panics if the object is not a []map[string]interface{}.
--func (v *Value) MustMSISlice() []map[string]interface{} {
--	return v.data.([]map[string]interface{})
--}
--
--// IsMSI gets whether the object contained is a map[string]interface{} or not.
--func (v *Value) IsMSI() bool {
--	_, ok := v.data.(map[string]interface{})
--	return ok
--}
--
--// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
--func (v *Value) IsMSISlice() bool {
--	_, ok := v.data.([]map[string]interface{})
--	return ok
--}
--
--// EachMSI calls the specified callback for each object
--// in the []map[string]interface{}.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
--
--	for index, val := range v.MustMSISlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereMSI uses the specified decider function to select items
--// from the []map[string]interface{}.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
--
--	var selected []map[string]interface{}
--
--	v.EachMSI(func(index int, val map[string]interface{}) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupMSI uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]map[string]interface{}.
--func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
--
--	groups := make(map[string][]map[string]interface{})
--
--	v.EachMSI(func(index int, val map[string]interface{}) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]map[string]interface{}, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceMSI uses the specified function to replace each map[string]interface{}s
--// by iterating each item.  The data in the returned result will be a
--// []map[string]interface{} containing the replaced items.
--func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
--
--	arr := v.MustMSISlice()
--	replaced := make([]map[string]interface{}, len(arr))
--
--	v.EachMSI(func(index int, val map[string]interface{}) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectMSI uses the specified collector function to collect a value
--// for each of the map[string]interface{}s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
--
--	arr := v.MustMSISlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachMSI(func(index int, val map[string]interface{}) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	ObjxMap ((Map) and [](Map))
--	--------------------------------------------------
--*/
--
--// ObjxMap gets the value as a (Map), returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
--	if s, ok := v.data.((Map)); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return New(nil)
--}
--
--// MustObjxMap gets the value as a (Map).
--//
--// Panics if the object is not a (Map).
--func (v *Value) MustObjxMap() Map {
--	return v.data.((Map))
--}
--
--// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
--// value or nil if the value is not a [](Map).
--func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
--	if s, ok := v.data.([](Map)); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustObjxMapSlice gets the value as a [](Map).
--//
--// Panics if the object is not a [](Map).
--func (v *Value) MustObjxMapSlice() [](Map) {
--	return v.data.([](Map))
--}
--
--// IsObjxMap gets whether the object contained is a (Map) or not.
--func (v *Value) IsObjxMap() bool {
--	_, ok := v.data.((Map))
--	return ok
--}
--
--// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
--func (v *Value) IsObjxMapSlice() bool {
--	_, ok := v.data.([](Map))
--	return ok
--}
--
--// EachObjxMap calls the specified callback for each object
--// in the [](Map).
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
--
--	for index, val := range v.MustObjxMapSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereObjxMap uses the specified decider function to select items
--// from the [](Map).  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
--
--	var selected [](Map)
--
--	v.EachObjxMap(func(index int, val Map) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupObjxMap uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][](Map).
--func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
--
--	groups := make(map[string][](Map))
--
--	v.EachObjxMap(func(index int, val Map) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([](Map), 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceObjxMap uses the specified function to replace each (Map)s
--// by iterating each item.  The data in the returned result will be a
--// [](Map) containing the replaced items.
--func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
--
--	arr := v.MustObjxMapSlice()
--	replaced := make([](Map), len(arr))
--
--	v.EachObjxMap(func(index int, val Map) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectObjxMap uses the specified collector function to collect a value
--// for each of the (Map)s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
--
--	arr := v.MustObjxMapSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachObjxMap(func(index int, val Map) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Bool (bool and []bool)
--	--------------------------------------------------
--*/
--
--// Bool gets the value as a bool, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Bool(optionalDefault ...bool) bool {
--	if s, ok := v.data.(bool); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return false
--}
--
--// MustBool gets the value as a bool.
--//
--// Panics if the object is not a bool.
--func (v *Value) MustBool() bool {
--	return v.data.(bool)
--}
--
--// BoolSlice gets the value as a []bool, returns the optionalDefault
--// value or nil if the value is not a []bool.
--func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool {
--	if s, ok := v.data.([]bool); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustBoolSlice gets the value as a []bool.
--//
--// Panics if the object is not a []bool.
--func (v *Value) MustBoolSlice() []bool {
--	return v.data.([]bool)
--}
--
--// IsBool gets whether the object contained is a bool or not.
--func (v *Value) IsBool() bool {
--	_, ok := v.data.(bool)
--	return ok
--}
--
--// IsBoolSlice gets whether the object contained is a []bool or not.
--func (v *Value) IsBoolSlice() bool {
--	_, ok := v.data.([]bool)
--	return ok
--}
--
--// EachBool calls the specified callback for each object
--// in the []bool.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachBool(callback func(int, bool) bool) *Value {
--
--	for index, val := range v.MustBoolSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereBool uses the specified decider function to select items
--// from the []bool.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereBool(decider func(int, bool) bool) *Value {
--
--	var selected []bool
--
--	v.EachBool(func(index int, val bool) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupBool uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]bool.
--func (v *Value) GroupBool(grouper func(int, bool) string) *Value {
--
--	groups := make(map[string][]bool)
--
--	v.EachBool(func(index int, val bool) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]bool, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceBool uses the specified function to replace each bools
--// by iterating each item.  The data in the returned result will be a
--// []bool containing the replaced items.
--func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value {
--
--	arr := v.MustBoolSlice()
--	replaced := make([]bool, len(arr))
--
--	v.EachBool(func(index int, val bool) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectBool uses the specified collector function to collect a value
--// for each of the bools in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
--
--	arr := v.MustBoolSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachBool(func(index int, val bool) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Str (string and []string)
--	--------------------------------------------------
--*/
--
--// Str gets the value as a string, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Str(optionalDefault ...string) string {
--	if s, ok := v.data.(string); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return ""
--}
--
--// MustStr gets the value as a string.
--//
--// Panics if the object is not a string.
--func (v *Value) MustStr() string {
--	return v.data.(string)
--}
--
--// StrSlice gets the value as a []string, returns the optionalDefault
--// value or nil if the value is not a []string.
--func (v *Value) StrSlice(optionalDefault ...[]string) []string {
--	if s, ok := v.data.([]string); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustStrSlice gets the value as a []string.
--//
--// Panics if the object is not a []string.
--func (v *Value) MustStrSlice() []string {
--	return v.data.([]string)
--}
--
--// IsStr gets whether the object contained is a string or not.
--func (v *Value) IsStr() bool {
--	_, ok := v.data.(string)
--	return ok
--}
--
--// IsStrSlice gets whether the object contained is a []string or not.
--func (v *Value) IsStrSlice() bool {
--	_, ok := v.data.([]string)
--	return ok
--}
--
--// EachStr calls the specified callback for each object
--// in the []string.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachStr(callback func(int, string) bool) *Value {
--
--	for index, val := range v.MustStrSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereStr uses the specified decider function to select items
--// from the []string.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereStr(decider func(int, string) bool) *Value {
--
--	var selected []string
--
--	v.EachStr(func(index int, val string) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupStr uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]string.
--func (v *Value) GroupStr(grouper func(int, string) string) *Value {
--
--	groups := make(map[string][]string)
--
--	v.EachStr(func(index int, val string) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]string, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceStr uses the specified function to replace each strings
--// by iterating each item.  The data in the returned result will be a
--// []string containing the replaced items.
--func (v *Value) ReplaceStr(replacer func(int, string) string) *Value {
--
--	arr := v.MustStrSlice()
--	replaced := make([]string, len(arr))
--
--	v.EachStr(func(index int, val string) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectStr uses the specified collector function to collect a value
--// for each of the strings in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
--
--	arr := v.MustStrSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachStr(func(index int, val string) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Int (int and []int)
--	--------------------------------------------------
--*/
--
--// Int gets the value as a int, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Int(optionalDefault ...int) int {
--	if s, ok := v.data.(int); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustInt gets the value as a int.
--//
--// Panics if the object is not a int.
--func (v *Value) MustInt() int {
--	return v.data.(int)
--}
--
--// IntSlice gets the value as a []int, returns the optionalDefault
--// value or nil if the value is not a []int.
--func (v *Value) IntSlice(optionalDefault ...[]int) []int {
--	if s, ok := v.data.([]int); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustIntSlice gets the value as a []int.
--//
--// Panics if the object is not a []int.
--func (v *Value) MustIntSlice() []int {
--	return v.data.([]int)
--}
--
--// IsInt gets whether the object contained is a int or not.
--func (v *Value) IsInt() bool {
--	_, ok := v.data.(int)
--	return ok
--}
--
--// IsIntSlice gets whether the object contained is a []int or not.
--func (v *Value) IsIntSlice() bool {
--	_, ok := v.data.([]int)
--	return ok
--}
--
--// EachInt calls the specified callback for each object
--// in the []int.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInt(callback func(int, int) bool) *Value {
--
--	for index, val := range v.MustIntSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInt uses the specified decider function to select items
--// from the []int.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInt(decider func(int, int) bool) *Value {
--
--	var selected []int
--
--	v.EachInt(func(index int, val int) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInt uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]int.
--func (v *Value) GroupInt(grouper func(int, int) string) *Value {
--
--	groups := make(map[string][]int)
--
--	v.EachInt(func(index int, val int) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]int, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInt uses the specified function to replace each ints
--// by iterating each item.  The data in the returned result will be a
--// []int containing the replaced items.
--func (v *Value) ReplaceInt(replacer func(int, int) int) *Value {
--
--	arr := v.MustIntSlice()
--	replaced := make([]int, len(arr))
--
--	v.EachInt(func(index int, val int) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInt uses the specified collector function to collect a value
--// for each of the ints in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
--
--	arr := v.MustIntSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInt(func(index int, val int) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Int8 (int8 and []int8)
--	--------------------------------------------------
--*/
--
--// Int8 gets the value as a int8, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Int8(optionalDefault ...int8) int8 {
--	if s, ok := v.data.(int8); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustInt8 gets the value as a int8.
--//
--// Panics if the object is not a int8.
--func (v *Value) MustInt8() int8 {
--	return v.data.(int8)
--}
--
--// Int8Slice gets the value as a []int8, returns the optionalDefault
--// value or nil if the value is not a []int8.
--func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 {
--	if s, ok := v.data.([]int8); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInt8Slice gets the value as a []int8.
--//
--// Panics if the object is not a []int8.
--func (v *Value) MustInt8Slice() []int8 {
--	return v.data.([]int8)
--}
--
--// IsInt8 gets whether the object contained is a int8 or not.
--func (v *Value) IsInt8() bool {
--	_, ok := v.data.(int8)
--	return ok
--}
--
--// IsInt8Slice gets whether the object contained is a []int8 or not.
--func (v *Value) IsInt8Slice() bool {
--	_, ok := v.data.([]int8)
--	return ok
--}
--
--// EachInt8 calls the specified callback for each object
--// in the []int8.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInt8(callback func(int, int8) bool) *Value {
--
--	for index, val := range v.MustInt8Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInt8 uses the specified decider function to select items
--// from the []int8.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInt8(decider func(int, int8) bool) *Value {
--
--	var selected []int8
--
--	v.EachInt8(func(index int, val int8) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInt8 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]int8.
--func (v *Value) GroupInt8(grouper func(int, int8) string) *Value {
--
--	groups := make(map[string][]int8)
--
--	v.EachInt8(func(index int, val int8) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]int8, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInt8 uses the specified function to replace each int8s
--// by iterating each item.  The data in the returned result will be a
--// []int8 containing the replaced items.
--func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value {
--
--	arr := v.MustInt8Slice()
--	replaced := make([]int8, len(arr))
--
--	v.EachInt8(func(index int, val int8) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInt8 uses the specified collector function to collect a value
--// for each of the int8s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
--
--	arr := v.MustInt8Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInt8(func(index int, val int8) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Int16 (int16 and []int16)
--	--------------------------------------------------
--*/
--
--// Int16 gets the value as a int16, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Int16(optionalDefault ...int16) int16 {
--	if s, ok := v.data.(int16); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustInt16 gets the value as a int16.
--//
--// Panics if the object is not a int16.
--func (v *Value) MustInt16() int16 {
--	return v.data.(int16)
--}
--
--// Int16Slice gets the value as a []int16, returns the optionalDefault
--// value or nil if the value is not a []int16.
--func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 {
--	if s, ok := v.data.([]int16); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInt16Slice gets the value as a []int16.
--//
--// Panics if the object is not a []int16.
--func (v *Value) MustInt16Slice() []int16 {
--	return v.data.([]int16)
--}
--
--// IsInt16 gets whether the object contained is a int16 or not.
--func (v *Value) IsInt16() bool {
--	_, ok := v.data.(int16)
--	return ok
--}
--
--// IsInt16Slice gets whether the object contained is a []int16 or not.
--func (v *Value) IsInt16Slice() bool {
--	_, ok := v.data.([]int16)
--	return ok
--}
--
--// EachInt16 calls the specified callback for each object
--// in the []int16.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInt16(callback func(int, int16) bool) *Value {
--
--	for index, val := range v.MustInt16Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInt16 uses the specified decider function to select items
--// from the []int16.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInt16(decider func(int, int16) bool) *Value {
--
--	var selected []int16
--
--	v.EachInt16(func(index int, val int16) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInt16 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]int16.
--func (v *Value) GroupInt16(grouper func(int, int16) string) *Value {
--
--	groups := make(map[string][]int16)
--
--	v.EachInt16(func(index int, val int16) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]int16, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInt16 uses the specified function to replace each int16s
--// by iterating each item.  The data in the returned result will be a
--// []int16 containing the replaced items.
--func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value {
--
--	arr := v.MustInt16Slice()
--	replaced := make([]int16, len(arr))
--
--	v.EachInt16(func(index int, val int16) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInt16 uses the specified collector function to collect a value
--// for each of the int16s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
--
--	arr := v.MustInt16Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInt16(func(index int, val int16) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Int32 (int32 and []int32)
--	--------------------------------------------------
--*/
--
--// Int32 gets the value as a int32, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Int32(optionalDefault ...int32) int32 {
--	if s, ok := v.data.(int32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustInt32 gets the value as a int32.
--//
--// Panics if the object is not a int32.
--func (v *Value) MustInt32() int32 {
--	return v.data.(int32)
--}
--
--// Int32Slice gets the value as a []int32, returns the optionalDefault
--// value or nil if the value is not a []int32.
--func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 {
--	if s, ok := v.data.([]int32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInt32Slice gets the value as a []int32.
--//
--// Panics if the object is not a []int32.
--func (v *Value) MustInt32Slice() []int32 {
--	return v.data.([]int32)
--}
--
--// IsInt32 gets whether the object contained is a int32 or not.
--func (v *Value) IsInt32() bool {
--	_, ok := v.data.(int32)
--	return ok
--}
--
--// IsInt32Slice gets whether the object contained is a []int32 or not.
--func (v *Value) IsInt32Slice() bool {
--	_, ok := v.data.([]int32)
--	return ok
--}
--
--// EachInt32 calls the specified callback for each object
--// in the []int32.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInt32(callback func(int, int32) bool) *Value {
--
--	for index, val := range v.MustInt32Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInt32 uses the specified decider function to select items
--// from the []int32.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInt32(decider func(int, int32) bool) *Value {
--
--	var selected []int32
--
--	v.EachInt32(func(index int, val int32) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInt32 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]int32.
--func (v *Value) GroupInt32(grouper func(int, int32) string) *Value {
--
--	groups := make(map[string][]int32)
--
--	v.EachInt32(func(index int, val int32) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]int32, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInt32 uses the specified function to replace each int32s
--// by iterating each item.  The data in the returned result will be a
--// []int32 containing the replaced items.
--func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value {
--
--	arr := v.MustInt32Slice()
--	replaced := make([]int32, len(arr))
--
--	v.EachInt32(func(index int, val int32) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInt32 uses the specified collector function to collect a value
--// for each of the int32s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
--
--	arr := v.MustInt32Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInt32(func(index int, val int32) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Int64 (int64 and []int64)
--	--------------------------------------------------
--*/
--
--// Int64 gets the value as a int64, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Int64(optionalDefault ...int64) int64 {
--	if s, ok := v.data.(int64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustInt64 gets the value as a int64.
--//
--// Panics if the object is not a int64.
--func (v *Value) MustInt64() int64 {
--	return v.data.(int64)
--}
--
--// Int64Slice gets the value as a []int64, returns the optionalDefault
--// value or nil if the value is not a []int64.
--func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 {
--	if s, ok := v.data.([]int64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustInt64Slice gets the value as a []int64.
--//
--// Panics if the object is not a []int64.
--func (v *Value) MustInt64Slice() []int64 {
--	return v.data.([]int64)
--}
--
--// IsInt64 gets whether the object contained is a int64 or not.
--func (v *Value) IsInt64() bool {
--	_, ok := v.data.(int64)
--	return ok
--}
--
--// IsInt64Slice gets whether the object contained is a []int64 or not.
--func (v *Value) IsInt64Slice() bool {
--	_, ok := v.data.([]int64)
--	return ok
--}
--
--// EachInt64 calls the specified callback for each object
--// in the []int64.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachInt64(callback func(int, int64) bool) *Value {
--
--	for index, val := range v.MustInt64Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereInt64 uses the specified decider function to select items
--// from the []int64.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereInt64(decider func(int, int64) bool) *Value {
--
--	var selected []int64
--
--	v.EachInt64(func(index int, val int64) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupInt64 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]int64.
--func (v *Value) GroupInt64(grouper func(int, int64) string) *Value {
--
--	groups := make(map[string][]int64)
--
--	v.EachInt64(func(index int, val int64) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]int64, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceInt64 uses the specified function to replace each int64s
--// by iterating each item.  The data in the returned result will be a
--// []int64 containing the replaced items.
--func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value {
--
--	arr := v.MustInt64Slice()
--	replaced := make([]int64, len(arr))
--
--	v.EachInt64(func(index int, val int64) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectInt64 uses the specified collector function to collect a value
--// for each of the int64s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
--
--	arr := v.MustInt64Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachInt64(func(index int, val int64) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uint (uint and []uint)
--	--------------------------------------------------
--*/
--
--// Uint gets the value as a uint, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uint(optionalDefault ...uint) uint {
--	if s, ok := v.data.(uint); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUint gets the value as a uint.
--//
--// Panics if the object is not a uint.
--func (v *Value) MustUint() uint {
--	return v.data.(uint)
--}
--
--// UintSlice gets the value as a []uint, returns the optionalDefault
--// value or nil if the value is not a []uint.
--func (v *Value) UintSlice(optionalDefault ...[]uint) []uint {
--	if s, ok := v.data.([]uint); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUintSlice gets the value as a []uint.
--//
--// Panics if the object is not a []uint.
--func (v *Value) MustUintSlice() []uint {
--	return v.data.([]uint)
--}
--
--// IsUint gets whether the object contained is a uint or not.
--func (v *Value) IsUint() bool {
--	_, ok := v.data.(uint)
--	return ok
--}
--
--// IsUintSlice gets whether the object contained is a []uint or not.
--func (v *Value) IsUintSlice() bool {
--	_, ok := v.data.([]uint)
--	return ok
--}
--
--// EachUint calls the specified callback for each object
--// in the []uint.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUint(callback func(int, uint) bool) *Value {
--
--	for index, val := range v.MustUintSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUint uses the specified decider function to select items
--// from the []uint.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUint(decider func(int, uint) bool) *Value {
--
--	var selected []uint
--
--	v.EachUint(func(index int, val uint) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUint uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uint.
--func (v *Value) GroupUint(grouper func(int, uint) string) *Value {
--
--	groups := make(map[string][]uint)
--
--	v.EachUint(func(index int, val uint) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uint, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUint uses the specified function to replace each uints
--// by iterating each item.  The data in the returned result will be a
--// []uint containing the replaced items.
--func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value {
--
--	arr := v.MustUintSlice()
--	replaced := make([]uint, len(arr))
--
--	v.EachUint(func(index int, val uint) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUint uses the specified collector function to collect a value
--// for each of the uints in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
--
--	arr := v.MustUintSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUint(func(index int, val uint) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uint8 (uint8 and []uint8)
--	--------------------------------------------------
--*/
--
--// Uint8 gets the value as a uint8, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uint8(optionalDefault ...uint8) uint8 {
--	if s, ok := v.data.(uint8); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUint8 gets the value as a uint8.
--//
--// Panics if the object is not a uint8.
--func (v *Value) MustUint8() uint8 {
--	return v.data.(uint8)
--}
--
--// Uint8Slice gets the value as a []uint8, returns the optionalDefault
--// value or nil if the value is not a []uint8.
--func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 {
--	if s, ok := v.data.([]uint8); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUint8Slice gets the value as a []uint8.
--//
--// Panics if the object is not a []uint8.
--func (v *Value) MustUint8Slice() []uint8 {
--	return v.data.([]uint8)
--}
--
--// IsUint8 gets whether the object contained is a uint8 or not.
--func (v *Value) IsUint8() bool {
--	_, ok := v.data.(uint8)
--	return ok
--}
--
--// IsUint8Slice gets whether the object contained is a []uint8 or not.
--func (v *Value) IsUint8Slice() bool {
--	_, ok := v.data.([]uint8)
--	return ok
--}
--
--// EachUint8 calls the specified callback for each object
--// in the []uint8.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUint8(callback func(int, uint8) bool) *Value {
--
--	for index, val := range v.MustUint8Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUint8 uses the specified decider function to select items
--// from the []uint8.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value {
--
--	var selected []uint8
--
--	v.EachUint8(func(index int, val uint8) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUint8 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uint8.
--func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value {
--
--	groups := make(map[string][]uint8)
--
--	v.EachUint8(func(index int, val uint8) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uint8, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUint8 uses the specified function to replace each uint8s
--// by iterating each item.  The data in the returned result will be a
--// []uint8 containing the replaced items.
--func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value {
--
--	arr := v.MustUint8Slice()
--	replaced := make([]uint8, len(arr))
--
--	v.EachUint8(func(index int, val uint8) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUint8 uses the specified collector function to collect a value
--// for each of the uint8s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
--
--	arr := v.MustUint8Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUint8(func(index int, val uint8) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uint16 (uint16 and []uint16)
--	--------------------------------------------------
--*/
--
--// Uint16 gets the value as a uint16, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uint16(optionalDefault ...uint16) uint16 {
--	if s, ok := v.data.(uint16); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUint16 gets the value as a uint16.
--//
--// Panics if the object is not a uint16.
--func (v *Value) MustUint16() uint16 {
--	return v.data.(uint16)
--}
--
--// Uint16Slice gets the value as a []uint16, returns the optionalDefault
--// value or nil if the value is not a []uint16.
--func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 {
--	if s, ok := v.data.([]uint16); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUint16Slice gets the value as a []uint16.
--//
--// Panics if the object is not a []uint16.
--func (v *Value) MustUint16Slice() []uint16 {
--	return v.data.([]uint16)
--}
--
--// IsUint16 gets whether the object contained is a uint16 or not.
--func (v *Value) IsUint16() bool {
--	_, ok := v.data.(uint16)
--	return ok
--}
--
--// IsUint16Slice gets whether the object contained is a []uint16 or not.
--func (v *Value) IsUint16Slice() bool {
--	_, ok := v.data.([]uint16)
--	return ok
--}
--
--// EachUint16 calls the specified callback for each object
--// in the []uint16.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUint16(callback func(int, uint16) bool) *Value {
--
--	for index, val := range v.MustUint16Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUint16 uses the specified decider function to select items
--// from the []uint16.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value {
--
--	var selected []uint16
--
--	v.EachUint16(func(index int, val uint16) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUint16 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uint16.
--func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value {
--
--	groups := make(map[string][]uint16)
--
--	v.EachUint16(func(index int, val uint16) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uint16, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUint16 uses the specified function to replace each uint16s
--// by iterating each item.  The data in the returned result will be a
--// []uint16 containing the replaced items.
--func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value {
--
--	arr := v.MustUint16Slice()
--	replaced := make([]uint16, len(arr))
--
--	v.EachUint16(func(index int, val uint16) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUint16 uses the specified collector function to collect a value
--// for each of the uint16s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
--
--	arr := v.MustUint16Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUint16(func(index int, val uint16) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uint32 (uint32 and []uint32)
--	--------------------------------------------------
--*/
--
--// Uint32 gets the value as a uint32, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uint32(optionalDefault ...uint32) uint32 {
--	if s, ok := v.data.(uint32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUint32 gets the value as a uint32.
--//
--// Panics if the object is not a uint32.
--func (v *Value) MustUint32() uint32 {
--	return v.data.(uint32)
--}
--
--// Uint32Slice gets the value as a []uint32, returns the optionalDefault
--// value or nil if the value is not a []uint32.
--func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 {
--	if s, ok := v.data.([]uint32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUint32Slice gets the value as a []uint32.
--//
--// Panics if the object is not a []uint32.
--func (v *Value) MustUint32Slice() []uint32 {
--	return v.data.([]uint32)
--}
--
--// IsUint32 gets whether the object contained is a uint32 or not.
--func (v *Value) IsUint32() bool {
--	_, ok := v.data.(uint32)
--	return ok
--}
--
--// IsUint32Slice gets whether the object contained is a []uint32 or not.
--func (v *Value) IsUint32Slice() bool {
--	_, ok := v.data.([]uint32)
--	return ok
--}
--
--// EachUint32 calls the specified callback for each object
--// in the []uint32.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUint32(callback func(int, uint32) bool) *Value {
--
--	for index, val := range v.MustUint32Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUint32 uses the specified decider function to select items
--// from the []uint32.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value {
--
--	var selected []uint32
--
--	v.EachUint32(func(index int, val uint32) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUint32 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uint32.
--func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value {
--
--	groups := make(map[string][]uint32)
--
--	v.EachUint32(func(index int, val uint32) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uint32, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUint32 uses the specified function to replace each uint32s
--// by iterating each item.  The data in the returned result will be a
--// []uint32 containing the replaced items.
--func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value {
--
--	arr := v.MustUint32Slice()
--	replaced := make([]uint32, len(arr))
--
--	v.EachUint32(func(index int, val uint32) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUint32 uses the specified collector function to collect a value
--// for each of the uint32s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
--
--	arr := v.MustUint32Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUint32(func(index int, val uint32) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uint64 (uint64 and []uint64)
--	--------------------------------------------------
--*/
--
--// Uint64 gets the value as a uint64, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uint64(optionalDefault ...uint64) uint64 {
--	if s, ok := v.data.(uint64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUint64 gets the value as a uint64.
--//
--// Panics if the object is not a uint64.
--func (v *Value) MustUint64() uint64 {
--	return v.data.(uint64)
--}
--
--// Uint64Slice gets the value as a []uint64, returns the optionalDefault
--// value or nil if the value is not a []uint64.
--func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 {
--	if s, ok := v.data.([]uint64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUint64Slice gets the value as a []uint64.
--//
--// Panics if the object is not a []uint64.
--func (v *Value) MustUint64Slice() []uint64 {
--	return v.data.([]uint64)
--}
--
--// IsUint64 gets whether the object contained is a uint64 or not.
--func (v *Value) IsUint64() bool {
--	_, ok := v.data.(uint64)
--	return ok
--}
--
--// IsUint64Slice gets whether the object contained is a []uint64 or not.
--func (v *Value) IsUint64Slice() bool {
--	_, ok := v.data.([]uint64)
--	return ok
--}
--
--// EachUint64 calls the specified callback for each object
--// in the []uint64.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUint64(callback func(int, uint64) bool) *Value {
--
--	for index, val := range v.MustUint64Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUint64 uses the specified decider function to select items
--// from the []uint64.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value {
--
--	var selected []uint64
--
--	v.EachUint64(func(index int, val uint64) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUint64 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uint64.
--func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value {
--
--	groups := make(map[string][]uint64)
--
--	v.EachUint64(func(index int, val uint64) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uint64, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUint64 uses the specified function to replace each uint64s
--// by iterating each item.  The data in the returned result will be a
--// []uint64 containing the replaced items.
--func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value {
--
--	arr := v.MustUint64Slice()
--	replaced := make([]uint64, len(arr))
--
--	v.EachUint64(func(index int, val uint64) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUint64 uses the specified collector function to collect a value
--// for each of the uint64s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
--
--	arr := v.MustUint64Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUint64(func(index int, val uint64) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Uintptr (uintptr and []uintptr)
--	--------------------------------------------------
--*/
--
--// Uintptr gets the value as a uintptr, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr {
--	if s, ok := v.data.(uintptr); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustUintptr gets the value as a uintptr.
--//
--// Panics if the object is not a uintptr.
--func (v *Value) MustUintptr() uintptr {
--	return v.data.(uintptr)
--}
--
--// UintptrSlice gets the value as a []uintptr, returns the optionalDefault
--// value or nil if the value is not a []uintptr.
--func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr {
--	if s, ok := v.data.([]uintptr); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustUintptrSlice gets the value as a []uintptr.
--//
--// Panics if the object is not a []uintptr.
--func (v *Value) MustUintptrSlice() []uintptr {
--	return v.data.([]uintptr)
--}
--
--// IsUintptr gets whether the object contained is a uintptr or not.
--func (v *Value) IsUintptr() bool {
--	_, ok := v.data.(uintptr)
--	return ok
--}
--
--// IsUintptrSlice gets whether the object contained is a []uintptr or not.
--func (v *Value) IsUintptrSlice() bool {
--	_, ok := v.data.([]uintptr)
--	return ok
--}
--
--// EachUintptr calls the specified callback for each object
--// in the []uintptr.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value {
--
--	for index, val := range v.MustUintptrSlice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereUintptr uses the specified decider function to select items
--// from the []uintptr.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value {
--
--	var selected []uintptr
--
--	v.EachUintptr(func(index int, val uintptr) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupUintptr uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]uintptr.
--func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value {
--
--	groups := make(map[string][]uintptr)
--
--	v.EachUintptr(func(index int, val uintptr) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]uintptr, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceUintptr uses the specified function to replace each uintptrs
--// by iterating each item.  The data in the returned result will be a
--// []uintptr containing the replaced items.
--func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value {
--
--	arr := v.MustUintptrSlice()
--	replaced := make([]uintptr, len(arr))
--
--	v.EachUintptr(func(index int, val uintptr) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectUintptr uses the specified collector function to collect a value
--// for each of the uintptrs in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value {
--
--	arr := v.MustUintptrSlice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachUintptr(func(index int, val uintptr) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Float32 (float32 and []float32)
--	--------------------------------------------------
--*/
--
--// Float32 gets the value as a float32, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Float32(optionalDefault ...float32) float32 {
--	if s, ok := v.data.(float32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustFloat32 gets the value as a float32.
--//
--// Panics if the object is not a float32.
--func (v *Value) MustFloat32() float32 {
--	return v.data.(float32)
--}
--
--// Float32Slice gets the value as a []float32, returns the optionalDefault
--// value or nil if the value is not a []float32.
--func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 {
--	if s, ok := v.data.([]float32); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustFloat32Slice gets the value as a []float32.
--//
--// Panics if the object is not a []float32.
--func (v *Value) MustFloat32Slice() []float32 {
--	return v.data.([]float32)
--}
--
--// IsFloat32 gets whether the object contained is a float32 or not.
--func (v *Value) IsFloat32() bool {
--	_, ok := v.data.(float32)
--	return ok
--}
--
--// IsFloat32Slice gets whether the object contained is a []float32 or not.
--func (v *Value) IsFloat32Slice() bool {
--	_, ok := v.data.([]float32)
--	return ok
--}
--
--// EachFloat32 calls the specified callback for each object
--// in the []float32.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachFloat32(callback func(int, float32) bool) *Value {
--
--	for index, val := range v.MustFloat32Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereFloat32 uses the specified decider function to select items
--// from the []float32.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value {
--
--	var selected []float32
--
--	v.EachFloat32(func(index int, val float32) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupFloat32 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]float32.
--func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value {
--
--	groups := make(map[string][]float32)
--
--	v.EachFloat32(func(index int, val float32) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]float32, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceFloat32 uses the specified function to replace each float32s
--// by iterating each item.  The data in the returned result will be a
--// []float32 containing the replaced items.
--func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value {
--
--	arr := v.MustFloat32Slice()
--	replaced := make([]float32, len(arr))
--
--	v.EachFloat32(func(index int, val float32) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectFloat32 uses the specified collector function to collect a value
--// for each of the float32s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value {
--
--	arr := v.MustFloat32Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachFloat32(func(index int, val float32) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Float64 (float64 and []float64)
--	--------------------------------------------------
--*/
--
--// Float64 gets the value as a float64, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Float64(optionalDefault ...float64) float64 {
--	if s, ok := v.data.(float64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustFloat64 gets the value as a float64.
--//
--// Panics if the object is not a float64.
--func (v *Value) MustFloat64() float64 {
--	return v.data.(float64)
--}
--
--// Float64Slice gets the value as a []float64, returns the optionalDefault
--// value or nil if the value is not a []float64.
--func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 {
--	if s, ok := v.data.([]float64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustFloat64Slice gets the value as a []float64.
--//
--// Panics if the object is not a []float64.
--func (v *Value) MustFloat64Slice() []float64 {
--	return v.data.([]float64)
--}
--
--// IsFloat64 gets whether the object contained is a float64 or not.
--func (v *Value) IsFloat64() bool {
--	_, ok := v.data.(float64)
--	return ok
--}
--
--// IsFloat64Slice gets whether the object contained is a []float64 or not.
--func (v *Value) IsFloat64Slice() bool {
--	_, ok := v.data.([]float64)
--	return ok
--}
--
--// EachFloat64 calls the specified callback for each object
--// in the []float64.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachFloat64(callback func(int, float64) bool) *Value {
--
--	for index, val := range v.MustFloat64Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereFloat64 uses the specified decider function to select items
--// from the []float64.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value {
--
--	var selected []float64
--
--	v.EachFloat64(func(index int, val float64) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupFloat64 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]float64.
--func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value {
--
--	groups := make(map[string][]float64)
--
--	v.EachFloat64(func(index int, val float64) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]float64, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceFloat64 uses the specified function to replace each float64s
--// by iterating each item.  The data in the returned result will be a
--// []float64 containing the replaced items.
--func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value {
--
--	arr := v.MustFloat64Slice()
--	replaced := make([]float64, len(arr))
--
--	v.EachFloat64(func(index int, val float64) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectFloat64 uses the specified collector function to collect a value
--// for each of the float64s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value {
--
--	arr := v.MustFloat64Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachFloat64(func(index int, val float64) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Complex64 (complex64 and []complex64)
--	--------------------------------------------------
--*/
--
--// Complex64 gets the value as a complex64, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Complex64(optionalDefault ...complex64) complex64 {
--	if s, ok := v.data.(complex64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustComplex64 gets the value as a complex64.
--//
--// Panics if the object is not a complex64.
--func (v *Value) MustComplex64() complex64 {
--	return v.data.(complex64)
--}
--
--// Complex64Slice gets the value as a []complex64, returns the optionalDefault
--// value or nil if the value is not a []complex64.
--func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 {
--	if s, ok := v.data.([]complex64); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustComplex64Slice gets the value as a []complex64.
--//
--// Panics if the object is not a []complex64.
--func (v *Value) MustComplex64Slice() []complex64 {
--	return v.data.([]complex64)
--}
--
--// IsComplex64 gets whether the object contained is a complex64 or not.
--func (v *Value) IsComplex64() bool {
--	_, ok := v.data.(complex64)
--	return ok
--}
--
--// IsComplex64Slice gets whether the object contained is a []complex64 or not.
--func (v *Value) IsComplex64Slice() bool {
--	_, ok := v.data.([]complex64)
--	return ok
--}
--
--// EachComplex64 calls the specified callback for each object
--// in the []complex64.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value {
--
--	for index, val := range v.MustComplex64Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereComplex64 uses the specified decider function to select items
--// from the []complex64.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value {
--
--	var selected []complex64
--
--	v.EachComplex64(func(index int, val complex64) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupComplex64 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]complex64.
--func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value {
--
--	groups := make(map[string][]complex64)
--
--	v.EachComplex64(func(index int, val complex64) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]complex64, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceComplex64 uses the specified function to replace each complex64s
--// by iterating each item.  The data in the returned result will be a
--// []complex64 containing the replaced items.
--func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value {
--
--	arr := v.MustComplex64Slice()
--	replaced := make([]complex64, len(arr))
--
--	v.EachComplex64(func(index int, val complex64) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectComplex64 uses the specified collector function to collect a value
--// for each of the complex64s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value {
--
--	arr := v.MustComplex64Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachComplex64(func(index int, val complex64) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
--
--/*
--	Complex128 (complex128 and []complex128)
--	--------------------------------------------------
--*/
--
--// Complex128 gets the value as a complex128, returns the optionalDefault
--// value or a system default object if the value is the wrong type.
--func (v *Value) Complex128(optionalDefault ...complex128) complex128 {
--	if s, ok := v.data.(complex128); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return 0
--}
--
--// MustComplex128 gets the value as a complex128.
--//
--// Panics if the object is not a complex128.
--func (v *Value) MustComplex128() complex128 {
--	return v.data.(complex128)
--}
--
--// Complex128Slice gets the value as a []complex128, returns the optionalDefault
--// value or nil if the value is not a []complex128.
--func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 {
--	if s, ok := v.data.([]complex128); ok {
--		return s
--	}
--	if len(optionalDefault) == 1 {
--		return optionalDefault[0]
--	}
--	return nil
--}
--
--// MustComplex128Slice gets the value as a []complex128.
--//
--// Panics if the object is not a []complex128.
--func (v *Value) MustComplex128Slice() []complex128 {
--	return v.data.([]complex128)
--}
--
--// IsComplex128 gets whether the object contained is a complex128 or not.
--func (v *Value) IsComplex128() bool {
--	_, ok := v.data.(complex128)
--	return ok
--}
--
--// IsComplex128Slice gets whether the object contained is a []complex128 or not.
--func (v *Value) IsComplex128Slice() bool {
--	_, ok := v.data.([]complex128)
--	return ok
--}
--
--// EachComplex128 calls the specified callback for each object
--// in the []complex128.
--//
--// Panics if the object is the wrong type.
--func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value {
--
--	for index, val := range v.MustComplex128Slice() {
--		carryon := callback(index, val)
--		if carryon == false {
--			break
--		}
--	}
--
--	return v
--
--}
--
--// WhereComplex128 uses the specified decider function to select items
--// from the []complex128.  The object contained in the result will contain
--// only the selected items.
--func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value {
--
--	var selected []complex128
--
--	v.EachComplex128(func(index int, val complex128) bool {
--		shouldSelect := decider(index, val)
--		if shouldSelect == false {
--			selected = append(selected, val)
--		}
--		return true
--	})
--
--	return &Value{data: selected}
--
--}
--
--// GroupComplex128 uses the specified grouper function to group the items
--// keyed by the return of the grouper.  The object contained in the
--// result will contain a map[string][]complex128.
--func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value {
--
--	groups := make(map[string][]complex128)
--
--	v.EachComplex128(func(index int, val complex128) bool {
--		group := grouper(index, val)
--		if _, ok := groups[group]; !ok {
--			groups[group] = make([]complex128, 0)
--		}
--		groups[group] = append(groups[group], val)
--		return true
--	})
--
--	return &Value{data: groups}
--
--}
--
--// ReplaceComplex128 uses the specified function to replace each complex128s
--// by iterating each item.  The data in the returned result will be a
--// []complex128 containing the replaced items.
--func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value {
--
--	arr := v.MustComplex128Slice()
--	replaced := make([]complex128, len(arr))
--
--	v.EachComplex128(func(index int, val complex128) bool {
--		replaced[index] = replacer(index, val)
--		return true
--	})
--
--	return &Value{data: replaced}
--
--}
--
--// CollectComplex128 uses the specified collector function to collect a value
--// for each of the complex128s in the slice.  The data returned will be a
--// []interface{}.
--func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value {
--
--	arr := v.MustComplex128Slice()
--	collected := make([]interface{}, len(arr))
--
--	v.EachComplex128(func(index int, val complex128) bool {
--		collected[index] = collector(index, val)
--		return true
--	})
--
--	return &Value{data: collected}
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go
-deleted file mode 100644
-index f7a4fce..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/type_specific_codegen_test.go
-+++ /dev/null
-@@ -1,2867 +0,0 @@
--package objx
--
--import (
--	"fmt"
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInter(t *testing.T) {
--
--	val := interface{}("something")
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Inter())
--	assert.Equal(t, val, New(m).Get("value").MustInter())
--	assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter())
--	assert.Equal(t, val, New(m).Get("nothing").Inter("something"))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInter()
--	})
--
--}
--
--func TestInterSlice(t *testing.T) {
--
--	val := interface{}("something")
--	m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").InterSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0])
--	assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice())
--	assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustInterSlice()
--	})
--
--}
--
--func TestIsInter(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: interface{}("something")}
--	assert.True(t, v.IsInter())
--
--	v = &Value{data: []interface{}{interface{}("something")}}
--	assert.True(t, v.IsInterSlice())
--
--}
--
--func TestEachInter(t *testing.T) {
--
--	v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
--	count := 0
--	replacedVals := make([]interface{}, 0)
--	assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustInterSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustInterSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustInterSlice()[2])
--
--}
--
--func TestWhereInter(t *testing.T) {
--
--	v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
--
--	selected := v.WhereInter(func(i int, val interface{}) bool {
--		return i%2 == 0
--	}).MustInterSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInter(t *testing.T) {
--
--	v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
--
--	grouped := v.GroupInter(func(i int, val interface{}) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]interface{})
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInter(t *testing.T) {
--
--	v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
--
--	rawArr := v.MustInterSlice()
--
--	replaced := v.ReplaceInter(func(index int, val interface{}) interface{} {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustInterSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInter(t *testing.T) {
--
--	v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}}
--
--	collected := v.CollectInter(func(index int, val interface{}) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestMSI(t *testing.T) {
--
--	val := map[string]interface{}(map[string]interface{}{"name": "Tyler"})
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").MSI())
--	assert.Equal(t, val, New(m).Get("value").MustMSI())
--	assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI())
--	assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"}))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustMSI()
--	})
--
--}
--
--func TestMSISlice(t *testing.T) {
--
--	val := map[string]interface{}(map[string]interface{}{"name": "Tyler"})
--	m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").MSISlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0])
--	assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice())
--	assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustMSISlice()
--	})
--
--}
--
--func TestIsMSI(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})}
--	assert.True(t, v.IsMSI())
--
--	v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--	assert.True(t, v.IsMSISlice())
--
--}
--
--func TestEachMSI(t *testing.T) {
--
--	v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--	count := 0
--	replacedVals := make([]map[string]interface{}, 0)
--	assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustMSISlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustMSISlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustMSISlice()[2])
--
--}
--
--func TestWhereMSI(t *testing.T) {
--
--	v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--
--	selected := v.WhereMSI(func(i int, val map[string]interface{}) bool {
--		return i%2 == 0
--	}).MustMSISlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupMSI(t *testing.T) {
--
--	v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--
--	grouped := v.GroupMSI(func(i int, val map[string]interface{}) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]map[string]interface{})
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceMSI(t *testing.T) {
--
--	v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--
--	rawArr := v.MustMSISlice()
--
--	replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustMSISlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectMSI(t *testing.T) {
--
--	v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}}
--
--	collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestObjxMap(t *testing.T) {
--
--	val := (Map)(New(1))
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").ObjxMap())
--	assert.Equal(t, val, New(m).Get("value").MustObjxMap())
--	assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap())
--	assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1)))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustObjxMap()
--	})
--
--}
--
--func TestObjxMapSlice(t *testing.T) {
--
--	val := (Map)(New(1))
--	m := map[string]interface{}{"value": [](Map){val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0])
--	assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice())
--	assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustObjxMapSlice()
--	})
--
--}
--
--func TestIsObjxMap(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: (Map)(New(1))}
--	assert.True(t, v.IsObjxMap())
--
--	v = &Value{data: [](Map){(Map)(New(1))}}
--	assert.True(t, v.IsObjxMapSlice())
--
--}
--
--func TestEachObjxMap(t *testing.T) {
--
--	v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
--	count := 0
--	replacedVals := make([](Map), 0)
--	assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2])
--
--}
--
--func TestWhereObjxMap(t *testing.T) {
--
--	v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
--
--	selected := v.WhereObjxMap(func(i int, val Map) bool {
--		return i%2 == 0
--	}).MustObjxMapSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupObjxMap(t *testing.T) {
--
--	v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
--
--	grouped := v.GroupObjxMap(func(i int, val Map) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][](Map))
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceObjxMap(t *testing.T) {
--
--	v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
--
--	rawArr := v.MustObjxMapSlice()
--
--	replaced := v.ReplaceObjxMap(func(index int, val Map) Map {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustObjxMapSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectObjxMap(t *testing.T) {
--
--	v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}}
--
--	collected := v.CollectObjxMap(func(index int, val Map) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestBool(t *testing.T) {
--
--	val := bool(true)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Bool())
--	assert.Equal(t, val, New(m).Get("value").MustBool())
--	assert.Equal(t, bool(false), New(m).Get("nothing").Bool())
--	assert.Equal(t, val, New(m).Get("nothing").Bool(true))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustBool()
--	})
--
--}
--
--func TestBoolSlice(t *testing.T) {
--
--	val := bool(true)
--	m := map[string]interface{}{"value": []bool{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").BoolSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0])
--	assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice())
--	assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustBoolSlice()
--	})
--
--}
--
--func TestIsBool(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: bool(true)}
--	assert.True(t, v.IsBool())
--
--	v = &Value{data: []bool{bool(true)}}
--	assert.True(t, v.IsBoolSlice())
--
--}
--
--func TestEachBool(t *testing.T) {
--
--	v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}}
--	count := 0
--	replacedVals := make([]bool, 0)
--	assert.Equal(t, v, v.EachBool(func(i int, val bool) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2])
--
--}
--
--func TestWhereBool(t *testing.T) {
--
--	v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
--
--	selected := v.WhereBool(func(i int, val bool) bool {
--		return i%2 == 0
--	}).MustBoolSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupBool(t *testing.T) {
--
--	v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
--
--	grouped := v.GroupBool(func(i int, val bool) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]bool)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceBool(t *testing.T) {
--
--	v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
--
--	rawArr := v.MustBoolSlice()
--
--	replaced := v.ReplaceBool(func(index int, val bool) bool {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustBoolSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectBool(t *testing.T) {
--
--	v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}}
--
--	collected := v.CollectBool(func(index int, val bool) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestStr(t *testing.T) {
--
--	val := string("hello")
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Str())
--	assert.Equal(t, val, New(m).Get("value").MustStr())
--	assert.Equal(t, string(""), New(m).Get("nothing").Str())
--	assert.Equal(t, val, New(m).Get("nothing").Str("hello"))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustStr()
--	})
--
--}
--
--func TestStrSlice(t *testing.T) {
--
--	val := string("hello")
--	m := map[string]interface{}{"value": []string{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").StrSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0])
--	assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice())
--	assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustStrSlice()
--	})
--
--}
--
--func TestIsStr(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: string("hello")}
--	assert.True(t, v.IsStr())
--
--	v = &Value{data: []string{string("hello")}}
--	assert.True(t, v.IsStrSlice())
--
--}
--
--func TestEachStr(t *testing.T) {
--
--	v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
--	count := 0
--	replacedVals := make([]string, 0)
--	assert.Equal(t, v, v.EachStr(func(i int, val string) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustStrSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustStrSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustStrSlice()[2])
--
--}
--
--func TestWhereStr(t *testing.T) {
--
--	v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
--
--	selected := v.WhereStr(func(i int, val string) bool {
--		return i%2 == 0
--	}).MustStrSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupStr(t *testing.T) {
--
--	v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
--
--	grouped := v.GroupStr(func(i int, val string) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]string)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceStr(t *testing.T) {
--
--	v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
--
--	rawArr := v.MustStrSlice()
--
--	replaced := v.ReplaceStr(func(index int, val string) string {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustStrSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectStr(t *testing.T) {
--
--	v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}}
--
--	collected := v.CollectStr(func(index int, val string) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInt(t *testing.T) {
--
--	val := int(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int())
--	assert.Equal(t, val, New(m).Get("value").MustInt())
--	assert.Equal(t, int(0), New(m).Get("nothing").Int())
--	assert.Equal(t, val, New(m).Get("nothing").Int(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInt()
--	})
--
--}
--
--func TestIntSlice(t *testing.T) {
--
--	val := int(1)
--	m := map[string]interface{}{"value": []int{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").IntSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0])
--	assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice())
--	assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustIntSlice()
--	})
--
--}
--
--func TestIsInt(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: int(1)}
--	assert.True(t, v.IsInt())
--
--	v = &Value{data: []int{int(1)}}
--	assert.True(t, v.IsIntSlice())
--
--}
--
--func TestEachInt(t *testing.T) {
--
--	v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}}
--	count := 0
--	replacedVals := make([]int, 0)
--	assert.Equal(t, v, v.EachInt(func(i int, val int) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustIntSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustIntSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustIntSlice()[2])
--
--}
--
--func TestWhereInt(t *testing.T) {
--
--	v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
--
--	selected := v.WhereInt(func(i int, val int) bool {
--		return i%2 == 0
--	}).MustIntSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInt(t *testing.T) {
--
--	v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
--
--	grouped := v.GroupInt(func(i int, val int) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]int)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInt(t *testing.T) {
--
--	v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
--
--	rawArr := v.MustIntSlice()
--
--	replaced := v.ReplaceInt(func(index int, val int) int {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustIntSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInt(t *testing.T) {
--
--	v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}}
--
--	collected := v.CollectInt(func(index int, val int) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInt8(t *testing.T) {
--
--	val := int8(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int8())
--	assert.Equal(t, val, New(m).Get("value").MustInt8())
--	assert.Equal(t, int8(0), New(m).Get("nothing").Int8())
--	assert.Equal(t, val, New(m).Get("nothing").Int8(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInt8()
--	})
--
--}
--
--func TestInt8Slice(t *testing.T) {
--
--	val := int8(1)
--	m := map[string]interface{}{"value": []int8{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int8Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0])
--	assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustInt8Slice()
--	})
--
--}
--
--func TestIsInt8(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: int8(1)}
--	assert.True(t, v.IsInt8())
--
--	v = &Value{data: []int8{int8(1)}}
--	assert.True(t, v.IsInt8Slice())
--
--}
--
--func TestEachInt8(t *testing.T) {
--
--	v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}}
--	count := 0
--	replacedVals := make([]int8, 0)
--	assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2])
--
--}
--
--func TestWhereInt8(t *testing.T) {
--
--	v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
--
--	selected := v.WhereInt8(func(i int, val int8) bool {
--		return i%2 == 0
--	}).MustInt8Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInt8(t *testing.T) {
--
--	v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
--
--	grouped := v.GroupInt8(func(i int, val int8) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]int8)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInt8(t *testing.T) {
--
--	v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
--
--	rawArr := v.MustInt8Slice()
--
--	replaced := v.ReplaceInt8(func(index int, val int8) int8 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustInt8Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInt8(t *testing.T) {
--
--	v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}}
--
--	collected := v.CollectInt8(func(index int, val int8) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInt16(t *testing.T) {
--
--	val := int16(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int16())
--	assert.Equal(t, val, New(m).Get("value").MustInt16())
--	assert.Equal(t, int16(0), New(m).Get("nothing").Int16())
--	assert.Equal(t, val, New(m).Get("nothing").Int16(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInt16()
--	})
--
--}
--
--func TestInt16Slice(t *testing.T) {
--
--	val := int16(1)
--	m := map[string]interface{}{"value": []int16{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int16Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0])
--	assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustInt16Slice()
--	})
--
--}
--
--func TestIsInt16(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: int16(1)}
--	assert.True(t, v.IsInt16())
--
--	v = &Value{data: []int16{int16(1)}}
--	assert.True(t, v.IsInt16Slice())
--
--}
--
--func TestEachInt16(t *testing.T) {
--
--	v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}}
--	count := 0
--	replacedVals := make([]int16, 0)
--	assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2])
--
--}
--
--func TestWhereInt16(t *testing.T) {
--
--	v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
--
--	selected := v.WhereInt16(func(i int, val int16) bool {
--		return i%2 == 0
--	}).MustInt16Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInt16(t *testing.T) {
--
--	v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
--
--	grouped := v.GroupInt16(func(i int, val int16) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]int16)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInt16(t *testing.T) {
--
--	v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
--
--	rawArr := v.MustInt16Slice()
--
--	replaced := v.ReplaceInt16(func(index int, val int16) int16 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustInt16Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInt16(t *testing.T) {
--
--	v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}}
--
--	collected := v.CollectInt16(func(index int, val int16) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInt32(t *testing.T) {
--
--	val := int32(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int32())
--	assert.Equal(t, val, New(m).Get("value").MustInt32())
--	assert.Equal(t, int32(0), New(m).Get("nothing").Int32())
--	assert.Equal(t, val, New(m).Get("nothing").Int32(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInt32()
--	})
--
--}
--
--func TestInt32Slice(t *testing.T) {
--
--	val := int32(1)
--	m := map[string]interface{}{"value": []int32{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int32Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0])
--	assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustInt32Slice()
--	})
--
--}
--
--func TestIsInt32(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: int32(1)}
--	assert.True(t, v.IsInt32())
--
--	v = &Value{data: []int32{int32(1)}}
--	assert.True(t, v.IsInt32Slice())
--
--}
--
--func TestEachInt32(t *testing.T) {
--
--	v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}}
--	count := 0
--	replacedVals := make([]int32, 0)
--	assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2])
--
--}
--
--func TestWhereInt32(t *testing.T) {
--
--	v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
--
--	selected := v.WhereInt32(func(i int, val int32) bool {
--		return i%2 == 0
--	}).MustInt32Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInt32(t *testing.T) {
--
--	v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
--
--	grouped := v.GroupInt32(func(i int, val int32) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]int32)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInt32(t *testing.T) {
--
--	v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
--
--	rawArr := v.MustInt32Slice()
--
--	replaced := v.ReplaceInt32(func(index int, val int32) int32 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustInt32Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInt32(t *testing.T) {
--
--	v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}}
--
--	collected := v.CollectInt32(func(index int, val int32) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestInt64(t *testing.T) {
--
--	val := int64(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int64())
--	assert.Equal(t, val, New(m).Get("value").MustInt64())
--	assert.Equal(t, int64(0), New(m).Get("nothing").Int64())
--	assert.Equal(t, val, New(m).Get("nothing").Int64(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustInt64()
--	})
--
--}
--
--func TestInt64Slice(t *testing.T) {
--
--	val := int64(1)
--	m := map[string]interface{}{"value": []int64{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Int64Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0])
--	assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustInt64Slice()
--	})
--
--}
--
--func TestIsInt64(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: int64(1)}
--	assert.True(t, v.IsInt64())
--
--	v = &Value{data: []int64{int64(1)}}
--	assert.True(t, v.IsInt64Slice())
--
--}
--
--func TestEachInt64(t *testing.T) {
--
--	v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}}
--	count := 0
--	replacedVals := make([]int64, 0)
--	assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2])
--
--}
--
--func TestWhereInt64(t *testing.T) {
--
--	v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
--
--	selected := v.WhereInt64(func(i int, val int64) bool {
--		return i%2 == 0
--	}).MustInt64Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupInt64(t *testing.T) {
--
--	v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
--
--	grouped := v.GroupInt64(func(i int, val int64) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]int64)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceInt64(t *testing.T) {
--
--	v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
--
--	rawArr := v.MustInt64Slice()
--
--	replaced := v.ReplaceInt64(func(index int, val int64) int64 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustInt64Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectInt64(t *testing.T) {
--
--	v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}}
--
--	collected := v.CollectInt64(func(index int, val int64) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUint(t *testing.T) {
--
--	val := uint(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint())
--	assert.Equal(t, val, New(m).Get("value").MustUint())
--	assert.Equal(t, uint(0), New(m).Get("nothing").Uint())
--	assert.Equal(t, val, New(m).Get("nothing").Uint(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUint()
--	})
--
--}
--
--func TestUintSlice(t *testing.T) {
--
--	val := uint(1)
--	m := map[string]interface{}{"value": []uint{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").UintSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0])
--	assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice())
--	assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUintSlice()
--	})
--
--}
--
--func TestIsUint(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uint(1)}
--	assert.True(t, v.IsUint())
--
--	v = &Value{data: []uint{uint(1)}}
--	assert.True(t, v.IsUintSlice())
--
--}
--
--func TestEachUint(t *testing.T) {
--
--	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}}
--	count := 0
--	replacedVals := make([]uint, 0)
--	assert.Equal(t, v, v.EachUint(func(i int, val uint) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUintSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUintSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUintSlice()[2])
--
--}
--
--func TestWhereUint(t *testing.T) {
--
--	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
--
--	selected := v.WhereUint(func(i int, val uint) bool {
--		return i%2 == 0
--	}).MustUintSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUint(t *testing.T) {
--
--	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
--
--	grouped := v.GroupUint(func(i int, val uint) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uint)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUint(t *testing.T) {
--
--	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
--
--	rawArr := v.MustUintSlice()
--
--	replaced := v.ReplaceUint(func(index int, val uint) uint {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUintSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUint(t *testing.T) {
--
--	v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}}
--
--	collected := v.CollectUint(func(index int, val uint) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUint8(t *testing.T) {
--
--	val := uint8(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint8())
--	assert.Equal(t, val, New(m).Get("value").MustUint8())
--	assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8())
--	assert.Equal(t, val, New(m).Get("nothing").Uint8(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUint8()
--	})
--
--}
--
--func TestUint8Slice(t *testing.T) {
--
--	val := uint8(1)
--	m := map[string]interface{}{"value": []uint8{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0])
--	assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUint8Slice()
--	})
--
--}
--
--func TestIsUint8(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uint8(1)}
--	assert.True(t, v.IsUint8())
--
--	v = &Value{data: []uint8{uint8(1)}}
--	assert.True(t, v.IsUint8Slice())
--
--}
--
--func TestEachUint8(t *testing.T) {
--
--	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
--	count := 0
--	replacedVals := make([]uint8, 0)
--	assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2])
--
--}
--
--func TestWhereUint8(t *testing.T) {
--
--	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
--
--	selected := v.WhereUint8(func(i int, val uint8) bool {
--		return i%2 == 0
--	}).MustUint8Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUint8(t *testing.T) {
--
--	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
--
--	grouped := v.GroupUint8(func(i int, val uint8) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uint8)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUint8(t *testing.T) {
--
--	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
--
--	rawArr := v.MustUint8Slice()
--
--	replaced := v.ReplaceUint8(func(index int, val uint8) uint8 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUint8Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUint8(t *testing.T) {
--
--	v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}}
--
--	collected := v.CollectUint8(func(index int, val uint8) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUint16(t *testing.T) {
--
--	val := uint16(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint16())
--	assert.Equal(t, val, New(m).Get("value").MustUint16())
--	assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16())
--	assert.Equal(t, val, New(m).Get("nothing").Uint16(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUint16()
--	})
--
--}
--
--func TestUint16Slice(t *testing.T) {
--
--	val := uint16(1)
--	m := map[string]interface{}{"value": []uint16{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0])
--	assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUint16Slice()
--	})
--
--}
--
--func TestIsUint16(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uint16(1)}
--	assert.True(t, v.IsUint16())
--
--	v = &Value{data: []uint16{uint16(1)}}
--	assert.True(t, v.IsUint16Slice())
--
--}
--
--func TestEachUint16(t *testing.T) {
--
--	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
--	count := 0
--	replacedVals := make([]uint16, 0)
--	assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2])
--
--}
--
--func TestWhereUint16(t *testing.T) {
--
--	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
--
--	selected := v.WhereUint16(func(i int, val uint16) bool {
--		return i%2 == 0
--	}).MustUint16Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUint16(t *testing.T) {
--
--	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
--
--	grouped := v.GroupUint16(func(i int, val uint16) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uint16)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUint16(t *testing.T) {
--
--	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
--
--	rawArr := v.MustUint16Slice()
--
--	replaced := v.ReplaceUint16(func(index int, val uint16) uint16 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUint16Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUint16(t *testing.T) {
--
--	v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}}
--
--	collected := v.CollectUint16(func(index int, val uint16) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUint32(t *testing.T) {
--
--	val := uint32(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint32())
--	assert.Equal(t, val, New(m).Get("value").MustUint32())
--	assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32())
--	assert.Equal(t, val, New(m).Get("nothing").Uint32(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUint32()
--	})
--
--}
--
--func TestUint32Slice(t *testing.T) {
--
--	val := uint32(1)
--	m := map[string]interface{}{"value": []uint32{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0])
--	assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUint32Slice()
--	})
--
--}
--
--func TestIsUint32(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uint32(1)}
--	assert.True(t, v.IsUint32())
--
--	v = &Value{data: []uint32{uint32(1)}}
--	assert.True(t, v.IsUint32Slice())
--
--}
--
--func TestEachUint32(t *testing.T) {
--
--	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
--	count := 0
--	replacedVals := make([]uint32, 0)
--	assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2])
--
--}
--
--func TestWhereUint32(t *testing.T) {
--
--	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
--
--	selected := v.WhereUint32(func(i int, val uint32) bool {
--		return i%2 == 0
--	}).MustUint32Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUint32(t *testing.T) {
--
--	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
--
--	grouped := v.GroupUint32(func(i int, val uint32) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uint32)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUint32(t *testing.T) {
--
--	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
--
--	rawArr := v.MustUint32Slice()
--
--	replaced := v.ReplaceUint32(func(index int, val uint32) uint32 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUint32Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUint32(t *testing.T) {
--
--	v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}}
--
--	collected := v.CollectUint32(func(index int, val uint32) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUint64(t *testing.T) {
--
--	val := uint64(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint64())
--	assert.Equal(t, val, New(m).Get("value").MustUint64())
--	assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64())
--	assert.Equal(t, val, New(m).Get("nothing").Uint64(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUint64()
--	})
--
--}
--
--func TestUint64Slice(t *testing.T) {
--
--	val := uint64(1)
--	m := map[string]interface{}{"value": []uint64{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0])
--	assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUint64Slice()
--	})
--
--}
--
--func TestIsUint64(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uint64(1)}
--	assert.True(t, v.IsUint64())
--
--	v = &Value{data: []uint64{uint64(1)}}
--	assert.True(t, v.IsUint64Slice())
--
--}
--
--func TestEachUint64(t *testing.T) {
--
--	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
--	count := 0
--	replacedVals := make([]uint64, 0)
--	assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2])
--
--}
--
--func TestWhereUint64(t *testing.T) {
--
--	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
--
--	selected := v.WhereUint64(func(i int, val uint64) bool {
--		return i%2 == 0
--	}).MustUint64Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUint64(t *testing.T) {
--
--	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
--
--	grouped := v.GroupUint64(func(i int, val uint64) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uint64)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUint64(t *testing.T) {
--
--	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
--
--	rawArr := v.MustUint64Slice()
--
--	replaced := v.ReplaceUint64(func(index int, val uint64) uint64 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUint64Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUint64(t *testing.T) {
--
--	v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}}
--
--	collected := v.CollectUint64(func(index int, val uint64) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestUintptr(t *testing.T) {
--
--	val := uintptr(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Uintptr())
--	assert.Equal(t, val, New(m).Get("value").MustUintptr())
--	assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr())
--	assert.Equal(t, val, New(m).Get("nothing").Uintptr(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustUintptr()
--	})
--
--}
--
--func TestUintptrSlice(t *testing.T) {
--
--	val := uintptr(1)
--	m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0])
--	assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice())
--	assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustUintptrSlice()
--	})
--
--}
--
--func TestIsUintptr(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: uintptr(1)}
--	assert.True(t, v.IsUintptr())
--
--	v = &Value{data: []uintptr{uintptr(1)}}
--	assert.True(t, v.IsUintptrSlice())
--
--}
--
--func TestEachUintptr(t *testing.T) {
--
--	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
--	count := 0
--	replacedVals := make([]uintptr, 0)
--	assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0])
--	assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1])
--	assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2])
--
--}
--
--func TestWhereUintptr(t *testing.T) {
--
--	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
--
--	selected := v.WhereUintptr(func(i int, val uintptr) bool {
--		return i%2 == 0
--	}).MustUintptrSlice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupUintptr(t *testing.T) {
--
--	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
--
--	grouped := v.GroupUintptr(func(i int, val uintptr) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]uintptr)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceUintptr(t *testing.T) {
--
--	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
--
--	rawArr := v.MustUintptrSlice()
--
--	replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustUintptrSlice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectUintptr(t *testing.T) {
--
--	v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}}
--
--	collected := v.CollectUintptr(func(index int, val uintptr) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestFloat32(t *testing.T) {
--
--	val := float32(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Float32())
--	assert.Equal(t, val, New(m).Get("value").MustFloat32())
--	assert.Equal(t, float32(0), New(m).Get("nothing").Float32())
--	assert.Equal(t, val, New(m).Get("nothing").Float32(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustFloat32()
--	})
--
--}
--
--func TestFloat32Slice(t *testing.T) {
--
--	val := float32(1)
--	m := map[string]interface{}{"value": []float32{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Float32Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0])
--	assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustFloat32Slice()
--	})
--
--}
--
--func TestIsFloat32(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: float32(1)}
--	assert.True(t, v.IsFloat32())
--
--	v = &Value{data: []float32{float32(1)}}
--	assert.True(t, v.IsFloat32Slice())
--
--}
--
--func TestEachFloat32(t *testing.T) {
--
--	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}}
--	count := 0
--	replacedVals := make([]float32, 0)
--	assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2])
--
--}
--
--func TestWhereFloat32(t *testing.T) {
--
--	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
--
--	selected := v.WhereFloat32(func(i int, val float32) bool {
--		return i%2 == 0
--	}).MustFloat32Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupFloat32(t *testing.T) {
--
--	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
--
--	grouped := v.GroupFloat32(func(i int, val float32) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]float32)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceFloat32(t *testing.T) {
--
--	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
--
--	rawArr := v.MustFloat32Slice()
--
--	replaced := v.ReplaceFloat32(func(index int, val float32) float32 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustFloat32Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectFloat32(t *testing.T) {
--
--	v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}}
--
--	collected := v.CollectFloat32(func(index int, val float32) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestFloat64(t *testing.T) {
--
--	val := float64(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Float64())
--	assert.Equal(t, val, New(m).Get("value").MustFloat64())
--	assert.Equal(t, float64(0), New(m).Get("nothing").Float64())
--	assert.Equal(t, val, New(m).Get("nothing").Float64(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustFloat64()
--	})
--
--}
--
--func TestFloat64Slice(t *testing.T) {
--
--	val := float64(1)
--	m := map[string]interface{}{"value": []float64{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Float64Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0])
--	assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustFloat64Slice()
--	})
--
--}
--
--func TestIsFloat64(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: float64(1)}
--	assert.True(t, v.IsFloat64())
--
--	v = &Value{data: []float64{float64(1)}}
--	assert.True(t, v.IsFloat64Slice())
--
--}
--
--func TestEachFloat64(t *testing.T) {
--
--	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}}
--	count := 0
--	replacedVals := make([]float64, 0)
--	assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2])
--
--}
--
--func TestWhereFloat64(t *testing.T) {
--
--	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
--
--	selected := v.WhereFloat64(func(i int, val float64) bool {
--		return i%2 == 0
--	}).MustFloat64Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupFloat64(t *testing.T) {
--
--	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
--
--	grouped := v.GroupFloat64(func(i int, val float64) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]float64)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceFloat64(t *testing.T) {
--
--	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
--
--	rawArr := v.MustFloat64Slice()
--
--	replaced := v.ReplaceFloat64(func(index int, val float64) float64 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustFloat64Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectFloat64(t *testing.T) {
--
--	v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}}
--
--	collected := v.CollectFloat64(func(index int, val float64) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestComplex64(t *testing.T) {
--
--	val := complex64(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Complex64())
--	assert.Equal(t, val, New(m).Get("value").MustComplex64())
--	assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64())
--	assert.Equal(t, val, New(m).Get("nothing").Complex64(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustComplex64()
--	})
--
--}
--
--func TestComplex64Slice(t *testing.T) {
--
--	val := complex64(1)
--	m := map[string]interface{}{"value": []complex64{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0])
--	assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustComplex64Slice()
--	})
--
--}
--
--func TestIsComplex64(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: complex64(1)}
--	assert.True(t, v.IsComplex64())
--
--	v = &Value{data: []complex64{complex64(1)}}
--	assert.True(t, v.IsComplex64Slice())
--
--}
--
--func TestEachComplex64(t *testing.T) {
--
--	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
--	count := 0
--	replacedVals := make([]complex64, 0)
--	assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2])
--
--}
--
--func TestWhereComplex64(t *testing.T) {
--
--	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
--
--	selected := v.WhereComplex64(func(i int, val complex64) bool {
--		return i%2 == 0
--	}).MustComplex64Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupComplex64(t *testing.T) {
--
--	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
--
--	grouped := v.GroupComplex64(func(i int, val complex64) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]complex64)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceComplex64(t *testing.T) {
--
--	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
--
--	rawArr := v.MustComplex64Slice()
--
--	replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustComplex64Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectComplex64(t *testing.T) {
--
--	v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}}
--
--	collected := v.CollectComplex64(func(index int, val complex64) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
--
--// ************************************************************
--// TESTS
--// ************************************************************
--
--func TestComplex128(t *testing.T) {
--
--	val := complex128(1)
--	m := map[string]interface{}{"value": val, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Complex128())
--	assert.Equal(t, val, New(m).Get("value").MustComplex128())
--	assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128())
--	assert.Equal(t, val, New(m).Get("nothing").Complex128(1))
--
--	assert.Panics(t, func() {
--		New(m).Get("age").MustComplex128()
--	})
--
--}
--
--func TestComplex128Slice(t *testing.T) {
--
--	val := complex128(1)
--	m := map[string]interface{}{"value": []complex128{val}, "nothing": nil}
--	assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0])
--	assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0])
--	assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice())
--	assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0])
--
--	assert.Panics(t, func() {
--		New(m).Get("nothing").MustComplex128Slice()
--	})
--
--}
--
--func TestIsComplex128(t *testing.T) {
--
--	var v *Value
--
--	v = &Value{data: complex128(1)}
--	assert.True(t, v.IsComplex128())
--
--	v = &Value{data: []complex128{complex128(1)}}
--	assert.True(t, v.IsComplex128Slice())
--
--}
--
--func TestEachComplex128(t *testing.T) {
--
--	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
--	count := 0
--	replacedVals := make([]complex128, 0)
--	assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool {
--
--		count++
--		replacedVals = append(replacedVals, val)
--
--		// abort early
--		if i == 2 {
--			return false
--		}
--
--		return true
--
--	}))
--
--	assert.Equal(t, count, 3)
--	assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0])
--	assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1])
--	assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2])
--
--}
--
--func TestWhereComplex128(t *testing.T) {
--
--	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
--
--	selected := v.WhereComplex128(func(i int, val complex128) bool {
--		return i%2 == 0
--	}).MustComplex128Slice()
--
--	assert.Equal(t, 3, len(selected))
--
--}
--
--func TestGroupComplex128(t *testing.T) {
--
--	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
--
--	grouped := v.GroupComplex128(func(i int, val complex128) string {
--		return fmt.Sprintf("%v", i%2 == 0)
--	}).data.(map[string][]complex128)
--
--	assert.Equal(t, 2, len(grouped))
--	assert.Equal(t, 3, len(grouped["true"]))
--	assert.Equal(t, 3, len(grouped["false"]))
--
--}
--
--func TestReplaceComplex128(t *testing.T) {
--
--	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
--
--	rawArr := v.MustComplex128Slice()
--
--	replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 {
--		if index < len(rawArr)-1 {
--			return rawArr[index+1]
--		}
--		return rawArr[0]
--	})
--
--	replacedArr := replaced.MustComplex128Slice()
--	if assert.Equal(t, 6, len(replacedArr)) {
--		assert.Equal(t, replacedArr[0], rawArr[1])
--		assert.Equal(t, replacedArr[1], rawArr[2])
--		assert.Equal(t, replacedArr[2], rawArr[3])
--		assert.Equal(t, replacedArr[3], rawArr[4])
--		assert.Equal(t, replacedArr[4], rawArr[5])
--		assert.Equal(t, replacedArr[5], rawArr[0])
--	}
--
--}
--
--func TestCollectComplex128(t *testing.T) {
--
--	v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}}
--
--	collected := v.CollectComplex128(func(index int, val complex128) interface{} {
--		return index
--	})
--
--	collectedArr := collected.MustInterSlice()
--	if assert.Equal(t, 6, len(collectedArr)) {
--		assert.Equal(t, collectedArr[0], 0)
--		assert.Equal(t, collectedArr[1], 1)
--		assert.Equal(t, collectedArr[2], 2)
--		assert.Equal(t, collectedArr[3], 3)
--		assert.Equal(t, collectedArr[4], 4)
--		assert.Equal(t, collectedArr[5], 5)
--	}
--
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value.go b/Godeps/_workspace/src/github.com/stretchr/objx/value.go
-deleted file mode 100644
-index 7aaef06..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/value.go
-+++ /dev/null
-@@ -1,13 +0,0 @@
--package objx
--
--// Value provides methods for extracting interface{} data in various
--// types.
--type Value struct {
--	// data contains the raw data being managed by this Value
--	data interface{}
--}
--
--// Data returns the raw data contained by this Value
--func (v *Value) Data() interface{} {
--	return v.data
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go b/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go
-deleted file mode 100644
-index 0bc65d9..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/objx/value_test.go
-+++ /dev/null
-@@ -1 +0,0 @@
--package objx
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
-deleted file mode 100644
-index c784e1b..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go
-+++ /dev/null
-@@ -1,490 +0,0 @@
--package assert
--
--import (
--	"fmt"
--	"reflect"
--	"runtime"
--	"strings"
--	"time"
--)
--
--// TestingT is an interface wrapper around *testing.T
--type TestingT interface {
--	Errorf(format string, args ...interface{})
--}
--
--// Comparison is a custom function that returns true on success and false on failure.
--type Comparison func() (success bool)
--
--/*
--	Helper functions
--*/
--
--// ObjectsAreEqual determines if two objects are considered equal.
--//
--// This function does no assertion of any kind.
--func ObjectsAreEqual(expected, actual interface{}) bool {
--
--	if reflect.DeepEqual(expected, actual) {
--		return true
--	}
--
--	if reflect.ValueOf(expected) == reflect.ValueOf(actual) {
--		return true
--	}
--
--	// Last ditch effort
--	if fmt.Sprintf("%#v", expected) == fmt.Sprintf("%#v", actual) {
--		return true
--	}
--
--	return false
--
--}
--
--/* CallerInfo is necessary because the assert functions use the testing object
--internally, causing it to print the file:line of the assert method, rather than where
--the problem actually occurred in calling code.*/
--
--// CallerInfo returns a string containing the file and line number of the assert call
--// that failed.
--func CallerInfo() string {
--
--	file := ""
--	line := 0
--	ok := false
--
--	for i := 0; ; i++ {
--		_, file, line, ok = runtime.Caller(i)
--		if !ok {
--			return ""
--		}
--		parts := strings.Split(file, "/")
--		dir := parts[len(parts)-2]
--		file = parts[len(parts)-1]
--		if (dir != "assert" && dir != "mock") || file == "mock_test.go" {
--			break
--		}
--	}
--
--	return fmt.Sprintf("%s:%d", file, line)
--}
--
--// getWhitespaceString returns a string that is long enough to overwrite the default
--// output from the go testing framework.
--func getWhitespaceString() string {
--
--	_, file, line, ok := runtime.Caller(1)
--	if !ok {
--		return ""
--	}
--	parts := strings.Split(file, "/")
--	file = parts[len(parts)-1]
--
--	return strings.Repeat(" ", len(fmt.Sprintf("%s:%d:      ", file, line)))
--
--}
--
--func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
--	if len(msgAndArgs) == 0 || msgAndArgs == nil {
--		return ""
--	}
--	if len(msgAndArgs) == 1 {
--		return msgAndArgs[0].(string)
--	}
--	if len(msgAndArgs) > 1 {
--		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
--	}
--	return ""
--}
--
--// Fail reports a failure through
--func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
--
--	message := messageFromMsgAndArgs(msgAndArgs...)
--
--	if len(message) > 0 {
--		t.Errorf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r\tMessages:\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage, message)
--	} else {
--		t.Errorf("\r%s\r\tLocation:\t%s\n\r\tError:\t\t%s\n\r", getWhitespaceString(), CallerInfo(), failureMessage)
--	}
--
--	return false
--}
--
--// Implements asserts that the object implements the specified interface.
--//
--//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
--func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
--
--	interfaceType := reflect.TypeOf(interfaceObject).Elem()
--
--	if !reflect.TypeOf(object).Implements(interfaceType) {
--		return Fail(t, fmt.Sprintf("Object must implement %v", interfaceType), msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// IsType asserts that the specified objects are of the same type.
--func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
--
--	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
--		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
--	}
--
--	return true
--}
--
--// Equal asserts that two objects are equal.
--//
--//    assert.Equal(t, 123, 123, "123 and 123 should be equal")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
--
--	if !ObjectsAreEqual(expected, actual) {
--		return Fail(t, fmt.Sprintf("Not equal: %#v != %#v", expected, actual), msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// Exactly asserts that two objects are equal in value and type.
--//
--//    assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
--
--	aType := reflect.TypeOf(expected)
--	bType := reflect.TypeOf(actual)
--
--	if aType != bType {
--		return Fail(t, "Types expected to match exactly", "%v != %v", aType, bType)
--	}
--
--	return Equal(t, expected, actual, msgAndArgs...)
--
--}
--
--// NotNil asserts that the specified object is not nil.
--//
--//    assert.NotNil(t, err, "err should be something")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
--
--	var success bool = true
--
--	if object == nil {
--		success = false
--	} else {
--		value := reflect.ValueOf(object)
--		kind := value.Kind()
--		if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
--			success = false
--		}
--	}
--
--	if !success {
--		Fail(t, "Expected not to be nil.", msgAndArgs...)
--	}
--
--	return success
--}
--
--// Nil asserts that the specified object is nil.
--//
--//    assert.Nil(t, err, "err should be nothing")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
--
--	if object == nil {
--		return true
--	} else {
--		value := reflect.ValueOf(object)
--		kind := value.Kind()
--		if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
--			return true
--		}
--	}
--
--	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
--}
--
--// isEmpty gets whether the specified object is considered empty or not.
--func isEmpty(object interface{}) bool {
--
--	if object == nil {
--		return true
--	} else if object == "" {
--		return true
--	} else if object == 0 {
--		return true
--	} else if object == false {
--		return true
--	}
--
--	objValue := reflect.ValueOf(object)
--	switch objValue.Kind() {
--	case reflect.Map:
--		fallthrough
--	case reflect.Slice:
--		{
--			return (objValue.Len() == 0)
--		}
--	}
--
--	return false
--
--}
--
--// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or a
--// slice with len == 0.
--//
--// assert.Empty(t, obj)
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
--
--	pass := isEmpty(object)
--	if !pass {
--		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
--	}
--
--	return pass
--
--}
--
--// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or a
--// slice with len == 0.
--//
--// if assert.NotEmpty(t, obj) {
--//   assert.Equal(t, "two", obj[1])
--// }
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
--
--	pass := !isEmpty(object)
--	if !pass {
--		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
--	}
--
--	return pass
--
--}
--
--// True asserts that the specified value is true.
--//
--//    assert.True(t, myBool, "myBool should be true")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
--
--	if value != true {
--		return Fail(t, "Should be true", msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// False asserts that the specified value is false.
--//
--//    assert.False(t, myBool, "myBool should be false")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
--
--	if value != false {
--		return Fail(t, "Should be false", msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// NotEqual asserts that the specified values are NOT equal.
--//
--//    assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
--
--	if ObjectsAreEqual(expected, actual) {
--		return Fail(t, "Should not be equal", msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// Contains asserts that the specified string contains the specified substring.
--//
--//    assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Contains(t TestingT, s, contains string, msgAndArgs ...interface{}) bool {
--
--	if !strings.Contains(s, contains) {
--		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// NotContains asserts that the specified string does NOT contain the specified substring.
--//
--//    assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NotContains(t TestingT, s, contains string, msgAndArgs ...interface{}) bool {
--
--	if strings.Contains(s, contains) {
--		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
--	}
--
--	return true
--
--}
--
--// Condition uses a Comparison to assert a complex condition.
--func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
--	result := comp()
--	if !result {
--		Fail(t, "Condition failed!", msgAndArgs...)
--	}
--	return result
--}
--
--// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
--// methods, and represents a simple func that takes no arguments, and returns nothing.
--type PanicTestFunc func()
--
--// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
--func didPanic(f PanicTestFunc) (bool, interface{}) {
--
--	var didPanic bool = false
--	var message interface{}
--	func() {
--
--		defer func() {
--			if message = recover(); message != nil {
--				didPanic = true
--			}
--		}()
--
--		// call the target function
--		f()
--
--	}()
--
--	return didPanic, message
--
--}
--
--// Panics asserts that the code inside the specified PanicTestFunc panics.
--//
--//   assert.Panics(t, func(){
--//     GoCrazy()
--//   }, "Calling GoCrazy() should panic")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
--
--	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
--		return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
--	}
--
--	return true
--}
--
--// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
--//
--//   assert.NotPanics(t, func(){
--//     RemainCalm()
--//   }, "Calling RemainCalm() should NOT panic")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
--
--	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
--		return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
--	}
--
--	return true
--}
--
--// WithinDuration asserts that the two times are within duration delta of each other.
--//
--//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
--//
--// Returns whether the assertion was successful (true) or not (false).
--func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
--
--	dt := expected.Sub(actual)
--	if dt < -delta || dt > delta {
--		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
--	}
--
--	return true
--}
--
--/*
--	Errors
--*/
--
--// NoError asserts that a function returned no error (i.e. `nil`).
--//
--//   actualObj, err := SomeFunction()
--//   if assert.NoError(t, err) {
--//	   assert.Equal(t, actualObj, expectedObj)
--//   }
--//
--// Returns whether the assertion was successful (true) or not (false).
--func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
--
--	message := messageFromMsgAndArgs(msgAndArgs...)
--	return Nil(t, err, "No error is expected but got %v %s", err, message)
--
--}
--
--// Error asserts that a function returned an error (i.e. not `nil`).
--//
--//   actualObj, err := SomeFunction()
--//   if assert.Error(t, err, "An error was expected") {
--//	   assert.Equal(t, err, expectedError)
--//   }
--//
--// Returns whether the assertion was successful (true) or not (false).
--func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
--
--	message := messageFromMsgAndArgs(msgAndArgs...)
--	return NotNil(t, err, "An error is expected but got nil. %s", message)
--
--}
--
--// EqualError asserts that a function returned an error (i.e. not `nil`)
--// and that the error's string form matches the expected string.
--//
--//   actualObj, err := SomeFunction()
--//   if assert.Error(t, err, "An error was expected") {
--//	   assert.EqualError(t, err, expectedErrorString)
--//   }
--//
--// Returns whether the assertion was successful (true) or not (false).
--func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
--
--	message := messageFromMsgAndArgs(msgAndArgs...)
--	if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
--		return false
--	}
--	s := "An error with value \"%s\" is expected but got \"%s\". %s"
--	return Equal(t, theError.Error(), errString,
--		s, errString, theError.Error(), message)
--}
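
For reference, the assertions.go file removed above exposes plain package-level helpers (Equal, Error, EqualError, ...) that all take a TestingT plus optional message arguments. A minimal usage sketch, relying only on the API shown in the hunk above (SomeFunction and the error text are made up for illustration):

    package example

    import (
    	"errors"
    	"testing"

    	"github.com/stretchr/testify/assert"
    )

    // SomeFunction is a hypothetical function that always fails.
    func SomeFunction() (int, error) {
    	return 0, errors.New("boom")
    }

    func TestSomeFunctionError(t *testing.T) {
    	_, err := SomeFunction()

    	// Error only checks that err is non-nil...
    	assert.Error(t, err, "an error was expected")

    	// ...while EqualError also compares the error's string form.
    	assert.EqualError(t, err, "boom")
    }
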
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go
-deleted file mode 100644
-index bf1d727..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go
-+++ /dev/null
-@@ -1,401 +0,0 @@
--package assert
--
--import (
--	"errors"
--	"testing"
--	"time"
--)
--
--// AssertionTesterInterface defines an interface to be used for testing assertion methods
--type AssertionTesterInterface interface {
--	TestMethod()
--}
--
--// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface
--type AssertionTesterConformingObject struct {
--}
--
--func (a *AssertionTesterConformingObject) TestMethod() {
--}
--
--// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface
--type AssertionTesterNonConformingObject struct {
--}
--
--func TestObjectsAreEqual(t *testing.T) {
--
--	if !ObjectsAreEqual("Hello World", "Hello World") {
--		t.Error("objectsAreEqual should return true")
--	}
--	if !ObjectsAreEqual(123, 123) {
--		t.Error("objectsAreEqual should return true")
--	}
--	if !ObjectsAreEqual(123.5, 123.5) {
--		t.Error("objectsAreEqual should return true")
--	}
--	if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) {
--		t.Error("objectsAreEqual should return true")
--	}
--	if !ObjectsAreEqual(nil, nil) {
--		t.Error("objectsAreEqual should return true")
--	}
--
--}
--
--func TestImplements(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {
--		t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface")
--	}
--	if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
--		t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface")
--	}
--
--}
--
--func TestIsType(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
--		t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject")
--	}
--	if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {
--		t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject")
--	}
--
--}
--
--func TestEqual(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !Equal(mockT, "Hello World", "Hello World") {
--		t.Error("Equal should return true")
--	}
--	if !Equal(mockT, 123, 123) {
--		t.Error("Equal should return true")
--	}
--	if !Equal(mockT, 123.5, 123.5) {
--		t.Error("Equal should return true")
--	}
--	if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) {
--		t.Error("Equal should return true")
--	}
--	if !Equal(mockT, nil, nil) {
--		t.Error("Equal should return true")
--	}
--
--}
--
--func TestNotNil(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !NotNil(mockT, new(AssertionTesterConformingObject)) {
--		t.Error("NotNil should return true: object is not nil")
--	}
--	if NotNil(mockT, nil) {
--		t.Error("NotNil should return false: object is nil")
--	}
--
--}
--
--func TestNil(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !Nil(mockT, nil) {
--		t.Error("Nil should return true: object is nil")
--	}
--	if Nil(mockT, new(AssertionTesterConformingObject)) {
--		t.Error("Nil should return false: object is not nil")
--	}
--
--}
--
--func TestTrue(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !True(mockT, true) {
--		t.Error("True should return true")
--	}
--	if True(mockT, false) {
--		t.Error("True should return false")
--	}
--
--}
--
--func TestFalse(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !False(mockT, false) {
--		t.Error("False should return true")
--	}
--	if False(mockT, true) {
--		t.Error("False should return false")
--	}
--
--}
--
--func TestExactly(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	a := float32(1)
--	b := float64(1)
--	c := float32(1)
--	d := float32(2)
--
--	if Exactly(mockT, a, b) {
--		t.Error("Exactly should return false")
--	}
--	if Exactly(mockT, a, d) {
--		t.Error("Exactly should return false")
--	}
--	if !Exactly(mockT, a, c) {
--		t.Error("Exactly should return true")
--	}
--
--	if Exactly(mockT, nil, a) {
--		t.Error("Exactly should return false")
--	}
--	if Exactly(mockT, a, nil) {
--		t.Error("Exactly should return false")
--	}
--
--}
--
--func TestNotEqual(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !NotEqual(mockT, "Hello World", "Hello World!") {
--		t.Error("NotEqual should return true")
--	}
--	if !NotEqual(mockT, 123, 1234) {
--		t.Error("NotEqual should return true")
--	}
--	if !NotEqual(mockT, 123.5, 123.55) {
--		t.Error("NotEqual should return true")
--	}
--	if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) {
--		t.Error("NotEqual should return true")
--	}
--	if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) {
--		t.Error("NotEqual should return true")
--	}
--}
--
--func TestContains(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !Contains(mockT, "Hello World", "Hello") {
--		t.Error("Contains should return true: \"Hello World\" contains \"Hello\"")
--	}
--	if Contains(mockT, "Hello World", "Salut") {
--		t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"")
--	}
--
--}
--
--func TestNotContains(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !NotContains(mockT, "Hello World", "Hello!") {
--		t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"")
--	}
--	if NotContains(mockT, "Hello World", "Hello") {
--		t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"")
--	}
--
--}
--
--func TestDidPanic(t *testing.T) {
--
--	if funcDidPanic, _ := didPanic(func() {
--		panic("Panic!")
--	}); !funcDidPanic {
--		t.Error("didPanic should return true")
--	}
--
--	if funcDidPanic, _ := didPanic(func() {
--	}); funcDidPanic {
--		t.Error("didPanic should return false")
--	}
--
--}
--
--func TestPanics(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !Panics(mockT, func() {
--		panic("Panic!")
--	}) {
--		t.Error("Panics should return true")
--	}
--
--	if Panics(mockT, func() {
--	}) {
--		t.Error("Panics should return false")
--	}
--
--}
--
--func TestNotPanics(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	if !NotPanics(mockT, func() {
--	}) {
--		t.Error("NotPanics should return true")
--	}
--
--	if NotPanics(mockT, func() {
--		panic("Panic!")
--	}) {
--		t.Error("NotPanics should return false")
--	}
--
--}
--
--func TestEqual_Funcs(t *testing.T) {
--
--	type f func() int
--	var f1 f = func() int { return 1 }
--	var f2 f = func() int { return 2 }
--
--	var f1_copy f = f1
--
--	Equal(t, f1_copy, f1, "Funcs are the same and should be considered equal")
--	NotEqual(t, f1, f2, "f1 and f2 are different")
--
--}
--
--func TestNoError(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	// start with a nil error
--	var err error = nil
--
--	True(t, NoError(mockT, err), "NoError should return True for nil arg")
--
--	// now set an error
--	err = errors.New("Some error")
--
--	False(t, NoError(mockT, err), "NoError with error should return False")
--
--}
--
--func TestError(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	// start with a nil error
--	var err error = nil
--
--	False(t, Error(mockT, err), "Error should return False for nil arg")
--
--	// now set an error
--	err = errors.New("Some error")
--
--	True(t, Error(mockT, err), "Error with error should return True")
--
--}
--
--func TestEqualError(t *testing.T) {
--	mockT := new(testing.T)
--
--	// start with a nil error
--	var err error = nil
--	False(t, EqualError(mockT, err, ""),
--		"EqualError should return false for nil arg")
--
--	// now set an error
--	err = errors.New("Some error")
--	False(t, EqualError(mockT, err, "Not some error"),
--		"EqualError should return false for different error string")
--	True(t, EqualError(mockT, err, "Some error"),
--		"EqualError should return true")
--}
--
--func Test_isEmpty(t *testing.T) {
--
--	True(t, isEmpty(""))
--	True(t, isEmpty(nil))
--	True(t, isEmpty([]string{}))
--	True(t, isEmpty(0))
--	True(t, isEmpty(false))
--	True(t, isEmpty(map[string]string{}))
--
--	False(t, isEmpty("something"))
--	False(t, isEmpty(errors.New("something")))
--	False(t, isEmpty([]string{"something"}))
--	False(t, isEmpty(1))
--	False(t, isEmpty(true))
--	False(t, isEmpty(map[string]string{"Hello": "World"}))
--
--}
--
--func TestEmpty(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	True(t, Empty(mockT, ""), "Empty string is empty")
--	True(t, Empty(mockT, nil), "Nil is empty")
--	True(t, Empty(mockT, []string{}), "Empty string array is empty")
--	True(t, Empty(mockT, 0), "Zero int value is empty")
--	True(t, Empty(mockT, false), "False value is empty")
--
--	False(t, Empty(mockT, "something"), "Non Empty string is not empty")
--	False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty")
--	False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty")
--	False(t, Empty(mockT, 1), "Non-zero int value is not empty")
--	False(t, Empty(mockT, true), "True value is not empty")
--
--}
--
--func TestNotEmpty(t *testing.T) {
--
--	mockT := new(testing.T)
--
--	False(t, NotEmpty(mockT, ""), "Empty string is empty")
--	False(t, NotEmpty(mockT, nil), "Nil is empty")
--	False(t, NotEmpty(mockT, []string{}), "Empty string array is empty")
--	False(t, NotEmpty(mockT, 0), "Zero int value is empty")
--	False(t, NotEmpty(mockT, false), "False value is empty")
--
--	True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty")
--	True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty")
--	True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty")
--	True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty")
--	True(t, NotEmpty(mockT, true), "True value is not empty")
--
--}
--
--func TestWithinDuration(t *testing.T) {
--
--	mockT := new(testing.T)
--	a := time.Now()
--	b := a.Add(10 * time.Second)
--
--	True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference")
--	True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference")
--
--	False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference")
--	False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference")
--
--	False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference")
--	False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference")
--
--	False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a -11s time difference")
--	False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a -11s time difference")
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
-deleted file mode 100644
-index 25f699b..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go
-+++ /dev/null
-@@ -1,74 +0,0 @@
--// A set of comprehensive testing tools for use with the normal Go testing system.
--//
--// Example Usage
--//
--// The following is a complete example using assert in a standard test function:
--//    import (
--//      "testing"
--//      "github.com/stretchr/testify/assert"
--//    )
--//
--//    func TestSomething(t *testing.T) {
--//
--//      var a string = "Hello"
--//      var b string = "Hello"
--//
--//      assert.Equal(t, a, b, "The two words should be the same.")
--//
--//    }
--//
--// Assertions
--//
--// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
--// All assertion functions take, as the first argument, the `*testing.T` object provided by the
--// testing framework. This allows the assertion funcs to write the failings and other details to
--// the correct place.
--//
--// Every assertion function also takes an optional string message as the final argument,
--// allowing custom error messages to be appended to the message the assertion method outputs.
--//
--// Here is an overview of the assert functions:
--//
--//    assert.Equal(t, expected, actual [, message [, format-args]])
--//
--//    assert.NotEqual(t, notExpected, actual [, message [, format-args]])
--//
--//    assert.True(t, actualBool [, message [, format-args]])
--//
--//    assert.False(t, actualBool [, message [, format-args]])
--//
--//    assert.Nil(t, actualObject [, message [, format-args]])
--//
--//    assert.NotNil(t, actualObject [, message [, format-args]])
--//
--//    assert.Empty(t, actualObject [, message [, format-args]])
--//
--//    assert.NotEmpty(t, actualObject [, message [, format-args]])
--//
--//    assert.Error(t, errorObject [, message [, format-args]])
--//
--//    assert.NoError(t, errorObject [, message [, format-args]])
--//
--//    assert.Implements(t, (*MyInterface)(nil), new(MyObject) [, message [, format-args]])
--//
--//    assert.IsType(t, expectedObject, actualObject [, message [, format-args]])
--//
--//    assert.Contains(t, string, substring [, message [, format-args]])
--//
--//    assert.NotContains(t, string, substring [, message [, format-args]])
--//
--//    assert.Panics(t, func(){
--//
--//	    // call code that should panic
--//
--//    } [, message [, format-args]])
--//
--//    assert.NotPanics(t, func(){
--//
--//	    // call code that should not panic
--//
--//    } [, message [, format-args]])
--//
--//    assert.WithinDuration(t, timeA, timeB, deltaTime [, message [, format-args]])
--
--package assert
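
The package comment above lists every assertion as accepting an optional `message [, format-args]` tail, but its worked example only shows the plain-message form. A small, illustrative sketch of the formatted variant (names invented; only calls documented above are used):

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"
    )

    func TestFormattedMessages(t *testing.T) {
    	want, got := "hello", "hello"

    	// plain message form
    	assert.Equal(t, want, got, "greeting mismatch")

    	// message plus format-args, expanded into the failure output if the assertion fails
    	assert.Equal(t, want, got, "greeting mismatch: want %q, got %q", want, got)
    	assert.Contains(t, got, "hell", "%q should contain %q", got, "hell")
    }
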
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
-deleted file mode 100644
-index da004d1..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go
-+++ /dev/null
-@@ -1,10 +0,0 @@
--package assert
--
--import (
--	"errors"
--)
--
--// AnError is an error instance useful for testing.  If the code under test does
--// not care about error specifics and only needs an error to return, this error
--// should be used to make the test code more readable.
--var AnError error = errors.New("assert.AnError general error for testing.")
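
A quick sketch of how the AnError value above is typically used (illustrative only; the failingStore type is invented): it stands in wherever a test only needs some error to come back.

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"
    )

    // failingStore is a hypothetical stub whose exact error value does not matter.
    type failingStore struct{}

    func (failingStore) Save(string) error {
    	return assert.AnError
    }

    func TestSaveFailure(t *testing.T) {
    	err := failingStore{}.Save("anything")

    	assert.Error(t, err)
    	assert.Equal(t, assert.AnError, err)
    }
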
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go
-deleted file mode 100644
-index 7d4e7b8..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/doc.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--// Provides a system by which it is possible to mock your objects and verify calls are happening as expected.
--//
--// Example Usage
--//
--// The mock package provides an object, Mock, that tracks activity on another object.  It is usually
--// embedded into a test object as shown below:
--//
--//   type MyTestObject struct {
--//     // add a Mock object instance
--//     mock.Mock
--//
--//     // other fields go here as normal
--//   }
--//
--// When implementing the methods of an interface, you wire your functions up
--// to call the Mock.Called(args...) method, and return the appropriate values.
--//
--// For example, to mock a method that saves the name and age of a person and returns
--// the year of their birth or an error, you might write this:
--//
--//     func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
--//       args := o.Mock.Called(firstname, lastname, age)
--//       return args.Int(0), args.Error(1)
--//     }
--//
--// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
--// index position. Given this argument list:
--//
--//     (12, true, "Something")
--//
--// You could read them out strongly typed like this:
--//
--//     args.Int(0)
--//     args.Bool(1)
--//     args.String(2)
--//
--// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
--//
--//     return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
--//
--// This may cause a panic if the object you are getting is nil (the type assertion will fail); in
--// those cases you should check for nil first.
--package mock
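
The package comment above shows the implementation side of a mock (wiring Mock.Called into the method) but not what the test itself looks like. A minimal sketch of the test side, reusing the SavePersonDetails shape from the comment above (the concrete names and expectation values are made up):

    package example

    import (
    	"testing"

    	"github.com/stretchr/testify/assert"
    	"github.com/stretchr/testify/mock"
    )

    // MyTestObject embeds mock.Mock as described in the package comment above.
    type MyTestObject struct {
    	mock.Mock
    }

    func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
    	args := o.Mock.Called(firstname, lastname, age)
    	return args.Int(0), args.Error(1)
    }

    func TestSavePersonDetails(t *testing.T) {
    	obj := new(MyTestObject)

    	// expect exactly one call with these arguments, returning (1815, nil)
    	obj.On("SavePersonDetails", "Ada", "Lovelace", 36).Return(1815, nil).Once()

    	year, err := obj.SavePersonDetails("Ada", "Lovelace", 36)

    	assert.NoError(t, err)
    	assert.Equal(t, 1815, year)
    	obj.AssertExpectations(t)
    }

Once the single expected call has been consumed, a second SavePersonDetails call would panic with the "Unexpected Method Call" message produced by Called in the mock.go hunk below.
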
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go
-deleted file mode 100644
-index 4320e6f..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock.go
-+++ /dev/null
-@@ -1,505 +0,0 @@
--package mock
--
--import (
--	"fmt"
--	"github.com/stretchr/objx"
--	"github.com/stretchr/testify/assert"
--	"reflect"
--	"runtime"
--	"strings"
--)
--
--// TestingT is an interface wrapper around *testing.T
--type TestingT interface {
--	Logf(format string, args ...interface{})
--	Errorf(format string, args ...interface{})
--}
--
--/*
--	Call
--*/
--
--// Call represents a method call and is used for setting expectations,
--// as well as recording activity.
--type Call struct {
--
--	// The name of the method that was or will be called.
--	Method string
--
--	// Holds the arguments of the method.
--	Arguments Arguments
--
--	// Holds the arguments that should be returned when
--	// this method is called.
--	ReturnArguments Arguments
--
--	// The number of times to return the return arguments when setting
--	// expectations. 0 means to always return the value.
--	Repeatability int
--}
--
--// Mock is the workhorse used to track activity on another object.
--// For an example of its usage, refer to the "Example Usage" section at the top of this document.
--type Mock struct {
--
--	// The method name that is currently
--	// being referred to by the On method.
--	onMethodName string
--
--	// An array of the arguments that are
--	// currently being referred to by the On method.
--	onMethodArguments Arguments
--
--	// Represents the calls that are expected of
--	// an object.
--	ExpectedCalls []Call
--
--	// Holds the calls that were made to this mocked object.
--	Calls []Call
--
--	// TestData holds any data that might be useful for testing.  Testify ignores
--	// this data completely, allowing you to do whatever you like with it.
--	testData objx.Map
--}
--
--// TestData holds any data that might be useful for testing.  Testify ignores
--// this data completely, allowing you to do whatever you like with it.
--func (m *Mock) TestData() objx.Map {
--
--	if m.testData == nil {
--		m.testData = make(objx.Map)
--	}
--
--	return m.testData
--}
--
--/*
--	Setting expectations
--*/
--
--// On starts a description of an expectation of the specified method
--// being called.
--//
--//     Mock.On("MyMethod", arg1, arg2)
--func (m *Mock) On(methodName string, arguments ...interface{}) *Mock {
--	m.onMethodName = methodName
--	m.onMethodArguments = arguments
--	return m
--}
--
--// Return finishes a description of an expectation of the method (and arguments)
--// specified in the most recent On method call.
--//
--//     Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2)
--func (m *Mock) Return(returnArguments ...interface{}) *Mock {
--	m.ExpectedCalls = append(m.ExpectedCalls, Call{m.onMethodName, m.onMethodArguments, returnArguments, 0})
--	return m
--}
--
--// Once indicates that the mock should only return the value once.
--//
--//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
--func (m *Mock) Once() {
--	m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 1
--}
--
--// Twice indicates that the mock should only return the value twice.
--//
--//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
--func (m *Mock) Twice() {
--	m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = 2
--}
--
--// Times indicates that the mock should only return the indicated number
--// of times.
--//
--//    Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
--func (m *Mock) Times(i int) {
--	m.ExpectedCalls[len(m.ExpectedCalls)-1].Repeatability = i
--}
--
--/*
--	Recording and responding to activity
--*/
--
--func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
--	for i, call := range m.ExpectedCalls {
--		if call.Method == method && call.Repeatability > -1 {
--
--			_, diffCount := call.Arguments.Diff(arguments)
--			if diffCount == 0 {
--				return i, &call
--			}
--
--		}
--	}
--	return -1, nil
--}
--
--func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
--
--	diffCount := 0
--	var closestCall *Call = nil
--
--	for _, call := range m.ExpectedCalls {
--		if call.Method == method {
--
--			_, tempDiffCount := call.Arguments.Diff(arguments)
--			if tempDiffCount < diffCount || diffCount == 0 {
--				diffCount = tempDiffCount
--				// copy the loop variable so the stored pointer is not overwritten on the next iteration
--				callCopy := call
--				closestCall = &callCopy
--			}
--
--		}
--	}
--
--	if closestCall == nil {
--		return false, nil
--	}
--
--	return true, closestCall
--}
--
--func callString(method string, arguments Arguments, includeArgumentValues bool) string {
--
--	var argValsString string = ""
--	if includeArgumentValues {
--		var argVals []string
--		for argIndex, arg := range arguments {
--			argVals = append(argVals, fmt.Sprintf("%d: %v", argIndex, arg))
--		}
--		argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
--	}
--
--	return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
--}
--
--// Called tells the mock object that a method has been called, and gets an array
--// of arguments to return.  Panics if the call is unexpected (i.e. not preceded by
--// appropriate .On(...).Return(...) calls).
--func (m *Mock) Called(arguments ...interface{}) Arguments {
--
--	// get the calling function's name
--	pc, _, _, ok := runtime.Caller(1)
--	if !ok {
--		panic("Couldn't get the caller information")
--	}
--	functionPath := runtime.FuncForPC(pc).Name()
--	parts := strings.Split(functionPath, ".")
--	functionName := parts[len(parts)-1]
--
--	found, call := m.findExpectedCall(functionName, arguments...)
--
--	switch {
--	case found < 0:
--		// we have to fail here - because we don't know what to do
--		// as the return arguments.  This is because:
--		//
--		//   a) this is a totally unexpected call to this method,
--		//   b) the arguments are not what was expected, or
--		//   c) the developer has forgotten to add an accompanying On...Return pair.
--
--		closestFound, closestCall := m.findClosestCall(functionName, arguments...)
--
--		if closestFound {
--			panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true)))
--		} else {
--			panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo()))
--		}
--	case call.Repeatability == 1:
--		call.Repeatability = -1
--		m.ExpectedCalls[found] = *call
--	case call.Repeatability > 1:
--		call.Repeatability -= 1
--		m.ExpectedCalls[found] = *call
--	}
--
--	// add the call
--	m.Calls = append(m.Calls, Call{functionName, arguments, make([]interface{}, 0), 0})
--
--	return call.ReturnArguments
--
--}
--
--/*
--	Assertions
--*/
--
--// AssertExpectationsForObjects asserts that everything specified with On and Return
--// of the specified objects was in fact called as expected.
--//
--// Calls may have occurred in any order.
--func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
--	var success bool = true
--	for _, obj := range testObjects {
--		mockObj := obj.(Mock)
--		success = success && mockObj.AssertExpectations(t)
--	}
--	return success
--}
--
--// AssertExpectations asserts that everything specified with On and Return was
--// in fact called as expected.  Calls may have occurred in any order.
--func (m *Mock) AssertExpectations(t TestingT) bool {
--
--	var somethingMissing bool = false
--	var failedExpectations int = 0
--
--	// iterate through each expectation
--	for _, expectedCall := range m.ExpectedCalls {
--		switch {
--		case !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments):
--			somethingMissing = true
--			failedExpectations++
--			t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
--		case expectedCall.Repeatability > 0:
--			somethingMissing = true
--			failedExpectations++
--		default:
--			t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
--		}
--	}
--
--	if somethingMissing {
--		t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(m.ExpectedCalls)-failedExpectations, len(m.ExpectedCalls), failedExpectations, assert.CallerInfo())
--	}
--
--	return !somethingMissing
--}
--
--// AssertNumberOfCalls asserts that the method was called expectedCalls times.
--func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
--	var actualCalls int = 0
--	for _, call := range m.Calls {
--		if call.Method == methodName {
--			actualCalls++
--		}
--	}
--	return assert.Equal(t, actualCalls, expectedCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
--}
--
--// AssertCalled asserts that the method was called.
--func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
--	if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) {
--		t.Logf("%s", m.ExpectedCalls)
--		return false
--	}
--	return true
--}
--
--// AssertNotCalled asserts that the method was not called.
--func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
--	if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) {
--		t.Logf("%s", m.ExpectedCalls)
--		return false
--	}
--	return true
--}
--
--func (m *Mock) methodWasCalled(methodName string, arguments []interface{}) bool {
--	for _, call := range m.Calls {
--		if call.Method == methodName {
--
--			_, differences := call.Arguments.Diff(arguments)
--
--			if differences == 0 {
--				// found the expected call
--				return true
--			}
--
--		}
--	}
--	// we didn't find the expected call
--	return false
--}
--
--/*
--	Arguments
--*/
--
--// Arguments holds an array of method arguments or return values.
--type Arguments []interface{}
--
--const (
--	// The "any" argument.  Used in Diff and Assert when
--	// the argument being tested shouldn't be taken into consideration.
--	Anything string = "mock.Anything"
--)
--
--// AnythingOfTypeArgument is a string that contains the type of an argument
--// for use when type checking.  Used in Diff and Assert.
--type AnythingOfTypeArgument string
--
--// AnythingOfType returns an AnythingOfTypeArgument object containing the
--// name of the type to check for.  Used in Diff and Assert.
--//
--// For example:
--//	Assert(t, AnythingOfType("string"), AnythingOfType("int"))
--func AnythingOfType(t string) AnythingOfTypeArgument {
--	return AnythingOfTypeArgument(t)
--}
--
--// Get returns the argument at the specified index.
--func (args Arguments) Get(index int) interface{} {
--	if index+1 > len(args) {
--		panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
--	}
--	return args[index]
--}
--
--// Is gets whether the objects match the arguments specified.
--func (args Arguments) Is(objects ...interface{}) bool {
--	for i, obj := range args {
--		if obj != objects[i] {
--			return false
--		}
--	}
--	return true
--}
--
--// Diff gets a string describing the differences between the arguments
--// and the specified objects.
--//
--// Returns the diff string and number of differences found.
--func (args Arguments) Diff(objects []interface{}) (string, int) {
--
--	var output string = "\n"
--	var differences int
--
--	var maxArgCount int = len(args)
--	if len(objects) > maxArgCount {
--		maxArgCount = len(objects)
--	}
--
--	for i := 0; i < maxArgCount; i++ {
--		var actual, expected interface{}
--
--		if len(objects) <= i {
--			actual = "(Missing)"
--		} else {
--			actual = objects[i]
--		}
--
--		if len(args) <= i {
--			expected = "(Missing)"
--		} else {
--			expected = args[i]
--		}
--
--		if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
--
--			// type checking
--			if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
--				// not match
--				differences++
--				output = fmt.Sprintf("%s\t%d: \u274C  type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
--			}
--
--		} else {
--
--			// normal checking
--
--			if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
--				// match
--				output = fmt.Sprintf("%s\t%d: \u2705  %s == %s\n", output, i, actual, expected)
--			} else {
--				// not match
--				differences++
--				output = fmt.Sprintf("%s\t%d: \u274C  %s != %s\n", output, i, actual, expected)
--			}
--		}
--
--	}
--
--	if differences == 0 {
--		return "No differences.", differences
--	}
--
--	return output, differences
--
--}
--
--// Assert compares the arguments with the specified objects and fails if
--// they do not exactly match.
--func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
--
--	// get the differences
--	diff, diffCount := args.Diff(objects)
--
--	if diffCount == 0 {
--		return true
--	}
--
--	// there are differences... report them...
--	t.Logf(diff)
--	t.Errorf("%sArguments do not match.", assert.CallerInfo())
--
--	return false
--
--}
--
--// String gets the argument at the specified index. Panics if there is no argument, or
--// if the argument is of the wrong type.
--//
--// If no index is provided, String() returns a complete string representation
--// of the arguments.
--func (args Arguments) String(indexOrNil ...int) string {
--
--	if len(indexOrNil) == 0 {
--		// normal String() method - return a string representation of the args
--		var argsStr []string
--		for _, arg := range args {
--			argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
--		}
--		return strings.Join(argsStr, ",")
--	} else if len(indexOrNil) == 1 {
--		// Index has been specified - get the argument at that index
--		var index int = indexOrNil[0]
--		var s string
--		var ok bool
--		if s, ok = args.Get(index).(string); !ok {
--			panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
--		}
--		return s
--	}
--
--	panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String.  Must be 0 or 1, not %d", len(indexOrNil)))
--
--}
--
--// Int gets the argument at the specified index. Panics if there is no argument, or
--// if the argument is of the wrong type.
--func (args Arguments) Int(index int) int {
--	var s int
--	var ok bool
--	if s, ok = args.Get(index).(int); !ok {
--		panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
--	}
--	return s
--}
--
--// Error gets the argument at the specified index. Panics if there is no argument, or
--// if the argument is of the wrong type.
--func (args Arguments) Error(index int) error {
--	obj := args.Get(index)
--	var s error
--	var ok bool
--	if obj == nil {
--		return nil
--	}
--	if s, ok = obj.(error); !ok {
--		panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
--	}
--	return s
--}
--
--// Bool gets the argument at the specified index. Panics if there is no argument, or
--// if the argument is of the wrong type.
--func (args Arguments) Bool(index int) bool {
--	var s bool
--	var ok bool
--	if s, ok = args.Get(index).(bool); !ok {
--		panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
--	}
--	return s
--}
-diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go b/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go
-deleted file mode 100644
-index cd06451..0000000
---- a/Godeps/_workspace/src/github.com/stretchr/testify/mock/mock_test.go
-+++ /dev/null
-@@ -1,657 +0,0 @@
--package mock
--
--import (
--	"errors"
--	"github.com/stretchr/testify/assert"
--	"testing"
--)
--
--/*
--	Test objects
--*/
--
--// ExampleInterface represents an example interface.
--type ExampleInterface interface {
--	TheExampleMethod(a, b, c int) (int, error)
--}
--
--// TestExampleImplementation is a test implementation of ExampleInterface
--type TestExampleImplementation struct {
--	Mock
--}
--
--func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) {
--	args := i.Mock.Called(a, b, c)
--	return args.Int(0), errors.New("Whoops")
--}
--
--func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) {
--	i.Mock.Called(yesorno)
--}
--
--type ExampleType struct{}
--
--func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error {
--	args := i.Mock.Called(et)
--	return args.Error(0)
--}
--
--/*
--	Mock
--*/
--
--func Test_Mock_TestData(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	if assert.NotNil(t, mockedService.TestData()) {
--
--		mockedService.TestData().Set("something", 123)
--		assert.Equal(t, 123, mockedService.TestData().Get("something").Data())
--
--	}
--
--}
--
--func Test_Mock_On(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	assert.Equal(t, mockedService.Mock.On("TheExampleMethod"), &mockedService.Mock)
--	assert.Equal(t, "TheExampleMethod", mockedService.Mock.onMethodName)
--
--}
--
--func Test_Mock_On_WithArgs(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	assert.Equal(t, mockedService.Mock.On("TheExampleMethod", 1, 2, 3), &mockedService.Mock)
--	assert.Equal(t, "TheExampleMethod", mockedService.Mock.onMethodName)
--	assert.Equal(t, 1, mockedService.Mock.onMethodArguments[0])
--	assert.Equal(t, 2, mockedService.Mock.onMethodArguments[1])
--	assert.Equal(t, 3, mockedService.Mock.onMethodArguments[2])
--
--}
--
--func Test_Mock_Return(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true), &mockedService.Mock)
--
--	// ensure the call was created
--	if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) {
--		call := mockedService.Mock.ExpectedCalls[0]
--
--		assert.Equal(t, "TheExampleMethod", call.Method)
--		assert.Equal(t, "A", call.Arguments[0])
--		assert.Equal(t, "B", call.Arguments[1])
--		assert.Equal(t, true, call.Arguments[2])
--		assert.Equal(t, 1, call.ReturnArguments[0])
--		assert.Equal(t, "two", call.ReturnArguments[1])
--		assert.Equal(t, true, call.ReturnArguments[2])
--		assert.Equal(t, 0, call.Repeatability)
--
--	}
--
--}
--
--func Test_Mock_Return_Once(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Once()
--
--	// ensure the call was created
--	if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) {
--		call := mockedService.Mock.ExpectedCalls[0]
--
--		assert.Equal(t, "TheExampleMethod", call.Method)
--		assert.Equal(t, "A", call.Arguments[0])
--		assert.Equal(t, "B", call.Arguments[1])
--		assert.Equal(t, true, call.Arguments[2])
--		assert.Equal(t, 1, call.ReturnArguments[0])
--		assert.Equal(t, "two", call.ReturnArguments[1])
--		assert.Equal(t, true, call.ReturnArguments[2])
--		assert.Equal(t, 1, call.Repeatability)
--
--	}
--
--}
--
--func Test_Mock_Return_Twice(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Twice()
--
--	// ensure the call was created
--	if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) {
--		call := mockedService.Mock.ExpectedCalls[0]
--
--		assert.Equal(t, "TheExampleMethod", call.Method)
--		assert.Equal(t, "A", call.Arguments[0])
--		assert.Equal(t, "B", call.Arguments[1])
--		assert.Equal(t, true, call.Arguments[2])
--		assert.Equal(t, 1, call.ReturnArguments[0])
--		assert.Equal(t, "two", call.ReturnArguments[1])
--		assert.Equal(t, true, call.ReturnArguments[2])
--		assert.Equal(t, 2, call.Repeatability)
--
--	}
--
--}
--
--func Test_Mock_Return_Times(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(1, "two", true).Times(5)
--
--	// ensure the call was created
--	if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) {
--		call := mockedService.Mock.ExpectedCalls[0]
--
--		assert.Equal(t, "TheExampleMethod", call.Method)
--		assert.Equal(t, "A", call.Arguments[0])
--		assert.Equal(t, "B", call.Arguments[1])
--		assert.Equal(t, true, call.Arguments[2])
--		assert.Equal(t, 1, call.ReturnArguments[0])
--		assert.Equal(t, "two", call.ReturnArguments[1])
--		assert.Equal(t, true, call.ReturnArguments[2])
--		assert.Equal(t, 5, call.Repeatability)
--
--	}
--
--}
--
--func Test_Mock_Return_Nothing(t *testing.T) {
--
--	// make a test impl object
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	assert.Equal(t, mockedService.Mock.On("TheExampleMethod", "A", "B", true).Return(), &mockedService.Mock)
--
--	// ensure the call was created
--	if assert.Equal(t, 1, len(mockedService.Mock.ExpectedCalls)) {
--		call := mockedService.Mock.ExpectedCalls[0]
--
--		assert.Equal(t, "TheExampleMethod", call.Method)
--		assert.Equal(t, "A", call.Arguments[0])
--		assert.Equal(t, "B", call.Arguments[1])
--		assert.Equal(t, true, call.Arguments[2])
--		assert.Equal(t, 0, len(call.ReturnArguments))
--
--	}
--
--}
--
--func Test_Mock_findExpectedCall(t *testing.T) {
--
--	m := new(Mock)
--	m.On("One", 1).Return("one")
--	m.On("Two", 2).Return("two")
--	m.On("Two", 3).Return("three")
--
--	f, c := m.findExpectedCall("Two", 3)
--
--	if assert.Equal(t, 2, f) {
--		if assert.NotNil(t, c) {
--			assert.Equal(t, "Two", c.Method)
--			assert.Equal(t, 3, c.Arguments[0])
--			assert.Equal(t, "three", c.ReturnArguments[0])
--		}
--	}
--
--}
--
--func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) {
--
--	m := new(Mock)
--	m.On("One", 1).Return("one")
--	m.On("Two", 2).Return("two")
--	m.On("Two", 3).Return("three")
--
--	f, _ := m.findExpectedCall("Two")
--
--	assert.Equal(t, -1, f)
--
--}
--
--func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) {
--
--	m := new(Mock)
--	m.On("One", 1).Return("one")
--	m.On("Two", 2).Return("two").Once()
--	m.On("Two", 3).Return("three").Twice()
--	m.On("Two", 3).Return("three").Times(8)
--
--	f, c := m.findExpectedCall("Two", 3)
--
--	if assert.Equal(t, 2, f) {
--		if assert.NotNil(t, c) {
--			assert.Equal(t, "Two", c.Method)
--			assert.Equal(t, 3, c.Arguments[0])
--			assert.Equal(t, "three", c.ReturnArguments[0])
--		}
--	}
--
--}
--
--func Test_callString(t *testing.T) {
--
--	assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false))
--
--}
--
--func Test_Mock_Called(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true)
--
--	returnArguments := mockedService.Mock.Called(1, 2, 3)
--
--	if assert.Equal(t, 1, len(mockedService.Mock.Calls)) {
--		assert.Equal(t, "Test_Mock_Called", mockedService.Mock.Calls[0].Method)
--		assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0])
--		assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1])
--		assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2])
--	}
--
--	if assert.Equal(t, 3, len(returnArguments)) {
--		assert.Equal(t, 5, returnArguments[0])
--		assert.Equal(t, "6", returnArguments[1])
--		assert.Equal(t, true, returnArguments[2])
--	}
--
--}
--
--func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(5, "6", true).Once()
--	mockedService.Mock.On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3).Return(-1, "hi", false)
--
--	returnArguments1 := mockedService.Mock.Called(1, 2, 3)
--	returnArguments2 := mockedService.Mock.Called(1, 2, 3)
--
--	if assert.Equal(t, 2, len(mockedService.Mock.Calls)) {
--		assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Mock.Calls[0].Method)
--		assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0])
--		assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1])
--		assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2])
--
--		assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Mock.Calls[1].Method)
--		assert.Equal(t, 1, mockedService.Mock.Calls[1].Arguments[0])
--		assert.Equal(t, 2, mockedService.Mock.Calls[1].Arguments[1])
--		assert.Equal(t, 3, mockedService.Mock.Calls[1].Arguments[2])
--	}
--
--	if assert.Equal(t, 3, len(returnArguments1)) {
--		assert.Equal(t, 5, returnArguments1[0])
--		assert.Equal(t, "6", returnArguments1[1])
--		assert.Equal(t, true, returnArguments1[2])
--	}
--
--	if assert.Equal(t, 3, len(returnArguments2)) {
--		assert.Equal(t, -1, returnArguments2[0])
--		assert.Equal(t, "hi", returnArguments2[1])
--		assert.Equal(t, false, returnArguments2[2])
--	}
--
--}
--
--func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4)
--
--	mockedService.TheExampleMethod(1, 2, 3)
--	mockedService.TheExampleMethod(1, 2, 3)
--	mockedService.TheExampleMethod(1, 2, 3)
--	mockedService.TheExampleMethod(1, 2, 3)
--	assert.Panics(t, func() {
--		mockedService.TheExampleMethod(1, 2, 3)
--	})
--
--}
--
--func Test_Mock_Called_Unexpected(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	// make sure it panics if no expectation was made
--	assert.Panics(t, func() {
--		mockedService.Mock.Called(1, 2, 3)
--	}, "Calling unexpected method should panic")
--
--}
--
--func Test_AssertExpectationsForObjects_Helper(t *testing.T) {
--
--	var mockedService1 *TestExampleImplementation = new(TestExampleImplementation)
--	var mockedService2 *TestExampleImplementation = new(TestExampleImplementation)
--	var mockedService3 *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService1.Mock.On("Test_AssertExpectationsForObjects_Helper", 1).Return()
--	mockedService2.Mock.On("Test_AssertExpectationsForObjects_Helper", 2).Return()
--	mockedService3.Mock.On("Test_AssertExpectationsForObjects_Helper", 3).Return()
--
--	mockedService1.Called(1)
--	mockedService2.Called(2)
--	mockedService3.Called(3)
--
--	assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
--
--}
--
--func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) {
--
--	var mockedService1 *TestExampleImplementation = new(TestExampleImplementation)
--	var mockedService2 *TestExampleImplementation = new(TestExampleImplementation)
--	var mockedService3 *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService1.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return()
--	mockedService2.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return()
--	mockedService3.Mock.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return()
--
--	mockedService1.Called(1)
--	mockedService3.Called(3)
--
--	tt := new(testing.T)
--	assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock))
--
--}
--
--func Test_Mock_AssertExpectations(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7)
--
--	tt := new(testing.T)
--	assert.False(t, mockedService.AssertExpectations(tt))
--
--	// make the call now
--	mockedService.Mock.Called(1, 2, 3)
--
--	// now assert expectations
--	assert.True(t, mockedService.AssertExpectations(tt))
--
--}
--
--func Test_Mock_AssertExpectationsCustomType(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once()
--
--	tt := new(testing.T)
--	assert.False(t, mockedService.AssertExpectations(tt))
--
--	// make the call now
--	mockedService.TheExampleMethod3(&ExampleType{})
--
--	// now assert expectations
--	assert.True(t, mockedService.AssertExpectations(tt))
--
--}
--
--func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice()
--
--	tt := new(testing.T)
--	assert.False(t, mockedService.AssertExpectations(tt))
--
--	// make the call now
--	mockedService.Mock.Called(1, 2, 3)
--
--	assert.False(t, mockedService.AssertExpectations(tt))
--
--	mockedService.Mock.Called(1, 2, 3)
--
--	// now assert expectations
--	assert.True(t, mockedService.AssertExpectations(tt))
--
--}
--
--func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7)
--	mockedService.Mock.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7)
--
--	args1 := mockedService.Mock.Called(1, 2, 3)
--	assert.Equal(t, 5, args1.Int(0))
--	assert.Equal(t, 6, args1.Int(1))
--	assert.Equal(t, 7, args1.Int(2))
--
--	args2 := mockedService.Mock.Called(4, 5, 6)
--	assert.Equal(t, 5, args2.Int(0))
--	assert.Equal(t, 6, args2.Int(1))
--	assert.Equal(t, 7, args2.Int(2))
--
--}
--
--func Test_Mock_AssertNumberOfCalls(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7)
--
--	mockedService.Mock.Called(1, 2, 3)
--	assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1))
--
--	mockedService.Mock.Called(1, 2, 3)
--	assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2))
--
--}
--
--func Test_Mock_AssertCalled(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7)
--
--	mockedService.Mock.Called(1, 2, 3)
--
--	assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3))
--
--}
--
--func Test_Mock_AssertCalled_WithArguments(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7)
--
--	mockedService.Mock.Called(1, 2, 3)
--
--	tt := new(testing.T)
--	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3))
--	assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4))
--
--}
--
--func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once()
--	mockedService.Mock.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once()
--
--	mockedService.Mock.Called(1, 2, 3)
--	mockedService.Mock.Called(2, 3, 4)
--
--	tt := new(testing.T)
--	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3))
--	assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4))
--	assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5))
--
--}
--
--func Test_Mock_AssertNotCalled(t *testing.T) {
--
--	var mockedService *TestExampleImplementation = new(TestExampleImplementation)
--
--	mockedService.Mock.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7)
--
--	mockedService.Mock.Called(1, 2, 3)
--
--	assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled"))
--
--}
--
--/*
--	Arguments helper methods
--*/
--func Test_Arguments_Get(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--
--	assert.Equal(t, "string", args.Get(0).(string))
--	assert.Equal(t, 123, args.Get(1).(int))
--	assert.Equal(t, true, args.Get(2).(bool))
--
--}
--
--func Test_Arguments_Is(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--
--	assert.True(t, args.Is("string", 123, true))
--	assert.False(t, args.Is("wrong", 456, false))
--
--}
--
--func Test_Arguments_Diff(t *testing.T) {
--
--	var args Arguments = []interface{}{"Hello World", 123, true}
--	var diff string
--	var count int
--	diff, count = args.Diff([]interface{}{"Hello World", 456, "false"})
--
--	assert.Equal(t, 2, count)
--	assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`)
--	assert.Contains(t, diff, `false != %!s(bool=true)`)
--
--}
--
--func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	var diff string
--	var count int
--	diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"})
--
--	assert.Equal(t, 3, count)
--	assert.Contains(t, diff, `extra != (Missing)`)
--
--}
--
--func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	var count int
--	_, count = args.Diff([]interface{}{"string", Anything, true})
--
--	assert.Equal(t, 0, count)
--
--}
--
--func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", Anything, true}
--	var count int
--	_, count = args.Diff([]interface{}{"string", 123, true})
--
--	assert.Equal(t, 0, count)
--
--}
--
--func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", AnythingOfType("int"), true}
--	var count int
--	_, count = args.Diff([]interface{}{"string", 123, true})
--
--	assert.Equal(t, 0, count)
--
--}
--
--func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", AnythingOfType("string"), true}
--	var count int
--	var diff string
--	diff, count = args.Diff([]interface{}{"string", 123, true})
--
--	assert.Equal(t, 1, count)
--	assert.Contains(t, diff, `string != type int - %!s(int=123)`)
--
--}
--
--func Test_Arguments_Assert(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--
--	assert.True(t, args.Assert(t, "string", 123, true))
--
--}
--
--func Test_Arguments_String_Representation(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	assert.Equal(t, `string,int,bool`, args.String())
--
--}
--
--func Test_Arguments_String(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	assert.Equal(t, "string", args.String(0))
--
--}
--
--func Test_Arguments_Error(t *testing.T) {
--
--	var err error = errors.New("An Error")
--	var args Arguments = []interface{}{"string", 123, true, err}
--	assert.Equal(t, err, args.Error(3))
--
--}
--
--func Test_Arguments_Error_Nil(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true, nil}
--	assert.Equal(t, nil, args.Error(3))
--
--}
--
--func Test_Arguments_Int(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	assert.Equal(t, 123, args.Int(1))
--
--}
--
--func Test_Arguments_Bool(t *testing.T) {
--
--	var args Arguments = []interface{}{"string", 123, true}
--	assert.Equal(t, true, args.Bool(2))
--
--}
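
(A rough sketch of how the mock API exercised in the deleted tests above is consumed by callers; the greeter type and its Greet method are invented for illustration, and only the testify calls themselves are the library's own API.)

package greeter_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// mockGreeter embeds mock.Mock so that calls can be recorded and asserted on.
type mockGreeter struct {
	mock.Mock
}

// Greet forwards its argument to the mock and returns the stubbed values.
func (m *mockGreeter) Greet(id int) (string, error) {
	args := m.Mock.Called(id)
	return args.String(0), args.Error(1)
}

func TestGreet(t *testing.T) {
	g := new(mockGreeter)

	// Expect exactly one call with argument 7 and stub its return values.
	g.Mock.On("Greet", 7).Return("hello", nil).Once()

	msg, err := g.Greet(7)

	assert.NoError(t, err)
	assert.Equal(t, "hello", msg)
	g.Mock.AssertExpectations(t)
}
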
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE
-deleted file mode 100644
-index 968b453..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE
-+++ /dev/null
-@@ -1,14 +0,0 @@
--Copyright (c) 2013 Vaughan Newton
--
--Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
--documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
--rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
--persons to whom the Software is furnished to do so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
--Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
--WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
--COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
--OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md b/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md
-deleted file mode 100644
-index d5cd4e7..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md
-+++ /dev/null
-@@ -1,70 +0,0 @@
--go-ini
--======
--
--INI parsing library for Go (golang).
--
--View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
--
--Usage
-------
--
--Parse an INI file:
--
--```go
--import "github.com/vaughan0/go-ini"
--
--file, err := ini.LoadFile("myfile.ini")
--```
--
--Get data from the parsed file:
--
--```go
--name, ok := file.Get("person", "name")
--if !ok {
--  panic("'name' variable missing from 'person' section")
--}
--```
--
--Iterate through values in a section:
--
--```go
--for key, value := range file["mysection"] {
--  fmt.Printf("%s => %s\n", key, value)
--}
--```
--
--Iterate through sections in a file:
--
--```go
--for name, section := range file {
--  fmt.Printf("Section name: %s\n", name)
--}
--```
--
--File Format
-------------
--
--INI files are parsed by go-ini line-by-line. Each line may be one of the following:
--
--  * A section definition: [section-name]
--  * A property: key = value
--  * A comment: #blahblah _or_ ;blahblah
--  * Blank. The line will be ignored.
--
--Properties defined before any section headers are placed in the default section, which has
--the empty string as its key.
--
--Example:
--
--```ini
--# I am a comment
--; So am I!
--
--[apples]
--colour = red or green
--shape = applish
--
--[oranges]
--shape = square
--colour = blue
--```
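
(The README snippets above compose into the following self-contained sketch; "myfile.ini" and the person/name lookup are just the README's running example, with error handling kept minimal.)

package main

import (
	"fmt"

	"github.com/vaughan0/go-ini"
)

func main() {
	// Parse an INI file from disk.
	file, err := ini.LoadFile("myfile.ini")
	if err != nil {
		panic(err)
	}

	// Look up one key; ok reports whether it was present.
	name, ok := file.Get("person", "name")
	if !ok {
		panic("'name' variable missing from 'person' section")
	}
	fmt.Println("name =", name)

	// Walk every section and its key/value pairs.
	for section, kv := range file {
		for key, value := range kv {
			fmt.Printf("[%s] %s => %s\n", section, key, value)
		}
	}
}
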
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go
-deleted file mode 100644
-index 81aeb32..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go
-+++ /dev/null
-@@ -1,123 +0,0 @@
--// Package ini provides functions for parsing INI configuration files.
--package ini
--
--import (
--	"bufio"
--	"fmt"
--	"io"
--	"os"
--	"regexp"
--	"strings"
--)
--
--var (
--	sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
--	assignRegex  = regexp.MustCompile(`^([^=]+)=(.*)$`)
--)
--
--// ErrSyntax is returned when there is a syntax error in an INI file.
--type ErrSyntax struct {
--	Line   int
--	Source string // The contents of the erroneous line, without leading or trailing whitespace
--}
--
--func (e ErrSyntax) Error() string {
--	return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
--}
--
--// A File represents a parsed INI file.
--type File map[string]Section
--
--// A Section represents a single section of an INI file.
--type Section map[string]string
--
--// Returns a named Section. A Section will be created if one does not already exist for the given name.
--func (f File) Section(name string) Section {
--	section := f[name]
--	if section == nil {
--		section = make(Section)
--		f[name] = section
--	}
--	return section
--}
--
--// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
--func (f File) Get(section, key string) (value string, ok bool) {
--	if s := f[section]; s != nil {
--		value, ok = s[key]
--	}
--	return
--}
--
--// Loads INI data from a reader and stores the data in the File.
--func (f File) Load(in io.Reader) (err error) {
--	bufin, ok := in.(*bufio.Reader)
--	if !ok {
--		bufin = bufio.NewReader(in)
--	}
--	return parseFile(bufin, f)
--}
--
--// Loads INI data from a named file and stores the data in the File.
--func (f File) LoadFile(file string) (err error) {
--	in, err := os.Open(file)
--	if err != nil {
--		return
--	}
--	defer in.Close()
--	return f.Load(in)
--}
--
--func parseFile(in *bufio.Reader, file File) (err error) {
--	section := ""
--	lineNum := 0
--	for done := false; !done; {
--		var line string
--		if line, err = in.ReadString('\n'); err != nil {
--			if err == io.EOF {
--				done = true
--			} else {
--				return
--			}
--		}
--		lineNum++
--		line = strings.TrimSpace(line)
--		if len(line) == 0 {
--			// Skip blank lines
--			continue
--		}
--		if line[0] == ';' || line[0] == '#' {
--			// Skip comments
--			continue
--		}
--
--		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
--			key, val := groups[1], groups[2]
--			key, val = strings.TrimSpace(key), strings.TrimSpace(val)
--			file.Section(section)[key] = val
--		} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
--			name := strings.TrimSpace(groups[1])
--			section = name
--			// Create the section if it does not exist
--			file.Section(section)
--		} else {
--			return ErrSyntax{lineNum, line}
--		}
--
--	}
--	return nil
--}
--
--// Loads and returns a File from a reader.
--func Load(in io.Reader) (File, error) {
--	file := make(File)
--	err := file.Load(in)
--	return file, err
--}
--
--// Loads and returns an INI File from a file on disk.
--func LoadFile(filename string) (File, error) {
--	file := make(File)
--	err := file.LoadFile(filename)
--	return file, err
--}
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
-deleted file mode 100644
-index 38a6f00..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
-+++ /dev/null
-@@ -1,43 +0,0 @@
--package ini
--
--import (
--	"reflect"
--	"syscall"
--	"testing"
--)
--
--func TestLoadFile(t *testing.T) {
--	originalOpenFiles := numFilesOpen(t)
--
--	file, err := LoadFile("test.ini")
--	if err != nil {
--		t.Fatal(err)
--	}
--
--	if originalOpenFiles != numFilesOpen(t) {
--		t.Error("test.ini not closed")
--	}
--
--	if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
--		t.Error("file not read correctly")
--	}
--}
--
--func numFilesOpen(t *testing.T) (num uint64) {
--	var rlimit syscall.Rlimit
--	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
--	if err != nil {
--		t.Fatal(err)
--	}
--	maxFds := int(rlimit.Cur)
--
--	var stat syscall.Stat_t
--	for i := 0; i < maxFds; i++ {
--		if syscall.Fstat(i, &stat) == nil {
--			num++
--		} else {
--			return
--		}
--	}
--	return
--}
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
-deleted file mode 100644
-index 06a4d05..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--package ini
--
--import (
--	"reflect"
--	"strings"
--	"testing"
--)
--
--func TestLoad(t *testing.T) {
--	src := `
--  # Comments are ignored
--
--  herp = derp
--
--  [foo]
--  hello=world
--  whitespace should   =   not matter   
--  ; sneaky semicolon-style comment
--  multiple = equals = signs
--
--  [bar]
--  this = that`
--
--	file, err := Load(strings.NewReader(src))
--	if err != nil {
--		t.Fatal(err)
--	}
--	check := func(section, key, expect string) {
--		if value, _ := file.Get(section, key); value != expect {
--			t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
--		}
--	}
--
--	check("", "herp", "derp")
--	check("foo", "hello", "world")
--	check("foo", "whitespace should", "not matter")
--	check("foo", "multiple", "equals = signs")
--	check("bar", "this", "that")
--}
--
--func TestSyntaxError(t *testing.T) {
--	src := `
--  # Line 2
--  [foo]
--  bar = baz
--  # Here's an error on line 6:
--  wut?
--  herp = derp`
--	_, err := Load(strings.NewReader(src))
--	t.Logf("%T: %v", err, err)
--	if err == nil {
--		t.Fatal("expected an error, got nil")
--	}
--	syntaxErr, ok := err.(ErrSyntax)
--	if !ok {
--		t.Fatal("expected an error of type ErrSyntax")
--	}
--	if syntaxErr.Line != 6 {
--		t.Fatal("incorrect line number")
--	}
--	if syntaxErr.Source != "wut?" {
--		t.Fatal("incorrect source")
--	}
--}
--
--func TestDefinedSectionBehaviour(t *testing.T) {
--	check := func(src string, expect File) {
--		file, err := Load(strings.NewReader(src))
--		if err != nil {
--			t.Fatal(err)
--		}
--		if !reflect.DeepEqual(file, expect) {
--			t.Errorf("expected %v, got %v", expect, file)
--		}
--	}
--	// No sections for an empty file
--	check("", File{})
--	// Default section only if there are actually values for it
--	check("foo=bar", File{"": {"foo": "bar"}})
--	// User-defined sections should always be present, even if empty
--	check("[a]\n[b]\nfoo=bar", File{
--		"a": {},
--		"b": {"foo": "bar"},
--	})
--	check("foo=bar\n[a]\nthis=that", File{
--		"":  {"foo": "bar"},
--		"a": {"this": "that"},
--	})
--}
-diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini b/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini
-deleted file mode 100644
-index d13c999..0000000
---- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini
-+++ /dev/null
-@@ -1,2 +0,0 @@
--[default]
--stuff = things
-diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go
-deleted file mode 100644
-index 490245d..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/context/context.go
-+++ /dev/null
-@@ -1,432 +0,0 @@
--// Copyright 2014 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package context defines the Context type, which carries deadlines,
--// cancelation signals, and other request-scoped values across API boundaries
--// and between processes.
--//
--// Incoming requests to a server should create a Context, and outgoing calls to
--// servers should accept a Context.  The chain of function calls between must
--// propagate the Context, optionally replacing it with a modified copy created
--// using WithDeadline, WithTimeout, WithCancel, or WithValue.
--//
--// Programs that use Contexts should follow these rules to keep interfaces
--// consistent across packages and enable static analysis tools to check context
--// propagation:
--//
--// Do not store Contexts inside a struct type; instead, pass a Context
--// explicitly to each function that needs it.  The Context should be the first
--// parameter, typically named ctx:
--//
--// 	func DoSomething(ctx context.Context, arg Arg) error {
--// 		// ... use ctx ...
--// 	}
--//
--// Do not pass a nil Context, even if a function permits it.  Pass context.TODO
--// if you are unsure about which Context to use.
--//
--// Use context Values only for request-scoped data that transits processes and
--// APIs, not for passing optional parameters to functions.
--//
--// The same Context may be passed to functions running in different goroutines;
--// Contexts are safe for simultaneous use by multiple goroutines.
--//
--// See http://blog.golang.org/context for example code for a server that uses
--// Contexts.
--package context // import "golang.org/x/net/context"
--
--import (
--	"errors"
--	"fmt"
--	"sync"
--	"time"
--)
--
--// A Context carries a deadline, a cancelation signal, and other values across
--// API boundaries.
--//
--// Context's methods may be called by multiple goroutines simultaneously.
--type Context interface {
--	// Deadline returns the time when work done on behalf of this context
--	// should be canceled.  Deadline returns ok==false when no deadline is
--	// set.  Successive calls to Deadline return the same results.
--	Deadline() (deadline time.Time, ok bool)
--
--	// Done returns a channel that's closed when work done on behalf of this
--	// context should be canceled.  Done may return nil if this context can
--	// never be canceled.  Successive calls to Done return the same value.
--	//
--	// WithCancel arranges for Done to be closed when cancel is called;
--	// WithDeadline arranges for Done to be closed when the deadline
--	// expires; WithTimeout arranges for Done to be closed when the timeout
--	// elapses.
--	//
--	// Done is provided for use in select statements:
--	//
--	// 	// DoSomething calls DoSomethingSlow and returns as soon as
--	// 	// it returns or ctx.Done is closed.
--	// 	func DoSomething(ctx context.Context) (Result, error) {
--	// 		c := make(chan Result, 1)
--	// 		go func() { c <- DoSomethingSlow(ctx) }()
--	// 		select {
--	// 		case res := <-c:
--	// 			return res, nil
--	// 		case <-ctx.Done():
--	// 			return nil, ctx.Err()
--	// 		}
--	// 	}
--	//
--	// See http://blog.golang.org/pipelines for more examples of how to use
--	// a Done channel for cancelation.
--	Done() <-chan struct{}
--
--	// Err returns a non-nil error value after Done is closed.  Err returns
--	// Canceled if the context was canceled or DeadlineExceeded if the
--	// context's deadline passed.  No other values for Err are defined.
--	// After Done is closed, successive calls to Err return the same value.
--	Err() error
--
--	// Value returns the value associated with this context for key, or nil
--	// if no value is associated with key.  Successive calls to Value with
--	// the same key returns the same result.
--	//
--	// Use context values only for request-scoped data that transits
--	// processes and API boundaries, not for passing optional parameters to
--	// functions.
--	//
--	// A key identifies a specific value in a Context.  Functions that wish
--	// to store values in Context typically allocate a key in a global
--	// variable then use that key as the argument to context.WithValue and
--	// Context.Value.  A key can be any type that supports equality;
--	// packages should define keys as an unexported type to avoid
--	// collisions.
--	//
--	// Packages that define a Context key should provide type-safe accessors
--	// for the values stored using that key:
--	//
--	// 	// Package user defines a User type that's stored in Contexts.
--	// 	package user
--	//
--	// 	import "golang.org/x/net/context"
--	//
--	// 	// User is the type of value stored in the Contexts.
--	// 	type User struct {...}
--	//
--	// 	// key is an unexported type for keys defined in this package.
--	// 	// This prevents collisions with keys defined in other packages.
--	// 	type key int
--	//
--	// 	// userKey is the key for user.User values in Contexts.  It is
--	// 	// unexported; clients use user.NewContext and user.FromContext
--	// 	// instead of using this key directly.
--	// 	var userKey key = 0
--	//
--	// 	// NewContext returns a new Context that carries value u.
--	// 	func NewContext(ctx context.Context, u *User) context.Context {
--	// 		return context.WithValue(ctx, userKey, u)
--	// 	}
--	//
--	// 	// FromContext returns the User value stored in ctx, if any.
--	// 	func FromContext(ctx context.Context) (*User, bool) {
--	// 		u, ok := ctx.Value(userKey).(*User)
--	// 		return u, ok
--	// 	}
--	Value(key interface{}) interface{}
--}
--
--// Canceled is the error returned by Context.Err when the context is canceled.
--var Canceled = errors.New("context canceled")
--
--// DeadlineExceeded is the error returned by Context.Err when the context's
--// deadline passes.
--var DeadlineExceeded = errors.New("context deadline exceeded")
--
--// An emptyCtx is never canceled, has no values, and has no deadline.  It is not
--// struct{}, since vars of this type must have distinct addresses.
--type emptyCtx int
--
--func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
--	return
--}
--
--func (*emptyCtx) Done() <-chan struct{} {
--	return nil
--}
--
--func (*emptyCtx) Err() error {
--	return nil
--}
--
--func (*emptyCtx) Value(key interface{}) interface{} {
--	return nil
--}
--
--func (e *emptyCtx) String() string {
--	switch e {
--	case background:
--		return "context.Background"
--	case todo:
--		return "context.TODO"
--	}
--	return "unknown empty Context"
--}
--
--var (
--	background = new(emptyCtx)
--	todo       = new(emptyCtx)
--)
--
--// Background returns a non-nil, empty Context. It is never canceled, has no
--// values, and has no deadline.  It is typically used by the main function,
--// initialization, and tests, and as the top-level Context for incoming
--// requests.
--func Background() Context {
--	return background
--}
--
--// TODO returns a non-nil, empty Context.  Code should use context.TODO when
--// it's unclear which Context to use or it is not yet available (because the
--// surrounding function has not yet been extended to accept a Context
--// parameter).  TODO is recognized by static analysis tools that determine
--// whether Contexts are propagated correctly in a program.
--func TODO() Context {
--	return todo
--}
--
--// A CancelFunc tells an operation to abandon its work.
--// A CancelFunc does not wait for the work to stop.
--// After the first call, subsequent calls to a CancelFunc do nothing.
--type CancelFunc func()
--
--// WithCancel returns a copy of parent with a new Done channel. The returned
--// context's Done channel is closed when the returned cancel function is called
--// or when the parent context's Done channel is closed, whichever happens first.
--func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
--	c := newCancelCtx(parent)
--	propagateCancel(parent, &c)
--	return &c, func() { c.cancel(true, Canceled) }
--}
--
--// newCancelCtx returns an initialized cancelCtx.
--func newCancelCtx(parent Context) cancelCtx {
--	return cancelCtx{
--		Context: parent,
--		done:    make(chan struct{}),
--	}
--}
--
--// propagateCancel arranges for child to be canceled when parent is.
--func propagateCancel(parent Context, child canceler) {
--	if parent.Done() == nil {
--		return // parent is never canceled
--	}
--	if p, ok := parentCancelCtx(parent); ok {
--		p.mu.Lock()
--		if p.err != nil {
--			// parent has already been canceled
--			child.cancel(false, p.err)
--		} else {
--			if p.children == nil {
--				p.children = make(map[canceler]bool)
--			}
--			p.children[child] = true
--		}
--		p.mu.Unlock()
--	} else {
--		go func() {
--			select {
--			case <-parent.Done():
--				child.cancel(false, parent.Err())
--			case <-child.Done():
--			}
--		}()
--	}
--}
--
--// parentCancelCtx follows a chain of parent references until it finds a
--// *cancelCtx.  This function understands how each of the concrete types in this
--// package represents its parent.
--func parentCancelCtx(parent Context) (*cancelCtx, bool) {
--	for {
--		switch c := parent.(type) {
--		case *cancelCtx:
--			return c, true
--		case *timerCtx:
--			return &c.cancelCtx, true
--		case *valueCtx:
--			parent = c.Context
--		default:
--			return nil, false
--		}
--	}
--}
--
--// A canceler is a context type that can be canceled directly.  The
--// implementations are *cancelCtx and *timerCtx.
--type canceler interface {
--	cancel(removeFromParent bool, err error)
--	Done() <-chan struct{}
--}
--
--// A cancelCtx can be canceled.  When canceled, it also cancels any children
--// that implement canceler.
--type cancelCtx struct {
--	Context
--
--	done chan struct{} // closed by the first cancel call.
--
--	mu       sync.Mutex
--	children map[canceler]bool // set to nil by the first cancel call
--	err      error             // set to non-nil by the first cancel call
--}
--
--func (c *cancelCtx) Done() <-chan struct{} {
--	return c.done
--}
--
--func (c *cancelCtx) Err() error {
--	c.mu.Lock()
--	defer c.mu.Unlock()
--	return c.err
--}
--
--func (c *cancelCtx) String() string {
--	return fmt.Sprintf("%v.WithCancel", c.Context)
--}
--
--// cancel closes c.done, cancels each of c's children, and, if
--// removeFromParent is true, removes c from its parent's children.
--func (c *cancelCtx) cancel(removeFromParent bool, err error) {
--	if err == nil {
--		panic("context: internal error: missing cancel error")
--	}
--	c.mu.Lock()
--	if c.err != nil {
--		c.mu.Unlock()
--		return // already canceled
--	}
--	c.err = err
--	close(c.done)
--	for child := range c.children {
--		// NOTE: acquiring the child's lock while holding parent's lock.
--		child.cancel(false, err)
--	}
--	c.children = nil
--	c.mu.Unlock()
--
--	if removeFromParent {
--		if p, ok := parentCancelCtx(c.Context); ok {
--			p.mu.Lock()
--			if p.children != nil {
--				delete(p.children, c)
--			}
--			p.mu.Unlock()
--		}
--	}
--}
--
--// WithDeadline returns a copy of the parent context with the deadline adjusted
--// to be no later than d.  If the parent's deadline is already earlier than d,
--// WithDeadline(parent, d) is semantically equivalent to parent.  The returned
--// context's Done channel is closed when the deadline expires, when the returned
--// cancel function is called, or when the parent context's Done channel is
--// closed, whichever happens first.
--//
--// Canceling this context releases resources associated with the deadline
--// timer, so code should call cancel as soon as the operations running in this
--// Context complete.
--func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
--	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
--		// The current deadline is already sooner than the new one.
--		return WithCancel(parent)
--	}
--	c := &timerCtx{
--		cancelCtx: newCancelCtx(parent),
--		deadline:  deadline,
--	}
--	propagateCancel(parent, c)
--	d := deadline.Sub(time.Now())
--	if d <= 0 {
--		c.cancel(true, DeadlineExceeded) // deadline has already passed
--		return c, func() { c.cancel(true, Canceled) }
--	}
--	c.mu.Lock()
--	defer c.mu.Unlock()
--	if c.err == nil {
--		c.timer = time.AfterFunc(d, func() {
--			c.cancel(true, DeadlineExceeded)
--		})
--	}
--	return c, func() { c.cancel(true, Canceled) }
--}
--
--// A timerCtx carries a timer and a deadline.  It embeds a cancelCtx to
--// implement Done and Err.  It implements cancel by stopping its timer then
--// delegating to cancelCtx.cancel.
--type timerCtx struct {
--	cancelCtx
--	timer *time.Timer // Under cancelCtx.mu.
--
--	deadline time.Time
--}
--
--func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
--	return c.deadline, true
--}
--
--func (c *timerCtx) String() string {
--	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
--}
--
--func (c *timerCtx) cancel(removeFromParent bool, err error) {
--	c.cancelCtx.cancel(removeFromParent, err)
--	c.mu.Lock()
--	if c.timer != nil {
--		c.timer.Stop()
--		c.timer = nil
--	}
--	c.mu.Unlock()
--}
--
--// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
--//
--// Canceling this context releases resources associated with the deadline
--// timer, so code should call cancel as soon as the operations running in this
--// Context complete:
--//
--// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
--// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
--// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
--// 		return slowOperation(ctx)
--// 	}
--func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
--	return WithDeadline(parent, time.Now().Add(timeout))
--}
--
--// WithValue returns a copy of parent in which the value associated with key is
--// val.
--//
--// Use context Values only for request-scoped data that transits processes and
--// APIs, not for passing optional parameters to functions.
--func WithValue(parent Context, key interface{}, val interface{}) Context {
--	return &valueCtx{parent, key, val}
--}
--
--// A valueCtx carries a key-value pair.  It implements Value for that key and
--// delegates all other calls to the embedded Context.
--type valueCtx struct {
--	Context
--	key, val interface{}
--}
--
--func (c *valueCtx) String() string {
--	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
--}
--
--func (c *valueCtx) Value(key interface{}) interface{} {
--	if c.key == key {
--		return c.val
--	}
--	return c.Context.Value(key)
--}
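
(The package comment at the top of the deleted context.go is the part most callers depend on; the cancelation-propagation pattern it describes looks roughly like the hedged sketch below, where worker is an invented name and everything else is the package's documented API.)

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// worker blocks until its Context is canceled, then reports why it stopped.
func worker(ctx context.Context, done chan<- error) {
	<-ctx.Done()
	done <- ctx.Err()
}

func main() {
	// Derive from Background and pass ctx down explicitly, per the doc comment.
	ctx, cancel := context.WithCancel(context.Background())

	done := make(chan error)
	go worker(ctx, done)

	// Cancel after a short delay; Done() is closed and Err() becomes Canceled.
	time.AfterFunc(50*time.Millisecond, cancel)

	fmt.Println(<-done) // prints "context canceled"
}
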
-diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
-deleted file mode 100644
-index 82d2494..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go
-+++ /dev/null
-@@ -1,553 +0,0 @@
--// Copyright 2014 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package context
--
--import (
--	"fmt"
--	"math/rand"
--	"runtime"
--	"strings"
--	"sync"
--	"testing"
--	"time"
--)
--
--// otherContext is a Context that's not one of the types defined in context.go.
--// This lets us test code paths that differ based on the underlying type of the
--// Context.
--type otherContext struct {
--	Context
--}
--
--func TestBackground(t *testing.T) {
--	c := Background()
--	if c == nil {
--		t.Fatalf("Background returned nil")
--	}
--	select {
--	case x := <-c.Done():
--		t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
--	default:
--	}
--	if got, want := fmt.Sprint(c), "context.Background"; got != want {
--		t.Errorf("Background().String() = %q want %q", got, want)
--	}
--}
--
--func TestTODO(t *testing.T) {
--	c := TODO()
--	if c == nil {
--		t.Fatalf("TODO returned nil")
--	}
--	select {
--	case x := <-c.Done():
--		t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
--	default:
--	}
--	if got, want := fmt.Sprint(c), "context.TODO"; got != want {
--		t.Errorf("TODO().String() = %q want %q", got, want)
--	}
--}
--
--func TestWithCancel(t *testing.T) {
--	c1, cancel := WithCancel(Background())
--
--	if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
--		t.Errorf("c1.String() = %q want %q", got, want)
--	}
--
--	o := otherContext{c1}
--	c2, _ := WithCancel(o)
--	contexts := []Context{c1, o, c2}
--
--	for i, c := range contexts {
--		if d := c.Done(); d == nil {
--			t.Errorf("c[%d].Done() == %v want non-nil", i, d)
--		}
--		if e := c.Err(); e != nil {
--			t.Errorf("c[%d].Err() == %v want nil", i, e)
--		}
--
--		select {
--		case x := <-c.Done():
--			t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
--		default:
--		}
--	}
--
--	cancel()
--	time.Sleep(100 * time.Millisecond) // let cancelation propagate
--
--	for i, c := range contexts {
--		select {
--		case <-c.Done():
--		default:
--			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
--		}
--		if e := c.Err(); e != Canceled {
--			t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
--		}
--	}
--}
--
--func TestParentFinishesChild(t *testing.T) {
--	// Context tree:
--	// parent -> cancelChild
--	// parent -> valueChild -> timerChild
--	parent, cancel := WithCancel(Background())
--	cancelChild, stop := WithCancel(parent)
--	defer stop()
--	valueChild := WithValue(parent, "key", "value")
--	timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
--	defer stop()
--
--	select {
--	case x := <-parent.Done():
--		t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
--	case x := <-cancelChild.Done():
--		t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
--	case x := <-timerChild.Done():
--		t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
--	case x := <-valueChild.Done():
--		t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
--	default:
--	}
--
--	// The parent's children should contain the two cancelable children.
--	pc := parent.(*cancelCtx)
--	cc := cancelChild.(*cancelCtx)
--	tc := timerChild.(*timerCtx)
--	pc.mu.Lock()
--	if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
--		t.Errorf("bad linkage: pc.children = %v, want %v and %v",
--			pc.children, cc, tc)
--	}
--	pc.mu.Unlock()
--
--	if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
--		t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
--	}
--	if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
--		t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
--	}
--
--	cancel()
--
--	pc.mu.Lock()
--	if len(pc.children) != 0 {
--		t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
--	}
--	pc.mu.Unlock()
--
--	// parent and children should all be finished.
--	check := func(ctx Context, name string) {
--		select {
--		case <-ctx.Done():
--		default:
--			t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
--		}
--		if e := ctx.Err(); e != Canceled {
--			t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
--		}
--	}
--	check(parent, "parent")
--	check(cancelChild, "cancelChild")
--	check(valueChild, "valueChild")
--	check(timerChild, "timerChild")
--
--	// WithCancel should return a canceled context on a canceled parent.
--	precanceledChild := WithValue(parent, "key", "value")
--	select {
--	case <-precanceledChild.Done():
--	default:
--		t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
--	}
--	if e := precanceledChild.Err(); e != Canceled {
--		t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
--	}
--}
--
--func TestChildFinishesFirst(t *testing.T) {
--	cancelable, stop := WithCancel(Background())
--	defer stop()
--	for _, parent := range []Context{Background(), cancelable} {
--		child, cancel := WithCancel(parent)
--
--		select {
--		case x := <-parent.Done():
--			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
--		case x := <-child.Done():
--			t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
--		default:
--		}
--
--		cc := child.(*cancelCtx)
--		pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
--		if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
--			t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
--		}
--
--		if pcok {
--			pc.mu.Lock()
--			if len(pc.children) != 1 || !pc.children[cc] {
--				t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
--			}
--			pc.mu.Unlock()
--		}
--
--		cancel()
--
--		if pcok {
--			pc.mu.Lock()
--			if len(pc.children) != 0 {
--				t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
--			}
--			pc.mu.Unlock()
--		}
--
--		// child should be finished.
--		select {
--		case <-child.Done():
--		default:
--			t.Errorf("<-child.Done() blocked, but shouldn't have")
--		}
--		if e := child.Err(); e != Canceled {
--			t.Errorf("child.Err() == %v want %v", e, Canceled)
--		}
--
--		// parent should not be finished.
--		select {
--		case x := <-parent.Done():
--			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
--		default:
--		}
--		if e := parent.Err(); e != nil {
--			t.Errorf("parent.Err() == %v want nil", e)
--		}
--	}
--}
--
--func testDeadline(c Context, wait time.Duration, t *testing.T) {
--	select {
--	case <-time.After(wait):
--		t.Fatalf("context should have timed out")
--	case <-c.Done():
--	}
--	if e := c.Err(); e != DeadlineExceeded {
--		t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
--	}
--}
--
--func TestDeadline(t *testing.T) {
--	c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
--	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
--		t.Errorf("c.String() = %q want prefix %q", got, prefix)
--	}
--	testDeadline(c, 200*time.Millisecond, t)
--
--	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
--	o := otherContext{c}
--	testDeadline(o, 200*time.Millisecond, t)
--
--	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
--	o = otherContext{c}
--	c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
--	testDeadline(c, 200*time.Millisecond, t)
--}
--
--func TestTimeout(t *testing.T) {
--	c, _ := WithTimeout(Background(), 100*time.Millisecond)
--	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
--		t.Errorf("c.String() = %q want prefix %q", got, prefix)
--	}
--	testDeadline(c, 200*time.Millisecond, t)
--
--	c, _ = WithTimeout(Background(), 100*time.Millisecond)
--	o := otherContext{c}
--	testDeadline(o, 200*time.Millisecond, t)
--
--	c, _ = WithTimeout(Background(), 100*time.Millisecond)
--	o = otherContext{c}
--	c, _ = WithTimeout(o, 300*time.Millisecond)
--	testDeadline(c, 200*time.Millisecond, t)
--}
--
--func TestCanceledTimeout(t *testing.T) {
--	c, _ := WithTimeout(Background(), 200*time.Millisecond)
--	o := otherContext{c}
--	c, cancel := WithTimeout(o, 400*time.Millisecond)
--	cancel()
--	time.Sleep(100 * time.Millisecond) // let cancelation propagate
--	select {
--	case <-c.Done():
--	default:
--		t.Errorf("<-c.Done() blocked, but shouldn't have")
--	}
--	if e := c.Err(); e != Canceled {
--		t.Errorf("c.Err() == %v want %v", e, Canceled)
--	}
--}
--
--type key1 int
--type key2 int
--
--var k1 = key1(1)
--var k2 = key2(1) // same int as k1, different type
--var k3 = key2(3) // same type as k2, different int
--
--func TestValues(t *testing.T) {
--	check := func(c Context, nm, v1, v2, v3 string) {
--		if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
--			t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
--		}
--		if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
--			t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
--		}
--		if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
--			t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
--		}
--	}
--
--	c0 := Background()
--	check(c0, "c0", "", "", "")
--
--	c1 := WithValue(Background(), k1, "c1k1")
--	check(c1, "c1", "c1k1", "", "")
--
--	if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
--		t.Errorf("c.String() = %q want %q", got, want)
--	}
--
--	c2 := WithValue(c1, k2, "c2k2")
--	check(c2, "c2", "c1k1", "c2k2", "")
--
--	c3 := WithValue(c2, k3, "c3k3")
--	check(c3, "c2", "c1k1", "c2k2", "c3k3")
--
--	c4 := WithValue(c3, k1, nil)
--	check(c4, "c4", "", "c2k2", "c3k3")
--
--	o0 := otherContext{Background()}
--	check(o0, "o0", "", "", "")
--
--	o1 := otherContext{WithValue(Background(), k1, "c1k1")}
--	check(o1, "o1", "c1k1", "", "")
--
--	o2 := WithValue(o1, k2, "o2k2")
--	check(o2, "o2", "c1k1", "o2k2", "")
--
--	o3 := otherContext{c4}
--	check(o3, "o3", "", "c2k2", "c3k3")
--
--	o4 := WithValue(o3, k3, nil)
--	check(o4, "o4", "", "c2k2", "")
--}
--
--func TestAllocs(t *testing.T) {
--	bg := Background()
--	for _, test := range []struct {
--		desc       string
--		f          func()
--		limit      float64
--		gccgoLimit float64
--	}{
--		{
--			desc:       "Background()",
--			f:          func() { Background() },
--			limit:      0,
--			gccgoLimit: 0,
--		},
--		{
--			desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
--			f: func() {
--				c := WithValue(bg, k1, nil)
--				c.Value(k1)
--			},
--			limit:      3,
--			gccgoLimit: 3,
--		},
--		{
--			desc: "WithTimeout(bg, 15*time.Millisecond)",
--			f: func() {
--				c, _ := WithTimeout(bg, 15*time.Millisecond)
--				<-c.Done()
--			},
--			limit:      8,
--			gccgoLimit: 13,
--		},
--		{
--			desc: "WithCancel(bg)",
--			f: func() {
--				c, cancel := WithCancel(bg)
--				cancel()
--				<-c.Done()
--			},
--			limit:      5,
--			gccgoLimit: 8,
--		},
--		{
--			desc: "WithTimeout(bg, 100*time.Millisecond)",
--			f: func() {
--				c, cancel := WithTimeout(bg, 100*time.Millisecond)
--				cancel()
--				<-c.Done()
--			},
--			limit:      8,
--			gccgoLimit: 25,
--		},
--	} {
--		limit := test.limit
--		if runtime.Compiler == "gccgo" {
--			// gccgo does not yet do escape analysis.
--			// TODO(iant): Remove this when gccgo does do escape analysis.
--			limit = test.gccgoLimit
--		}
--		if n := testing.AllocsPerRun(100, test.f); n > limit {
--			t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
--		}
--	}
--}
--
--func TestSimultaneousCancels(t *testing.T) {
--	root, cancel := WithCancel(Background())
--	m := map[Context]CancelFunc{root: cancel}
--	q := []Context{root}
--	// Create a tree of contexts.
--	for len(q) != 0 && len(m) < 100 {
--		parent := q[0]
--		q = q[1:]
--		for i := 0; i < 4; i++ {
--			ctx, cancel := WithCancel(parent)
--			m[ctx] = cancel
--			q = append(q, ctx)
--		}
--	}
--	// Start all the cancels in a random order.
--	var wg sync.WaitGroup
--	wg.Add(len(m))
--	for _, cancel := range m {
--		go func(cancel CancelFunc) {
--			cancel()
--			wg.Done()
--		}(cancel)
--	}
--	// Wait on all the contexts in a random order.
--	for ctx := range m {
--		select {
--		case <-ctx.Done():
--		case <-time.After(1 * time.Second):
--			buf := make([]byte, 10<<10)
--			n := runtime.Stack(buf, true)
--			t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
--		}
--	}
--	// Wait for all the cancel functions to return.
--	done := make(chan struct{})
--	go func() {
--		wg.Wait()
--		close(done)
--	}()
--	select {
--	case <-done:
--	case <-time.After(1 * time.Second):
--		buf := make([]byte, 10<<10)
--		n := runtime.Stack(buf, true)
--		t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
--	}
--}
--
--func TestInterlockedCancels(t *testing.T) {
--	parent, cancelParent := WithCancel(Background())
--	child, cancelChild := WithCancel(parent)
--	go func() {
--		parent.Done()
--		cancelChild()
--	}()
--	cancelParent()
--	select {
--	case <-child.Done():
--	case <-time.After(1 * time.Second):
--		buf := make([]byte, 10<<10)
--		n := runtime.Stack(buf, true)
--		t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
--	}
--}
--
--func TestLayersCancel(t *testing.T) {
--	testLayers(t, time.Now().UnixNano(), false)
--}
--
--func TestLayersTimeout(t *testing.T) {
--	testLayers(t, time.Now().UnixNano(), true)
--}
--
--func testLayers(t *testing.T, seed int64, testTimeout bool) {
--	rand.Seed(seed)
--	errorf := func(format string, a ...interface{}) {
--		t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
--	}
--	const (
--		timeout   = 200 * time.Millisecond
--		minLayers = 30
--	)
--	type value int
--	var (
--		vals      []*value
--		cancels   []CancelFunc
--		numTimers int
--		ctx       = Background()
--	)
--	for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
--		switch rand.Intn(3) {
--		case 0:
--			v := new(value)
--			ctx = WithValue(ctx, v, v)
--			vals = append(vals, v)
--		case 1:
--			var cancel CancelFunc
--			ctx, cancel = WithCancel(ctx)
--			cancels = append(cancels, cancel)
--		case 2:
--			var cancel CancelFunc
--			ctx, cancel = WithTimeout(ctx, timeout)
--			cancels = append(cancels, cancel)
--			numTimers++
--		}
--	}
--	checkValues := func(when string) {
--		for _, key := range vals {
--			if val := ctx.Value(key).(*value); key != val {
--				errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
--			}
--		}
--	}
--	select {
--	case <-ctx.Done():
--		errorf("ctx should not be canceled yet")
--	default:
--	}
--	if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
--		t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
--	}
--	t.Log(ctx)
--	checkValues("before cancel")
--	if testTimeout {
--		select {
--		case <-ctx.Done():
--		case <-time.After(timeout + timeout/10):
--			errorf("ctx should have timed out")
--		}
--		checkValues("after timeout")
--	} else {
--		cancel := cancels[rand.Intn(len(cancels))]
--		cancel()
--		select {
--		case <-ctx.Done():
--		default:
--			errorf("ctx should be canceled")
--		}
--		checkValues("after cancel")
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
-deleted file mode 100644
-index a6754dc..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--// Copyright 2014 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package context_test
--
--import (
--	"fmt"
--	"time"
--
--	"golang.org/x/net/context"
--)
--
--func ExampleWithTimeout() {
--	// Pass a context with a timeout to tell a blocking function that it
--	// should abandon its work after the timeout elapses.
--	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
--	select {
--	case <-time.After(200 * time.Millisecond):
--		fmt.Println("overslept")
--	case <-ctx.Done():
--		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
--	}
--	// Output:
--	// context deadline exceeded
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/atom/atom.go b/Godeps/_workspace/src/golang.org/x/net/html/atom/atom.go
-deleted file mode 100644
-index cd0a8ac..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/atom/atom.go
-+++ /dev/null
-@@ -1,78 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package atom provides integer codes (also known as atoms) for a fixed set of
--// frequently occurring HTML strings: tag names and attribute keys such as "p"
--// and "id".
--//
--// Sharing an atom's name between all elements with the same tag can result in
--// fewer string allocations when tokenizing and parsing HTML. Integer
--// comparisons are also generally faster than string comparisons.
--//
--// The value of an atom's particular code is not guaranteed to stay the same
--// between versions of this package. Neither is any ordering guaranteed:
--// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
--// be dense. The only guarantees are that e.g. looking up "div" will yield
--// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
--package atom // import "golang.org/x/net/html/atom"
--
--// Atom is an integer code for a string. The zero value maps to "".
--type Atom uint32
--
--// String returns the atom's name.
--func (a Atom) String() string {
--	start := uint32(a >> 8)
--	n := uint32(a & 0xff)
--	if start+n > uint32(len(atomText)) {
--		return ""
--	}
--	return atomText[start : start+n]
--}
--
--func (a Atom) string() string {
--	return atomText[a>>8 : a>>8+a&0xff]
--}
--
--// fnv computes the FNV hash with an arbitrary starting value h.
--func fnv(h uint32, s []byte) uint32 {
--	for i := range s {
--		h ^= uint32(s[i])
--		h *= 16777619
--	}
--	return h
--}
--
--func match(s string, t []byte) bool {
--	for i, c := range t {
--		if s[i] != c {
--			return false
--		}
--	}
--	return true
--}
--
--// Lookup returns the atom whose name is s. It returns zero if there is no
--// such atom. The lookup is case sensitive.
--func Lookup(s []byte) Atom {
--	if len(s) == 0 || len(s) > maxAtomLen {
--		return 0
--	}
--	h := fnv(hash0, s)
--	if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
--		return a
--	}
--	if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
--		return a
--	}
--	return 0
--}
--
--// String returns a string whose contents are equal to s. In that sense, it is
--// equivalent to string(s) but may be more efficient.
--func String(s []byte) string {
--	if a := Lookup(s); a != 0 {
--		return a.String()
--	}
--	return string(s)
--}
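
(A minimal sketch of the Lookup/String contract described in the doc comment above; "div", "span" and "not-a-tag" are arbitrary sample inputs.)

package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup maps a known tag or attribute name to its integer code; unknown names map to 0.
	if a := atom.Lookup([]byte("div")); a != 0 {
		fmt.Println("code:", uint32(a), "name:", a.String())
	}

	// String behaves like string(b) for known names but can avoid an allocation.
	fmt.Println(atom.String([]byte("span")))

	fmt.Println(atom.Lookup([]byte("not-a-tag")) == 0) // true
}
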
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/atom/atom_test.go b/Godeps/_workspace/src/golang.org/x/net/html/atom/atom_test.go
-deleted file mode 100644
-index 6e33704..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/atom/atom_test.go
-+++ /dev/null
-@@ -1,109 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package atom
--
--import (
--	"sort"
--	"testing"
--)
--
--func TestKnown(t *testing.T) {
--	for _, s := range testAtomList {
--		if atom := Lookup([]byte(s)); atom.String() != s {
--			t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String())
--		}
--	}
--}
--
--func TestHits(t *testing.T) {
--	for _, a := range table {
--		if a == 0 {
--			continue
--		}
--		got := Lookup([]byte(a.String()))
--		if got != a {
--			t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a))
--		}
--	}
--}
--
--func TestMisses(t *testing.T) {
--	testCases := []string{
--		"",
--		"\x00",
--		"\xff",
--		"A",
--		"DIV",
--		"Div",
--		"dIV",
--		"aa",
--		"a\x00",
--		"ab",
--		"abb",
--		"abbr0",
--		"abbr ",
--		" abbr",
--		" a",
--		"acceptcharset",
--		"acceptCharset",
--		"accept_charset",
--		"h0",
--		"h1h2",
--		"h7",
--		"onClick",
--		"λ",
--		// The following string has the same hash (0xa1d7fab7) as "onmouseover".
--		"\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7",
--	}
--	for _, tc := range testCases {
--		got := Lookup([]byte(tc))
--		if got != 0 {
--			t.Errorf("Lookup(%q): got %d, want 0", tc, got)
--		}
--	}
--}
--
--func TestForeignObject(t *testing.T) {
--	const (
--		afo = Foreignobject
--		afO = ForeignObject
--		sfo = "foreignobject"
--		sfO = "foreignObject"
--	)
--	if got := Lookup([]byte(sfo)); got != afo {
--		t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo)
--	}
--	if got := Lookup([]byte(sfO)); got != afO {
--		t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO)
--	}
--	if got := afo.String(); got != sfo {
--		t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo)
--	}
--	if got := afO.String(); got != sfO {
--		t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO)
--	}
--}
--
--func BenchmarkLookup(b *testing.B) {
--	sortedTable := make([]string, 0, len(table))
--	for _, a := range table {
--		if a != 0 {
--			sortedTable = append(sortedTable, a.String())
--		}
--	}
--	sort.Strings(sortedTable)
--
--	x := make([][]byte, 1000)
--	for i := range x {
--		x[i] = []byte(sortedTable[i%len(sortedTable)])
--	}
--
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		for _, s := range x {
--			Lookup(s)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/atom/gen.go b/Godeps/_workspace/src/golang.org/x/net/html/atom/gen.go
-deleted file mode 100644
-index 9958a71..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/atom/gen.go
-+++ /dev/null
-@@ -1,636 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build ignore
--
--package main
--
--// This program generates table.go and table_test.go.
--// Invoke as
--//
--//	go run gen.go |gofmt >table.go
--//	go run gen.go -test |gofmt >table_test.go
--
--import (
--	"flag"
--	"fmt"
--	"math/rand"
--	"os"
--	"sort"
--	"strings"
--)
--
--// identifier converts s to a Go exported identifier.
--// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
--func identifier(s string) string {
--	b := make([]byte, 0, len(s))
--	cap := true
--	for _, c := range s {
--		if c == '-' {
--			cap = true
--			continue
--		}
--		if cap && 'a' <= c && c <= 'z' {
--			c -= 'a' - 'A'
--		}
--		cap = false
--		b = append(b, byte(c))
--	}
--	return string(b)
--}
--
--var test = flag.Bool("test", false, "generate table_test.go")
--
--func main() {
--	flag.Parse()
--
--	var all []string
--	all = append(all, elements...)
--	all = append(all, attributes...)
--	all = append(all, eventHandlers...)
--	all = append(all, extra...)
--	sort.Strings(all)
--
--	if *test {
--		fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
--		fmt.Printf("package atom\n\n")
--		fmt.Printf("var testAtomList = []string{\n")
--		for _, s := range all {
--			fmt.Printf("\t%q,\n", s)
--		}
--		fmt.Printf("}\n")
--		return
--	}
--
--	// uniq - lists have dups
--	// compute max len too
--	maxLen := 0
--	w := 0
--	for _, s := range all {
--		if w == 0 || all[w-1] != s {
--			if maxLen < len(s) {
--				maxLen = len(s)
--			}
--			all[w] = s
--			w++
--		}
--	}
--	all = all[:w]
--
--	// Find hash that minimizes table size.
--	var best *table
--	for i := 0; i < 1000000; i++ {
--		if best != nil && 1<<(best.k-1) < len(all) {
--			break
--		}
--		h := rand.Uint32()
--		for k := uint(0); k <= 16; k++ {
--			if best != nil && k >= best.k {
--				break
--			}
--			var t table
--			if t.init(h, k, all) {
--				best = &t
--				break
--			}
--		}
--	}
--	if best == nil {
--		fmt.Fprintf(os.Stderr, "failed to construct string table\n")
--		os.Exit(1)
--	}
--
--	// Lay out strings, using overlaps when possible.
--	layout := append([]string{}, all...)
--
--	// Remove strings that are substrings of other strings
--	for changed := true; changed; {
--		changed = false
--		for i, s := range layout {
--			if s == "" {
--				continue
--			}
--			for j, t := range layout {
--				if i != j && t != "" && strings.Contains(s, t) {
--					changed = true
--					layout[j] = ""
--				}
--			}
--		}
--	}
--
--	// Join strings where one suffix matches another prefix.
--	for {
--		// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
--		// maximizing overlap length k.
--		besti := -1
--		bestj := -1
--		bestk := 0
--		for i, s := range layout {
--			if s == "" {
--				continue
--			}
--			for j, t := range layout {
--				if i == j {
--					continue
--				}
--				for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
--					if s[len(s)-k:] == t[:k] {
--						besti = i
--						bestj = j
--						bestk = k
--					}
--				}
--			}
--		}
--		if bestk > 0 {
--			layout[besti] += layout[bestj][bestk:]
--			layout[bestj] = ""
--			continue
--		}
--		break
--	}
--
--	text := strings.Join(layout, "")
--
--	atom := map[string]uint32{}
--	for _, s := range all {
--		off := strings.Index(text, s)
--		if off < 0 {
--			panic("lost string " + s)
--		}
--		atom[s] = uint32(off<<8 | len(s))
--	}
--
--	// Generate the Go code.
--	fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
--	fmt.Printf("package atom\n\nconst (\n")
--	for _, s := range all {
--		fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
--	}
--	fmt.Printf(")\n\n")
--
--	fmt.Printf("const hash0 = %#x\n\n", best.h0)
--	fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
--
--	fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
--	for i, s := range best.tab {
--		if s == "" {
--			continue
--		}
--		fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
--	}
--	fmt.Printf("}\n")
--	datasize := (1 << best.k) * 4
--
--	fmt.Printf("const atomText =\n")
--	textsize := len(text)
--	for len(text) > 60 {
--		fmt.Printf("\t%q +\n", text[:60])
--		text = text[60:]
--	}
--	fmt.Printf("\t%q\n\n", text)
--
--	fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
--}
--
--type byLen []string
--
--func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
--func (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
--func (x byLen) Len() int           { return len(x) }
--
--// fnv computes the FNV hash with an arbitrary starting value h.
--func fnv(h uint32, s string) uint32 {
--	for i := 0; i < len(s); i++ {
--		h ^= uint32(s[i])
--		h *= 16777619
--	}
--	return h
--}
--
--// A table represents an attempt at constructing the lookup table.
--// The lookup table uses cuckoo hashing, meaning that each string
--// can be found in one of two positions.
--type table struct {
--	h0   uint32
--	k    uint
--	mask uint32
--	tab  []string
--}
--
--// hash returns the two hashes for s.
--func (t *table) hash(s string) (h1, h2 uint32) {
--	h := fnv(t.h0, s)
--	h1 = h & t.mask
--	h2 = (h >> 16) & t.mask
--	return
--}
--
--// init initializes the table with the given parameters.
--// h0 is the initial hash value,
--// k is the number of bits of hash value to use, and
--// x is the list of strings to store in the table.
--// init returns false if the table cannot be constructed.
--func (t *table) init(h0 uint32, k uint, x []string) bool {
--	t.h0 = h0
--	t.k = k
--	t.tab = make([]string, 1<<k)
--	t.mask = 1<<k - 1
--	for _, s := range x {
--		if !t.insert(s) {
--			return false
--		}
--	}
--	return true
--}
--
--// insert inserts s in the table.
--func (t *table) insert(s string) bool {
--	h1, h2 := t.hash(s)
--	if t.tab[h1] == "" {
--		t.tab[h1] = s
--		return true
--	}
--	if t.tab[h2] == "" {
--		t.tab[h2] = s
--		return true
--	}
--	if t.push(h1, 0) {
--		t.tab[h1] = s
--		return true
--	}
--	if t.push(h2, 0) {
--		t.tab[h2] = s
--		return true
--	}
--	return false
--}
--
--// push attempts to push aside the entry in slot i.
--func (t *table) push(i uint32, depth int) bool {
--	if depth > len(t.tab) {
--		return false
--	}
--	s := t.tab[i]
--	h1, h2 := t.hash(s)
--	j := h1 + h2 - i
--	if t.tab[j] != "" && !t.push(j, depth+1) {
--		return false
--	}
--	t.tab[j] = s
--	return true
--}
--
--// The lists of element names and attribute keys were taken from
--// http://www.whatwg.org/specs/web-apps/current-work/multipage/section-index.html
--// as of the "HTML Living Standard - Last Updated 30 May 2012" version.
--
--var elements = []string{
--	"a",
--	"abbr",
--	"address",
--	"area",
--	"article",
--	"aside",
--	"audio",
--	"b",
--	"base",
--	"bdi",
--	"bdo",
--	"blockquote",
--	"body",
--	"br",
--	"button",
--	"canvas",
--	"caption",
--	"cite",
--	"code",
--	"col",
--	"colgroup",
--	"command",
--	"data",
--	"datalist",
--	"dd",
--	"del",
--	"details",
--	"dfn",
--	"dialog",
--	"div",
--	"dl",
--	"dt",
--	"em",
--	"embed",
--	"fieldset",
--	"figcaption",
--	"figure",
--	"footer",
--	"form",
--	"h1",
--	"h2",
--	"h3",
--	"h4",
--	"h5",
--	"h6",
--	"head",
--	"header",
--	"hgroup",
--	"hr",
--	"html",
--	"i",
--	"iframe",
--	"img",
--	"input",
--	"ins",
--	"kbd",
--	"keygen",
--	"label",
--	"legend",
--	"li",
--	"link",
--	"map",
--	"mark",
--	"menu",
--	"meta",
--	"meter",
--	"nav",
--	"noscript",
--	"object",
--	"ol",
--	"optgroup",
--	"option",
--	"output",
--	"p",
--	"param",
--	"pre",
--	"progress",
--	"q",
--	"rp",
--	"rt",
--	"ruby",
--	"s",
--	"samp",
--	"script",
--	"section",
--	"select",
--	"small",
--	"source",
--	"span",
--	"strong",
--	"style",
--	"sub",
--	"summary",
--	"sup",
--	"table",
--	"tbody",
--	"td",
--	"textarea",
--	"tfoot",
--	"th",
--	"thead",
--	"time",
--	"title",
--	"tr",
--	"track",
--	"u",
--	"ul",
--	"var",
--	"video",
--	"wbr",
--}
--
--var attributes = []string{
--	"accept",
--	"accept-charset",
--	"accesskey",
--	"action",
--	"alt",
--	"async",
--	"autocomplete",
--	"autofocus",
--	"autoplay",
--	"border",
--	"challenge",
--	"charset",
--	"checked",
--	"cite",
--	"class",
--	"cols",
--	"colspan",
--	"command",
--	"content",
--	"contenteditable",
--	"contextmenu",
--	"controls",
--	"coords",
--	"crossorigin",
--	"data",
--	"datetime",
--	"default",
--	"defer",
--	"dir",
--	"dirname",
--	"disabled",
--	"download",
--	"draggable",
--	"dropzone",
--	"enctype",
--	"for",
--	"form",
--	"formaction",
--	"formenctype",
--	"formmethod",
--	"formnovalidate",
--	"formtarget",
--	"headers",
--	"height",
--	"hidden",
--	"high",
--	"href",
--	"hreflang",
--	"http-equiv",
--	"icon",
--	"id",
--	"inert",
--	"ismap",
--	"itemid",
--	"itemprop",
--	"itemref",
--	"itemscope",
--	"itemtype",
--	"keytype",
--	"kind",
--	"label",
--	"lang",
--	"list",
--	"loop",
--	"low",
--	"manifest",
--	"max",
--	"maxlength",
--	"media",
--	"mediagroup",
--	"method",
--	"min",
--	"multiple",
--	"muted",
--	"name",
--	"novalidate",
--	"open",
--	"optimum",
--	"pattern",
--	"ping",
--	"placeholder",
--	"poster",
--	"preload",
--	"radiogroup",
--	"readonly",
--	"rel",
--	"required",
--	"reversed",
--	"rows",
--	"rowspan",
--	"sandbox",
--	"spellcheck",
--	"scope",
--	"scoped",
--	"seamless",
--	"selected",
--	"shape",
--	"size",
--	"sizes",
--	"span",
--	"src",
--	"srcdoc",
--	"srclang",
--	"start",
--	"step",
--	"style",
--	"tabindex",
--	"target",
--	"title",
--	"translate",
--	"type",
--	"typemustmatch",
--	"usemap",
--	"value",
--	"width",
--	"wrap",
--}
--
--var eventHandlers = []string{
--	"onabort",
--	"onafterprint",
--	"onbeforeprint",
--	"onbeforeunload",
--	"onblur",
--	"oncancel",
--	"oncanplay",
--	"oncanplaythrough",
--	"onchange",
--	"onclick",
--	"onclose",
--	"oncontextmenu",
--	"oncuechange",
--	"ondblclick",
--	"ondrag",
--	"ondragend",
--	"ondragenter",
--	"ondragleave",
--	"ondragover",
--	"ondragstart",
--	"ondrop",
--	"ondurationchange",
--	"onemptied",
--	"onended",
--	"onerror",
--	"onfocus",
--	"onhashchange",
--	"oninput",
--	"oninvalid",
--	"onkeydown",
--	"onkeypress",
--	"onkeyup",
--	"onload",
--	"onloadeddata",
--	"onloadedmetadata",
--	"onloadstart",
--	"onmessage",
--	"onmousedown",
--	"onmousemove",
--	"onmouseout",
--	"onmouseover",
--	"onmouseup",
--	"onmousewheel",
--	"onoffline",
--	"ononline",
--	"onpagehide",
--	"onpageshow",
--	"onpause",
--	"onplay",
--	"onplaying",
--	"onpopstate",
--	"onprogress",
--	"onratechange",
--	"onreset",
--	"onresize",
--	"onscroll",
--	"onseeked",
--	"onseeking",
--	"onselect",
--	"onshow",
--	"onstalled",
--	"onstorage",
--	"onsubmit",
--	"onsuspend",
--	"ontimeupdate",
--	"onunload",
--	"onvolumechange",
--	"onwaiting",
--}
--
--// extra are ad-hoc values not covered by any of the lists above.
--var extra = []string{
--	"align",
--	"annotation",
--	"annotation-xml",
--	"applet",
--	"basefont",
--	"bgsound",
--	"big",
--	"blink",
--	"center",
--	"color",
--	"desc",
--	"face",
--	"font",
--	"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
--	"foreignobject",
--	"frame",
--	"frameset",
--	"image",
--	"isindex",
--	"listing",
--	"malignmark",
--	"marquee",
--	"math",
--	"mglyph",
--	"mi",
--	"mn",
--	"mo",
--	"ms",
--	"mtext",
--	"nobr",
--	"noembed",
--	"noframes",
--	"plaintext",
--	"prompt",
--	"public",
--	"spacer",
--	"strike",
--	"svg",
--	"system",
--	"tt",
--	"xmp",
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/atom/table.go b/Godeps/_workspace/src/golang.org/x/net/html/atom/table.go
-deleted file mode 100644
-index 20b8b8a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/atom/table.go
-+++ /dev/null
-@@ -1,694 +0,0 @@
--// generated by go run gen.go; DO NOT EDIT
--
--package atom
--
--const (
--	A                Atom = 0x1
--	Abbr             Atom = 0x4
--	Accept           Atom = 0x2106
--	AcceptCharset    Atom = 0x210e
--	Accesskey        Atom = 0x3309
--	Action           Atom = 0x21b06
--	Address          Atom = 0x5d507
--	Align            Atom = 0x1105
--	Alt              Atom = 0x4503
--	Annotation       Atom = 0x18d0a
--	AnnotationXml    Atom = 0x18d0e
--	Applet           Atom = 0x2d106
--	Area             Atom = 0x31804
--	Article          Atom = 0x39907
--	Aside            Atom = 0x4f05
--	Async            Atom = 0x9305
--	Audio            Atom = 0xaf05
--	Autocomplete     Atom = 0xd50c
--	Autofocus        Atom = 0xe109
--	Autoplay         Atom = 0x10c08
--	B                Atom = 0x101
--	Base             Atom = 0x11404
--	Basefont         Atom = 0x11408
--	Bdi              Atom = 0x1a03
--	Bdo              Atom = 0x12503
--	Bgsound          Atom = 0x13807
--	Big              Atom = 0x14403
--	Blink            Atom = 0x14705
--	Blockquote       Atom = 0x14c0a
--	Body             Atom = 0x2f04
--	Border           Atom = 0x15606
--	Br               Atom = 0x202
--	Button           Atom = 0x15c06
--	Canvas           Atom = 0x4b06
--	Caption          Atom = 0x1e007
--	Center           Atom = 0x2df06
--	Challenge        Atom = 0x23e09
--	Charset          Atom = 0x2807
--	Checked          Atom = 0x33f07
--	Cite             Atom = 0x9704
--	Class            Atom = 0x3d905
--	Code             Atom = 0x16f04
--	Col              Atom = 0x17603
--	Colgroup         Atom = 0x17608
--	Color            Atom = 0x18305
--	Cols             Atom = 0x18804
--	Colspan          Atom = 0x18807
--	Command          Atom = 0x19b07
--	Content          Atom = 0x42c07
--	Contenteditable  Atom = 0x42c0f
--	Contextmenu      Atom = 0x3480b
--	Controls         Atom = 0x1ae08
--	Coords           Atom = 0x1ba06
--	Crossorigin      Atom = 0x1c40b
--	Data             Atom = 0x44304
--	Datalist         Atom = 0x44308
--	Datetime         Atom = 0x25b08
--	Dd               Atom = 0x28802
--	Default          Atom = 0x5207
--	Defer            Atom = 0x17105
--	Del              Atom = 0x4d603
--	Desc             Atom = 0x4804
--	Details          Atom = 0x6507
--	Dfn              Atom = 0x8303
--	Dialog           Atom = 0x1b06
--	Dir              Atom = 0x9d03
--	Dirname          Atom = 0x9d07
--	Disabled         Atom = 0x10008
--	Div              Atom = 0x10703
--	Dl               Atom = 0x13e02
--	Download         Atom = 0x40908
--	Draggable        Atom = 0x1a109
--	Dropzone         Atom = 0x3a208
--	Dt               Atom = 0x4e402
--	Em               Atom = 0x7f02
--	Embed            Atom = 0x7f05
--	Enctype          Atom = 0x23007
--	Face             Atom = 0x2dd04
--	Fieldset         Atom = 0x1d508
--	Figcaption       Atom = 0x1dd0a
--	Figure           Atom = 0x1f106
--	Font             Atom = 0x11804
--	Footer           Atom = 0x5906
--	For              Atom = 0x1fd03
--	ForeignObject    Atom = 0x1fd0d
--	Foreignobject    Atom = 0x20a0d
--	Form             Atom = 0x21704
--	Formaction       Atom = 0x2170a
--	Formenctype      Atom = 0x22c0b
--	Formmethod       Atom = 0x2470a
--	Formnovalidate   Atom = 0x2510e
--	Formtarget       Atom = 0x2660a
--	Frame            Atom = 0x8705
--	Frameset         Atom = 0x8708
--	H1               Atom = 0x13602
--	H2               Atom = 0x29602
--	H3               Atom = 0x2c502
--	H4               Atom = 0x30e02
--	H5               Atom = 0x4e602
--	H6               Atom = 0x27002
--	Head             Atom = 0x2fa04
--	Header           Atom = 0x2fa06
--	Headers          Atom = 0x2fa07
--	Height           Atom = 0x27206
--	Hgroup           Atom = 0x27a06
--	Hidden           Atom = 0x28606
--	High             Atom = 0x29304
--	Hr               Atom = 0x13102
--	Href             Atom = 0x29804
--	Hreflang         Atom = 0x29808
--	Html             Atom = 0x27604
--	HttpEquiv        Atom = 0x2a00a
--	I                Atom = 0x601
--	Icon             Atom = 0x42b04
--	Id               Atom = 0x5102
--	Iframe           Atom = 0x2b406
--	Image            Atom = 0x2ba05
--	Img              Atom = 0x2bf03
--	Inert            Atom = 0x4c105
--	Input            Atom = 0x3f605
--	Ins              Atom = 0x1cd03
--	Isindex          Atom = 0x2c707
--	Ismap            Atom = 0x2ce05
--	Itemid           Atom = 0x9806
--	Itemprop         Atom = 0x57e08
--	Itemref          Atom = 0x2d707
--	Itemscope        Atom = 0x2e509
--	Itemtype         Atom = 0x2ef08
--	Kbd              Atom = 0x1903
--	Keygen           Atom = 0x3906
--	Keytype          Atom = 0x51207
--	Kind             Atom = 0xfd04
--	Label            Atom = 0xba05
--	Lang             Atom = 0x29c04
--	Legend           Atom = 0x1a806
--	Li               Atom = 0x1202
--	Link             Atom = 0x14804
--	List             Atom = 0x44704
--	Listing          Atom = 0x44707
--	Loop             Atom = 0xbe04
--	Low              Atom = 0x13f03
--	Malignmark       Atom = 0x100a
--	Manifest         Atom = 0x5b608
--	Map              Atom = 0x2d003
--	Mark             Atom = 0x1604
--	Marquee          Atom = 0x5f207
--	Math             Atom = 0x2f704
--	Max              Atom = 0x30603
--	Maxlength        Atom = 0x30609
--	Media            Atom = 0xa205
--	Mediagroup       Atom = 0xa20a
--	Menu             Atom = 0x34f04
--	Meta             Atom = 0x45604
--	Meter            Atom = 0x26105
--	Method           Atom = 0x24b06
--	Mglyph           Atom = 0x2c006
--	Mi               Atom = 0x9b02
--	Min              Atom = 0x31003
--	Mn               Atom = 0x25402
--	Mo               Atom = 0x47a02
--	Ms               Atom = 0x2e802
--	Mtext            Atom = 0x31305
--	Multiple         Atom = 0x32108
--	Muted            Atom = 0x32905
--	Name             Atom = 0xa004
--	Nav              Atom = 0x3e03
--	Nobr             Atom = 0x7404
--	Noembed          Atom = 0x7d07
--	Noframes         Atom = 0x8508
--	Noscript         Atom = 0x28b08
--	Novalidate       Atom = 0x2550a
--	Object           Atom = 0x21106
--	Ol               Atom = 0xcd02
--	Onabort          Atom = 0x16007
--	Onafterprint     Atom = 0x1e50c
--	Onbeforeprint    Atom = 0x21f0d
--	Onbeforeunload   Atom = 0x5c90e
--	Onblur           Atom = 0x3e206
--	Oncancel         Atom = 0xb308
--	Oncanplay        Atom = 0x12709
--	Oncanplaythrough Atom = 0x12710
--	Onchange         Atom = 0x3b808
--	Onclick          Atom = 0x2ad07
--	Onclose          Atom = 0x32e07
--	Oncontextmenu    Atom = 0x3460d
--	Oncuechange      Atom = 0x3530b
--	Ondblclick       Atom = 0x35e0a
--	Ondrag           Atom = 0x36806
--	Ondragend        Atom = 0x36809
--	Ondragenter      Atom = 0x3710b
--	Ondragleave      Atom = 0x37c0b
--	Ondragover       Atom = 0x3870a
--	Ondragstart      Atom = 0x3910b
--	Ondrop           Atom = 0x3a006
--	Ondurationchange Atom = 0x3b010
--	Onemptied        Atom = 0x3a709
--	Onended          Atom = 0x3c007
--	Onerror          Atom = 0x3c707
--	Onfocus          Atom = 0x3ce07
--	Onhashchange     Atom = 0x3e80c
--	Oninput          Atom = 0x3f407
--	Oninvalid        Atom = 0x3fb09
--	Onkeydown        Atom = 0x40409
--	Onkeypress       Atom = 0x4110a
--	Onkeyup          Atom = 0x42107
--	Onload           Atom = 0x43b06
--	Onloadeddata     Atom = 0x43b0c
--	Onloadedmetadata Atom = 0x44e10
--	Onloadstart      Atom = 0x4640b
--	Onmessage        Atom = 0x46f09
--	Onmousedown      Atom = 0x4780b
--	Onmousemove      Atom = 0x4830b
--	Onmouseout       Atom = 0x48e0a
--	Onmouseover      Atom = 0x49b0b
--	Onmouseup        Atom = 0x4a609
--	Onmousewheel     Atom = 0x4af0c
--	Onoffline        Atom = 0x4bb09
--	Ononline         Atom = 0x4c608
--	Onpagehide       Atom = 0x4ce0a
--	Onpageshow       Atom = 0x4d90a
--	Onpause          Atom = 0x4e807
--	Onplay           Atom = 0x4f206
--	Onplaying        Atom = 0x4f209
--	Onpopstate       Atom = 0x4fb0a
--	Onprogress       Atom = 0x5050a
--	Onratechange     Atom = 0x5190c
--	Onreset          Atom = 0x52507
--	Onresize         Atom = 0x52c08
--	Onscroll         Atom = 0x53a08
--	Onseeked         Atom = 0x54208
--	Onseeking        Atom = 0x54a09
--	Onselect         Atom = 0x55308
--	Onshow           Atom = 0x55d06
--	Onstalled        Atom = 0x56609
--	Onstorage        Atom = 0x56f09
--	Onsubmit         Atom = 0x57808
--	Onsuspend        Atom = 0x58809
--	Ontimeupdate     Atom = 0x1190c
--	Onunload         Atom = 0x59108
--	Onvolumechange   Atom = 0x5990e
--	Onwaiting        Atom = 0x5a709
--	Open             Atom = 0x58404
--	Optgroup         Atom = 0xc008
--	Optimum          Atom = 0x5b007
--	Option           Atom = 0x5c506
--	Output           Atom = 0x49506
--	P                Atom = 0xc01
--	Param            Atom = 0xc05
--	Pattern          Atom = 0x6e07
--	Ping             Atom = 0xab04
--	Placeholder      Atom = 0xc70b
--	Plaintext        Atom = 0xf109
--	Poster           Atom = 0x17d06
--	Pre              Atom = 0x27f03
--	Preload          Atom = 0x27f07
--	Progress         Atom = 0x50708
--	Prompt           Atom = 0x5bf06
--	Public           Atom = 0x42706
--	Q                Atom = 0x15101
--	Radiogroup       Atom = 0x30a
--	Readonly         Atom = 0x31908
--	Rel              Atom = 0x28003
--	Required         Atom = 0x1f508
--	Reversed         Atom = 0x5e08
--	Rows             Atom = 0x7704
--	Rowspan          Atom = 0x7707
--	Rp               Atom = 0x1eb02
--	Rt               Atom = 0x16502
--	Ruby             Atom = 0xd104
--	S                Atom = 0x2c01
--	Samp             Atom = 0x6b04
--	Sandbox          Atom = 0xe907
--	Scope            Atom = 0x2e905
--	Scoped           Atom = 0x2e906
--	Script           Atom = 0x28d06
--	Seamless         Atom = 0x33308
--	Section          Atom = 0x3dd07
--	Select           Atom = 0x55506
--	Selected         Atom = 0x55508
--	Shape            Atom = 0x1b505
--	Size             Atom = 0x53004
--	Sizes            Atom = 0x53005
--	Small            Atom = 0x1bf05
--	Source           Atom = 0x1cf06
--	Spacer           Atom = 0x30006
--	Span             Atom = 0x7a04
--	Spellcheck       Atom = 0x33a0a
--	Src              Atom = 0x3d403
--	Srcdoc           Atom = 0x3d406
--	Srclang          Atom = 0x41a07
--	Start            Atom = 0x39705
--	Step             Atom = 0x5bc04
--	Strike           Atom = 0x50e06
--	Strong           Atom = 0x53406
--	Style            Atom = 0x5db05
--	Sub              Atom = 0x57a03
--	Summary          Atom = 0x5e007
--	Sup              Atom = 0x5e703
--	Svg              Atom = 0x5ea03
--	System           Atom = 0x5ed06
--	Tabindex         Atom = 0x45c08
--	Table            Atom = 0x43605
--	Target           Atom = 0x26a06
--	Tbody            Atom = 0x2e05
--	Td               Atom = 0x4702
--	Textarea         Atom = 0x31408
--	Tfoot            Atom = 0x5805
--	Th               Atom = 0x13002
--	Thead            Atom = 0x2f905
--	Time             Atom = 0x11b04
--	Title            Atom = 0x8e05
--	Tr               Atom = 0xf902
--	Track            Atom = 0xf905
--	Translate        Atom = 0x16609
--	Tt               Atom = 0x7002
--	Type             Atom = 0x23304
--	Typemustmatch    Atom = 0x2330d
--	U                Atom = 0xb01
--	Ul               Atom = 0x5602
--	Usemap           Atom = 0x4ec06
--	Value            Atom = 0x4005
--	Var              Atom = 0x10903
--	Video            Atom = 0x2a905
--	Wbr              Atom = 0x14103
--	Width            Atom = 0x4e205
--	Wrap             Atom = 0x56204
--	Xmp              Atom = 0xef03
--)
--
--const hash0 = 0xc17da63e
--
--const maxAtomLen = 16
--
--var table = [1 << 9]Atom{
--	0x1:   0x4830b, // onmousemove
--	0x2:   0x5a709, // onwaiting
--	0x4:   0x5bf06, // prompt
--	0x7:   0x5b007, // optimum
--	0x8:   0x1604,  // mark
--	0xa:   0x2d707, // itemref
--	0xb:   0x4d90a, // onpageshow
--	0xc:   0x55506, // select
--	0xd:   0x1a109, // draggable
--	0xe:   0x3e03,  // nav
--	0xf:   0x19b07, // command
--	0x11:  0xb01,   // u
--	0x14:  0x2fa07, // headers
--	0x15:  0x44308, // datalist
--	0x17:  0x6b04,  // samp
--	0x1a:  0x40409, // onkeydown
--	0x1b:  0x53a08, // onscroll
--	0x1c:  0x17603, // col
--	0x20:  0x57e08, // itemprop
--	0x21:  0x2a00a, // http-equiv
--	0x22:  0x5e703, // sup
--	0x24:  0x1f508, // required
--	0x2b:  0x27f07, // preload
--	0x2c:  0x21f0d, // onbeforeprint
--	0x2d:  0x3710b, // ondragenter
--	0x2e:  0x4e402, // dt
--	0x2f:  0x57808, // onsubmit
--	0x30:  0x13102, // hr
--	0x31:  0x3460d, // oncontextmenu
--	0x33:  0x2ba05, // image
--	0x34:  0x4e807, // onpause
--	0x35:  0x27a06, // hgroup
--	0x36:  0xab04,  // ping
--	0x37:  0x55308, // onselect
--	0x3a:  0x10703, // div
--	0x40:  0x9b02,  // mi
--	0x41:  0x33308, // seamless
--	0x42:  0x2807,  // charset
--	0x43:  0x5102,  // id
--	0x44:  0x4fb0a, // onpopstate
--	0x45:  0x4d603, // del
--	0x46:  0x5f207, // marquee
--	0x47:  0x3309,  // accesskey
--	0x49:  0x5906,  // footer
--	0x4a:  0x2d106, // applet
--	0x4b:  0x2ce05, // ismap
--	0x51:  0x34f04, // menu
--	0x52:  0x2f04,  // body
--	0x55:  0x8708,  // frameset
--	0x56:  0x52507, // onreset
--	0x57:  0x14705, // blink
--	0x58:  0x8e05,  // title
--	0x59:  0x39907, // article
--	0x5b:  0x13002, // th
--	0x5d:  0x15101, // q
--	0x5e:  0x58404, // open
--	0x5f:  0x31804, // area
--	0x61:  0x43b06, // onload
--	0x62:  0x3f605, // input
--	0x63:  0x11404, // base
--	0x64:  0x18807, // colspan
--	0x65:  0x51207, // keytype
--	0x66:  0x13e02, // dl
--	0x68:  0x1d508, // fieldset
--	0x6a:  0x31003, // min
--	0x6b:  0x10903, // var
--	0x6f:  0x2fa06, // header
--	0x70:  0x16502, // rt
--	0x71:  0x17608, // colgroup
--	0x72:  0x25402, // mn
--	0x74:  0x16007, // onabort
--	0x75:  0x3906,  // keygen
--	0x76:  0x4bb09, // onoffline
--	0x77:  0x23e09, // challenge
--	0x78:  0x2d003, // map
--	0x7a:  0x30e02, // h4
--	0x7b:  0x3c707, // onerror
--	0x7c:  0x30609, // maxlength
--	0x7d:  0x31305, // mtext
--	0x7e:  0x5805,  // tfoot
--	0x7f:  0x11804, // font
--	0x80:  0x100a,  // malignmark
--	0x81:  0x45604, // meta
--	0x82:  0x9305,  // async
--	0x83:  0x2c502, // h3
--	0x84:  0x28802, // dd
--	0x85:  0x29804, // href
--	0x86:  0xa20a,  // mediagroup
--	0x87:  0x1ba06, // coords
--	0x88:  0x41a07, // srclang
--	0x89:  0x35e0a, // ondblclick
--	0x8a:  0x4005,  // value
--	0x8c:  0xb308,  // oncancel
--	0x8e:  0x33a0a, // spellcheck
--	0x8f:  0x8705,  // frame
--	0x91:  0x14403, // big
--	0x94:  0x21b06, // action
--	0x95:  0x9d03,  // dir
--	0x97:  0x31908, // readonly
--	0x99:  0x43605, // table
--	0x9a:  0x5e007, // summary
--	0x9b:  0x14103, // wbr
--	0x9c:  0x30a,   // radiogroup
--	0x9d:  0xa004,  // name
--	0x9f:  0x5ed06, // system
--	0xa1:  0x18305, // color
--	0xa2:  0x4b06,  // canvas
--	0xa3:  0x27604, // html
--	0xa5:  0x54a09, // onseeking
--	0xac:  0x1b505, // shape
--	0xad:  0x28003, // rel
--	0xae:  0x12710, // oncanplaythrough
--	0xaf:  0x3870a, // ondragover
--	0xb1:  0x1fd0d, // foreignObject
--	0xb3:  0x7704,  // rows
--	0xb6:  0x44707, // listing
--	0xb7:  0x49506, // output
--	0xb9:  0x3480b, // contextmenu
--	0xbb:  0x13f03, // low
--	0xbc:  0x1eb02, // rp
--	0xbd:  0x58809, // onsuspend
--	0xbe:  0x15c06, // button
--	0xbf:  0x4804,  // desc
--	0xc1:  0x3dd07, // section
--	0xc2:  0x5050a, // onprogress
--	0xc3:  0x56f09, // onstorage
--	0xc4:  0x2f704, // math
--	0xc5:  0x4f206, // onplay
--	0xc7:  0x5602,  // ul
--	0xc8:  0x6e07,  // pattern
--	0xc9:  0x4af0c, // onmousewheel
--	0xca:  0x36809, // ondragend
--	0xcb:  0xd104,  // ruby
--	0xcc:  0xc01,   // p
--	0xcd:  0x32e07, // onclose
--	0xce:  0x26105, // meter
--	0xcf:  0x13807, // bgsound
--	0xd2:  0x27206, // height
--	0xd4:  0x101,   // b
--	0xd5:  0x2ef08, // itemtype
--	0xd8:  0x1e007, // caption
--	0xd9:  0x10008, // disabled
--	0xdc:  0x5ea03, // svg
--	0xdd:  0x1bf05, // small
--	0xde:  0x44304, // data
--	0xe0:  0x4c608, // ononline
--	0xe1:  0x2c006, // mglyph
--	0xe3:  0x7f05,  // embed
--	0xe4:  0xf902,  // tr
--	0xe5:  0x4640b, // onloadstart
--	0xe7:  0x3b010, // ondurationchange
--	0xed:  0x12503, // bdo
--	0xee:  0x4702,  // td
--	0xef:  0x4f05,  // aside
--	0xf0:  0x29602, // h2
--	0xf1:  0x50708, // progress
--	0xf2:  0x14c0a, // blockquote
--	0xf4:  0xba05,  // label
--	0xf5:  0x601,   // i
--	0xf7:  0x7707,  // rowspan
--	0xfb:  0x4f209, // onplaying
--	0xfd:  0x2bf03, // img
--	0xfe:  0xc008,  // optgroup
--	0xff:  0x42c07, // content
--	0x101: 0x5190c, // onratechange
--	0x103: 0x3e80c, // onhashchange
--	0x104: 0x6507,  // details
--	0x106: 0x40908, // download
--	0x109: 0xe907,  // sandbox
--	0x10b: 0x42c0f, // contenteditable
--	0x10d: 0x37c0b, // ondragleave
--	0x10e: 0x2106,  // accept
--	0x10f: 0x55508, // selected
--	0x112: 0x2170a, // formaction
--	0x113: 0x2df06, // center
--	0x115: 0x44e10, // onloadedmetadata
--	0x116: 0x14804, // link
--	0x117: 0x11b04, // time
--	0x118: 0x1c40b, // crossorigin
--	0x119: 0x3ce07, // onfocus
--	0x11a: 0x56204, // wrap
--	0x11b: 0x42b04, // icon
--	0x11d: 0x2a905, // video
--	0x11e: 0x3d905, // class
--	0x121: 0x5990e, // onvolumechange
--	0x122: 0x3e206, // onblur
--	0x123: 0x2e509, // itemscope
--	0x124: 0x5db05, // style
--	0x127: 0x42706, // public
--	0x129: 0x2510e, // formnovalidate
--	0x12a: 0x55d06, // onshow
--	0x12c: 0x16609, // translate
--	0x12d: 0x9704,  // cite
--	0x12e: 0x2e802, // ms
--	0x12f: 0x1190c, // ontimeupdate
--	0x130: 0xfd04,  // kind
--	0x131: 0x2660a, // formtarget
--	0x135: 0x3c007, // onended
--	0x136: 0x28606, // hidden
--	0x137: 0x2c01,  // s
--	0x139: 0x2470a, // formmethod
--	0x13a: 0x44704, // list
--	0x13c: 0x27002, // h6
--	0x13d: 0xcd02,  // ol
--	0x13e: 0x3530b, // oncuechange
--	0x13f: 0x20a0d, // foreignobject
--	0x143: 0x5c90e, // onbeforeunload
--	0x145: 0x3a709, // onemptied
--	0x146: 0x17105, // defer
--	0x147: 0xef03,  // xmp
--	0x148: 0xaf05,  // audio
--	0x149: 0x1903,  // kbd
--	0x14c: 0x46f09, // onmessage
--	0x14d: 0x5c506, // option
--	0x14e: 0x4503,  // alt
--	0x14f: 0x33f07, // checked
--	0x150: 0x10c08, // autoplay
--	0x152: 0x202,   // br
--	0x153: 0x2550a, // novalidate
--	0x156: 0x7d07,  // noembed
--	0x159: 0x2ad07, // onclick
--	0x15a: 0x4780b, // onmousedown
--	0x15b: 0x3b808, // onchange
--	0x15e: 0x3fb09, // oninvalid
--	0x15f: 0x2e906, // scoped
--	0x160: 0x1ae08, // controls
--	0x161: 0x32905, // muted
--	0x163: 0x4ec06, // usemap
--	0x164: 0x1dd0a, // figcaption
--	0x165: 0x36806, // ondrag
--	0x166: 0x29304, // high
--	0x168: 0x3d403, // src
--	0x169: 0x17d06, // poster
--	0x16b: 0x18d0e, // annotation-xml
--	0x16c: 0x5bc04, // step
--	0x16d: 0x4,     // abbr
--	0x16e: 0x1b06,  // dialog
--	0x170: 0x1202,  // li
--	0x172: 0x47a02, // mo
--	0x175: 0x1fd03, // for
--	0x176: 0x1cd03, // ins
--	0x178: 0x53004, // size
--	0x17a: 0x5207,  // default
--	0x17b: 0x1a03,  // bdi
--	0x17c: 0x4ce0a, // onpagehide
--	0x17d: 0x9d07,  // dirname
--	0x17e: 0x23304, // type
--	0x17f: 0x21704, // form
--	0x180: 0x4c105, // inert
--	0x181: 0x12709, // oncanplay
--	0x182: 0x8303,  // dfn
--	0x183: 0x45c08, // tabindex
--	0x186: 0x7f02,  // em
--	0x187: 0x29c04, // lang
--	0x189: 0x3a208, // dropzone
--	0x18a: 0x4110a, // onkeypress
--	0x18b: 0x25b08, // datetime
--	0x18c: 0x18804, // cols
--	0x18d: 0x1,     // a
--	0x18e: 0x43b0c, // onloadeddata
--	0x191: 0x15606, // border
--	0x192: 0x2e05,  // tbody
--	0x193: 0x24b06, // method
--	0x195: 0xbe04,  // loop
--	0x196: 0x2b406, // iframe
--	0x198: 0x2fa04, // head
--	0x19e: 0x5b608, // manifest
--	0x19f: 0xe109,  // autofocus
--	0x1a0: 0x16f04, // code
--	0x1a1: 0x53406, // strong
--	0x1a2: 0x32108, // multiple
--	0x1a3: 0xc05,   // param
--	0x1a6: 0x23007, // enctype
--	0x1a7: 0x2dd04, // face
--	0x1a8: 0xf109,  // plaintext
--	0x1a9: 0x13602, // h1
--	0x1aa: 0x56609, // onstalled
--	0x1ad: 0x28d06, // script
--	0x1ae: 0x30006, // spacer
--	0x1af: 0x52c08, // onresize
--	0x1b0: 0x49b0b, // onmouseover
--	0x1b1: 0x59108, // onunload
--	0x1b2: 0x54208, // onseeked
--	0x1b4: 0x2330d, // typemustmatch
--	0x1b5: 0x1f106, // figure
--	0x1b6: 0x48e0a, // onmouseout
--	0x1b7: 0x27f03, // pre
--	0x1b8: 0x4e205, // width
--	0x1bb: 0x7404,  // nobr
--	0x1be: 0x7002,  // tt
--	0x1bf: 0x1105,  // align
--	0x1c0: 0x3f407, // oninput
--	0x1c3: 0x42107, // onkeyup
--	0x1c6: 0x1e50c, // onafterprint
--	0x1c7: 0x210e,  // accept-charset
--	0x1c8: 0x9806,  // itemid
--	0x1cb: 0x50e06, // strike
--	0x1cc: 0x57a03, // sub
--	0x1cd: 0xf905,  // track
--	0x1ce: 0x39705, // start
--	0x1d0: 0x11408, // basefont
--	0x1d6: 0x1cf06, // source
--	0x1d7: 0x1a806, // legend
--	0x1d8: 0x2f905, // thead
--	0x1da: 0x2e905, // scope
--	0x1dd: 0x21106, // object
--	0x1de: 0xa205,  // media
--	0x1df: 0x18d0a, // annotation
--	0x1e0: 0x22c0b, // formenctype
--	0x1e2: 0x28b08, // noscript
--	0x1e4: 0x53005, // sizes
--	0x1e5: 0xd50c,  // autocomplete
--	0x1e6: 0x7a04,  // span
--	0x1e7: 0x8508,  // noframes
--	0x1e8: 0x26a06, // target
--	0x1e9: 0x3a006, // ondrop
--	0x1ea: 0x3d406, // srcdoc
--	0x1ec: 0x5e08,  // reversed
--	0x1f0: 0x2c707, // isindex
--	0x1f3: 0x29808, // hreflang
--	0x1f5: 0x4e602, // h5
--	0x1f6: 0x5d507, // address
--	0x1fa: 0x30603, // max
--	0x1fb: 0xc70b,  // placeholder
--	0x1fc: 0x31408, // textarea
--	0x1fe: 0x4a609, // onmouseup
--	0x1ff: 0x3910b, // ondragstart
--}
--
--const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
--	"genavaluealtdescanvasidefaultfootereversedetailsampatternobr" +
--	"owspanoembedfnoframesetitleasyncitemidirnamediagroupingaudio" +
--	"ncancelabelooptgrouplaceholderubyautocompleteautofocusandbox" +
--	"mplaintextrackindisabledivarautoplaybasefontimeupdatebdoncan" +
--	"playthrough1bgsoundlowbrbigblinkblockquoteborderbuttonabortr" +
--	"anslatecodefercolgroupostercolorcolspannotation-xmlcommandra" +
--	"ggablegendcontrolshapecoordsmallcrossoriginsourcefieldsetfig" +
--	"captionafterprintfigurequiredforeignObjectforeignobjectforma" +
--	"ctionbeforeprintformenctypemustmatchallengeformmethodformnov" +
--	"alidatetimeterformtargeth6heightmlhgroupreloadhiddenoscripth" +
--	"igh2hreflanghttp-equivideonclickiframeimageimglyph3isindexis" +
--	"mappletitemrefacenteritemscopeditemtypematheaderspacermaxlen" +
--	"gth4minmtextareadonlymultiplemutedoncloseamlesspellcheckedon" +
--	"contextmenuoncuechangeondblclickondragendondragenterondragle" +
--	"aveondragoverondragstarticleondropzonemptiedondurationchange" +
--	"onendedonerroronfocusrcdoclassectionbluronhashchangeoninputo" +
--	"ninvalidonkeydownloadonkeypressrclangonkeyupublicontentedita" +
--	"bleonloadeddatalistingonloadedmetadatabindexonloadstartonmes" +
--	"sageonmousedownonmousemoveonmouseoutputonmouseoveronmouseupo" +
--	"nmousewheelonofflinertononlineonpagehidelonpageshowidth5onpa" +
--	"usemaponplayingonpopstateonprogresstrikeytypeonratechangeonr" +
--	"esetonresizestrongonscrollonseekedonseekingonselectedonshowr" +
--	"aponstalledonstorageonsubmitempropenonsuspendonunloadonvolum" +
--	"echangeonwaitingoptimumanifestepromptoptionbeforeunloaddress" +
--	"tylesummarysupsvgsystemarquee"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/atom/table_test.go b/Godeps/_workspace/src/golang.org/x/net/html/atom/table_test.go
-deleted file mode 100644
-index db016a1..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/atom/table_test.go
-+++ /dev/null
-@@ -1,341 +0,0 @@
--// generated by go run gen.go -test; DO NOT EDIT
--
--package atom
--
--var testAtomList = []string{
--	"a",
--	"abbr",
--	"accept",
--	"accept-charset",
--	"accesskey",
--	"action",
--	"address",
--	"align",
--	"alt",
--	"annotation",
--	"annotation-xml",
--	"applet",
--	"area",
--	"article",
--	"aside",
--	"async",
--	"audio",
--	"autocomplete",
--	"autofocus",
--	"autoplay",
--	"b",
--	"base",
--	"basefont",
--	"bdi",
--	"bdo",
--	"bgsound",
--	"big",
--	"blink",
--	"blockquote",
--	"body",
--	"border",
--	"br",
--	"button",
--	"canvas",
--	"caption",
--	"center",
--	"challenge",
--	"charset",
--	"checked",
--	"cite",
--	"cite",
--	"class",
--	"code",
--	"col",
--	"colgroup",
--	"color",
--	"cols",
--	"colspan",
--	"command",
--	"command",
--	"content",
--	"contenteditable",
--	"contextmenu",
--	"controls",
--	"coords",
--	"crossorigin",
--	"data",
--	"data",
--	"datalist",
--	"datetime",
--	"dd",
--	"default",
--	"defer",
--	"del",
--	"desc",
--	"details",
--	"dfn",
--	"dialog",
--	"dir",
--	"dirname",
--	"disabled",
--	"div",
--	"dl",
--	"download",
--	"draggable",
--	"dropzone",
--	"dt",
--	"em",
--	"embed",
--	"enctype",
--	"face",
--	"fieldset",
--	"figcaption",
--	"figure",
--	"font",
--	"footer",
--	"for",
--	"foreignObject",
--	"foreignobject",
--	"form",
--	"form",
--	"formaction",
--	"formenctype",
--	"formmethod",
--	"formnovalidate",
--	"formtarget",
--	"frame",
--	"frameset",
--	"h1",
--	"h2",
--	"h3",
--	"h4",
--	"h5",
--	"h6",
--	"head",
--	"header",
--	"headers",
--	"height",
--	"hgroup",
--	"hidden",
--	"high",
--	"hr",
--	"href",
--	"hreflang",
--	"html",
--	"http-equiv",
--	"i",
--	"icon",
--	"id",
--	"iframe",
--	"image",
--	"img",
--	"inert",
--	"input",
--	"ins",
--	"isindex",
--	"ismap",
--	"itemid",
--	"itemprop",
--	"itemref",
--	"itemscope",
--	"itemtype",
--	"kbd",
--	"keygen",
--	"keytype",
--	"kind",
--	"label",
--	"label",
--	"lang",
--	"legend",
--	"li",
--	"link",
--	"list",
--	"listing",
--	"loop",
--	"low",
--	"malignmark",
--	"manifest",
--	"map",
--	"mark",
--	"marquee",
--	"math",
--	"max",
--	"maxlength",
--	"media",
--	"mediagroup",
--	"menu",
--	"meta",
--	"meter",
--	"method",
--	"mglyph",
--	"mi",
--	"min",
--	"mn",
--	"mo",
--	"ms",
--	"mtext",
--	"multiple",
--	"muted",
--	"name",
--	"nav",
--	"nobr",
--	"noembed",
--	"noframes",
--	"noscript",
--	"novalidate",
--	"object",
--	"ol",
--	"onabort",
--	"onafterprint",
--	"onbeforeprint",
--	"onbeforeunload",
--	"onblur",
--	"oncancel",
--	"oncanplay",
--	"oncanplaythrough",
--	"onchange",
--	"onclick",
--	"onclose",
--	"oncontextmenu",
--	"oncuechange",
--	"ondblclick",
--	"ondrag",
--	"ondragend",
--	"ondragenter",
--	"ondragleave",
--	"ondragover",
--	"ondragstart",
--	"ondrop",
--	"ondurationchange",
--	"onemptied",
--	"onended",
--	"onerror",
--	"onfocus",
--	"onhashchange",
--	"oninput",
--	"oninvalid",
--	"onkeydown",
--	"onkeypress",
--	"onkeyup",
--	"onload",
--	"onloadeddata",
--	"onloadedmetadata",
--	"onloadstart",
--	"onmessage",
--	"onmousedown",
--	"onmousemove",
--	"onmouseout",
--	"onmouseover",
--	"onmouseup",
--	"onmousewheel",
--	"onoffline",
--	"ononline",
--	"onpagehide",
--	"onpageshow",
--	"onpause",
--	"onplay",
--	"onplaying",
--	"onpopstate",
--	"onprogress",
--	"onratechange",
--	"onreset",
--	"onresize",
--	"onscroll",
--	"onseeked",
--	"onseeking",
--	"onselect",
--	"onshow",
--	"onstalled",
--	"onstorage",
--	"onsubmit",
--	"onsuspend",
--	"ontimeupdate",
--	"onunload",
--	"onvolumechange",
--	"onwaiting",
--	"open",
--	"optgroup",
--	"optimum",
--	"option",
--	"output",
--	"p",
--	"param",
--	"pattern",
--	"ping",
--	"placeholder",
--	"plaintext",
--	"poster",
--	"pre",
--	"preload",
--	"progress",
--	"prompt",
--	"public",
--	"q",
--	"radiogroup",
--	"readonly",
--	"rel",
--	"required",
--	"reversed",
--	"rows",
--	"rowspan",
--	"rp",
--	"rt",
--	"ruby",
--	"s",
--	"samp",
--	"sandbox",
--	"scope",
--	"scoped",
--	"script",
--	"seamless",
--	"section",
--	"select",
--	"selected",
--	"shape",
--	"size",
--	"sizes",
--	"small",
--	"source",
--	"spacer",
--	"span",
--	"span",
--	"spellcheck",
--	"src",
--	"srcdoc",
--	"srclang",
--	"start",
--	"step",
--	"strike",
--	"strong",
--	"style",
--	"style",
--	"sub",
--	"summary",
--	"sup",
--	"svg",
--	"system",
--	"tabindex",
--	"table",
--	"target",
--	"tbody",
--	"td",
--	"textarea",
--	"tfoot",
--	"th",
--	"thead",
--	"time",
--	"title",
--	"title",
--	"tr",
--	"track",
--	"translate",
--	"tt",
--	"type",
--	"typemustmatch",
--	"u",
--	"ul",
--	"usemap",
--	"value",
--	"var",
--	"video",
--	"wbr",
--	"width",
--	"wrap",
--	"xmp",
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/charset.go b/Godeps/_workspace/src/golang.org/x/net/html/charset/charset.go
-deleted file mode 100644
-index 2e5f9ba..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/charset.go
-+++ /dev/null
-@@ -1,231 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package charset provides common text encodings for HTML documents.
--//
--// The mapping from encoding labels to encodings is defined at
--// http://encoding.spec.whatwg.org.
--package charset // import "golang.org/x/net/html/charset"
--
--import (
--	"bytes"
--	"io"
--	"mime"
--	"strings"
--	"unicode/utf8"
--
--	"golang.org/x/net/html"
--	"golang.org/x/text/encoding"
--	"golang.org/x/text/encoding/charmap"
--	"golang.org/x/text/transform"
--)
--
--// Lookup returns the encoding with the specified label, and its canonical
--// name. It returns nil and the empty string if label is not one of the
--// standard encodings for HTML. Matching is case-insensitive and ignores
--// leading and trailing whitespace.
--func Lookup(label string) (e encoding.Encoding, name string) {
--	label = strings.ToLower(strings.Trim(label, "\t\n\r\f "))
--	enc := encodings[label]
--	return enc.e, enc.name
--}
--
--// DetermineEncoding determines the encoding of an HTML document by examining
--// up to the first 1024 bytes of content and the declared Content-Type.
--//
--// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
--func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
--	if len(content) > 1024 {
--		content = content[:1024]
--	}
--
--	for _, b := range boms {
--		if bytes.HasPrefix(content, b.bom) {
--			e, name = Lookup(b.enc)
--			return e, name, true
--		}
--	}
--
--	if _, params, err := mime.ParseMediaType(contentType); err == nil {
--		if cs, ok := params["charset"]; ok {
--			if e, name = Lookup(cs); e != nil {
--				return e, name, true
--			}
--		}
--	}
--
--	if len(content) > 0 {
--		e, name = prescan(content)
--		if e != nil {
--			return e, name, false
--		}
--	}
--
--	// Try to detect UTF-8.
--	// First eliminate any partial rune at the end.
--	for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
--		b := content[i]
--		if b < 0x80 {
--			break
--		}
--		if utf8.RuneStart(b) {
--			content = content[:i]
--			break
--		}
--	}
--	hasHighBit := false
--	for _, c := range content {
--		if c >= 0x80 {
--			hasHighBit = true
--			break
--		}
--	}
--	if hasHighBit && utf8.Valid(content) {
--		return encoding.Nop, "utf-8", false
--	}
--
--	// TODO: change default depending on user's locale?
--	return charmap.Windows1252, "windows-1252", false
--}
--
--// NewReader returns an io.Reader that converts the content of r to UTF-8.
--// It calls DetermineEncoding to find out what r's encoding is.
--func NewReader(r io.Reader, contentType string) (io.Reader, error) {
--	preview := make([]byte, 1024)
--	n, err := io.ReadFull(r, preview)
--	switch {
--	case err == io.ErrUnexpectedEOF:
--		preview = preview[:n]
--		r = bytes.NewReader(preview)
--	case err != nil:
--		return nil, err
--	default:
--		r = io.MultiReader(bytes.NewReader(preview), r)
--	}
--
--	if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
--		r = transform.NewReader(r, e.NewDecoder())
--	}
--	return r, nil
--}
--
--func prescan(content []byte) (e encoding.Encoding, name string) {
--	z := html.NewTokenizer(bytes.NewReader(content))
--	for {
--		switch z.Next() {
--		case html.ErrorToken:
--			return nil, ""
--
--		case html.StartTagToken, html.SelfClosingTagToken:
--			tagName, hasAttr := z.TagName()
--			if !bytes.Equal(tagName, []byte("meta")) {
--				continue
--			}
--			attrList := make(map[string]bool)
--			gotPragma := false
--
--			const (
--				dontKnow = iota
--				doNeedPragma
--				doNotNeedPragma
--			)
--			needPragma := dontKnow
--
--			name = ""
--			e = nil
--			for hasAttr {
--				var key, val []byte
--				key, val, hasAttr = z.TagAttr()
--				ks := string(key)
--				if attrList[ks] {
--					continue
--				}
--				attrList[ks] = true
--				for i, c := range val {
--					if 'A' <= c && c <= 'Z' {
--						val[i] = c + 0x20
--					}
--				}
--
--				switch ks {
--				case "http-equiv":
--					if bytes.Equal(val, []byte("content-type")) {
--						gotPragma = true
--					}
--
--				case "content":
--					if e == nil {
--						name = fromMetaElement(string(val))
--						if name != "" {
--							e, name = Lookup(name)
--							if e != nil {
--								needPragma = doNeedPragma
--							}
--						}
--					}
--
--				case "charset":
--					e, name = Lookup(string(val))
--					needPragma = doNotNeedPragma
--				}
--			}
--
--			if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
--				continue
--			}
--
--			if strings.HasPrefix(name, "utf-16") {
--				name = "utf-8"
--				e = encoding.Nop
--			}
--
--			if e != nil {
--				return e, name
--			}
--		}
--	}
--}
--
--func fromMetaElement(s string) string {
--	for s != "" {
--		csLoc := strings.Index(s, "charset")
--		if csLoc == -1 {
--			return ""
--		}
--		s = s[csLoc+len("charset"):]
--		s = strings.TrimLeft(s, " \t\n\f\r")
--		if !strings.HasPrefix(s, "=") {
--			continue
--		}
--		s = s[1:]
--		s = strings.TrimLeft(s, " \t\n\f\r")
--		if s == "" {
--			return ""
--		}
--		if q := s[0]; q == '"' || q == '\'' {
--			s = s[1:]
--			closeQuote := strings.IndexRune(s, rune(q))
--			if closeQuote == -1 {
--				return ""
--			}
--			return s[:closeQuote]
--		}
--
--		end := strings.IndexAny(s, "; \t\n\f\r")
--		if end == -1 {
--			end = len(s)
--		}
--		return s[:end]
--	}
--	return ""
--}
--
--var boms = []struct {
--	bom []byte
--	enc string
--}{
--	{[]byte{0xfe, 0xff}, "utf-16be"},
--	{[]byte{0xff, 0xfe}, "utf-16le"},
--	{[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/charset_test.go b/Godeps/_workspace/src/golang.org/x/net/html/charset/charset_test.go
-deleted file mode 100644
-index d309f75..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/charset_test.go
-+++ /dev/null
-@@ -1,215 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package charset
--
--import (
--	"bytes"
--	"io/ioutil"
--	"runtime"
--	"strings"
--	"testing"
--
--	"golang.org/x/text/transform"
--)
--
--func transformString(t transform.Transformer, s string) (string, error) {
--	r := transform.NewReader(strings.NewReader(s), t)
--	b, err := ioutil.ReadAll(r)
--	return string(b), err
--}
--
--var testCases = []struct {
--	utf8, other, otherEncoding string
--}{
--	{"Résumé", "Résumé", "utf8"},
--	{"Résumé", "R\xe9sum\xe9", "latin1"},
--	{"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"},
--	{"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"},
--	{"Hello, world", "Hello, world", "ASCII"},
--	{"Gdańsk", "Gda\xf1sk", "ISO-8859-2"},
--	{"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"},
--	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"},
--	{"latviešu", "latvie\xf0u", "ISO-8859-13"},
--	{"Seònaid", "Se\xf2naid", "ISO-8859-14"},
--	{"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"},
--	{"românește", "rom\xe2ne\xbate", "ISO-8859-16"},
--	{"nutraĵo", "nutra\xbco", "ISO-8859-3"},
--	{"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"},
--	{"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"},
--	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"},
--	{"Kağan", "Ka\xf0an", "ISO-8859-9"},
--	{"Résumé", "R\x8esum\x8e", "macintosh"},
--	{"Gdańsk", "Gda\xf1sk", "windows-1250"},
--	{"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"},
--	{"Résumé", "R\xe9sum\xe9", "windows-1252"},
--	{"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"},
--	{"Kağan", "Ka\xf0an", "windows-1254"},
--	{"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"},
--	{"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"},
--	{"latviešu", "latvie\xf0u", "windows-1257"},
--	{"Việt", "Vi\xea\xf2t", "windows-1258"},
--	{"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"},
--	{"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"},
--	{"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"},
--	{"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"},
--	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"},
--	{"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"},
--	{"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"},
--	{"㧯", "\x82\x31\x89\x38", "gb18030"},
--	{"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"},
--	{"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"},
--	{"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"},
--	{"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"},
--	{"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"},
--	{"네이트 | 즐거움의 시작, 슈파스(Spaβ) NATE", "\xb3\xd7\xc0\xcc\xc6\xae | \xc1\xf1\xb0\xc5\xbf\xf2\xc0\xc7 \xbd\xc3\xc0\xdb, \xbd\xb4\xc6\xc4\xbd\xba(Spa\xa5\xe2) NATE", "EUC-KR"},
--}
--
--func TestDecode(t *testing.T) {
--	for _, tc := range testCases {
--		e, _ := Lookup(tc.otherEncoding)
--		if e == nil {
--			t.Errorf("%s: not found", tc.otherEncoding)
--			continue
--		}
--		s, err := transformString(e.NewDecoder(), tc.other)
--		if err != nil {
--			t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err)
--			continue
--		}
--		if s != tc.utf8 {
--			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8)
--		}
--	}
--}
--
--func TestEncode(t *testing.T) {
--	for _, tc := range testCases {
--		e, _ := Lookup(tc.otherEncoding)
--		if e == nil {
--			t.Errorf("%s: not found", tc.otherEncoding)
--			continue
--		}
--		s, err := transformString(e.NewEncoder(), tc.utf8)
--		if err != nil {
--			t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err)
--			continue
--		}
--		if s != tc.other {
--			t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other)
--		}
--	}
--}
--
--// TestNames verifies that you can pass an encoding's name to Lookup and get
--// the same encoding back (except for "replacement").
--func TestNames(t *testing.T) {
--	for _, e := range encodings {
--		if e.name == "replacement" {
--			continue
--		}
--		_, got := Lookup(e.name)
--		if got != e.name {
--			t.Errorf("got %q, want %q", got, e.name)
--			continue
--		}
--	}
--}
--
--var sniffTestCases = []struct {
--	filename, declared, want string
--}{
--	{"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
--	{"UTF-16LE-BOM.html", "", "utf-16le"},
--	{"UTF-16BE-BOM.html", "", "utf-16be"},
--	{"meta-content-attribute.html", "text/html", "iso-8859-15"},
--	{"meta-charset-attribute.html", "text/html", "iso-8859-15"},
--	{"No-encoding-declaration.html", "text/html", "utf-8"},
--	{"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"},
--	{"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
--	{"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
--	{"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"},
--	{"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"},
--}
--
--func TestSniff(t *testing.T) {
--	switch runtime.GOOS {
--	case "nacl": // platforms that don't permit direct file system access
--		t.Skipf("not supported on %q", runtime.GOOS)
--	}
--
--	for _, tc := range sniffTestCases {
--		content, err := ioutil.ReadFile("testdata/" + tc.filename)
--		if err != nil {
--			t.Errorf("%s: error reading file: %v", tc.filename, err)
--			continue
--		}
--
--		_, name, _ := DetermineEncoding(content, tc.declared)
--		if name != tc.want {
--			t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want)
--			continue
--		}
--	}
--}
--
--func TestReader(t *testing.T) {
--	switch runtime.GOOS {
--	case "nacl": // platforms that don't permit direct file system access
--		t.Skipf("not supported on %q", runtime.GOOS)
--	}
--
--	for _, tc := range sniffTestCases {
--		content, err := ioutil.ReadFile("testdata/" + tc.filename)
--		if err != nil {
--			t.Errorf("%s: error reading file: %v", tc.filename, err)
--			continue
--		}
--
--		r, err := NewReader(bytes.NewReader(content), tc.declared)
--		if err != nil {
--			t.Errorf("%s: error creating reader: %v", tc.filename, err)
--			continue
--		}
--
--		got, err := ioutil.ReadAll(r)
--		if err != nil {
--			t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
--			continue
--		}
--
--		e, _ := Lookup(tc.want)
--		want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
--		if err != nil {
--			t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
--			continue
--		}
--
--		if !bytes.Equal(got, want) {
--			t.Errorf("%s: got %q, want %q", tc.filename, got, want)
--			continue
--		}
--	}
--}
--
--var metaTestCases = []struct {
--	meta, want string
--}{
--	{"", ""},
--	{"text/html", ""},
--	{"text/html; charset utf-8", ""},
--	{"text/html; charset=latin-2", "latin-2"},
--	{"text/html; charset; charset = utf-8", "utf-8"},
--	{`charset="big5"`, "big5"},
--	{"charset='shift_jis'", "shift_jis"},
--}
--
--func TestFromMeta(t *testing.T) {
--	for _, tc := range metaTestCases {
--		got := fromMetaElement(tc.meta)
--		if got != tc.want {
--			t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/gen.go b/Godeps/_workspace/src/golang.org/x/net/html/charset/gen.go
-deleted file mode 100644
-index 8b76909..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/gen.go
-+++ /dev/null
-@@ -1,111 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// +build ignore
--
--package main
--
--// Download http://encoding.spec.whatwg.org/encodings.json and use it to
--// generate table.go.
--
--import (
--	"encoding/json"
--	"fmt"
--	"log"
--	"net/http"
--	"strings"
--)
--
--type enc struct {
--	Name   string
--	Labels []string
--}
--
--type group struct {
--	Encodings []enc
--	Heading   string
--}
--
--const specURL = "http://encoding.spec.whatwg.org/encodings.json"
--
--func main() {
--	resp, err := http.Get(specURL)
--	if err != nil {
--		log.Fatalf("error fetching %s: %s", specURL, err)
--	}
--	if resp.StatusCode != 200 {
--		log.Fatalf("error fetching %s: HTTP status %s", specURL, resp.Status)
--	}
--	defer resp.Body.Close()
--
--	var groups []group
--	d := json.NewDecoder(resp.Body)
--	err = d.Decode(&groups)
--	if err != nil {
--		log.Fatalf("error reading encodings.json: %s", err)
--	}
--
--	fmt.Println("// generated by go run gen.go; DO NOT EDIT")
--	fmt.Println()
--	fmt.Println("package charset")
--	fmt.Println()
--
--	fmt.Println("import (")
--	fmt.Println(`"golang.org/x/text/encoding"`)
--	for _, pkg := range []string{"charmap", "japanese", "korean", "simplifiedchinese", "traditionalchinese", "unicode"} {
--		fmt.Printf("\"golang.org/x/text/encoding/%s\"\n", pkg)
--	}
--	fmt.Println(")")
--	fmt.Println()
--
--	fmt.Println("var encodings = map[string]struct{e encoding.Encoding; name string} {")
--	for _, g := range groups {
--		for _, e := range g.Encodings {
--			goName, ok := miscNames[e.Name]
--			if !ok {
--				for k, v := range prefixes {
--					if strings.HasPrefix(e.Name, k) {
--						goName = v + e.Name[len(k):]
--						break
--					}
--				}
--				if goName == "" {
--					log.Fatalf("unrecognized encoding name: %s", e.Name)
--				}
--			}
--
--			for _, label := range e.Labels {
--				fmt.Printf("%q: {%s, %q},\n", label, goName, e.Name)
--			}
--		}
--	}
--	fmt.Println("}")
--}
--
--var prefixes = map[string]string{
--	"iso-8859-": "charmap.ISO8859_",
--	"windows-":  "charmap.Windows",
--}
--
--var miscNames = map[string]string{
--	"utf-8":          "encoding.Nop",
--	"ibm866":         "charmap.CodePage866",
--	"iso-8859-8-i":   "charmap.ISO8859_8",
--	"koi8-r":         "charmap.KOI8R",
--	"koi8-u":         "charmap.KOI8U",
--	"macintosh":      "charmap.Macintosh",
--	"x-mac-cyrillic": "charmap.MacintoshCyrillic",
--	"gbk":            "simplifiedchinese.GBK",
--	"gb18030":        "simplifiedchinese.GB18030",
--	"hz-gb-2312":     "simplifiedchinese.HZGB2312",
--	"big5":           "traditionalchinese.Big5",
--	"euc-jp":         "japanese.EUCJP",
--	"iso-2022-jp":    "japanese.ISO2022JP",
--	"shift_jis":      "japanese.ShiftJIS",
--	"euc-kr":         "korean.EUCKR",
--	"replacement":    "encoding.Replacement",
--	"utf-16be":       "unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)",
--	"utf-16le":       "unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)",
--	"x-user-defined": "charmap.XUserDefined",
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/table.go b/Godeps/_workspace/src/golang.org/x/net/html/charset/table.go
-deleted file mode 100644
-index aa0d948..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/table.go
-+++ /dev/null
-@@ -1,235 +0,0 @@
--// generated by go run gen.go; DO NOT EDIT
--
--package charset
--
--import (
--	"golang.org/x/text/encoding"
--	"golang.org/x/text/encoding/charmap"
--	"golang.org/x/text/encoding/japanese"
--	"golang.org/x/text/encoding/korean"
--	"golang.org/x/text/encoding/simplifiedchinese"
--	"golang.org/x/text/encoding/traditionalchinese"
--	"golang.org/x/text/encoding/unicode"
--)
--
--var encodings = map[string]struct {
--	e    encoding.Encoding
--	name string
--}{
--	"unicode-1-1-utf-8":   {encoding.Nop, "utf-8"},
--	"utf-8":               {encoding.Nop, "utf-8"},
--	"utf8":                {encoding.Nop, "utf-8"},
--	"866":                 {charmap.CodePage866, "ibm866"},
--	"cp866":               {charmap.CodePage866, "ibm866"},
--	"csibm866":            {charmap.CodePage866, "ibm866"},
--	"ibm866":              {charmap.CodePage866, "ibm866"},
--	"csisolatin2":         {charmap.ISO8859_2, "iso-8859-2"},
--	"iso-8859-2":          {charmap.ISO8859_2, "iso-8859-2"},
--	"iso-ir-101":          {charmap.ISO8859_2, "iso-8859-2"},
--	"iso8859-2":           {charmap.ISO8859_2, "iso-8859-2"},
--	"iso88592":            {charmap.ISO8859_2, "iso-8859-2"},
--	"iso_8859-2":          {charmap.ISO8859_2, "iso-8859-2"},
--	"iso_8859-2:1987":     {charmap.ISO8859_2, "iso-8859-2"},
--	"l2":                  {charmap.ISO8859_2, "iso-8859-2"},
--	"latin2":              {charmap.ISO8859_2, "iso-8859-2"},
--	"csisolatin3":         {charmap.ISO8859_3, "iso-8859-3"},
--	"iso-8859-3":          {charmap.ISO8859_3, "iso-8859-3"},
--	"iso-ir-109":          {charmap.ISO8859_3, "iso-8859-3"},
--	"iso8859-3":           {charmap.ISO8859_3, "iso-8859-3"},
--	"iso88593":            {charmap.ISO8859_3, "iso-8859-3"},
--	"iso_8859-3":          {charmap.ISO8859_3, "iso-8859-3"},
--	"iso_8859-3:1988":     {charmap.ISO8859_3, "iso-8859-3"},
--	"l3":                  {charmap.ISO8859_3, "iso-8859-3"},
--	"latin3":              {charmap.ISO8859_3, "iso-8859-3"},
--	"csisolatin4":         {charmap.ISO8859_4, "iso-8859-4"},
--	"iso-8859-4":          {charmap.ISO8859_4, "iso-8859-4"},
--	"iso-ir-110":          {charmap.ISO8859_4, "iso-8859-4"},
--	"iso8859-4":           {charmap.ISO8859_4, "iso-8859-4"},
--	"iso88594":            {charmap.ISO8859_4, "iso-8859-4"},
--	"iso_8859-4":          {charmap.ISO8859_4, "iso-8859-4"},
--	"iso_8859-4:1988":     {charmap.ISO8859_4, "iso-8859-4"},
--	"l4":                  {charmap.ISO8859_4, "iso-8859-4"},
--	"latin4":              {charmap.ISO8859_4, "iso-8859-4"},
--	"csisolatincyrillic":  {charmap.ISO8859_5, "iso-8859-5"},
--	"cyrillic":            {charmap.ISO8859_5, "iso-8859-5"},
--	"iso-8859-5":          {charmap.ISO8859_5, "iso-8859-5"},
--	"iso-ir-144":          {charmap.ISO8859_5, "iso-8859-5"},
--	"iso8859-5":           {charmap.ISO8859_5, "iso-8859-5"},
--	"iso88595":            {charmap.ISO8859_5, "iso-8859-5"},
--	"iso_8859-5":          {charmap.ISO8859_5, "iso-8859-5"},
--	"iso_8859-5:1988":     {charmap.ISO8859_5, "iso-8859-5"},
--	"arabic":              {charmap.ISO8859_6, "iso-8859-6"},
--	"asmo-708":            {charmap.ISO8859_6, "iso-8859-6"},
--	"csiso88596e":         {charmap.ISO8859_6, "iso-8859-6"},
--	"csiso88596i":         {charmap.ISO8859_6, "iso-8859-6"},
--	"csisolatinarabic":    {charmap.ISO8859_6, "iso-8859-6"},
--	"ecma-114":            {charmap.ISO8859_6, "iso-8859-6"},
--	"iso-8859-6":          {charmap.ISO8859_6, "iso-8859-6"},
--	"iso-8859-6-e":        {charmap.ISO8859_6, "iso-8859-6"},
--	"iso-8859-6-i":        {charmap.ISO8859_6, "iso-8859-6"},
--	"iso-ir-127":          {charmap.ISO8859_6, "iso-8859-6"},
--	"iso8859-6":           {charmap.ISO8859_6, "iso-8859-6"},
--	"iso88596":            {charmap.ISO8859_6, "iso-8859-6"},
--	"iso_8859-6":          {charmap.ISO8859_6, "iso-8859-6"},
--	"iso_8859-6:1987":     {charmap.ISO8859_6, "iso-8859-6"},
--	"csisolatingreek":     {charmap.ISO8859_7, "iso-8859-7"},
--	"ecma-118":            {charmap.ISO8859_7, "iso-8859-7"},
--	"elot_928":            {charmap.ISO8859_7, "iso-8859-7"},
--	"greek":               {charmap.ISO8859_7, "iso-8859-7"},
--	"greek8":              {charmap.ISO8859_7, "iso-8859-7"},
--	"iso-8859-7":          {charmap.ISO8859_7, "iso-8859-7"},
--	"iso-ir-126":          {charmap.ISO8859_7, "iso-8859-7"},
--	"iso8859-7":           {charmap.ISO8859_7, "iso-8859-7"},
--	"iso88597":            {charmap.ISO8859_7, "iso-8859-7"},
--	"iso_8859-7":          {charmap.ISO8859_7, "iso-8859-7"},
--	"iso_8859-7:1987":     {charmap.ISO8859_7, "iso-8859-7"},
--	"sun_eu_greek":        {charmap.ISO8859_7, "iso-8859-7"},
--	"csiso88598e":         {charmap.ISO8859_8, "iso-8859-8"},
--	"csisolatinhebrew":    {charmap.ISO8859_8, "iso-8859-8"},
--	"hebrew":              {charmap.ISO8859_8, "iso-8859-8"},
--	"iso-8859-8":          {charmap.ISO8859_8, "iso-8859-8"},
--	"iso-8859-8-e":        {charmap.ISO8859_8, "iso-8859-8"},
--	"iso-ir-138":          {charmap.ISO8859_8, "iso-8859-8"},
--	"iso8859-8":           {charmap.ISO8859_8, "iso-8859-8"},
--	"iso88598":            {charmap.ISO8859_8, "iso-8859-8"},
--	"iso_8859-8":          {charmap.ISO8859_8, "iso-8859-8"},
--	"iso_8859-8:1988":     {charmap.ISO8859_8, "iso-8859-8"},
--	"visual":              {charmap.ISO8859_8, "iso-8859-8"},
--	"csiso88598i":         {charmap.ISO8859_8, "iso-8859-8-i"},
--	"iso-8859-8-i":        {charmap.ISO8859_8, "iso-8859-8-i"},
--	"logical":             {charmap.ISO8859_8, "iso-8859-8-i"},
--	"csisolatin6":         {charmap.ISO8859_10, "iso-8859-10"},
--	"iso-8859-10":         {charmap.ISO8859_10, "iso-8859-10"},
--	"iso-ir-157":          {charmap.ISO8859_10, "iso-8859-10"},
--	"iso8859-10":          {charmap.ISO8859_10, "iso-8859-10"},
--	"iso885910":           {charmap.ISO8859_10, "iso-8859-10"},
--	"l6":                  {charmap.ISO8859_10, "iso-8859-10"},
--	"latin6":              {charmap.ISO8859_10, "iso-8859-10"},
--	"iso-8859-13":         {charmap.ISO8859_13, "iso-8859-13"},
--	"iso8859-13":          {charmap.ISO8859_13, "iso-8859-13"},
--	"iso885913":           {charmap.ISO8859_13, "iso-8859-13"},
--	"iso-8859-14":         {charmap.ISO8859_14, "iso-8859-14"},
--	"iso8859-14":          {charmap.ISO8859_14, "iso-8859-14"},
--	"iso885914":           {charmap.ISO8859_14, "iso-8859-14"},
--	"csisolatin9":         {charmap.ISO8859_15, "iso-8859-15"},
--	"iso-8859-15":         {charmap.ISO8859_15, "iso-8859-15"},
--	"iso8859-15":          {charmap.ISO8859_15, "iso-8859-15"},
--	"iso885915":           {charmap.ISO8859_15, "iso-8859-15"},
--	"iso_8859-15":         {charmap.ISO8859_15, "iso-8859-15"},
--	"l9":                  {charmap.ISO8859_15, "iso-8859-15"},
--	"iso-8859-16":         {charmap.ISO8859_16, "iso-8859-16"},
--	"cskoi8r":             {charmap.KOI8R, "koi8-r"},
--	"koi":                 {charmap.KOI8R, "koi8-r"},
--	"koi8":                {charmap.KOI8R, "koi8-r"},
--	"koi8-r":              {charmap.KOI8R, "koi8-r"},
--	"koi8_r":              {charmap.KOI8R, "koi8-r"},
--	"koi8-u":              {charmap.KOI8U, "koi8-u"},
--	"csmacintosh":         {charmap.Macintosh, "macintosh"},
--	"mac":                 {charmap.Macintosh, "macintosh"},
--	"macintosh":           {charmap.Macintosh, "macintosh"},
--	"x-mac-roman":         {charmap.Macintosh, "macintosh"},
--	"dos-874":             {charmap.Windows874, "windows-874"},
--	"iso-8859-11":         {charmap.Windows874, "windows-874"},
--	"iso8859-11":          {charmap.Windows874, "windows-874"},
--	"iso885911":           {charmap.Windows874, "windows-874"},
--	"tis-620":             {charmap.Windows874, "windows-874"},
--	"windows-874":         {charmap.Windows874, "windows-874"},
--	"cp1250":              {charmap.Windows1250, "windows-1250"},
--	"windows-1250":        {charmap.Windows1250, "windows-1250"},
--	"x-cp1250":            {charmap.Windows1250, "windows-1250"},
--	"cp1251":              {charmap.Windows1251, "windows-1251"},
--	"windows-1251":        {charmap.Windows1251, "windows-1251"},
--	"x-cp1251":            {charmap.Windows1251, "windows-1251"},
--	"ansi_x3.4-1968":      {charmap.Windows1252, "windows-1252"},
--	"ascii":               {charmap.Windows1252, "windows-1252"},
--	"cp1252":              {charmap.Windows1252, "windows-1252"},
--	"cp819":               {charmap.Windows1252, "windows-1252"},
--	"csisolatin1":         {charmap.Windows1252, "windows-1252"},
--	"ibm819":              {charmap.Windows1252, "windows-1252"},
--	"iso-8859-1":          {charmap.Windows1252, "windows-1252"},
--	"iso-ir-100":          {charmap.Windows1252, "windows-1252"},
--	"iso8859-1":           {charmap.Windows1252, "windows-1252"},
--	"iso88591":            {charmap.Windows1252, "windows-1252"},
--	"iso_8859-1":          {charmap.Windows1252, "windows-1252"},
--	"iso_8859-1:1987":     {charmap.Windows1252, "windows-1252"},
--	"l1":                  {charmap.Windows1252, "windows-1252"},
--	"latin1":              {charmap.Windows1252, "windows-1252"},
--	"us-ascii":            {charmap.Windows1252, "windows-1252"},
--	"windows-1252":        {charmap.Windows1252, "windows-1252"},
--	"x-cp1252":            {charmap.Windows1252, "windows-1252"},
--	"cp1253":              {charmap.Windows1253, "windows-1253"},
--	"windows-1253":        {charmap.Windows1253, "windows-1253"},
--	"x-cp1253":            {charmap.Windows1253, "windows-1253"},
--	"cp1254":              {charmap.Windows1254, "windows-1254"},
--	"csisolatin5":         {charmap.Windows1254, "windows-1254"},
--	"iso-8859-9":          {charmap.Windows1254, "windows-1254"},
--	"iso-ir-148":          {charmap.Windows1254, "windows-1254"},
--	"iso8859-9":           {charmap.Windows1254, "windows-1254"},
--	"iso88599":            {charmap.Windows1254, "windows-1254"},
--	"iso_8859-9":          {charmap.Windows1254, "windows-1254"},
--	"iso_8859-9:1989":     {charmap.Windows1254, "windows-1254"},
--	"l5":                  {charmap.Windows1254, "windows-1254"},
--	"latin5":              {charmap.Windows1254, "windows-1254"},
--	"windows-1254":        {charmap.Windows1254, "windows-1254"},
--	"x-cp1254":            {charmap.Windows1254, "windows-1254"},
--	"cp1255":              {charmap.Windows1255, "windows-1255"},
--	"windows-1255":        {charmap.Windows1255, "windows-1255"},
--	"x-cp1255":            {charmap.Windows1255, "windows-1255"},
--	"cp1256":              {charmap.Windows1256, "windows-1256"},
--	"windows-1256":        {charmap.Windows1256, "windows-1256"},
--	"x-cp1256":            {charmap.Windows1256, "windows-1256"},
--	"cp1257":              {charmap.Windows1257, "windows-1257"},
--	"windows-1257":        {charmap.Windows1257, "windows-1257"},
--	"x-cp1257":            {charmap.Windows1257, "windows-1257"},
--	"cp1258":              {charmap.Windows1258, "windows-1258"},
--	"windows-1258":        {charmap.Windows1258, "windows-1258"},
--	"x-cp1258":            {charmap.Windows1258, "windows-1258"},
--	"x-mac-cyrillic":      {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
--	"x-mac-ukrainian":     {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
--	"chinese":             {simplifiedchinese.GBK, "gbk"},
--	"csgb2312":            {simplifiedchinese.GBK, "gbk"},
--	"csiso58gb231280":     {simplifiedchinese.GBK, "gbk"},
--	"gb2312":              {simplifiedchinese.GBK, "gbk"},
--	"gb_2312":             {simplifiedchinese.GBK, "gbk"},
--	"gb_2312-80":          {simplifiedchinese.GBK, "gbk"},
--	"gbk":                 {simplifiedchinese.GBK, "gbk"},
--	"iso-ir-58":           {simplifiedchinese.GBK, "gbk"},
--	"x-gbk":               {simplifiedchinese.GBK, "gbk"},
--	"gb18030":             {simplifiedchinese.GB18030, "gb18030"},
--	"hz-gb-2312":          {simplifiedchinese.HZGB2312, "hz-gb-2312"},
--	"big5":                {traditionalchinese.Big5, "big5"},
--	"big5-hkscs":          {traditionalchinese.Big5, "big5"},
--	"cn-big5":             {traditionalchinese.Big5, "big5"},
--	"csbig5":              {traditionalchinese.Big5, "big5"},
--	"x-x-big5":            {traditionalchinese.Big5, "big5"},
--	"cseucpkdfmtjapanese": {japanese.EUCJP, "euc-jp"},
--	"euc-jp":              {japanese.EUCJP, "euc-jp"},
--	"x-euc-jp":            {japanese.EUCJP, "euc-jp"},
--	"csiso2022jp":         {japanese.ISO2022JP, "iso-2022-jp"},
--	"iso-2022-jp":         {japanese.ISO2022JP, "iso-2022-jp"},
--	"csshiftjis":          {japanese.ShiftJIS, "shift_jis"},
--	"ms_kanji":            {japanese.ShiftJIS, "shift_jis"},
--	"shift-jis":           {japanese.ShiftJIS, "shift_jis"},
--	"shift_jis":           {japanese.ShiftJIS, "shift_jis"},
--	"sjis":                {japanese.ShiftJIS, "shift_jis"},
--	"windows-31j":         {japanese.ShiftJIS, "shift_jis"},
--	"x-sjis":              {japanese.ShiftJIS, "shift_jis"},
--	"cseuckr":             {korean.EUCKR, "euc-kr"},
--	"csksc56011987":       {korean.EUCKR, "euc-kr"},
--	"euc-kr":              {korean.EUCKR, "euc-kr"},
--	"iso-ir-149":          {korean.EUCKR, "euc-kr"},
--	"korean":              {korean.EUCKR, "euc-kr"},
--	"ks_c_5601-1987":      {korean.EUCKR, "euc-kr"},
--	"ks_c_5601-1989":      {korean.EUCKR, "euc-kr"},
--	"ksc5601":             {korean.EUCKR, "euc-kr"},
--	"ksc_5601":            {korean.EUCKR, "euc-kr"},
--	"windows-949":         {korean.EUCKR, "euc-kr"},
--	"csiso2022kr":         {encoding.Replacement, "replacement"},
--	"iso-2022-kr":         {encoding.Replacement, "replacement"},
--	"iso-2022-cn":         {encoding.Replacement, "replacement"},
--	"iso-2022-cn-ext":     {encoding.Replacement, "replacement"},
--	"utf-16be":            {unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), "utf-16be"},
--	"utf-16":              {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
--	"utf-16le":            {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
--	"x-user-defined":      {charmap.XUserDefined, "x-user-defined"},
--}
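
The deleted table above is the tail of the label map in golang.org/x/net/html/charset, which folds the many WHATWG encoding labels down to a canonical encoding. A minimal sketch of how such a lookup is normally driven through the package's exported Lookup helper, assuming the vendored golang.org/x/net/html/charset and golang.org/x/text packages removed by this patch (expected output noted in comments, not captured here):

    package main

    import (
        "fmt"

        "golang.org/x/net/html/charset"
        "golang.org/x/text/transform"
    )

    func main() {
        // Lookup trims and lower-cases the label before consulting a table like
        // the one above; it returns a nil Encoding for unknown labels.
        e, name := charset.Lookup("Latin1")
        fmt.Println(name) // expected: windows-1252, per the WHATWG encoding standard

        // Decode a single windows-1252 byte (0xE9, 'é') into UTF-8.
        decoded, _, err := transform.String(e.NewDecoder(), "\xe9")
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded) // expected: é
    }
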
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-charset.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-charset.html
-deleted file mode 100644
-index 9915fa0..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-charset.html
-+++ /dev/null
-@@ -1,48 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
--  <title>HTTP charset</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="The character encoding of a page can be set using the HTTP header charset declaration.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
--</head>
--<body>
--<p class='title'>HTTP charset</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">The character encoding of a page can be set using the HTTP header charset declaration.</p>
--<div class="notes"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-003">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-001<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
-deleted file mode 100644
-index 26e5d8b..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
-+++ /dev/null
-@@ -1,48 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
--  <title>HTTP vs UTF-8 BOM</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
--</head>
--<body>
--<p class='title'>HTTP vs UTF-8 BOM</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.</p>
--<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p><p>If the test is unsuccessful, the characters &#x00EF;&#x00BB;&#x00BF; should appear at the top of the page.  These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-022">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-034<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-034" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
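
The deleted testdata pages above and below come from the W3C encoding tests and exercise the detection precedence: a byte-order mark outranks the charset in the HTTP Content-Type header, which in turn outranks the in-page meta declarations. A minimal sketch of the same precedence through the package's DetermineEncoding entry point, assuming the vendored golang.org/x/net/html/charset API (expected output noted in comments, not captured here):

    package main

    import (
        "fmt"

        "golang.org/x/net/html/charset"
    )

    func main() {
        // A UTF-8 byte-order mark at the start of the content outranks the
        // charset parameter in the Content-Type header, mirroring the
        // HTTP-vs-UTF-8-BOM page above.
        content := append([]byte{0xEF, 0xBB, 0xBF}, []byte("<!DOCTYPE html><html></html>")...)
        _, name, certain := charset.DetermineEncoding(content, "text/html; charset=iso-8859-15")
        fmt.Println(name, certain) // expected: utf-8 true
    }
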
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
-deleted file mode 100644
-index 2f07e95..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
-+++ /dev/null
-@@ -1,49 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta charset="iso-8859-1" > <title>HTTP vs meta charset</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.">
--<style type='text/css'>
--.test div { width: 50px; }.test div { width: 90px; }
--</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
--</head>
--<body>
--<p class='title'>HTTP vs meta charset</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.</p>
--<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-037">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-018<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-018" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
-deleted file mode 100644
-index 6853cdd..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
-+++ /dev/null
-@@ -1,49 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta http-equiv="content-type" content="text/html;charset=iso-8859-1" > <title>HTTP vs meta content</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.">
--<style type='text/css'>
--.test div { width: 50px; }.test div { width: 90px; }
--</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
--</head>
--<body>
--<p class='title'>HTTP vs meta content</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.</p>
--<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-018">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-016<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-016" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
-deleted file mode 100644
-index 612e26c..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html
-+++ /dev/null
-@@ -1,47 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
--  <title>No encoding declaration</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
--</head>
--<body>
--<p class='title'>No encoding declaration</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.</p>
--<div class="notes"><p><p>The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-034">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-015<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-015" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/README b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/README
-deleted file mode 100644
-index a8e1fa4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/README
-+++ /dev/null
-@@ -1 +0,0 @@
--These test cases come from http://www.w3.org/International/tests/html5/the-input-byte-stream/results-basics
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
-deleted file mode 100644
-index 3abf7a9343c20518e57dfea58b374fb0f4fb58a1..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 2670
-zcmcJR?QRoS5Qc}JAoU&=BQ-(7b^;2j8i*i3RV1JlO@;VXIsPurV!WHiDdLW}i`*CO
-z^UnC>tih=KsVr;H&Y7?C&O3AV(?534uG?e##U9y_y|!QNi4``n+D>d{2lky^LnFNx
-z?9HrarH$>rwQR_$g)Hk0*&STI*EYq|47~&U9sfUB+ji})9eR{QqCUra7oDsZ5obtB
-zdxP%<)-$4Q;rSHJiM>U(#ZI=;?n^BC?Dp6lu=~_1-lnX3u03&2BlmQIY>L+!Uq7<S
-znh)&E?pViTjIm26+mz45Gn;?mU1-%d$8(q8ng2R#e!F1tlD&lM9_z}^IdM&9OX8=U
-z8%PwVO_n7-g+SYm(XCxt at f1Qm>XoytKw^Q#oZSM?3*J?)&ojG&yzQRkC!M<M9xE_7
-zb;}_hR3kl=jSw#Vt-|I{q%Ck#9h-3za!uL)n~QLmd*yVN|H|tGZJ}Lo7NIwEW{jNQ
-zV@@K5_3@^fi08HMCj^^V*Hl9s7bDNfAUw%xiKL5{%KZf*9rq_B3%EJ8zj(gqf5v)%
-zbOLV*+p`@!Ep4CmhgBB}-xMo+eXUno4NY--$glQJ%^9|ktY at fB&Rr7SEd-RMIzBO=
-z at -E&3<2aeBADM{J>l5JE?ax;lp_NYEcdUht`ZswOviB~L5hmJ|pXI71nn20w;>vG!
-zQGB$EE9&wC``&J#_Ym~<oskhM*qPSKA~LzoN!pzH1>PgRu-Bd>1!pOp0||k`kr=VJ
-zfH6I6rmRaeHA7U-A^OTsT+|d2a^i(>DePzZ{)ibXoCBvJnuYrd-3kkN$u<La`*flh
-zDi+>y{qQK;=*Y;S87ro12aTgu^i*%f8zC3>a}9DIe4cfxOzsCw&(cqvP9{ud{N6f`
-z#TNDY(B6 at Gpr|uN+%&x^XZjBHdc at 2vsM(Tyc2=vshHQ5w+obmp>tuWT(t4BTUGAQw
-zxeI$UGSLUBg=WFbF;4f at 4=^P2AgY at CFn8A`bcC=_&~)fiDe)#cUARRBzJ^k|%X)69
-z+{Cb`wq}Rsg%B62CC_tK!AV(W{(MV?#mndR46CU#BUN<{8e?*oT+!pE5wF#O#TR#a
-z$9qRT)tpbw8zAI~QQJg2C3|6$I%(T(;`zOMy6SO+&;pG=c#2P|P-WZn$$DpWJlC3U
-z3*nvm<q%|^qPyLgA~&hNxH!U(CgUrj$Lv*i?ZToRve;kc at WJ`8#Z)Pn$q5nRA5|>z
-zwP{u~r$L?-m3uqp9I1+#3yE|3M$(s-BE<Joa8PqdUta}ZQ2KUivf!ALM1?f7$7oIM
-sZ)BUR)d7uk!p%4L`mByQto|PReD2~`cUQB{U7yke at NV7*jW5Z60Z{<B#sB~S
-
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
-deleted file mode 100644
-index 76254c980c29954f1bbd96e145c79f64c43e3355..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 2682
-zcmcJR?QRoS5Qc}JMdBV7Bat9sI{^h%O^8Y;RgubvAPDiRa{S#oi<{jL2gDuqE^=SM
-z^UnC>tih=LQ>`qYoiktOop<K!=TCcf-F~rW_RtRPjXk$VR at lU9JGPna+cmptdzbG8
-zdo$}<X=A%@EgQ0GA<KG0b_bX5wN3FfLvP<+;r~}_+qT`a-#y9!QJ>(wi%!;yh%+Rm
-z{e|xntY<{q!1F1Z6MKtngPm-p-4|H&+3m4AVE3_AyiHm6Tzlf4M(*ht*%YrezJ6kr
-zHGj4<yK5bfF~%;PY+XJR&uspUccE9?9M4^zGk-cOe!F1tg1v<E4(rO!IdM&93*x7p
-z8%PwVO_n7-g+SYm(5+os at h^mW)GKFOfy4<Gb9M_npYX1FeVy4|<ZbsPKk3w6_gI0!
-zsap>5pc?64*$Cm%-zseWMA`x;)v*~jA=i}szqts9xmQkS`M11|(H7bTXAycsXU53+
-zJ?120SRZeyiFjW7enPN`bxk$IaWV3o48oJF7D&2ysoY;6(s6%6vVfaYd&mC=erK!)
-zNGI^7upQgN)53OHe_VE<@J+G8*Y|p*)zB2Thdi}+YR<5QWHm!|a_*AoZXuv7)$xe|
-zm3Q$D7{|#}{m4X&UY!6(ZhyYi2(5JLzGE$H)W6BQklnjPMwn<<eiqA`XaXgx3wwFx
-z!u}~PtanA0H|+*`4?u6%85yyHooTHsB9rT!q|K?H;yvOEd+kY5aF)_JkPs*wi4l7z
-zFs6sily!-wW{B!JL|^%di<&}0PP`B<h5bg~A2MTwbKo>Yvv7Z*TVWwD*=E3QpH37*
-z#lqXJA0A~J9T_<^W5smspmDg2p6ac5Bjn<Ku0igDud_~-$^D?|S^A07$%M&_=dJTt
-zY*DWd?Qb#<6m_PEo2FOgOy8nj51F|IHCvF+)^fGekZmtz>+~LAoow%1TCdZ*$K8`O
-zw_$HaCi+0N&@7la#_7KL5r$+QL{)Pi=I&aDjt~|Knht#`CEi4*3%97i_fSfAS<fw%
-zn-~_=*6h%{5aL3$<o}#ia8j0;KmVn|;^h-=WmR6xNL8JK#+ckCSM<1P#A|h6v2v#$
-zaHn^?chpnO`P94t_OVibB~EP;@09$7PU at viyM@*V*V7k=p6Ga?P}?75Bwndfm2J{5
-zs~ytuoNMwC?x}AMK<F{Ln~iC5i;Ts|5q>lwUz0=3V0GCxY}z81UC-nP=CGt2OqYV$
-zoRCo+qM9YX*3FFORLC=<a&JeRBULkVB5_aOO8Vkbg!qmME@~d>E3B~S at +KROyk4r5
-yX7?DaslDfIebqXgC!KKp4IYy+W~X?ddE6o=`A+x#x0AK&6MF#W&AXxbRrv+SX}PNa
-
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
-deleted file mode 100644
-index 83de433..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html
-+++ /dev/null
-@@ -1,49 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta charset="iso-8859-15"> <title>UTF-8 BOM vs meta charset</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.">
--<style type='text/css'>
--.test div { width: 50px; }.test div { width: 90px; }
--</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
--</head>
--<body>
--<p class='title'>UTF-8 BOM vs meta charset</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.</p>
--<div class="notes"><p><p>The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-024">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-038<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-038" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
-deleted file mode 100644
-index 501aac2..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html
-+++ /dev/null
-@@ -1,48 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>UTF-8 BOM vs meta content</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
--</head>
--<body>
--<p class='title'>UTF-8 BOM vs meta content</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.</p>
--<div class="notes"><p><p>The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-038">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-037<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-037" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
-deleted file mode 100644
-index 2d7d25a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
-+++ /dev/null
-@@ -1,48 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta charset="iso-8859-15"> <title>meta charset attribute</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="The character encoding of the page can be set by a meta element with charset attribute.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
--</head>
--<body>
--<p class='title'>meta charset attribute</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with charset attribute.</p>
--<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-015">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-009<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-009" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-content-attribute.html b/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
-deleted file mode 100644
-index 1c3f228..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-content-attribute.html
-+++ /dev/null
-@@ -1,48 +0,0 @@
--<!DOCTYPE html>
--<html  lang="en" >
--<head>
-- <meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>meta content attribute</title>
--<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
--<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
--<link rel="stylesheet" type="text/css" href="./generatedtests.css">
--<script src="http://w3c-test.org/resources/testharness.js"></script>
--<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
--<meta name='flags' content='http'>
--<meta name="assert" content="The character encoding of the page can be set by a meta element with http-equiv and content attributes.">
--<style type='text/css'>
--.test div { width: 50px; }</style>
--<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
--</head>
--<body>
--<p class='title'>meta content attribute</p>
--
--
--<div id='log'></div>
--
--
--<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>
--
--
--
--
--
--<div class='description'>
--<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with http-equiv and content attributes.</p>
--<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
--</div>
--</div>
--<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-009">Next test</a></div><div class="doctype">HTML5</div>
--<p class="jump">the-input-byte-stream-007<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary &amp; related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-007" target="_blank">Detailed results for this test</a><br/>	<a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
--<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
--				<li>The test is read from a server that supports HTTP.</li></ul></div>
--</div>
--<script>
--test(function() {
--assert_equals(document.getElementById('box').offsetWidth, 100);
--}, " ");
--</script>
--
--</body>
--</html>
--
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/const.go b/Godeps/_workspace/src/golang.org/x/net/html/const.go
-deleted file mode 100644
-index d7cc8bb..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/const.go
-+++ /dev/null
-@@ -1,100 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--// Section 12.2.3.2 of the HTML5 specification says "The following elements
--// have varying levels of special parsing rules".
--// http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#the-stack-of-open-elements
--var isSpecialElementMap = map[string]bool{
--	"address":    true,
--	"applet":     true,
--	"area":       true,
--	"article":    true,
--	"aside":      true,
--	"base":       true,
--	"basefont":   true,
--	"bgsound":    true,
--	"blockquote": true,
--	"body":       true,
--	"br":         true,
--	"button":     true,
--	"caption":    true,
--	"center":     true,
--	"col":        true,
--	"colgroup":   true,
--	"command":    true,
--	"dd":         true,
--	"details":    true,
--	"dir":        true,
--	"div":        true,
--	"dl":         true,
--	"dt":         true,
--	"embed":      true,
--	"fieldset":   true,
--	"figcaption": true,
--	"figure":     true,
--	"footer":     true,
--	"form":       true,
--	"frame":      true,
--	"frameset":   true,
--	"h1":         true,
--	"h2":         true,
--	"h3":         true,
--	"h4":         true,
--	"h5":         true,
--	"h6":         true,
--	"head":       true,
--	"header":     true,
--	"hgroup":     true,
--	"hr":         true,
--	"html":       true,
--	"iframe":     true,
--	"img":        true,
--	"input":      true,
--	"isindex":    true,
--	"li":         true,
--	"link":       true,
--	"listing":    true,
--	"marquee":    true,
--	"menu":       true,
--	"meta":       true,
--	"nav":        true,
--	"noembed":    true,
--	"noframes":   true,
--	"noscript":   true,
--	"object":     true,
--	"ol":         true,
--	"p":          true,
--	"param":      true,
--	"plaintext":  true,
--	"pre":        true,
--	"script":     true,
--	"section":    true,
--	"select":     true,
--	"style":      true,
--	"summary":    true,
--	"table":      true,
--	"tbody":      true,
--	"td":         true,
--	"textarea":   true,
--	"tfoot":      true,
--	"th":         true,
--	"thead":      true,
--	"title":      true,
--	"tr":         true,
--	"ul":         true,
--	"wbr":        true,
--	"xmp":        true,
--}
--
--func isSpecialElement(element *Node) bool {
--	switch element.Namespace {
--	case "", "html":
--		return isSpecialElementMap[element.Data]
--	case "svg":
--		return element.Data == "foreignObject"
--	}
--	return false
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/doc.go b/Godeps/_workspace/src/golang.org/x/net/html/doc.go
-deleted file mode 100644
-index 32379a3..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/doc.go
-+++ /dev/null
-@@ -1,106 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--/*
--Package html implements an HTML5-compliant tokenizer and parser.
--
--Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
--caller's responsibility to ensure that r provides UTF-8 encoded HTML.
--
--	z := html.NewTokenizer(r)
--
--Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
--which parses the next token and returns its type, or an error:
--
--	for {
--		tt := z.Next()
--		if tt == html.ErrorToken {
--			// ...
--			return ...
--		}
--		// Process the current token.
--	}
--
--There are two APIs for retrieving the current token. The high-level API is to
--call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
--allow optionally calling Raw after Next but before Token, Text, TagName, or
--TagAttr. In EBNF notation, the valid call sequence per token is:
--
--	Next {Raw} [ Token | Text | TagName {TagAttr} ]
--
--Token returns an independent data structure that completely describes a token.
--Entities (such as "&lt;") are unescaped, tag names and attribute keys are
--lower-cased, and attributes are collected into a []Attribute. For example:
--
--	for {
--		if z.Next() == html.ErrorToken {
--			// Returning io.EOF indicates success.
--			return z.Err()
--		}
--		emitToken(z.Token())
--	}
--
--The low-level API performs fewer allocations and copies, but the contents of
--the []byte values returned by Text, TagName and TagAttr may change on the next
--call to Next. For example, to extract an HTML page's anchor text:
--
--	depth := 0
--	for {
--		tt := z.Next()
--		switch tt {
--		case ErrorToken:
--			return z.Err()
--		case TextToken:
--			if depth > 0 {
--				// emitBytes should copy the []byte it receives,
--				// if it doesn't process it immediately.
--				emitBytes(z.Text())
--			}
--		case StartTagToken, EndTagToken:
--			tn, _ := z.TagName()
--			if len(tn) == 1 && tn[0] == 'a' {
--				if tt == StartTagToken {
--					depth++
--				} else {
--					depth--
--				}
--			}
--		}
--	}
--
--Parsing is done by calling Parse with an io.Reader, which returns the root of
--the parse tree (the document element) as a *Node. It is the caller's
--responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
--example, to process each anchor node in depth-first order:
--
--	doc, err := html.Parse(r)
--	if err != nil {
--		// ...
--	}
--	var f func(*html.Node)
--	f = func(n *html.Node) {
--		if n.Type == html.ElementNode && n.Data == "a" {
--			// Do something with n...
--		}
--		for c := n.FirstChild; c != nil; c = c.NextSibling {
--			f(c)
--		}
--	}
--	f(doc)
--
--The relevant specifications include:
--http://www.whatwg.org/specs/web-apps/current-work/multipage/syntax.html and
--http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html
--*/
--package html // import "golang.org/x/net/html"
--
--// The tokenization algorithm implemented by this package is not a line-by-line
--// transliteration of the relatively verbose state-machine in the WHATWG
--// specification. A more direct approach is used instead, where the program
--// counter implies the state, such as whether it is tokenizing a tag or a text
--// node. Specification compliance is verified by checking expected and actual
--// outputs over a test suite rather than aiming for algorithmic fidelity.
--
--// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
--// TODO(nigeltao): How does parsing interact with a JavaScript engine?
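
The deleted doc comment above shows the tokenizer and parser APIs only as fragments. For reference, a self-contained version of the anchor walk it describes, assuming only the vendored golang.org/x/net/html package (a sketch, not part of the patch):

    package main

    import (
        "fmt"
        "log"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        doc, err := html.Parse(strings.NewReader(`<p><a href="http://example.com">example</a></p>`))
        if err != nil {
            log.Fatal(err)
        }
        // Depth-first walk, printing the href of every anchor element.
        var walk func(*html.Node)
        walk = func(n *html.Node) {
            if n.Type == html.ElementNode && n.Data == "a" {
                for _, a := range n.Attr {
                    if a.Key == "href" {
                        fmt.Println(a.Val)
                    }
                }
            }
            for c := n.FirstChild; c != nil; c = c.NextSibling {
                walk(c)
            }
        }
        walk(doc)
    }
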
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/doctype.go b/Godeps/_workspace/src/golang.org/x/net/html/doctype.go
-deleted file mode 100644
-index c484e5a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/doctype.go
-+++ /dev/null
-@@ -1,156 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"strings"
--)
--
--// parseDoctype parses the data from a DoctypeToken into a name,
--// public identifier, and system identifier. It returns a Node whose Type
--// is DoctypeNode, whose Data is the name, and which has attributes
--// named "system" and "public" for the two identifiers if they were present.
--// quirks is whether the document should be parsed in "quirks mode".
--func parseDoctype(s string) (n *Node, quirks bool) {
--	n = &Node{Type: DoctypeNode}
--
--	// Find the name.
--	space := strings.IndexAny(s, whitespace)
--	if space == -1 {
--		space = len(s)
--	}
--	n.Data = s[:space]
--	// The comparison to "html" is case-sensitive.
--	if n.Data != "html" {
--		quirks = true
--	}
--	n.Data = strings.ToLower(n.Data)
--	s = strings.TrimLeft(s[space:], whitespace)
--
--	if len(s) < 6 {
--		// It can't start with "PUBLIC" or "SYSTEM".
--		// Ignore the rest of the string.
--		return n, quirks || s != ""
--	}
--
--	key := strings.ToLower(s[:6])
--	s = s[6:]
--	for key == "public" || key == "system" {
--		s = strings.TrimLeft(s, whitespace)
--		if s == "" {
--			break
--		}
--		quote := s[0]
--		if quote != '"' && quote != '\'' {
--			break
--		}
--		s = s[1:]
--		q := strings.IndexRune(s, rune(quote))
--		var id string
--		if q == -1 {
--			id = s
--			s = ""
--		} else {
--			id = s[:q]
--			s = s[q+1:]
--		}
--		n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
--		if key == "public" {
--			key = "system"
--		} else {
--			key = ""
--		}
--	}
--
--	if key != "" || s != "" {
--		quirks = true
--	} else if len(n.Attr) > 0 {
--		if n.Attr[0].Key == "public" {
--			public := strings.ToLower(n.Attr[0].Val)
--			switch public {
--			case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
--				quirks = true
--			default:
--				for _, q := range quirkyIDs {
--					if strings.HasPrefix(public, q) {
--						quirks = true
--						break
--					}
--				}
--			}
--			// The following two public IDs only cause quirks mode if there is no system ID.
--			if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
--				strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
--				quirks = true
--			}
--		}
--		if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
--			strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
--			quirks = true
--		}
--	}
--
--	return n, quirks
--}
--
--// quirkyIDs is a list of public doctype identifiers that cause a document
--// to be interpreted in quirks mode. The identifiers should be in lower case.
--var quirkyIDs = []string{
--	"+//silmaril//dtd html pro v0r11 19970101//",
--	"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
--	"-//as//dtd html 3.0 aswedit + extensions//",
--	"-//ietf//dtd html 2.0 level 1//",
--	"-//ietf//dtd html 2.0 level 2//",
--	"-//ietf//dtd html 2.0 strict level 1//",
--	"-//ietf//dtd html 2.0 strict level 2//",
--	"-//ietf//dtd html 2.0 strict//",
--	"-//ietf//dtd html 2.0//",
--	"-//ietf//dtd html 2.1e//",
--	"-//ietf//dtd html 3.0//",
--	"-//ietf//dtd html 3.2 final//",
--	"-//ietf//dtd html 3.2//",
--	"-//ietf//dtd html 3//",
--	"-//ietf//dtd html level 0//",
--	"-//ietf//dtd html level 1//",
--	"-//ietf//dtd html level 2//",
--	"-//ietf//dtd html level 3//",
--	"-//ietf//dtd html strict level 0//",
--	"-//ietf//dtd html strict level 1//",
--	"-//ietf//dtd html strict level 2//",
--	"-//ietf//dtd html strict level 3//",
--	"-//ietf//dtd html strict//",
--	"-//ietf//dtd html//",
--	"-//metrius//dtd metrius presentational//",
--	"-//microsoft//dtd internet explorer 2.0 html strict//",
--	"-//microsoft//dtd internet explorer 2.0 html//",
--	"-//microsoft//dtd internet explorer 2.0 tables//",
--	"-//microsoft//dtd internet explorer 3.0 html strict//",
--	"-//microsoft//dtd internet explorer 3.0 html//",
--	"-//microsoft//dtd internet explorer 3.0 tables//",
--	"-//netscape comm. corp.//dtd html//",
--	"-//netscape comm. corp.//dtd strict html//",
--	"-//o'reilly and associates//dtd html 2.0//",
--	"-//o'reilly and associates//dtd html extended 1.0//",
--	"-//o'reilly and associates//dtd html extended relaxed 1.0//",
--	"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
--	"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
--	"-//spyglass//dtd html 2.0 extended//",
--	"-//sq//dtd html 2.0 hotmetal + extensions//",
--	"-//sun microsystems corp.//dtd hotjava html//",
--	"-//sun microsystems corp.//dtd hotjava strict html//",
--	"-//w3c//dtd html 3 1995-03-24//",
--	"-//w3c//dtd html 3.2 draft//",
--	"-//w3c//dtd html 3.2 final//",
--	"-//w3c//dtd html 3.2//",
--	"-//w3c//dtd html 3.2s draft//",
--	"-//w3c//dtd html 4.0 frameset//",
--	"-//w3c//dtd html 4.0 transitional//",
--	"-//w3c//dtd html experimental 19960712//",
--	"-//w3c//dtd html experimental 970421//",
--	"-//w3c//dtd w3 html//",
--	"-//w3o//dtd w3 html 3.0//",
--	"-//webtechs//dtd mozilla html 2.0//",
--	"-//webtechs//dtd mozilla html//",
--}
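
parseDoctype above decides quirks mode partly by prefix-matching the lower-cased public identifier against quirkyIDs. A standalone illustration of that prefix check, with isQuirkyPublicID as a hypothetical helper written for this note, not part of the package; the real function also weighs the system identifier and the frameset/transitional special cases:

    package main

    import (
        "fmt"
        "strings"
    )

    // isQuirkyPublicID reports whether the lower-cased public identifier starts
    // with one of the legacy prefixes, mirroring the prefix loop in parseDoctype.
    // It is a hypothetical helper for illustration only.
    func isQuirkyPublicID(public string, quirkyIDs []string) bool {
        public = strings.ToLower(public)
        for _, q := range quirkyIDs {
            if strings.HasPrefix(public, q) {
                return true
            }
        }
        return false
    }

    func main() {
        quirky := []string{
            "-//w3c//dtd html 4.0 transitional//",
            "-//netscape comm. corp.//dtd html//",
        }
        fmt.Println(isQuirkyPublicID("-//W3C//DTD HTML 4.0 Transitional//EN", quirky)) // true
        fmt.Println(isQuirkyPublicID("-//W3C//DTD XHTML 1.0 Strict//EN", quirky))      // false
    }
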
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/entity.go b/Godeps/_workspace/src/golang.org/x/net/html/entity.go
-deleted file mode 100644
-index af8a007..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/entity.go
-+++ /dev/null
-@@ -1,2253 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--// All entities that do not end with ';' are 6 or fewer bytes long.
--const longestEntityWithoutSemicolon = 6
--
--// entity is a map from HTML entity names to their values. The semicolon matters:
--// http://www.whatwg.org/specs/web-apps/current-work/multipage/named-character-references.html
--// lists both "amp" and "amp;" as two separate entries.
--//
--// Note that the HTML5 list is larger than the HTML4 list at
--// http://www.w3.org/TR/html4/sgml/entities.html
--var entity = map[string]rune{
--	"AElig;":                           '\U000000C6',
--	"AMP;":                             '\U00000026',
--	"Aacute;":                          '\U000000C1',
--	"Abreve;":                          '\U00000102',
--	"Acirc;":                           '\U000000C2',
--	"Acy;":                             '\U00000410',
--	"Afr;":                             '\U0001D504',
--	"Agrave;":                          '\U000000C0',
--	"Alpha;":                           '\U00000391',
--	"Amacr;":                           '\U00000100',
--	"And;":                             '\U00002A53',
--	"Aogon;":                           '\U00000104',
--	"Aopf;":                            '\U0001D538',
--	"ApplyFunction;":                   '\U00002061',
--	"Aring;":                           '\U000000C5',
--	"Ascr;":                            '\U0001D49C',
--	"Assign;":                          '\U00002254',
--	"Atilde;":                          '\U000000C3',
--	"Auml;":                            '\U000000C4',
--	"Backslash;":                       '\U00002216',
--	"Barv;":                            '\U00002AE7',
--	"Barwed;":                          '\U00002306',
--	"Bcy;":                             '\U00000411',
--	"Because;":                         '\U00002235',
--	"Bernoullis;":                      '\U0000212C',
--	"Beta;":                            '\U00000392',
--	"Bfr;":                             '\U0001D505',
--	"Bopf;":                            '\U0001D539',
--	"Breve;":                           '\U000002D8',
--	"Bscr;":                            '\U0000212C',
--	"Bumpeq;":                          '\U0000224E',
--	"CHcy;":                            '\U00000427',
--	"COPY;":                            '\U000000A9',
--	"Cacute;":                          '\U00000106',
--	"Cap;":                             '\U000022D2',
--	"CapitalDifferentialD;":            '\U00002145',
--	"Cayleys;":                         '\U0000212D',
--	"Ccaron;":                          '\U0000010C',
--	"Ccedil;":                          '\U000000C7',
--	"Ccirc;":                           '\U00000108',
--	"Cconint;":                         '\U00002230',
--	"Cdot;":                            '\U0000010A',
--	"Cedilla;":                         '\U000000B8',
--	"CenterDot;":                       '\U000000B7',
--	"Cfr;":                             '\U0000212D',
--	"Chi;":                             '\U000003A7',
--	"CircleDot;":                       '\U00002299',
--	"CircleMinus;":                     '\U00002296',
--	"CirclePlus;":                      '\U00002295',
--	"CircleTimes;":                     '\U00002297',
--	"ClockwiseContourIntegral;":        '\U00002232',
--	"CloseCurlyDoubleQuote;":           '\U0000201D',
--	"CloseCurlyQuote;":                 '\U00002019',
--	"Colon;":                           '\U00002237',
--	"Colone;":                          '\U00002A74',
--	"Congruent;":                       '\U00002261',
--	"Conint;":                          '\U0000222F',
--	"ContourIntegral;":                 '\U0000222E',
--	"Copf;":                            '\U00002102',
--	"Coproduct;":                       '\U00002210',
--	"CounterClockwiseContourIntegral;": '\U00002233',
--	"Cross;":                    '\U00002A2F',
--	"Cscr;":                     '\U0001D49E',
--	"Cup;":                      '\U000022D3',
--	"CupCap;":                   '\U0000224D',
--	"DD;":                       '\U00002145',
--	"DDotrahd;":                 '\U00002911',
--	"DJcy;":                     '\U00000402',
--	"DScy;":                     '\U00000405',
--	"DZcy;":                     '\U0000040F',
--	"Dagger;":                   '\U00002021',
--	"Darr;":                     '\U000021A1',
--	"Dashv;":                    '\U00002AE4',
--	"Dcaron;":                   '\U0000010E',
--	"Dcy;":                      '\U00000414',
--	"Del;":                      '\U00002207',
--	"Delta;":                    '\U00000394',
--	"Dfr;":                      '\U0001D507',
--	"DiacriticalAcute;":         '\U000000B4',
--	"DiacriticalDot;":           '\U000002D9',
--	"DiacriticalDoubleAcute;":   '\U000002DD',
--	"DiacriticalGrave;":         '\U00000060',
--	"DiacriticalTilde;":         '\U000002DC',
--	"Diamond;":                  '\U000022C4',
--	"DifferentialD;":            '\U00002146',
--	"Dopf;":                     '\U0001D53B',
--	"Dot;":                      '\U000000A8',
--	"DotDot;":                   '\U000020DC',
--	"DotEqual;":                 '\U00002250',
--	"DoubleContourIntegral;":    '\U0000222F',
--	"DoubleDot;":                '\U000000A8',
--	"DoubleDownArrow;":          '\U000021D3',
--	"DoubleLeftArrow;":          '\U000021D0',
--	"DoubleLeftRightArrow;":     '\U000021D4',
--	"DoubleLeftTee;":            '\U00002AE4',
--	"DoubleLongLeftArrow;":      '\U000027F8',
--	"DoubleLongLeftRightArrow;": '\U000027FA',
--	"DoubleLongRightArrow;":     '\U000027F9',
--	"DoubleRightArrow;":         '\U000021D2',
--	"DoubleRightTee;":           '\U000022A8',
--	"DoubleUpArrow;":            '\U000021D1',
--	"DoubleUpDownArrow;":        '\U000021D5',
--	"DoubleVerticalBar;":        '\U00002225',
--	"DownArrow;":                '\U00002193',
--	"DownArrowBar;":             '\U00002913',
--	"DownArrowUpArrow;":         '\U000021F5',
--	"DownBreve;":                '\U00000311',
--	"DownLeftRightVector;":      '\U00002950',
--	"DownLeftTeeVector;":        '\U0000295E',
--	"DownLeftVector;":           '\U000021BD',
--	"DownLeftVectorBar;":        '\U00002956',
--	"DownRightTeeVector;":       '\U0000295F',
--	"DownRightVector;":          '\U000021C1',
--	"DownRightVectorBar;":       '\U00002957',
--	"DownTee;":                  '\U000022A4',
--	"DownTeeArrow;":             '\U000021A7',
--	"Downarrow;":                '\U000021D3',
--	"Dscr;":                     '\U0001D49F',
--	"Dstrok;":                   '\U00000110',
--	"ENG;":                      '\U0000014A',
--	"ETH;":                      '\U000000D0',
--	"Eacute;":                   '\U000000C9',
--	"Ecaron;":                   '\U0000011A',
--	"Ecirc;":                    '\U000000CA',
--	"Ecy;":                      '\U0000042D',
--	"Edot;":                     '\U00000116',
--	"Efr;":                      '\U0001D508',
--	"Egrave;":                   '\U000000C8',
--	"Element;":                  '\U00002208',
--	"Emacr;":                    '\U00000112',
--	"EmptySmallSquare;":         '\U000025FB',
--	"EmptyVerySmallSquare;":     '\U000025AB',
--	"Eogon;":                    '\U00000118',
--	"Eopf;":                     '\U0001D53C',
--	"Epsilon;":                  '\U00000395',
--	"Equal;":                    '\U00002A75',
--	"EqualTilde;":               '\U00002242',
--	"Equilibrium;":              '\U000021CC',
--	"Escr;":                     '\U00002130',
--	"Esim;":                     '\U00002A73',
--	"Eta;":                      '\U00000397',
--	"Euml;":                     '\U000000CB',
--	"Exists;":                   '\U00002203',
--	"ExponentialE;":             '\U00002147',
--	"Fcy;":                      '\U00000424',
--	"Ffr;":                      '\U0001D509',
--	"FilledSmallSquare;":        '\U000025FC',
--	"FilledVerySmallSquare;":    '\U000025AA',
--	"Fopf;":                     '\U0001D53D',
--	"ForAll;":                   '\U00002200',
--	"Fouriertrf;":               '\U00002131',
--	"Fscr;":                     '\U00002131',
--	"GJcy;":                     '\U00000403',
--	"GT;":                       '\U0000003E',
--	"Gamma;":                    '\U00000393',
--	"Gammad;":                   '\U000003DC',
--	"Gbreve;":                   '\U0000011E',
--	"Gcedil;":                   '\U00000122',
--	"Gcirc;":                    '\U0000011C',
--	"Gcy;":                      '\U00000413',
--	"Gdot;":                     '\U00000120',
--	"Gfr;":                      '\U0001D50A',
--	"Gg;":                       '\U000022D9',
--	"Gopf;":                     '\U0001D53E',
--	"GreaterEqual;":             '\U00002265',
--	"GreaterEqualLess;":         '\U000022DB',
--	"GreaterFullEqual;":         '\U00002267',
--	"GreaterGreater;":           '\U00002AA2',
--	"GreaterLess;":              '\U00002277',
--	"GreaterSlantEqual;":        '\U00002A7E',
--	"GreaterTilde;":             '\U00002273',
--	"Gscr;":                     '\U0001D4A2',
--	"Gt;":                       '\U0000226B',
--	"HARDcy;":                   '\U0000042A',
--	"Hacek;":                    '\U000002C7',
--	"Hat;":                      '\U0000005E',
--	"Hcirc;":                    '\U00000124',
--	"Hfr;":                      '\U0000210C',
--	"HilbertSpace;":             '\U0000210B',
--	"Hopf;":                     '\U0000210D',
--	"HorizontalLine;":           '\U00002500',
--	"Hscr;":                     '\U0000210B',
--	"Hstrok;":                   '\U00000126',
--	"HumpDownHump;":             '\U0000224E',
--	"HumpEqual;":                '\U0000224F',
--	"IEcy;":                     '\U00000415',
--	"IJlig;":                    '\U00000132',
--	"IOcy;":                     '\U00000401',
--	"Iacute;":                   '\U000000CD',
--	"Icirc;":                    '\U000000CE',
--	"Icy;":                      '\U00000418',
--	"Idot;":                     '\U00000130',
--	"Ifr;":                      '\U00002111',
--	"Igrave;":                   '\U000000CC',
--	"Im;":                       '\U00002111',
--	"Imacr;":                    '\U0000012A',
--	"ImaginaryI;":               '\U00002148',
--	"Implies;":                  '\U000021D2',
--	"Int;":                      '\U0000222C',
--	"Integral;":                 '\U0000222B',
--	"Intersection;":             '\U000022C2',
--	"InvisibleComma;":           '\U00002063',
--	"InvisibleTimes;":           '\U00002062',
--	"Iogon;":                    '\U0000012E',
--	"Iopf;":                     '\U0001D540',
--	"Iota;":                     '\U00000399',
--	"Iscr;":                     '\U00002110',
--	"Itilde;":                   '\U00000128',
--	"Iukcy;":                    '\U00000406',
--	"Iuml;":                     '\U000000CF',
--	"Jcirc;":                    '\U00000134',
--	"Jcy;":                      '\U00000419',
--	"Jfr;":                      '\U0001D50D',
--	"Jopf;":                     '\U0001D541',
--	"Jscr;":                     '\U0001D4A5',
--	"Jsercy;":                   '\U00000408',
--	"Jukcy;":                    '\U00000404',
--	"KHcy;":                     '\U00000425',
--	"KJcy;":                     '\U0000040C',
--	"Kappa;":                    '\U0000039A',
--	"Kcedil;":                   '\U00000136',
--	"Kcy;":                      '\U0000041A',
--	"Kfr;":                      '\U0001D50E',
--	"Kopf;":                     '\U0001D542',
--	"Kscr;":                     '\U0001D4A6',
--	"LJcy;":                     '\U00000409',
--	"LT;":                       '\U0000003C',
--	"Lacute;":                   '\U00000139',
--	"Lambda;":                   '\U0000039B',
--	"Lang;":                     '\U000027EA',
--	"Laplacetrf;":               '\U00002112',
--	"Larr;":                     '\U0000219E',
--	"Lcaron;":                   '\U0000013D',
--	"Lcedil;":                   '\U0000013B',
--	"Lcy;":                      '\U0000041B',
--	"LeftAngleBracket;":         '\U000027E8',
--	"LeftArrow;":                '\U00002190',
--	"LeftArrowBar;":             '\U000021E4',
--	"LeftArrowRightArrow;":      '\U000021C6',
--	"LeftCeiling;":              '\U00002308',
--	"LeftDoubleBracket;":        '\U000027E6',
--	"LeftDownTeeVector;":        '\U00002961',
--	"LeftDownVector;":           '\U000021C3',
--	"LeftDownVectorBar;":        '\U00002959',
--	"LeftFloor;":                '\U0000230A',
--	"LeftRightArrow;":           '\U00002194',
--	"LeftRightVector;":          '\U0000294E',
--	"LeftTee;":                  '\U000022A3',
--	"LeftTeeArrow;":             '\U000021A4',
--	"LeftTeeVector;":            '\U0000295A',
--	"LeftTriangle;":             '\U000022B2',
--	"LeftTriangleBar;":          '\U000029CF',
--	"LeftTriangleEqual;":        '\U000022B4',
--	"LeftUpDownVector;":         '\U00002951',
--	"LeftUpTeeVector;":          '\U00002960',
--	"LeftUpVector;":             '\U000021BF',
--	"LeftUpVectorBar;":          '\U00002958',
--	"LeftVector;":               '\U000021BC',
--	"LeftVectorBar;":            '\U00002952',
--	"Leftarrow;":                '\U000021D0',
--	"Leftrightarrow;":           '\U000021D4',
--	"LessEqualGreater;":         '\U000022DA',
--	"LessFullEqual;":            '\U00002266',
--	"LessGreater;":              '\U00002276',
--	"LessLess;":                 '\U00002AA1',
--	"LessSlantEqual;":           '\U00002A7D',
--	"LessTilde;":                '\U00002272',
--	"Lfr;":                      '\U0001D50F',
--	"Ll;":                       '\U000022D8',
--	"Lleftarrow;":               '\U000021DA',
--	"Lmidot;":                   '\U0000013F',
--	"LongLeftArrow;":            '\U000027F5',
--	"LongLeftRightArrow;":       '\U000027F7',
--	"LongRightArrow;":           '\U000027F6',
--	"Longleftarrow;":            '\U000027F8',
--	"Longleftrightarrow;":       '\U000027FA',
--	"Longrightarrow;":           '\U000027F9',
--	"Lopf;":                     '\U0001D543',
--	"LowerLeftArrow;":           '\U00002199',
--	"LowerRightArrow;":          '\U00002198',
--	"Lscr;":                     '\U00002112',
--	"Lsh;":                      '\U000021B0',
--	"Lstrok;":                   '\U00000141',
--	"Lt;":                       '\U0000226A',
--	"Map;":                      '\U00002905',
--	"Mcy;":                      '\U0000041C',
--	"MediumSpace;":              '\U0000205F',
--	"Mellintrf;":                '\U00002133',
--	"Mfr;":                      '\U0001D510',
--	"MinusPlus;":                '\U00002213',
--	"Mopf;":                     '\U0001D544',
--	"Mscr;":                     '\U00002133',
--	"Mu;":                       '\U0000039C',
--	"NJcy;":                     '\U0000040A',
--	"Nacute;":                   '\U00000143',
--	"Ncaron;":                   '\U00000147',
--	"Ncedil;":                   '\U00000145',
--	"Ncy;":                      '\U0000041D',
--	"NegativeMediumSpace;":      '\U0000200B',
--	"NegativeThickSpace;":       '\U0000200B',
--	"NegativeThinSpace;":        '\U0000200B',
--	"NegativeVeryThinSpace;":    '\U0000200B',
--	"NestedGreaterGreater;":     '\U0000226B',
--	"NestedLessLess;":           '\U0000226A',
--	"NewLine;":                  '\U0000000A',
--	"Nfr;":                      '\U0001D511',
--	"NoBreak;":                  '\U00002060',
--	"NonBreakingSpace;":         '\U000000A0',
--	"Nopf;":                     '\U00002115',
--	"Not;":                      '\U00002AEC',
--	"NotCongruent;":             '\U00002262',
--	"NotCupCap;":                '\U0000226D',
--	"NotDoubleVerticalBar;":     '\U00002226',
--	"NotElement;":               '\U00002209',
--	"NotEqual;":                 '\U00002260',
--	"NotExists;":                '\U00002204',
--	"NotGreater;":               '\U0000226F',
--	"NotGreaterEqual;":          '\U00002271',
--	"NotGreaterLess;":           '\U00002279',
--	"NotGreaterTilde;":          '\U00002275',
--	"NotLeftTriangle;":          '\U000022EA',
--	"NotLeftTriangleEqual;":     '\U000022EC',
--	"NotLess;":                  '\U0000226E',
--	"NotLessEqual;":             '\U00002270',
--	"NotLessGreater;":           '\U00002278',
--	"NotLessTilde;":             '\U00002274',
--	"NotPrecedes;":              '\U00002280',
--	"NotPrecedesSlantEqual;":    '\U000022E0',
--	"NotReverseElement;":        '\U0000220C',
--	"NotRightTriangle;":         '\U000022EB',
--	"NotRightTriangleEqual;":    '\U000022ED',
--	"NotSquareSubsetEqual;":     '\U000022E2',
--	"NotSquareSupersetEqual;":   '\U000022E3',
--	"NotSubsetEqual;":           '\U00002288',
--	"NotSucceeds;":              '\U00002281',
--	"NotSucceedsSlantEqual;":    '\U000022E1',
--	"NotSupersetEqual;":         '\U00002289',
--	"NotTilde;":                 '\U00002241',
--	"NotTildeEqual;":            '\U00002244',
--	"NotTildeFullEqual;":        '\U00002247',
--	"NotTildeTilde;":            '\U00002249',
--	"NotVerticalBar;":           '\U00002224',
--	"Nscr;":                     '\U0001D4A9',
--	"Ntilde;":                   '\U000000D1',
--	"Nu;":                       '\U0000039D',
--	"OElig;":                    '\U00000152',
--	"Oacute;":                   '\U000000D3',
--	"Ocirc;":                    '\U000000D4',
--	"Ocy;":                      '\U0000041E',
--	"Odblac;":                   '\U00000150',
--	"Ofr;":                      '\U0001D512',
--	"Ograve;":                   '\U000000D2',
--	"Omacr;":                    '\U0000014C',
--	"Omega;":                    '\U000003A9',
--	"Omicron;":                  '\U0000039F',
--	"Oopf;":                     '\U0001D546',
--	"OpenCurlyDoubleQuote;":     '\U0000201C',
--	"OpenCurlyQuote;":           '\U00002018',
--	"Or;":                       '\U00002A54',
--	"Oscr;":                     '\U0001D4AA',
--	"Oslash;":                   '\U000000D8',
--	"Otilde;":                   '\U000000D5',
--	"Otimes;":                   '\U00002A37',
--	"Ouml;":                     '\U000000D6',
--	"OverBar;":                  '\U0000203E',
--	"OverBrace;":                '\U000023DE',
--	"OverBracket;":              '\U000023B4',
--	"OverParenthesis;":          '\U000023DC',
--	"PartialD;":                 '\U00002202',
--	"Pcy;":                      '\U0000041F',
--	"Pfr;":                      '\U0001D513',
--	"Phi;":                      '\U000003A6',
--	"Pi;":                       '\U000003A0',
--	"PlusMinus;":                '\U000000B1',
--	"Poincareplane;":            '\U0000210C',
--	"Popf;":                     '\U00002119',
--	"Pr;":                       '\U00002ABB',
--	"Precedes;":                 '\U0000227A',
--	"PrecedesEqual;":            '\U00002AAF',
--	"PrecedesSlantEqual;":       '\U0000227C',
--	"PrecedesTilde;":            '\U0000227E',
--	"Prime;":                    '\U00002033',
--	"Product;":                  '\U0000220F',
--	"Proportion;":               '\U00002237',
--	"Proportional;":             '\U0000221D',
--	"Pscr;":                     '\U0001D4AB',
--	"Psi;":                      '\U000003A8',
--	"QUOT;":                     '\U00000022',
--	"Qfr;":                      '\U0001D514',
--	"Qopf;":                     '\U0000211A',
--	"Qscr;":                     '\U0001D4AC',
--	"RBarr;":                    '\U00002910',
--	"REG;":                      '\U000000AE',
--	"Racute;":                   '\U00000154',
--	"Rang;":                     '\U000027EB',
--	"Rarr;":                     '\U000021A0',
--	"Rarrtl;":                   '\U00002916',
--	"Rcaron;":                   '\U00000158',
--	"Rcedil;":                   '\U00000156',
--	"Rcy;":                      '\U00000420',
--	"Re;":                       '\U0000211C',
--	"ReverseElement;":           '\U0000220B',
--	"ReverseEquilibrium;":       '\U000021CB',
--	"ReverseUpEquilibrium;":     '\U0000296F',
--	"Rfr;":                      '\U0000211C',
--	"Rho;":                      '\U000003A1',
--	"RightAngleBracket;":        '\U000027E9',
--	"RightArrow;":               '\U00002192',
--	"RightArrowBar;":            '\U000021E5',
--	"RightArrowLeftArrow;":      '\U000021C4',
--	"RightCeiling;":             '\U00002309',
--	"RightDoubleBracket;":       '\U000027E7',
--	"RightDownTeeVector;":       '\U0000295D',
--	"RightDownVector;":          '\U000021C2',
--	"RightDownVectorBar;":       '\U00002955',
--	"RightFloor;":               '\U0000230B',
--	"RightTee;":                 '\U000022A2',
--	"RightTeeArrow;":            '\U000021A6',
--	"RightTeeVector;":           '\U0000295B',
--	"RightTriangle;":            '\U000022B3',
--	"RightTriangleBar;":         '\U000029D0',
--	"RightTriangleEqual;":       '\U000022B5',
--	"RightUpDownVector;":        '\U0000294F',
--	"RightUpTeeVector;":         '\U0000295C',
--	"RightUpVector;":            '\U000021BE',
--	"RightUpVectorBar;":         '\U00002954',
--	"RightVector;":              '\U000021C0',
--	"RightVectorBar;":           '\U00002953',
--	"Rightarrow;":               '\U000021D2',
--	"Ropf;":                     '\U0000211D',
--	"RoundImplies;":             '\U00002970',
--	"Rrightarrow;":              '\U000021DB',
--	"Rscr;":                     '\U0000211B',
--	"Rsh;":                      '\U000021B1',
--	"RuleDelayed;":              '\U000029F4',
--	"SHCHcy;":                   '\U00000429',
--	"SHcy;":                     '\U00000428',
--	"SOFTcy;":                   '\U0000042C',
--	"Sacute;":                   '\U0000015A',
--	"Sc;":                       '\U00002ABC',
--	"Scaron;":                   '\U00000160',
--	"Scedil;":                   '\U0000015E',
--	"Scirc;":                    '\U0000015C',
--	"Scy;":                      '\U00000421',
--	"Sfr;":                      '\U0001D516',
--	"ShortDownArrow;":           '\U00002193',
--	"ShortLeftArrow;":           '\U00002190',
--	"ShortRightArrow;":          '\U00002192',
--	"ShortUpArrow;":             '\U00002191',
--	"Sigma;":                    '\U000003A3',
--	"SmallCircle;":              '\U00002218',
--	"Sopf;":                     '\U0001D54A',
--	"Sqrt;":                     '\U0000221A',
--	"Square;":                   '\U000025A1',
--	"SquareIntersection;":       '\U00002293',
--	"SquareSubset;":             '\U0000228F',
--	"SquareSubsetEqual;":        '\U00002291',
--	"SquareSuperset;":           '\U00002290',
--	"SquareSupersetEqual;":      '\U00002292',
--	"SquareUnion;":              '\U00002294',
--	"Sscr;":                     '\U0001D4AE',
--	"Star;":                     '\U000022C6',
--	"Sub;":                      '\U000022D0',
--	"Subset;":                   '\U000022D0',
--	"SubsetEqual;":              '\U00002286',
--	"Succeeds;":                 '\U0000227B',
--	"SucceedsEqual;":            '\U00002AB0',
--	"SucceedsSlantEqual;":       '\U0000227D',
--	"SucceedsTilde;":            '\U0000227F',
--	"SuchThat;":                 '\U0000220B',
--	"Sum;":                      '\U00002211',
--	"Sup;":                      '\U000022D1',
--	"Superset;":                 '\U00002283',
--	"SupersetEqual;":            '\U00002287',
--	"Supset;":                   '\U000022D1',
--	"THORN;":                    '\U000000DE',
--	"TRADE;":                    '\U00002122',
--	"TSHcy;":                    '\U0000040B',
--	"TScy;":                     '\U00000426',
--	"Tab;":                      '\U00000009',
--	"Tau;":                      '\U000003A4',
--	"Tcaron;":                   '\U00000164',
--	"Tcedil;":                   '\U00000162',
--	"Tcy;":                      '\U00000422',
--	"Tfr;":                      '\U0001D517',
--	"Therefore;":                '\U00002234',
--	"Theta;":                    '\U00000398',
--	"ThinSpace;":                '\U00002009',
--	"Tilde;":                    '\U0000223C',
--	"TildeEqual;":               '\U00002243',
--	"TildeFullEqual;":           '\U00002245',
--	"TildeTilde;":               '\U00002248',
--	"Topf;":                     '\U0001D54B',
--	"TripleDot;":                '\U000020DB',
--	"Tscr;":                     '\U0001D4AF',
--	"Tstrok;":                   '\U00000166',
--	"Uacute;":                   '\U000000DA',
--	"Uarr;":                     '\U0000219F',
--	"Uarrocir;":                 '\U00002949',
--	"Ubrcy;":                    '\U0000040E',
--	"Ubreve;":                   '\U0000016C',
--	"Ucirc;":                    '\U000000DB',
--	"Ucy;":                      '\U00000423',
--	"Udblac;":                   '\U00000170',
--	"Ufr;":                      '\U0001D518',
--	"Ugrave;":                   '\U000000D9',
--	"Umacr;":                    '\U0000016A',
--	"UnderBar;":                 '\U0000005F',
--	"UnderBrace;":               '\U000023DF',
--	"UnderBracket;":             '\U000023B5',
--	"UnderParenthesis;":         '\U000023DD',
--	"Union;":                    '\U000022C3',
--	"UnionPlus;":                '\U0000228E',
--	"Uogon;":                    '\U00000172',
--	"Uopf;":                     '\U0001D54C',
--	"UpArrow;":                  '\U00002191',
--	"UpArrowBar;":               '\U00002912',
--	"UpArrowDownArrow;":         '\U000021C5',
--	"UpDownArrow;":              '\U00002195',
--	"UpEquilibrium;":            '\U0000296E',
--	"UpTee;":                    '\U000022A5',
--	"UpTeeArrow;":               '\U000021A5',
--	"Uparrow;":                  '\U000021D1',
--	"Updownarrow;":              '\U000021D5',
--	"UpperLeftArrow;":           '\U00002196',
--	"UpperRightArrow;":          '\U00002197',
--	"Upsi;":                     '\U000003D2',
--	"Upsilon;":                  '\U000003A5',
--	"Uring;":                    '\U0000016E',
--	"Uscr;":                     '\U0001D4B0',
--	"Utilde;":                   '\U00000168',
--	"Uuml;":                     '\U000000DC',
--	"VDash;":                    '\U000022AB',
--	"Vbar;":                     '\U00002AEB',
--	"Vcy;":                      '\U00000412',
--	"Vdash;":                    '\U000022A9',
--	"Vdashl;":                   '\U00002AE6',
--	"Vee;":                      '\U000022C1',
--	"Verbar;":                   '\U00002016',
--	"Vert;":                     '\U00002016',
--	"VerticalBar;":              '\U00002223',
--	"VerticalLine;":             '\U0000007C',
--	"VerticalSeparator;":        '\U00002758',
--	"VerticalTilde;":            '\U00002240',
--	"VeryThinSpace;":            '\U0000200A',
--	"Vfr;":                      '\U0001D519',
--	"Vopf;":                     '\U0001D54D',
--	"Vscr;":                     '\U0001D4B1',
--	"Vvdash;":                   '\U000022AA',
--	"Wcirc;":                    '\U00000174',
--	"Wedge;":                    '\U000022C0',
--	"Wfr;":                      '\U0001D51A',
--	"Wopf;":                     '\U0001D54E',
--	"Wscr;":                     '\U0001D4B2',
--	"Xfr;":                      '\U0001D51B',
--	"Xi;":                       '\U0000039E',
--	"Xopf;":                     '\U0001D54F',
--	"Xscr;":                     '\U0001D4B3',
--	"YAcy;":                     '\U0000042F',
--	"YIcy;":                     '\U00000407',
--	"YUcy;":                     '\U0000042E',
--	"Yacute;":                   '\U000000DD',
--	"Ycirc;":                    '\U00000176',
--	"Ycy;":                      '\U0000042B',
--	"Yfr;":                      '\U0001D51C',
--	"Yopf;":                     '\U0001D550',
--	"Yscr;":                     '\U0001D4B4',
--	"Yuml;":                     '\U00000178',
--	"ZHcy;":                     '\U00000416',
--	"Zacute;":                   '\U00000179',
--	"Zcaron;":                   '\U0000017D',
--	"Zcy;":                      '\U00000417',
--	"Zdot;":                     '\U0000017B',
--	"ZeroWidthSpace;":           '\U0000200B',
--	"Zeta;":                     '\U00000396',
--	"Zfr;":                      '\U00002128',
--	"Zopf;":                     '\U00002124',
--	"Zscr;":                     '\U0001D4B5',
--	"aacute;":                   '\U000000E1',
--	"abreve;":                   '\U00000103',
--	"ac;":                       '\U0000223E',
--	"acd;":                      '\U0000223F',
--	"acirc;":                    '\U000000E2',
--	"acute;":                    '\U000000B4',
--	"acy;":                      '\U00000430',
--	"aelig;":                    '\U000000E6',
--	"af;":                       '\U00002061',
--	"afr;":                      '\U0001D51E',
--	"agrave;":                   '\U000000E0',
--	"alefsym;":                  '\U00002135',
--	"aleph;":                    '\U00002135',
--	"alpha;":                    '\U000003B1',
--	"amacr;":                    '\U00000101',
--	"amalg;":                    '\U00002A3F',
--	"amp;":                      '\U00000026',
--	"and;":                      '\U00002227',
--	"andand;":                   '\U00002A55',
--	"andd;":                     '\U00002A5C',
--	"andslope;":                 '\U00002A58',
--	"andv;":                     '\U00002A5A',
--	"ang;":                      '\U00002220',
--	"ange;":                     '\U000029A4',
--	"angle;":                    '\U00002220',
--	"angmsd;":                   '\U00002221',
--	"angmsdaa;":                 '\U000029A8',
--	"angmsdab;":                 '\U000029A9',
--	"angmsdac;":                 '\U000029AA',
--	"angmsdad;":                 '\U000029AB',
--	"angmsdae;":                 '\U000029AC',
--	"angmsdaf;":                 '\U000029AD',
--	"angmsdag;":                 '\U000029AE',
--	"angmsdah;":                 '\U000029AF',
--	"angrt;":                    '\U0000221F',
--	"angrtvb;":                  '\U000022BE',
--	"angrtvbd;":                 '\U0000299D',
--	"angsph;":                   '\U00002222',
--	"angst;":                    '\U000000C5',
--	"angzarr;":                  '\U0000237C',
--	"aogon;":                    '\U00000105',
--	"aopf;":                     '\U0001D552',
--	"ap;":                       '\U00002248',
--	"apE;":                      '\U00002A70',
--	"apacir;":                   '\U00002A6F',
--	"ape;":                      '\U0000224A',
--	"apid;":                     '\U0000224B',
--	"apos;":                     '\U00000027',
--	"approx;":                   '\U00002248',
--	"approxeq;":                 '\U0000224A',
--	"aring;":                    '\U000000E5',
--	"ascr;":                     '\U0001D4B6',
--	"ast;":                      '\U0000002A',
--	"asymp;":                    '\U00002248',
--	"asympeq;":                  '\U0000224D',
--	"atilde;":                   '\U000000E3',
--	"auml;":                     '\U000000E4',
--	"awconint;":                 '\U00002233',
--	"awint;":                    '\U00002A11',
--	"bNot;":                     '\U00002AED',
--	"backcong;":                 '\U0000224C',
--	"backepsilon;":              '\U000003F6',
--	"backprime;":                '\U00002035',
--	"backsim;":                  '\U0000223D',
--	"backsimeq;":                '\U000022CD',
--	"barvee;":                   '\U000022BD',
--	"barwed;":                   '\U00002305',
--	"barwedge;":                 '\U00002305',
--	"bbrk;":                     '\U000023B5',
--	"bbrktbrk;":                 '\U000023B6',
--	"bcong;":                    '\U0000224C',
--	"bcy;":                      '\U00000431',
--	"bdquo;":                    '\U0000201E',
--	"becaus;":                   '\U00002235',
--	"because;":                  '\U00002235',
--	"bemptyv;":                  '\U000029B0',
--	"bepsi;":                    '\U000003F6',
--	"bernou;":                   '\U0000212C',
--	"beta;":                     '\U000003B2',
--	"beth;":                     '\U00002136',
--	"between;":                  '\U0000226C',
--	"bfr;":                      '\U0001D51F',
--	"bigcap;":                   '\U000022C2',
--	"bigcirc;":                  '\U000025EF',
--	"bigcup;":                   '\U000022C3',
--	"bigodot;":                  '\U00002A00',
--	"bigoplus;":                 '\U00002A01',
--	"bigotimes;":                '\U00002A02',
--	"bigsqcup;":                 '\U00002A06',
--	"bigstar;":                  '\U00002605',
--	"bigtriangledown;":          '\U000025BD',
--	"bigtriangleup;":            '\U000025B3',
--	"biguplus;":                 '\U00002A04',
--	"bigvee;":                   '\U000022C1',
--	"bigwedge;":                 '\U000022C0',
--	"bkarow;":                   '\U0000290D',
--	"blacklozenge;":             '\U000029EB',
--	"blacksquare;":              '\U000025AA',
--	"blacktriangle;":            '\U000025B4',
--	"blacktriangledown;":        '\U000025BE',
--	"blacktriangleleft;":        '\U000025C2',
--	"blacktriangleright;":       '\U000025B8',
--	"blank;":                    '\U00002423',
--	"blk12;":                    '\U00002592',
--	"blk14;":                    '\U00002591',
--	"blk34;":                    '\U00002593',
--	"block;":                    '\U00002588',
--	"bnot;":                     '\U00002310',
--	"bopf;":                     '\U0001D553',
--	"bot;":                      '\U000022A5',
--	"bottom;":                   '\U000022A5',
--	"bowtie;":                   '\U000022C8',
--	"boxDL;":                    '\U00002557',
--	"boxDR;":                    '\U00002554',
--	"boxDl;":                    '\U00002556',
--	"boxDr;":                    '\U00002553',
--	"boxH;":                     '\U00002550',
--	"boxHD;":                    '\U00002566',
--	"boxHU;":                    '\U00002569',
--	"boxHd;":                    '\U00002564',
--	"boxHu;":                    '\U00002567',
--	"boxUL;":                    '\U0000255D',
--	"boxUR;":                    '\U0000255A',
--	"boxUl;":                    '\U0000255C',
--	"boxUr;":                    '\U00002559',
--	"boxV;":                     '\U00002551',
--	"boxVH;":                    '\U0000256C',
--	"boxVL;":                    '\U00002563',
--	"boxVR;":                    '\U00002560',
--	"boxVh;":                    '\U0000256B',
--	"boxVl;":                    '\U00002562',
--	"boxVr;":                    '\U0000255F',
--	"boxbox;":                   '\U000029C9',
--	"boxdL;":                    '\U00002555',
--	"boxdR;":                    '\U00002552',
--	"boxdl;":                    '\U00002510',
--	"boxdr;":                    '\U0000250C',
--	"boxh;":                     '\U00002500',
--	"boxhD;":                    '\U00002565',
--	"boxhU;":                    '\U00002568',
--	"boxhd;":                    '\U0000252C',
--	"boxhu;":                    '\U00002534',
--	"boxminus;":                 '\U0000229F',
--	"boxplus;":                  '\U0000229E',
--	"boxtimes;":                 '\U000022A0',
--	"boxuL;":                    '\U0000255B',
--	"boxuR;":                    '\U00002558',
--	"boxul;":                    '\U00002518',
--	"boxur;":                    '\U00002514',
--	"boxv;":                     '\U00002502',
--	"boxvH;":                    '\U0000256A',
--	"boxvL;":                    '\U00002561',
--	"boxvR;":                    '\U0000255E',
--	"boxvh;":                    '\U0000253C',
--	"boxvl;":                    '\U00002524',
--	"boxvr;":                    '\U0000251C',
--	"bprime;":                   '\U00002035',
--	"breve;":                    '\U000002D8',
--	"brvbar;":                   '\U000000A6',
--	"bscr;":                     '\U0001D4B7',
--	"bsemi;":                    '\U0000204F',
--	"bsim;":                     '\U0000223D',
--	"bsime;":                    '\U000022CD',
--	"bsol;":                     '\U0000005C',
--	"bsolb;":                    '\U000029C5',
--	"bsolhsub;":                 '\U000027C8',
--	"bull;":                     '\U00002022',
--	"bullet;":                   '\U00002022',
--	"bump;":                     '\U0000224E',
--	"bumpE;":                    '\U00002AAE',
--	"bumpe;":                    '\U0000224F',
--	"bumpeq;":                   '\U0000224F',
--	"cacute;":                   '\U00000107',
--	"cap;":                      '\U00002229',
--	"capand;":                   '\U00002A44',
--	"capbrcup;":                 '\U00002A49',
--	"capcap;":                   '\U00002A4B',
--	"capcup;":                   '\U00002A47',
--	"capdot;":                   '\U00002A40',
--	"caret;":                    '\U00002041',
--	"caron;":                    '\U000002C7',
--	"ccaps;":                    '\U00002A4D',
--	"ccaron;":                   '\U0000010D',
--	"ccedil;":                   '\U000000E7',
--	"ccirc;":                    '\U00000109',
--	"ccups;":                    '\U00002A4C',
--	"ccupssm;":                  '\U00002A50',
--	"cdot;":                     '\U0000010B',
--	"cedil;":                    '\U000000B8',
--	"cemptyv;":                  '\U000029B2',
--	"cent;":                     '\U000000A2',
--	"centerdot;":                '\U000000B7',
--	"cfr;":                      '\U0001D520',
--	"chcy;":                     '\U00000447',
--	"check;":                    '\U00002713',
--	"checkmark;":                '\U00002713',
--	"chi;":                      '\U000003C7',
--	"cir;":                      '\U000025CB',
--	"cirE;":                     '\U000029C3',
--	"circ;":                     '\U000002C6',
--	"circeq;":                   '\U00002257',
--	"circlearrowleft;":          '\U000021BA',
--	"circlearrowright;":         '\U000021BB',
--	"circledR;":                 '\U000000AE',
--	"circledS;":                 '\U000024C8',
--	"circledast;":               '\U0000229B',
--	"circledcirc;":              '\U0000229A',
--	"circleddash;":              '\U0000229D',
--	"cire;":                     '\U00002257',
--	"cirfnint;":                 '\U00002A10',
--	"cirmid;":                   '\U00002AEF',
--	"cirscir;":                  '\U000029C2',
--	"clubs;":                    '\U00002663',
--	"clubsuit;":                 '\U00002663',
--	"colon;":                    '\U0000003A',
--	"colone;":                   '\U00002254',
--	"coloneq;":                  '\U00002254',
--	"comma;":                    '\U0000002C',
--	"commat;":                   '\U00000040',
--	"comp;":                     '\U00002201',
--	"compfn;":                   '\U00002218',
--	"complement;":               '\U00002201',
--	"complexes;":                '\U00002102',
--	"cong;":                     '\U00002245',
--	"congdot;":                  '\U00002A6D',
--	"conint;":                   '\U0000222E',
--	"copf;":                     '\U0001D554',
--	"coprod;":                   '\U00002210',
--	"copy;":                     '\U000000A9',
--	"copysr;":                   '\U00002117',
--	"crarr;":                    '\U000021B5',
--	"cross;":                    '\U00002717',
--	"cscr;":                     '\U0001D4B8',
--	"csub;":                     '\U00002ACF',
--	"csube;":                    '\U00002AD1',
--	"csup;":                     '\U00002AD0',
--	"csupe;":                    '\U00002AD2',
--	"ctdot;":                    '\U000022EF',
--	"cudarrl;":                  '\U00002938',
--	"cudarrr;":                  '\U00002935',
--	"cuepr;":                    '\U000022DE',
--	"cuesc;":                    '\U000022DF',
--	"cularr;":                   '\U000021B6',
--	"cularrp;":                  '\U0000293D',
--	"cup;":                      '\U0000222A',
--	"cupbrcap;":                 '\U00002A48',
--	"cupcap;":                   '\U00002A46',
--	"cupcup;":                   '\U00002A4A',
--	"cupdot;":                   '\U0000228D',
--	"cupor;":                    '\U00002A45',
--	"curarr;":                   '\U000021B7',
--	"curarrm;":                  '\U0000293C',
--	"curlyeqprec;":              '\U000022DE',
--	"curlyeqsucc;":              '\U000022DF',
--	"curlyvee;":                 '\U000022CE',
--	"curlywedge;":               '\U000022CF',
--	"curren;":                   '\U000000A4',
--	"curvearrowleft;":           '\U000021B6',
--	"curvearrowright;":          '\U000021B7',
--	"cuvee;":                    '\U000022CE',
--	"cuwed;":                    '\U000022CF',
--	"cwconint;":                 '\U00002232',
--	"cwint;":                    '\U00002231',
--	"cylcty;":                   '\U0000232D',
--	"dArr;":                     '\U000021D3',
--	"dHar;":                     '\U00002965',
--	"dagger;":                   '\U00002020',
--	"daleth;":                   '\U00002138',
--	"darr;":                     '\U00002193',
--	"dash;":                     '\U00002010',
--	"dashv;":                    '\U000022A3',
--	"dbkarow;":                  '\U0000290F',
--	"dblac;":                    '\U000002DD',
--	"dcaron;":                   '\U0000010F',
--	"dcy;":                      '\U00000434',
--	"dd;":                       '\U00002146',
--	"ddagger;":                  '\U00002021',
--	"ddarr;":                    '\U000021CA',
--	"ddotseq;":                  '\U00002A77',
--	"deg;":                      '\U000000B0',
--	"delta;":                    '\U000003B4',
--	"demptyv;":                  '\U000029B1',
--	"dfisht;":                   '\U0000297F',
--	"dfr;":                      '\U0001D521',
--	"dharl;":                    '\U000021C3',
--	"dharr;":                    '\U000021C2',
--	"diam;":                     '\U000022C4',
--	"diamond;":                  '\U000022C4',
--	"diamondsuit;":              '\U00002666',
--	"diams;":                    '\U00002666',
--	"die;":                      '\U000000A8',
--	"digamma;":                  '\U000003DD',
--	"disin;":                    '\U000022F2',
--	"div;":                      '\U000000F7',
--	"divide;":                   '\U000000F7',
--	"divideontimes;":            '\U000022C7',
--	"divonx;":                   '\U000022C7',
--	"djcy;":                     '\U00000452',
--	"dlcorn;":                   '\U0000231E',
--	"dlcrop;":                   '\U0000230D',
--	"dollar;":                   '\U00000024',
--	"dopf;":                     '\U0001D555',
--	"dot;":                      '\U000002D9',
--	"doteq;":                    '\U00002250',
--	"doteqdot;":                 '\U00002251',
--	"dotminus;":                 '\U00002238',
--	"dotplus;":                  '\U00002214',
--	"dotsquare;":                '\U000022A1',
--	"doublebarwedge;":           '\U00002306',
--	"downarrow;":                '\U00002193',
--	"downdownarrows;":           '\U000021CA',
--	"downharpoonleft;":          '\U000021C3',
--	"downharpoonright;":         '\U000021C2',
--	"drbkarow;":                 '\U00002910',
--	"drcorn;":                   '\U0000231F',
--	"drcrop;":                   '\U0000230C',
--	"dscr;":                     '\U0001D4B9',
--	"dscy;":                     '\U00000455',
--	"dsol;":                     '\U000029F6',
--	"dstrok;":                   '\U00000111',
--	"dtdot;":                    '\U000022F1',
--	"dtri;":                     '\U000025BF',
--	"dtrif;":                    '\U000025BE',
--	"duarr;":                    '\U000021F5',
--	"duhar;":                    '\U0000296F',
--	"dwangle;":                  '\U000029A6',
--	"dzcy;":                     '\U0000045F',
--	"dzigrarr;":                 '\U000027FF',
--	"eDDot;":                    '\U00002A77',
--	"eDot;":                     '\U00002251',
--	"eacute;":                   '\U000000E9',
--	"easter;":                   '\U00002A6E',
--	"ecaron;":                   '\U0000011B',
--	"ecir;":                     '\U00002256',
--	"ecirc;":                    '\U000000EA',
--	"ecolon;":                   '\U00002255',
--	"ecy;":                      '\U0000044D',
--	"edot;":                     '\U00000117',
--	"ee;":                       '\U00002147',
--	"efDot;":                    '\U00002252',
--	"efr;":                      '\U0001D522',
--	"eg;":                       '\U00002A9A',
--	"egrave;":                   '\U000000E8',
--	"egs;":                      '\U00002A96',
--	"egsdot;":                   '\U00002A98',
--	"el;":                       '\U00002A99',
--	"elinters;":                 '\U000023E7',
--	"ell;":                      '\U00002113',
--	"els;":                      '\U00002A95',
--	"elsdot;":                   '\U00002A97',
--	"emacr;":                    '\U00000113',
--	"empty;":                    '\U00002205',
--	"emptyset;":                 '\U00002205',
--	"emptyv;":                   '\U00002205',
--	"emsp;":                     '\U00002003',
--	"emsp13;":                   '\U00002004',
--	"emsp14;":                   '\U00002005',
--	"eng;":                      '\U0000014B',
--	"ensp;":                     '\U00002002',
--	"eogon;":                    '\U00000119',
--	"eopf;":                     '\U0001D556',
--	"epar;":                     '\U000022D5',
--	"eparsl;":                   '\U000029E3',
--	"eplus;":                    '\U00002A71',
--	"epsi;":                     '\U000003B5',
--	"epsilon;":                  '\U000003B5',
--	"epsiv;":                    '\U000003F5',
--	"eqcirc;":                   '\U00002256',
--	"eqcolon;":                  '\U00002255',
--	"eqsim;":                    '\U00002242',
--	"eqslantgtr;":               '\U00002A96',
--	"eqslantless;":              '\U00002A95',
--	"equals;":                   '\U0000003D',
--	"equest;":                   '\U0000225F',
--	"equiv;":                    '\U00002261',
--	"equivDD;":                  '\U00002A78',
--	"eqvparsl;":                 '\U000029E5',
--	"erDot;":                    '\U00002253',
--	"erarr;":                    '\U00002971',
--	"escr;":                     '\U0000212F',
--	"esdot;":                    '\U00002250',
--	"esim;":                     '\U00002242',
--	"eta;":                      '\U000003B7',
--	"eth;":                      '\U000000F0',
--	"euml;":                     '\U000000EB',
--	"euro;":                     '\U000020AC',
--	"excl;":                     '\U00000021',
--	"exist;":                    '\U00002203',
--	"expectation;":              '\U00002130',
--	"exponentiale;":             '\U00002147',
--	"fallingdotseq;":            '\U00002252',
--	"fcy;":                      '\U00000444',
--	"female;":                   '\U00002640',
--	"ffilig;":                   '\U0000FB03',
--	"fflig;":                    '\U0000FB00',
--	"ffllig;":                   '\U0000FB04',
--	"ffr;":                      '\U0001D523',
--	"filig;":                    '\U0000FB01',
--	"flat;":                     '\U0000266D',
--	"fllig;":                    '\U0000FB02',
--	"fltns;":                    '\U000025B1',
--	"fnof;":                     '\U00000192',
--	"fopf;":                     '\U0001D557',
--	"forall;":                   '\U00002200',
--	"fork;":                     '\U000022D4',
--	"forkv;":                    '\U00002AD9',
--	"fpartint;":                 '\U00002A0D',
--	"frac12;":                   '\U000000BD',
--	"frac13;":                   '\U00002153',
--	"frac14;":                   '\U000000BC',
--	"frac15;":                   '\U00002155',
--	"frac16;":                   '\U00002159',
--	"frac18;":                   '\U0000215B',
--	"frac23;":                   '\U00002154',
--	"frac25;":                   '\U00002156',
--	"frac34;":                   '\U000000BE',
--	"frac35;":                   '\U00002157',
--	"frac38;":                   '\U0000215C',
--	"frac45;":                   '\U00002158',
--	"frac56;":                   '\U0000215A',
--	"frac58;":                   '\U0000215D',
--	"frac78;":                   '\U0000215E',
--	"frasl;":                    '\U00002044',
--	"frown;":                    '\U00002322',
--	"fscr;":                     '\U0001D4BB',
--	"gE;":                       '\U00002267',
--	"gEl;":                      '\U00002A8C',
--	"gacute;":                   '\U000001F5',
--	"gamma;":                    '\U000003B3',
--	"gammad;":                   '\U000003DD',
--	"gap;":                      '\U00002A86',
--	"gbreve;":                   '\U0000011F',
--	"gcirc;":                    '\U0000011D',
--	"gcy;":                      '\U00000433',
--	"gdot;":                     '\U00000121',
--	"ge;":                       '\U00002265',
--	"gel;":                      '\U000022DB',
--	"geq;":                      '\U00002265',
--	"geqq;":                     '\U00002267',
--	"geqslant;":                 '\U00002A7E',
--	"ges;":                      '\U00002A7E',
--	"gescc;":                    '\U00002AA9',
--	"gesdot;":                   '\U00002A80',
--	"gesdoto;":                  '\U00002A82',
--	"gesdotol;":                 '\U00002A84',
--	"gesles;":                   '\U00002A94',
--	"gfr;":                      '\U0001D524',
--	"gg;":                       '\U0000226B',
--	"ggg;":                      '\U000022D9',
--	"gimel;":                    '\U00002137',
--	"gjcy;":                     '\U00000453',
--	"gl;":                       '\U00002277',
--	"glE;":                      '\U00002A92',
--	"gla;":                      '\U00002AA5',
--	"glj;":                      '\U00002AA4',
--	"gnE;":                      '\U00002269',
--	"gnap;":                     '\U00002A8A',
--	"gnapprox;":                 '\U00002A8A',
--	"gne;":                      '\U00002A88',
--	"gneq;":                     '\U00002A88',
--	"gneqq;":                    '\U00002269',
--	"gnsim;":                    '\U000022E7',
--	"gopf;":                     '\U0001D558',
--	"grave;":                    '\U00000060',
--	"gscr;":                     '\U0000210A',
--	"gsim;":                     '\U00002273',
--	"gsime;":                    '\U00002A8E',
--	"gsiml;":                    '\U00002A90',
--	"gt;":                       '\U0000003E',
--	"gtcc;":                     '\U00002AA7',
--	"gtcir;":                    '\U00002A7A',
--	"gtdot;":                    '\U000022D7',
--	"gtlPar;":                   '\U00002995',
--	"gtquest;":                  '\U00002A7C',
--	"gtrapprox;":                '\U00002A86',
--	"gtrarr;":                   '\U00002978',
--	"gtrdot;":                   '\U000022D7',
--	"gtreqless;":                '\U000022DB',
--	"gtreqqless;":               '\U00002A8C',
--	"gtrless;":                  '\U00002277',
--	"gtrsim;":                   '\U00002273',
--	"hArr;":                     '\U000021D4',
--	"hairsp;":                   '\U0000200A',
--	"half;":                     '\U000000BD',
--	"hamilt;":                   '\U0000210B',
--	"hardcy;":                   '\U0000044A',
--	"harr;":                     '\U00002194',
--	"harrcir;":                  '\U00002948',
--	"harrw;":                    '\U000021AD',
--	"hbar;":                     '\U0000210F',
--	"hcirc;":                    '\U00000125',
--	"hearts;":                   '\U00002665',
--	"heartsuit;":                '\U00002665',
--	"hellip;":                   '\U00002026',
--	"hercon;":                   '\U000022B9',
--	"hfr;":                      '\U0001D525',
--	"hksearow;":                 '\U00002925',
--	"hkswarow;":                 '\U00002926',
--	"hoarr;":                    '\U000021FF',
--	"homtht;":                   '\U0000223B',
--	"hookleftarrow;":            '\U000021A9',
--	"hookrightarrow;":           '\U000021AA',
--	"hopf;":                     '\U0001D559',
--	"horbar;":                   '\U00002015',
--	"hscr;":                     '\U0001D4BD',
--	"hslash;":                   '\U0000210F',
--	"hstrok;":                   '\U00000127',
--	"hybull;":                   '\U00002043',
--	"hyphen;":                   '\U00002010',
--	"iacute;":                   '\U000000ED',
--	"ic;":                       '\U00002063',
--	"icirc;":                    '\U000000EE',
--	"icy;":                      '\U00000438',
--	"iecy;":                     '\U00000435',
--	"iexcl;":                    '\U000000A1',
--	"iff;":                      '\U000021D4',
--	"ifr;":                      '\U0001D526',
--	"igrave;":                   '\U000000EC',
--	"ii;":                       '\U00002148',
--	"iiiint;":                   '\U00002A0C',
--	"iiint;":                    '\U0000222D',
--	"iinfin;":                   '\U000029DC',
--	"iiota;":                    '\U00002129',
--	"ijlig;":                    '\U00000133',
--	"imacr;":                    '\U0000012B',
--	"image;":                    '\U00002111',
--	"imagline;":                 '\U00002110',
--	"imagpart;":                 '\U00002111',
--	"imath;":                    '\U00000131',
--	"imof;":                     '\U000022B7',
--	"imped;":                    '\U000001B5',
--	"in;":                       '\U00002208',
--	"incare;":                   '\U00002105',
--	"infin;":                    '\U0000221E',
--	"infintie;":                 '\U000029DD',
--	"inodot;":                   '\U00000131',
--	"int;":                      '\U0000222B',
--	"intcal;":                   '\U000022BA',
--	"integers;":                 '\U00002124',
--	"intercal;":                 '\U000022BA',
--	"intlarhk;":                 '\U00002A17',
--	"intprod;":                  '\U00002A3C',
--	"iocy;":                     '\U00000451',
--	"iogon;":                    '\U0000012F',
--	"iopf;":                     '\U0001D55A',
--	"iota;":                     '\U000003B9',
--	"iprod;":                    '\U00002A3C',
--	"iquest;":                   '\U000000BF',
--	"iscr;":                     '\U0001D4BE',
--	"isin;":                     '\U00002208',
--	"isinE;":                    '\U000022F9',
--	"isindot;":                  '\U000022F5',
--	"isins;":                    '\U000022F4',
--	"isinsv;":                   '\U000022F3',
--	"isinv;":                    '\U00002208',
--	"it;":                       '\U00002062',
--	"itilde;":                   '\U00000129',
--	"iukcy;":                    '\U00000456',
--	"iuml;":                     '\U000000EF',
--	"jcirc;":                    '\U00000135',
--	"jcy;":                      '\U00000439',
--	"jfr;":                      '\U0001D527',
--	"jmath;":                    '\U00000237',
--	"jopf;":                     '\U0001D55B',
--	"jscr;":                     '\U0001D4BF',
--	"jsercy;":                   '\U00000458',
--	"jukcy;":                    '\U00000454',
--	"kappa;":                    '\U000003BA',
--	"kappav;":                   '\U000003F0',
--	"kcedil;":                   '\U00000137',
--	"kcy;":                      '\U0000043A',
--	"kfr;":                      '\U0001D528',
--	"kgreen;":                   '\U00000138',
--	"khcy;":                     '\U00000445',
--	"kjcy;":                     '\U0000045C',
--	"kopf;":                     '\U0001D55C',
--	"kscr;":                     '\U0001D4C0',
--	"lAarr;":                    '\U000021DA',
--	"lArr;":                     '\U000021D0',
--	"lAtail;":                   '\U0000291B',
--	"lBarr;":                    '\U0000290E',
--	"lE;":                       '\U00002266',
--	"lEg;":                      '\U00002A8B',
--	"lHar;":                     '\U00002962',
--	"lacute;":                   '\U0000013A',
--	"laemptyv;":                 '\U000029B4',
--	"lagran;":                   '\U00002112',
--	"lambda;":                   '\U000003BB',
--	"lang;":                     '\U000027E8',
--	"langd;":                    '\U00002991',
--	"langle;":                   '\U000027E8',
--	"lap;":                      '\U00002A85',
--	"laquo;":                    '\U000000AB',
--	"larr;":                     '\U00002190',
--	"larrb;":                    '\U000021E4',
--	"larrbfs;":                  '\U0000291F',
--	"larrfs;":                   '\U0000291D',
--	"larrhk;":                   '\U000021A9',
--	"larrlp;":                   '\U000021AB',
--	"larrpl;":                   '\U00002939',
--	"larrsim;":                  '\U00002973',
--	"larrtl;":                   '\U000021A2',
--	"lat;":                      '\U00002AAB',
--	"latail;":                   '\U00002919',
--	"late;":                     '\U00002AAD',
--	"lbarr;":                    '\U0000290C',
--	"lbbrk;":                    '\U00002772',
--	"lbrace;":                   '\U0000007B',
--	"lbrack;":                   '\U0000005B',
--	"lbrke;":                    '\U0000298B',
--	"lbrksld;":                  '\U0000298F',
--	"lbrkslu;":                  '\U0000298D',
--	"lcaron;":                   '\U0000013E',
--	"lcedil;":                   '\U0000013C',
--	"lceil;":                    '\U00002308',
--	"lcub;":                     '\U0000007B',
--	"lcy;":                      '\U0000043B',
--	"ldca;":                     '\U00002936',
--	"ldquo;":                    '\U0000201C',
--	"ldquor;":                   '\U0000201E',
--	"ldrdhar;":                  '\U00002967',
--	"ldrushar;":                 '\U0000294B',
--	"ldsh;":                     '\U000021B2',
--	"le;":                       '\U00002264',
--	"leftarrow;":                '\U00002190',
--	"leftarrowtail;":            '\U000021A2',
--	"leftharpoondown;":          '\U000021BD',
--	"leftharpoonup;":            '\U000021BC',
--	"leftleftarrows;":           '\U000021C7',
--	"leftrightarrow;":           '\U00002194',
--	"leftrightarrows;":          '\U000021C6',
--	"leftrightharpoons;":        '\U000021CB',
--	"leftrightsquigarrow;":      '\U000021AD',
--	"leftthreetimes;":           '\U000022CB',
--	"leg;":                      '\U000022DA',
--	"leq;":                      '\U00002264',
--	"leqq;":                     '\U00002266',
--	"leqslant;":                 '\U00002A7D',
--	"les;":                      '\U00002A7D',
--	"lescc;":                    '\U00002AA8',
--	"lesdot;":                   '\U00002A7F',
--	"lesdoto;":                  '\U00002A81',
--	"lesdotor;":                 '\U00002A83',
--	"lesges;":                   '\U00002A93',
--	"lessapprox;":               '\U00002A85',
--	"lessdot;":                  '\U000022D6',
--	"lesseqgtr;":                '\U000022DA',
--	"lesseqqgtr;":               '\U00002A8B',
--	"lessgtr;":                  '\U00002276',
--	"lesssim;":                  '\U00002272',
--	"lfisht;":                   '\U0000297C',
--	"lfloor;":                   '\U0000230A',
--	"lfr;":                      '\U0001D529',
--	"lg;":                       '\U00002276',
--	"lgE;":                      '\U00002A91',
--	"lhard;":                    '\U000021BD',
--	"lharu;":                    '\U000021BC',
--	"lharul;":                   '\U0000296A',
--	"lhblk;":                    '\U00002584',
--	"ljcy;":                     '\U00000459',
--	"ll;":                       '\U0000226A',
--	"llarr;":                    '\U000021C7',
--	"llcorner;":                 '\U0000231E',
--	"llhard;":                   '\U0000296B',
--	"lltri;":                    '\U000025FA',
--	"lmidot;":                   '\U00000140',
--	"lmoust;":                   '\U000023B0',
--	"lmoustache;":               '\U000023B0',
--	"lnE;":                      '\U00002268',
--	"lnap;":                     '\U00002A89',
--	"lnapprox;":                 '\U00002A89',
--	"lne;":                      '\U00002A87',
--	"lneq;":                     '\U00002A87',
--	"lneqq;":                    '\U00002268',
--	"lnsim;":                    '\U000022E6',
--	"loang;":                    '\U000027EC',
--	"loarr;":                    '\U000021FD',
--	"lobrk;":                    '\U000027E6',
--	"longleftarrow;":            '\U000027F5',
--	"longleftrightarrow;":       '\U000027F7',
--	"longmapsto;":               '\U000027FC',
--	"longrightarrow;":           '\U000027F6',
--	"looparrowleft;":            '\U000021AB',
--	"looparrowright;":           '\U000021AC',
--	"lopar;":                    '\U00002985',
--	"lopf;":                     '\U0001D55D',
--	"loplus;":                   '\U00002A2D',
--	"lotimes;":                  '\U00002A34',
--	"lowast;":                   '\U00002217',
--	"lowbar;":                   '\U0000005F',
--	"loz;":                      '\U000025CA',
--	"lozenge;":                  '\U000025CA',
--	"lozf;":                     '\U000029EB',
--	"lpar;":                     '\U00000028',
--	"lparlt;":                   '\U00002993',
--	"lrarr;":                    '\U000021C6',
--	"lrcorner;":                 '\U0000231F',
--	"lrhar;":                    '\U000021CB',
--	"lrhard;":                   '\U0000296D',
--	"lrm;":                      '\U0000200E',
--	"lrtri;":                    '\U000022BF',
--	"lsaquo;":                   '\U00002039',
--	"lscr;":                     '\U0001D4C1',
--	"lsh;":                      '\U000021B0',
--	"lsim;":                     '\U00002272',
--	"lsime;":                    '\U00002A8D',
--	"lsimg;":                    '\U00002A8F',
--	"lsqb;":                     '\U0000005B',
--	"lsquo;":                    '\U00002018',
--	"lsquor;":                   '\U0000201A',
--	"lstrok;":                   '\U00000142',
--	"lt;":                       '\U0000003C',
--	"ltcc;":                     '\U00002AA6',
--	"ltcir;":                    '\U00002A79',
--	"ltdot;":                    '\U000022D6',
--	"lthree;":                   '\U000022CB',
--	"ltimes;":                   '\U000022C9',
--	"ltlarr;":                   '\U00002976',
--	"ltquest;":                  '\U00002A7B',
--	"ltrPar;":                   '\U00002996',
--	"ltri;":                     '\U000025C3',
--	"ltrie;":                    '\U000022B4',
--	"ltrif;":                    '\U000025C2',
--	"lurdshar;":                 '\U0000294A',
--	"luruhar;":                  '\U00002966',
--	"mDDot;":                    '\U0000223A',
--	"macr;":                     '\U000000AF',
--	"male;":                     '\U00002642',
--	"malt;":                     '\U00002720',
--	"maltese;":                  '\U00002720',
--	"map;":                      '\U000021A6',
--	"mapsto;":                   '\U000021A6',
--	"mapstodown;":               '\U000021A7',
--	"mapstoleft;":               '\U000021A4',
--	"mapstoup;":                 '\U000021A5',
--	"marker;":                   '\U000025AE',
--	"mcomma;":                   '\U00002A29',
--	"mcy;":                      '\U0000043C',
--	"mdash;":                    '\U00002014',
--	"measuredangle;":            '\U00002221',
--	"mfr;":                      '\U0001D52A',
--	"mho;":                      '\U00002127',
--	"micro;":                    '\U000000B5',
--	"mid;":                      '\U00002223',
--	"midast;":                   '\U0000002A',
--	"midcir;":                   '\U00002AF0',
--	"middot;":                   '\U000000B7',
--	"minus;":                    '\U00002212',
--	"minusb;":                   '\U0000229F',
--	"minusd;":                   '\U00002238',
--	"minusdu;":                  '\U00002A2A',
--	"mlcp;":                     '\U00002ADB',
--	"mldr;":                     '\U00002026',
--	"mnplus;":                   '\U00002213',
--	"models;":                   '\U000022A7',
--	"mopf;":                     '\U0001D55E',
--	"mp;":                       '\U00002213',
--	"mscr;":                     '\U0001D4C2',
--	"mstpos;":                   '\U0000223E',
--	"mu;":                       '\U000003BC',
--	"multimap;":                 '\U000022B8',
--	"mumap;":                    '\U000022B8',
--	"nLeftarrow;":               '\U000021CD',
--	"nLeftrightarrow;":          '\U000021CE',
--	"nRightarrow;":              '\U000021CF',
--	"nVDash;":                   '\U000022AF',
--	"nVdash;":                   '\U000022AE',
--	"nabla;":                    '\U00002207',
--	"nacute;":                   '\U00000144',
--	"nap;":                      '\U00002249',
--	"napos;":                    '\U00000149',
--	"napprox;":                  '\U00002249',
--	"natur;":                    '\U0000266E',
--	"natural;":                  '\U0000266E',
--	"naturals;":                 '\U00002115',
--	"nbsp;":                     '\U000000A0',
--	"ncap;":                     '\U00002A43',
--	"ncaron;":                   '\U00000148',
--	"ncedil;":                   '\U00000146',
--	"ncong;":                    '\U00002247',
--	"ncup;":                     '\U00002A42',
--	"ncy;":                      '\U0000043D',
--	"ndash;":                    '\U00002013',
--	"ne;":                       '\U00002260',
--	"neArr;":                    '\U000021D7',
--	"nearhk;":                   '\U00002924',
--	"nearr;":                    '\U00002197',
--	"nearrow;":                  '\U00002197',
--	"nequiv;":                   '\U00002262',
--	"nesear;":                   '\U00002928',
--	"nexist;":                   '\U00002204',
--	"nexists;":                  '\U00002204',
--	"nfr;":                      '\U0001D52B',
--	"nge;":                      '\U00002271',
--	"ngeq;":                     '\U00002271',
--	"ngsim;":                    '\U00002275',
--	"ngt;":                      '\U0000226F',
--	"ngtr;":                     '\U0000226F',
--	"nhArr;":                    '\U000021CE',
--	"nharr;":                    '\U000021AE',
--	"nhpar;":                    '\U00002AF2',
--	"ni;":                       '\U0000220B',
--	"nis;":                      '\U000022FC',
--	"nisd;":                     '\U000022FA',
--	"niv;":                      '\U0000220B',
--	"njcy;":                     '\U0000045A',
--	"nlArr;":                    '\U000021CD',
--	"nlarr;":                    '\U0000219A',
--	"nldr;":                     '\U00002025',
--	"nle;":                      '\U00002270',
--	"nleftarrow;":               '\U0000219A',
--	"nleftrightarrow;":          '\U000021AE',
--	"nleq;":                     '\U00002270',
--	"nless;":                    '\U0000226E',
--	"nlsim;":                    '\U00002274',
--	"nlt;":                      '\U0000226E',
--	"nltri;":                    '\U000022EA',
--	"nltrie;":                   '\U000022EC',
--	"nmid;":                     '\U00002224',
--	"nopf;":                     '\U0001D55F',
--	"not;":                      '\U000000AC',
--	"notin;":                    '\U00002209',
--	"notinva;":                  '\U00002209',
--	"notinvb;":                  '\U000022F7',
--	"notinvc;":                  '\U000022F6',
--	"notni;":                    '\U0000220C',
--	"notniva;":                  '\U0000220C',
--	"notnivb;":                  '\U000022FE',
--	"notnivc;":                  '\U000022FD',
--	"npar;":                     '\U00002226',
--	"nparallel;":                '\U00002226',
--	"npolint;":                  '\U00002A14',
--	"npr;":                      '\U00002280',
--	"nprcue;":                   '\U000022E0',
--	"nprec;":                    '\U00002280',
--	"nrArr;":                    '\U000021CF',
--	"nrarr;":                    '\U0000219B',
--	"nrightarrow;":              '\U0000219B',
--	"nrtri;":                    '\U000022EB',
--	"nrtrie;":                   '\U000022ED',
--	"nsc;":                      '\U00002281',
--	"nsccue;":                   '\U000022E1',
--	"nscr;":                     '\U0001D4C3',
--	"nshortmid;":                '\U00002224',
--	"nshortparallel;":           '\U00002226',
--	"nsim;":                     '\U00002241',
--	"nsime;":                    '\U00002244',
--	"nsimeq;":                   '\U00002244',
--	"nsmid;":                    '\U00002224',
--	"nspar;":                    '\U00002226',
--	"nsqsube;":                  '\U000022E2',
--	"nsqsupe;":                  '\U000022E3',
--	"nsub;":                     '\U00002284',
--	"nsube;":                    '\U00002288',
--	"nsubseteq;":                '\U00002288',
--	"nsucc;":                    '\U00002281',
--	"nsup;":                     '\U00002285',
--	"nsupe;":                    '\U00002289',
--	"nsupseteq;":                '\U00002289',
--	"ntgl;":                     '\U00002279',
--	"ntilde;":                   '\U000000F1',
--	"ntlg;":                     '\U00002278',
--	"ntriangleleft;":            '\U000022EA',
--	"ntrianglelefteq;":          '\U000022EC',
--	"ntriangleright;":           '\U000022EB',
--	"ntrianglerighteq;":         '\U000022ED',
--	"nu;":                       '\U000003BD',
--	"num;":                      '\U00000023',
--	"numero;":                   '\U00002116',
--	"numsp;":                    '\U00002007',
--	"nvDash;":                   '\U000022AD',
--	"nvHarr;":                   '\U00002904',
--	"nvdash;":                   '\U000022AC',
--	"nvinfin;":                  '\U000029DE',
--	"nvlArr;":                   '\U00002902',
--	"nvrArr;":                   '\U00002903',
--	"nwArr;":                    '\U000021D6',
--	"nwarhk;":                   '\U00002923',
--	"nwarr;":                    '\U00002196',
--	"nwarrow;":                  '\U00002196',
--	"nwnear;":                   '\U00002927',
--	"oS;":                       '\U000024C8',
--	"oacute;":                   '\U000000F3',
--	"oast;":                     '\U0000229B',
--	"ocir;":                     '\U0000229A',
--	"ocirc;":                    '\U000000F4',
--	"ocy;":                      '\U0000043E',
--	"odash;":                    '\U0000229D',
--	"odblac;":                   '\U00000151',
--	"odiv;":                     '\U00002A38',
--	"odot;":                     '\U00002299',
--	"odsold;":                   '\U000029BC',
--	"oelig;":                    '\U00000153',
--	"ofcir;":                    '\U000029BF',
--	"ofr;":                      '\U0001D52C',
--	"ogon;":                     '\U000002DB',
--	"ograve;":                   '\U000000F2',
--	"ogt;":                      '\U000029C1',
--	"ohbar;":                    '\U000029B5',
--	"ohm;":                      '\U000003A9',
--	"oint;":                     '\U0000222E',
--	"olarr;":                    '\U000021BA',
--	"olcir;":                    '\U000029BE',
--	"olcross;":                  '\U000029BB',
--	"oline;":                    '\U0000203E',
--	"olt;":                      '\U000029C0',
--	"omacr;":                    '\U0000014D',
--	"omega;":                    '\U000003C9',
--	"omicron;":                  '\U000003BF',
--	"omid;":                     '\U000029B6',
--	"ominus;":                   '\U00002296',
--	"oopf;":                     '\U0001D560',
--	"opar;":                     '\U000029B7',
--	"operp;":                    '\U000029B9',
--	"oplus;":                    '\U00002295',
--	"or;":                       '\U00002228',
--	"orarr;":                    '\U000021BB',
--	"ord;":                      '\U00002A5D',
--	"order;":                    '\U00002134',
--	"orderof;":                  '\U00002134',
--	"ordf;":                     '\U000000AA',
--	"ordm;":                     '\U000000BA',
--	"origof;":                   '\U000022B6',
--	"oror;":                     '\U00002A56',
--	"orslope;":                  '\U00002A57',
--	"orv;":                      '\U00002A5B',
--	"oscr;":                     '\U00002134',
--	"oslash;":                   '\U000000F8',
--	"osol;":                     '\U00002298',
--	"otilde;":                   '\U000000F5',
--	"otimes;":                   '\U00002297',
--	"otimesas;":                 '\U00002A36',
--	"ouml;":                     '\U000000F6',
--	"ovbar;":                    '\U0000233D',
--	"par;":                      '\U00002225',
--	"para;":                     '\U000000B6',
--	"parallel;":                 '\U00002225',
--	"parsim;":                   '\U00002AF3',
--	"parsl;":                    '\U00002AFD',
--	"part;":                     '\U00002202',
--	"pcy;":                      '\U0000043F',
--	"percnt;":                   '\U00000025',
--	"period;":                   '\U0000002E',
--	"permil;":                   '\U00002030',
--	"perp;":                     '\U000022A5',
--	"pertenk;":                  '\U00002031',
--	"pfr;":                      '\U0001D52D',
--	"phi;":                      '\U000003C6',
--	"phiv;":                     '\U000003D5',
--	"phmmat;":                   '\U00002133',
--	"phone;":                    '\U0000260E',
--	"pi;":                       '\U000003C0',
--	"pitchfork;":                '\U000022D4',
--	"piv;":                      '\U000003D6',
--	"planck;":                   '\U0000210F',
--	"planckh;":                  '\U0000210E',
--	"plankv;":                   '\U0000210F',
--	"plus;":                     '\U0000002B',
--	"plusacir;":                 '\U00002A23',
--	"plusb;":                    '\U0000229E',
--	"pluscir;":                  '\U00002A22',
--	"plusdo;":                   '\U00002214',
--	"plusdu;":                   '\U00002A25',
--	"pluse;":                    '\U00002A72',
--	"plusmn;":                   '\U000000B1',
--	"plussim;":                  '\U00002A26',
--	"plustwo;":                  '\U00002A27',
--	"pm;":                       '\U000000B1',
--	"pointint;":                 '\U00002A15',
--	"popf;":                     '\U0001D561',
--	"pound;":                    '\U000000A3',
--	"pr;":                       '\U0000227A',
--	"prE;":                      '\U00002AB3',
--	"prap;":                     '\U00002AB7',
--	"prcue;":                    '\U0000227C',
--	"pre;":                      '\U00002AAF',
--	"prec;":                     '\U0000227A',
--	"precapprox;":               '\U00002AB7',
--	"preccurlyeq;":              '\U0000227C',
--	"preceq;":                   '\U00002AAF',
--	"precnapprox;":              '\U00002AB9',
--	"precneqq;":                 '\U00002AB5',
--	"precnsim;":                 '\U000022E8',
--	"precsim;":                  '\U0000227E',
--	"prime;":                    '\U00002032',
--	"primes;":                   '\U00002119',
--	"prnE;":                     '\U00002AB5',
--	"prnap;":                    '\U00002AB9',
--	"prnsim;":                   '\U000022E8',
--	"prod;":                     '\U0000220F',
--	"profalar;":                 '\U0000232E',
--	"profline;":                 '\U00002312',
--	"profsurf;":                 '\U00002313',
--	"prop;":                     '\U0000221D',
--	"propto;":                   '\U0000221D',
--	"prsim;":                    '\U0000227E',
--	"prurel;":                   '\U000022B0',
--	"pscr;":                     '\U0001D4C5',
--	"psi;":                      '\U000003C8',
--	"puncsp;":                   '\U00002008',
--	"qfr;":                      '\U0001D52E',
--	"qint;":                     '\U00002A0C',
--	"qopf;":                     '\U0001D562',
--	"qprime;":                   '\U00002057',
--	"qscr;":                     '\U0001D4C6',
--	"quaternions;":              '\U0000210D',
--	"quatint;":                  '\U00002A16',
--	"quest;":                    '\U0000003F',
--	"questeq;":                  '\U0000225F',
--	"quot;":                     '\U00000022',
--	"rAarr;":                    '\U000021DB',
--	"rArr;":                     '\U000021D2',
--	"rAtail;":                   '\U0000291C',
--	"rBarr;":                    '\U0000290F',
--	"rHar;":                     '\U00002964',
--	"racute;":                   '\U00000155',
--	"radic;":                    '\U0000221A',
--	"raemptyv;":                 '\U000029B3',
--	"rang;":                     '\U000027E9',
--	"rangd;":                    '\U00002992',
--	"range;":                    '\U000029A5',
--	"rangle;":                   '\U000027E9',
--	"raquo;":                    '\U000000BB',
--	"rarr;":                     '\U00002192',
--	"rarrap;":                   '\U00002975',
--	"rarrb;":                    '\U000021E5',
--	"rarrbfs;":                  '\U00002920',
--	"rarrc;":                    '\U00002933',
--	"rarrfs;":                   '\U0000291E',
--	"rarrhk;":                   '\U000021AA',
--	"rarrlp;":                   '\U000021AC',
--	"rarrpl;":                   '\U00002945',
--	"rarrsim;":                  '\U00002974',
--	"rarrtl;":                   '\U000021A3',
--	"rarrw;":                    '\U0000219D',
--	"ratail;":                   '\U0000291A',
--	"ratio;":                    '\U00002236',
--	"rationals;":                '\U0000211A',
--	"rbarr;":                    '\U0000290D',
--	"rbbrk;":                    '\U00002773',
--	"rbrace;":                   '\U0000007D',
--	"rbrack;":                   '\U0000005D',
--	"rbrke;":                    '\U0000298C',
--	"rbrksld;":                  '\U0000298E',
--	"rbrkslu;":                  '\U00002990',
--	"rcaron;":                   '\U00000159',
--	"rcedil;":                   '\U00000157',
--	"rceil;":                    '\U00002309',
--	"rcub;":                     '\U0000007D',
--	"rcy;":                      '\U00000440',
--	"rdca;":                     '\U00002937',
--	"rdldhar;":                  '\U00002969',
--	"rdquo;":                    '\U0000201D',
--	"rdquor;":                   '\U0000201D',
--	"rdsh;":                     '\U000021B3',
--	"real;":                     '\U0000211C',
--	"realine;":                  '\U0000211B',
--	"realpart;":                 '\U0000211C',
--	"reals;":                    '\U0000211D',
--	"rect;":                     '\U000025AD',
--	"reg;":                      '\U000000AE',
--	"rfisht;":                   '\U0000297D',
--	"rfloor;":                   '\U0000230B',
--	"rfr;":                      '\U0001D52F',
--	"rhard;":                    '\U000021C1',
--	"rharu;":                    '\U000021C0',
--	"rharul;":                   '\U0000296C',
--	"rho;":                      '\U000003C1',
--	"rhov;":                     '\U000003F1',
--	"rightarrow;":               '\U00002192',
--	"rightarrowtail;":           '\U000021A3',
--	"rightharpoondown;":         '\U000021C1',
--	"rightharpoonup;":           '\U000021C0',
--	"rightleftarrows;":          '\U000021C4',
--	"rightleftharpoons;":        '\U000021CC',
--	"rightrightarrows;":         '\U000021C9',
--	"rightsquigarrow;":          '\U0000219D',
--	"rightthreetimes;":          '\U000022CC',
--	"ring;":                     '\U000002DA',
--	"risingdotseq;":             '\U00002253',
--	"rlarr;":                    '\U000021C4',
--	"rlhar;":                    '\U000021CC',
--	"rlm;":                      '\U0000200F',
--	"rmoust;":                   '\U000023B1',
--	"rmoustache;":               '\U000023B1',
--	"rnmid;":                    '\U00002AEE',
--	"roang;":                    '\U000027ED',
--	"roarr;":                    '\U000021FE',
--	"robrk;":                    '\U000027E7',
--	"ropar;":                    '\U00002986',
--	"ropf;":                     '\U0001D563',
--	"roplus;":                   '\U00002A2E',
--	"rotimes;":                  '\U00002A35',
--	"rpar;":                     '\U00000029',
--	"rpargt;":                   '\U00002994',
--	"rppolint;":                 '\U00002A12',
--	"rrarr;":                    '\U000021C9',
--	"rsaquo;":                   '\U0000203A',
--	"rscr;":                     '\U0001D4C7',
--	"rsh;":                      '\U000021B1',
--	"rsqb;":                     '\U0000005D',
--	"rsquo;":                    '\U00002019',
--	"rsquor;":                   '\U00002019',
--	"rthree;":                   '\U000022CC',
--	"rtimes;":                   '\U000022CA',
--	"rtri;":                     '\U000025B9',
--	"rtrie;":                    '\U000022B5',
--	"rtrif;":                    '\U000025B8',
--	"rtriltri;":                 '\U000029CE',
--	"ruluhar;":                  '\U00002968',
--	"rx;":                       '\U0000211E',
--	"sacute;":                   '\U0000015B',
--	"sbquo;":                    '\U0000201A',
--	"sc;":                       '\U0000227B',
--	"scE;":                      '\U00002AB4',
--	"scap;":                     '\U00002AB8',
--	"scaron;":                   '\U00000161',
--	"sccue;":                    '\U0000227D',
--	"sce;":                      '\U00002AB0',
--	"scedil;":                   '\U0000015F',
--	"scirc;":                    '\U0000015D',
--	"scnE;":                     '\U00002AB6',
--	"scnap;":                    '\U00002ABA',
--	"scnsim;":                   '\U000022E9',
--	"scpolint;":                 '\U00002A13',
--	"scsim;":                    '\U0000227F',
--	"scy;":                      '\U00000441',
--	"sdot;":                     '\U000022C5',
--	"sdotb;":                    '\U000022A1',
--	"sdote;":                    '\U00002A66',
--	"seArr;":                    '\U000021D8',
--	"searhk;":                   '\U00002925',
--	"searr;":                    '\U00002198',
--	"searrow;":                  '\U00002198',
--	"sect;":                     '\U000000A7',
--	"semi;":                     '\U0000003B',
--	"seswar;":                   '\U00002929',
--	"setminus;":                 '\U00002216',
--	"setmn;":                    '\U00002216',
--	"sext;":                     '\U00002736',
--	"sfr;":                      '\U0001D530',
--	"sfrown;":                   '\U00002322',
--	"sharp;":                    '\U0000266F',
--	"shchcy;":                   '\U00000449',
--	"shcy;":                     '\U00000448',
--	"shortmid;":                 '\U00002223',
--	"shortparallel;":            '\U00002225',
--	"shy;":                      '\U000000AD',
--	"sigma;":                    '\U000003C3',
--	"sigmaf;":                   '\U000003C2',
--	"sigmav;":                   '\U000003C2',
--	"sim;":                      '\U0000223C',
--	"simdot;":                   '\U00002A6A',
--	"sime;":                     '\U00002243',
--	"simeq;":                    '\U00002243',
--	"simg;":                     '\U00002A9E',
--	"simgE;":                    '\U00002AA0',
--	"siml;":                     '\U00002A9D',
--	"simlE;":                    '\U00002A9F',
--	"simne;":                    '\U00002246',
--	"simplus;":                  '\U00002A24',
--	"simrarr;":                  '\U00002972',
--	"slarr;":                    '\U00002190',
--	"smallsetminus;":            '\U00002216',
--	"smashp;":                   '\U00002A33',
--	"smeparsl;":                 '\U000029E4',
--	"smid;":                     '\U00002223',
--	"smile;":                    '\U00002323',
--	"smt;":                      '\U00002AAA',
--	"smte;":                     '\U00002AAC',
--	"softcy;":                   '\U0000044C',
--	"sol;":                      '\U0000002F',
--	"solb;":                     '\U000029C4',
--	"solbar;":                   '\U0000233F',
--	"sopf;":                     '\U0001D564',
--	"spades;":                   '\U00002660',
--	"spadesuit;":                '\U00002660',
--	"spar;":                     '\U00002225',
--	"sqcap;":                    '\U00002293',
--	"sqcup;":                    '\U00002294',
--	"sqsub;":                    '\U0000228F',
--	"sqsube;":                   '\U00002291',
--	"sqsubset;":                 '\U0000228F',
--	"sqsubseteq;":               '\U00002291',
--	"sqsup;":                    '\U00002290',
--	"sqsupe;":                   '\U00002292',
--	"sqsupset;":                 '\U00002290',
--	"sqsupseteq;":               '\U00002292',
--	"squ;":                      '\U000025A1',
--	"square;":                   '\U000025A1',
--	"squarf;":                   '\U000025AA',
--	"squf;":                     '\U000025AA',
--	"srarr;":                    '\U00002192',
--	"sscr;":                     '\U0001D4C8',
--	"ssetmn;":                   '\U00002216',
--	"ssmile;":                   '\U00002323',
--	"sstarf;":                   '\U000022C6',
--	"star;":                     '\U00002606',
--	"starf;":                    '\U00002605',
--	"straightepsilon;":          '\U000003F5',
--	"straightphi;":              '\U000003D5',
--	"strns;":                    '\U000000AF',
--	"sub;":                      '\U00002282',
--	"subE;":                     '\U00002AC5',
--	"subdot;":                   '\U00002ABD',
--	"sube;":                     '\U00002286',
--	"subedot;":                  '\U00002AC3',
--	"submult;":                  '\U00002AC1',
--	"subnE;":                    '\U00002ACB',
--	"subne;":                    '\U0000228A',
--	"subplus;":                  '\U00002ABF',
--	"subrarr;":                  '\U00002979',
--	"subset;":                   '\U00002282',
--	"subseteq;":                 '\U00002286',
--	"subseteqq;":                '\U00002AC5',
--	"subsetneq;":                '\U0000228A',
--	"subsetneqq;":               '\U00002ACB',
--	"subsim;":                   '\U00002AC7',
--	"subsub;":                   '\U00002AD5',
--	"subsup;":                   '\U00002AD3',
--	"succ;":                     '\U0000227B',
--	"succapprox;":               '\U00002AB8',
--	"succcurlyeq;":              '\U0000227D',
--	"succeq;":                   '\U00002AB0',
--	"succnapprox;":              '\U00002ABA',
--	"succneqq;":                 '\U00002AB6',
--	"succnsim;":                 '\U000022E9',
--	"succsim;":                  '\U0000227F',
--	"sum;":                      '\U00002211',
--	"sung;":                     '\U0000266A',
--	"sup;":                      '\U00002283',
--	"sup1;":                     '\U000000B9',
--	"sup2;":                     '\U000000B2',
--	"sup3;":                     '\U000000B3',
--	"supE;":                     '\U00002AC6',
--	"supdot;":                   '\U00002ABE',
--	"supdsub;":                  '\U00002AD8',
--	"supe;":                     '\U00002287',
--	"supedot;":                  '\U00002AC4',
--	"suphsol;":                  '\U000027C9',
--	"suphsub;":                  '\U00002AD7',
--	"suplarr;":                  '\U0000297B',
--	"supmult;":                  '\U00002AC2',
--	"supnE;":                    '\U00002ACC',
--	"supne;":                    '\U0000228B',
--	"supplus;":                  '\U00002AC0',
--	"supset;":                   '\U00002283',
--	"supseteq;":                 '\U00002287',
--	"supseteqq;":                '\U00002AC6',
--	"supsetneq;":                '\U0000228B',
--	"supsetneqq;":               '\U00002ACC',
--	"supsim;":                   '\U00002AC8',
--	"supsub;":                   '\U00002AD4',
--	"supsup;":                   '\U00002AD6',
--	"swArr;":                    '\U000021D9',
--	"swarhk;":                   '\U00002926',
--	"swarr;":                    '\U00002199',
--	"swarrow;":                  '\U00002199',
--	"swnwar;":                   '\U0000292A',
--	"szlig;":                    '\U000000DF',
--	"target;":                   '\U00002316',
--	"tau;":                      '\U000003C4',
--	"tbrk;":                     '\U000023B4',
--	"tcaron;":                   '\U00000165',
--	"tcedil;":                   '\U00000163',
--	"tcy;":                      '\U00000442',
--	"tdot;":                     '\U000020DB',
--	"telrec;":                   '\U00002315',
--	"tfr;":                      '\U0001D531',
--	"there4;":                   '\U00002234',
--	"therefore;":                '\U00002234',
--	"theta;":                    '\U000003B8',
--	"thetasym;":                 '\U000003D1',
--	"thetav;":                   '\U000003D1',
--	"thickapprox;":              '\U00002248',
--	"thicksim;":                 '\U0000223C',
--	"thinsp;":                   '\U00002009',
--	"thkap;":                    '\U00002248',
--	"thksim;":                   '\U0000223C',
--	"thorn;":                    '\U000000FE',
--	"tilde;":                    '\U000002DC',
--	"times;":                    '\U000000D7',
--	"timesb;":                   '\U000022A0',
--	"timesbar;":                 '\U00002A31',
--	"timesd;":                   '\U00002A30',
--	"tint;":                     '\U0000222D',
--	"toea;":                     '\U00002928',
--	"top;":                      '\U000022A4',
--	"topbot;":                   '\U00002336',
--	"topcir;":                   '\U00002AF1',
--	"topf;":                     '\U0001D565',
--	"topfork;":                  '\U00002ADA',
--	"tosa;":                     '\U00002929',
--	"tprime;":                   '\U00002034',
--	"trade;":                    '\U00002122',
--	"triangle;":                 '\U000025B5',
--	"triangledown;":             '\U000025BF',
--	"triangleleft;":             '\U000025C3',
--	"trianglelefteq;":           '\U000022B4',
--	"triangleq;":                '\U0000225C',
--	"triangleright;":            '\U000025B9',
--	"trianglerighteq;":          '\U000022B5',
--	"tridot;":                   '\U000025EC',
--	"trie;":                     '\U0000225C',
--	"triminus;":                 '\U00002A3A',
--	"triplus;":                  '\U00002A39',
--	"trisb;":                    '\U000029CD',
--	"tritime;":                  '\U00002A3B',
--	"trpezium;":                 '\U000023E2',
--	"tscr;":                     '\U0001D4C9',
--	"tscy;":                     '\U00000446',
--	"tshcy;":                    '\U0000045B',
--	"tstrok;":                   '\U00000167',
--	"twixt;":                    '\U0000226C',
--	"twoheadleftarrow;":         '\U0000219E',
--	"twoheadrightarrow;":        '\U000021A0',
--	"uArr;":                     '\U000021D1',
--	"uHar;":                     '\U00002963',
--	"uacute;":                   '\U000000FA',
--	"uarr;":                     '\U00002191',
--	"ubrcy;":                    '\U0000045E',
--	"ubreve;":                   '\U0000016D',
--	"ucirc;":                    '\U000000FB',
--	"ucy;":                      '\U00000443',
--	"udarr;":                    '\U000021C5',
--	"udblac;":                   '\U00000171',
--	"udhar;":                    '\U0000296E',
--	"ufisht;":                   '\U0000297E',
--	"ufr;":                      '\U0001D532',
--	"ugrave;":                   '\U000000F9',
--	"uharl;":                    '\U000021BF',
--	"uharr;":                    '\U000021BE',
--	"uhblk;":                    '\U00002580',
--	"ulcorn;":                   '\U0000231C',
--	"ulcorner;":                 '\U0000231C',
--	"ulcrop;":                   '\U0000230F',
--	"ultri;":                    '\U000025F8',
--	"umacr;":                    '\U0000016B',
--	"uml;":                      '\U000000A8',
--	"uogon;":                    '\U00000173',
--	"uopf;":                     '\U0001D566',
--	"uparrow;":                  '\U00002191',
--	"updownarrow;":              '\U00002195',
--	"upharpoonleft;":            '\U000021BF',
--	"upharpoonright;":           '\U000021BE',
--	"uplus;":                    '\U0000228E',
--	"upsi;":                     '\U000003C5',
--	"upsih;":                    '\U000003D2',
--	"upsilon;":                  '\U000003C5',
--	"upuparrows;":               '\U000021C8',
--	"urcorn;":                   '\U0000231D',
--	"urcorner;":                 '\U0000231D',
--	"urcrop;":                   '\U0000230E',
--	"uring;":                    '\U0000016F',
--	"urtri;":                    '\U000025F9',
--	"uscr;":                     '\U0001D4CA',
--	"utdot;":                    '\U000022F0',
--	"utilde;":                   '\U00000169',
--	"utri;":                     '\U000025B5',
--	"utrif;":                    '\U000025B4',
--	"uuarr;":                    '\U000021C8',
--	"uuml;":                     '\U000000FC',
--	"uwangle;":                  '\U000029A7',
--	"vArr;":                     '\U000021D5',
--	"vBar;":                     '\U00002AE8',
--	"vBarv;":                    '\U00002AE9',
--	"vDash;":                    '\U000022A8',
--	"vangrt;":                   '\U0000299C',
--	"varepsilon;":               '\U000003F5',
--	"varkappa;":                 '\U000003F0',
--	"varnothing;":               '\U00002205',
--	"varphi;":                   '\U000003D5',
--	"varpi;":                    '\U000003D6',
--	"varpropto;":                '\U0000221D',
--	"varr;":                     '\U00002195',
--	"varrho;":                   '\U000003F1',
--	"varsigma;":                 '\U000003C2',
--	"vartheta;":                 '\U000003D1',
--	"vartriangleleft;":          '\U000022B2',
--	"vartriangleright;":         '\U000022B3',
--	"vcy;":                      '\U00000432',
--	"vdash;":                    '\U000022A2',
--	"vee;":                      '\U00002228',
--	"veebar;":                   '\U000022BB',
--	"veeeq;":                    '\U0000225A',
--	"vellip;":                   '\U000022EE',
--	"verbar;":                   '\U0000007C',
--	"vert;":                     '\U0000007C',
--	"vfr;":                      '\U0001D533',
--	"vltri;":                    '\U000022B2',
--	"vopf;":                     '\U0001D567',
--	"vprop;":                    '\U0000221D',
--	"vrtri;":                    '\U000022B3',
--	"vscr;":                     '\U0001D4CB',
--	"vzigzag;":                  '\U0000299A',
--	"wcirc;":                    '\U00000175',
--	"wedbar;":                   '\U00002A5F',
--	"wedge;":                    '\U00002227',
--	"wedgeq;":                   '\U00002259',
--	"weierp;":                   '\U00002118',
--	"wfr;":                      '\U0001D534',
--	"wopf;":                     '\U0001D568',
--	"wp;":                       '\U00002118',
--	"wr;":                       '\U00002240',
--	"wreath;":                   '\U00002240',
--	"wscr;":                     '\U0001D4CC',
--	"xcap;":                     '\U000022C2',
--	"xcirc;":                    '\U000025EF',
--	"xcup;":                     '\U000022C3',
--	"xdtri;":                    '\U000025BD',
--	"xfr;":                      '\U0001D535',
--	"xhArr;":                    '\U000027FA',
--	"xharr;":                    '\U000027F7',
--	"xi;":                       '\U000003BE',
--	"xlArr;":                    '\U000027F8',
--	"xlarr;":                    '\U000027F5',
--	"xmap;":                     '\U000027FC',
--	"xnis;":                     '\U000022FB',
--	"xodot;":                    '\U00002A00',
--	"xopf;":                     '\U0001D569',
--	"xoplus;":                   '\U00002A01',
--	"xotime;":                   '\U00002A02',
--	"xrArr;":                    '\U000027F9',
--	"xrarr;":                    '\U000027F6',
--	"xscr;":                     '\U0001D4CD',
--	"xsqcup;":                   '\U00002A06',
--	"xuplus;":                   '\U00002A04',
--	"xutri;":                    '\U000025B3',
--	"xvee;":                     '\U000022C1',
--	"xwedge;":                   '\U000022C0',
--	"yacute;":                   '\U000000FD',
--	"yacy;":                     '\U0000044F',
--	"ycirc;":                    '\U00000177',
--	"ycy;":                      '\U0000044B',
--	"yen;":                      '\U000000A5',
--	"yfr;":                      '\U0001D536',
--	"yicy;":                     '\U00000457',
--	"yopf;":                     '\U0001D56A',
--	"yscr;":                     '\U0001D4CE',
--	"yucy;":                     '\U0000044E',
--	"yuml;":                     '\U000000FF',
--	"zacute;":                   '\U0000017A',
--	"zcaron;":                   '\U0000017E',
--	"zcy;":                      '\U00000437',
--	"zdot;":                     '\U0000017C',
--	"zeetrf;":                   '\U00002128',
--	"zeta;":                     '\U000003B6',
--	"zfr;":                      '\U0001D537',
--	"zhcy;":                     '\U00000436',
--	"zigrarr;":                  '\U000021DD',
--	"zopf;":                     '\U0001D56B',
--	"zscr;":                     '\U0001D4CF',
--	"zwj;":                      '\U0000200D',
--	"zwnj;":                     '\U0000200C',
--	"AElig":                     '\U000000C6',
--	"AMP":                       '\U00000026',
--	"Aacute":                    '\U000000C1',
--	"Acirc":                     '\U000000C2',
--	"Agrave":                    '\U000000C0',
--	"Aring":                     '\U000000C5',
--	"Atilde":                    '\U000000C3',
--	"Auml":                      '\U000000C4',
--	"COPY":                      '\U000000A9',
--	"Ccedil":                    '\U000000C7',
--	"ETH":                       '\U000000D0',
--	"Eacute":                    '\U000000C9',
--	"Ecirc":                     '\U000000CA',
--	"Egrave":                    '\U000000C8',
--	"Euml":                      '\U000000CB',
--	"GT":                        '\U0000003E',
--	"Iacute":                    '\U000000CD',
--	"Icirc":                     '\U000000CE',
--	"Igrave":                    '\U000000CC',
--	"Iuml":                      '\U000000CF',
--	"LT":                        '\U0000003C',
--	"Ntilde":                    '\U000000D1',
--	"Oacute":                    '\U000000D3',
--	"Ocirc":                     '\U000000D4',
--	"Ograve":                    '\U000000D2',
--	"Oslash":                    '\U000000D8',
--	"Otilde":                    '\U000000D5',
--	"Ouml":                      '\U000000D6',
--	"QUOT":                      '\U00000022',
--	"REG":                       '\U000000AE',
--	"THORN":                     '\U000000DE',
--	"Uacute":                    '\U000000DA',
--	"Ucirc":                     '\U000000DB',
--	"Ugrave":                    '\U000000D9',
--	"Uuml":                      '\U000000DC',
--	"Yacute":                    '\U000000DD',
--	"aacute":                    '\U000000E1',
--	"acirc":                     '\U000000E2',
--	"acute":                     '\U000000B4',
--	"aelig":                     '\U000000E6',
--	"agrave":                    '\U000000E0',
--	"amp":                       '\U00000026',
--	"aring":                     '\U000000E5',
--	"atilde":                    '\U000000E3',
--	"auml":                      '\U000000E4',
--	"brvbar":                    '\U000000A6',
--	"ccedil":                    '\U000000E7',
--	"cedil":                     '\U000000B8',
--	"cent":                      '\U000000A2',
--	"copy":                      '\U000000A9',
--	"curren":                    '\U000000A4',
--	"deg":                       '\U000000B0',
--	"divide":                    '\U000000F7',
--	"eacute":                    '\U000000E9',
--	"ecirc":                     '\U000000EA',
--	"egrave":                    '\U000000E8',
--	"eth":                       '\U000000F0',
--	"euml":                      '\U000000EB',
--	"frac12":                    '\U000000BD',
--	"frac14":                    '\U000000BC',
--	"frac34":                    '\U000000BE',
--	"gt":                        '\U0000003E',
--	"iacute":                    '\U000000ED',
--	"icirc":                     '\U000000EE',
--	"iexcl":                     '\U000000A1',
--	"igrave":                    '\U000000EC',
--	"iquest":                    '\U000000BF',
--	"iuml":                      '\U000000EF',
--	"laquo":                     '\U000000AB',
--	"lt":                        '\U0000003C',
--	"macr":                      '\U000000AF',
--	"micro":                     '\U000000B5',
--	"middot":                    '\U000000B7',
--	"nbsp":                      '\U000000A0',
--	"not":                       '\U000000AC',
--	"ntilde":                    '\U000000F1',
--	"oacute":                    '\U000000F3',
--	"ocirc":                     '\U000000F4',
--	"ograve":                    '\U000000F2',
--	"ordf":                      '\U000000AA',
--	"ordm":                      '\U000000BA',
--	"oslash":                    '\U000000F8',
--	"otilde":                    '\U000000F5',
--	"ouml":                      '\U000000F6',
--	"para":                      '\U000000B6',
--	"plusmn":                    '\U000000B1',
--	"pound":                     '\U000000A3',
--	"quot":                      '\U00000022',
--	"raquo":                     '\U000000BB',
--	"reg":                       '\U000000AE',
--	"sect":                      '\U000000A7',
--	"shy":                       '\U000000AD',
--	"sup1":                      '\U000000B9',
--	"sup2":                      '\U000000B2',
--	"sup3":                      '\U000000B3',
--	"szlig":                     '\U000000DF',
--	"thorn":                     '\U000000FE',
--	"times":                     '\U000000D7',
--	"uacute":                    '\U000000FA',
--	"ucirc":                     '\U000000FB',
--	"ugrave":                    '\U000000F9',
--	"uml":                       '\U000000A8',
--	"uuml":                      '\U000000FC',
--	"yacute":                    '\U000000FD',
--	"yen":                       '\U000000A5',
--	"yuml":                      '\U000000FF',
--}
--
--// HTML entities that are two unicode codepoints.
--var entity2 = map[string][2]rune{
--	// TODO(nigeltao): Handle replacements that are wider than their names.
--	// "nLt;":                     {'\u226A', '\u20D2'},
--	// "nGt;":                     {'\u226B', '\u20D2'},
--	"NotEqualTilde;":           {'\u2242', '\u0338'},
--	"NotGreaterFullEqual;":     {'\u2267', '\u0338'},
--	"NotGreaterGreater;":       {'\u226B', '\u0338'},
--	"NotGreaterSlantEqual;":    {'\u2A7E', '\u0338'},
--	"NotHumpDownHump;":         {'\u224E', '\u0338'},
--	"NotHumpEqual;":            {'\u224F', '\u0338'},
--	"NotLeftTriangleBar;":      {'\u29CF', '\u0338'},
--	"NotLessLess;":             {'\u226A', '\u0338'},
--	"NotLessSlantEqual;":       {'\u2A7D', '\u0338'},
--	"NotNestedGreaterGreater;": {'\u2AA2', '\u0338'},
--	"NotNestedLessLess;":       {'\u2AA1', '\u0338'},
--	"NotPrecedesEqual;":        {'\u2AAF', '\u0338'},
--	"NotRightTriangleBar;":     {'\u29D0', '\u0338'},
--	"NotSquareSubset;":         {'\u228F', '\u0338'},
--	"NotSquareSuperset;":       {'\u2290', '\u0338'},
--	"NotSubset;":               {'\u2282', '\u20D2'},
--	"NotSucceedsEqual;":        {'\u2AB0', '\u0338'},
--	"NotSucceedsTilde;":        {'\u227F', '\u0338'},
--	"NotSuperset;":             {'\u2283', '\u20D2'},
--	"ThickSpace;":              {'\u205F', '\u200A'},
--	"acE;":                     {'\u223E', '\u0333'},
--	"bne;":                     {'\u003D', '\u20E5'},
--	"bnequiv;":                 {'\u2261', '\u20E5'},
--	"caps;":                    {'\u2229', '\uFE00'},
--	"cups;":                    {'\u222A', '\uFE00'},
--	"fjlig;":                   {'\u0066', '\u006A'},
--	"gesl;":                    {'\u22DB', '\uFE00'},
--	"gvertneqq;":               {'\u2269', '\uFE00'},
--	"gvnE;":                    {'\u2269', '\uFE00'},
--	"lates;":                   {'\u2AAD', '\uFE00'},
--	"lesg;":                    {'\u22DA', '\uFE00'},
--	"lvertneqq;":               {'\u2268', '\uFE00'},
--	"lvnE;":                    {'\u2268', '\uFE00'},
--	"nGg;":                     {'\u22D9', '\u0338'},
--	"nGtv;":                    {'\u226B', '\u0338'},
--	"nLl;":                     {'\u22D8', '\u0338'},
--	"nLtv;":                    {'\u226A', '\u0338'},
--	"nang;":                    {'\u2220', '\u20D2'},
--	"napE;":                    {'\u2A70', '\u0338'},
--	"napid;":                   {'\u224B', '\u0338'},
--	"nbump;":                   {'\u224E', '\u0338'},
--	"nbumpe;":                  {'\u224F', '\u0338'},
--	"ncongdot;":                {'\u2A6D', '\u0338'},
--	"nedot;":                   {'\u2250', '\u0338'},
--	"nesim;":                   {'\u2242', '\u0338'},
--	"ngE;":                     {'\u2267', '\u0338'},
--	"ngeqq;":                   {'\u2267', '\u0338'},
--	"ngeqslant;":               {'\u2A7E', '\u0338'},
--	"nges;":                    {'\u2A7E', '\u0338'},
--	"nlE;":                     {'\u2266', '\u0338'},
--	"nleqq;":                   {'\u2266', '\u0338'},
--	"nleqslant;":               {'\u2A7D', '\u0338'},
--	"nles;":                    {'\u2A7D', '\u0338'},
--	"notinE;":                  {'\u22F9', '\u0338'},
--	"notindot;":                {'\u22F5', '\u0338'},
--	"nparsl;":                  {'\u2AFD', '\u20E5'},
--	"npart;":                   {'\u2202', '\u0338'},
--	"npre;":                    {'\u2AAF', '\u0338'},
--	"npreceq;":                 {'\u2AAF', '\u0338'},
--	"nrarrc;":                  {'\u2933', '\u0338'},
--	"nrarrw;":                  {'\u219D', '\u0338'},
--	"nsce;":                    {'\u2AB0', '\u0338'},
--	"nsubE;":                   {'\u2AC5', '\u0338'},
--	"nsubset;":                 {'\u2282', '\u20D2'},
--	"nsubseteqq;":              {'\u2AC5', '\u0338'},
--	"nsucceq;":                 {'\u2AB0', '\u0338'},
--	"nsupE;":                   {'\u2AC6', '\u0338'},
--	"nsupset;":                 {'\u2283', '\u20D2'},
--	"nsupseteqq;":              {'\u2AC6', '\u0338'},
--	"nvap;":                    {'\u224D', '\u20D2'},
--	"nvge;":                    {'\u2265', '\u20D2'},
--	"nvgt;":                    {'\u003E', '\u20D2'},
--	"nvle;":                    {'\u2264', '\u20D2'},
--	"nvlt;":                    {'\u003C', '\u20D2'},
--	"nvltrie;":                 {'\u22B4', '\u20D2'},
--	"nvrtrie;":                 {'\u22B5', '\u20D2'},
--	"nvsim;":                   {'\u223C', '\u20D2'},
--	"race;":                    {'\u223D', '\u0331'},
--	"smtes;":                   {'\u2AAC', '\uFE00'},
--	"sqcaps;":                  {'\u2293', '\uFE00'},
--	"sqcups;":                  {'\u2294', '\uFE00'},
--	"varsubsetneq;":            {'\u228A', '\uFE00'},
--	"varsubsetneqq;":           {'\u2ACB', '\uFE00'},
--	"varsupsetneq;":            {'\u228B', '\uFE00'},
--	"varsupsetneqq;":           {'\u2ACC', '\uFE00'},
--	"vnsub;":                   {'\u2282', '\u20D2'},
--	"vnsup;":                   {'\u2283', '\u20D2'},
--	"vsubnE;":                  {'\u2ACB', '\uFE00'},
--	"vsubne;":                  {'\u228A', '\uFE00'},
--	"vsupnE;":                  {'\u2ACC', '\uFE00'},
--	"vsupne;":                  {'\u228B', '\uFE00'},
--}
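
The two maps removed above, entity and entity2, are the lookup tables behind this vendored package's UnescapeString: most named references resolve to a single codepoint, while entries such as "gesl;" expand to a base character plus a variation selector. A minimal sketch of how a caller sees both cases, assuming the vendored copy is imported under its upstream path golang.org/x/net/html (the same path used by the deleted example_test.go):

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// A single-codepoint named entity, looked up in the entity table.
	fmt.Println(html.UnescapeString("&aacute;")) // á (U+00E1)

	// A two-codepoint entity from the entity2 table: U+22DB followed by
	// the variation selector U+FE00.
	fmt.Printf("%U\n", []rune(html.UnescapeString("&gesl;"))) // [U+22DB U+FE00]
}
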
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/entity_test.go b/Godeps/_workspace/src/golang.org/x/net/html/entity_test.go
-deleted file mode 100644
-index b53f866..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/entity_test.go
-+++ /dev/null
-@@ -1,29 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"testing"
--	"unicode/utf8"
--)
--
--func TestEntityLength(t *testing.T) {
--	// We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).
--	// The +1 comes from the leading "&". This property implies that the length of
--	// unescaped text is <= the length of escaped text.
--	for k, v := range entity {
--		if 1+len(k) < utf8.RuneLen(v) {
--			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v))
--		}
--		if len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {
--			t.Errorf("entity name %s is %d characters, but longestEntityWithoutSemicolon=%d", k, len(k), longestEntityWithoutSemicolon)
--		}
--	}
--	for k, v := range entity2 {
--		if 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {
--			t.Error("escaped entity &" + k + " is shorter than its UTF-8 encoding " + string(v[0]) + string(v[1]))
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/escape.go b/Godeps/_workspace/src/golang.org/x/net/html/escape.go
-deleted file mode 100644
-index 75bddff..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/escape.go
-+++ /dev/null
-@@ -1,258 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bytes"
--	"strings"
--	"unicode/utf8"
--)
--
--// These replacements permit compatibility with old numeric entities that
--// assumed Windows-1252 encoding.
--// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference
--var replacementTable = [...]rune{
--	'\u20AC', // First entry is what 0x80 should be replaced with.
--	'\u0081',
--	'\u201A',
--	'\u0192',
--	'\u201E',
--	'\u2026',
--	'\u2020',
--	'\u2021',
--	'\u02C6',
--	'\u2030',
--	'\u0160',
--	'\u2039',
--	'\u0152',
--	'\u008D',
--	'\u017D',
--	'\u008F',
--	'\u0090',
--	'\u2018',
--	'\u2019',
--	'\u201C',
--	'\u201D',
--	'\u2022',
--	'\u2013',
--	'\u2014',
--	'\u02DC',
--	'\u2122',
--	'\u0161',
--	'\u203A',
--	'\u0153',
--	'\u009D',
--	'\u017E',
--	'\u0178', // Last entry is 0x9F.
--	// 0x00->'\uFFFD' is handled programmatically.
--	// 0x0D->'\u000D' is a no-op.
--}
--
--// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
--// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
--// Precondition: b[src] == '&' && dst <= src.
--// attribute should be true if parsing an attribute value.
--func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
--	// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#consume-a-character-reference
--
--	// i starts at 1 because we already know that s[0] == '&'.
--	i, s := 1, b[src:]
--
--	if len(s) <= 1 {
--		b[dst] = b[src]
--		return dst + 1, src + 1
--	}
--
--	if s[i] == '#' {
--		if len(s) <= 3 { // We need to have at least "&#.".
--			b[dst] = b[src]
--			return dst + 1, src + 1
--		}
--		i++
--		c := s[i]
--		hex := false
--		if c == 'x' || c == 'X' {
--			hex = true
--			i++
--		}
--
--		x := '\x00'
--		for i < len(s) {
--			c = s[i]
--			i++
--			if hex {
--				if '0' <= c && c <= '9' {
--					x = 16*x + rune(c) - '0'
--					continue
--				} else if 'a' <= c && c <= 'f' {
--					x = 16*x + rune(c) - 'a' + 10
--					continue
--				} else if 'A' <= c && c <= 'F' {
--					x = 16*x + rune(c) - 'A' + 10
--					continue
--				}
--			} else if '0' <= c && c <= '9' {
--				x = 10*x + rune(c) - '0'
--				continue
--			}
--			if c != ';' {
--				i--
--			}
--			break
--		}
--
--		if i <= 3 { // No characters matched.
--			b[dst] = b[src]
--			return dst + 1, src + 1
--		}
--
--		if 0x80 <= x && x <= 0x9F {
--			// Replace characters from Windows-1252 with UTF-8 equivalents.
--			x = replacementTable[x-0x80]
--		} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
--			// Replace invalid characters with the replacement character.
--			x = '\uFFFD'
--		}
--
--		return dst + utf8.EncodeRune(b[dst:], x), src + i
--	}
--
--	// Consume the maximum number of characters possible, with the
--	// consumed characters matching one of the named references.
--
--	for i < len(s) {
--		c := s[i]
--		i++
--		// Lower-cased characters are more common in entities, so we check for them first.
--		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
--			continue
--		}
--		if c != ';' {
--			i--
--		}
--		break
--	}
--
--	entityName := string(s[1:i])
--	if entityName == "" {
--		// No-op.
--	} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
--		// No-op.
--	} else if x := entity[entityName]; x != 0 {
--		return dst + utf8.EncodeRune(b[dst:], x), src + i
--	} else if x := entity2[entityName]; x[0] != 0 {
--		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
--		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
--	} else if !attribute {
--		maxLen := len(entityName) - 1
--		if maxLen > longestEntityWithoutSemicolon {
--			maxLen = longestEntityWithoutSemicolon
--		}
--		for j := maxLen; j > 1; j-- {
--			if x := entity[entityName[:j]]; x != 0 {
--				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
--			}
--		}
--	}
--
--	dst1, src1 = dst+i, src+i
--	copy(b[dst:dst1], b[src:src1])
--	return dst1, src1
--}
--
--// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
--// attribute should be true if parsing an attribute value.
--func unescape(b []byte, attribute bool) []byte {
--	for i, c := range b {
--		if c == '&' {
--			dst, src := unescapeEntity(b, i, i, attribute)
--			for src < len(b) {
--				c := b[src]
--				if c == '&' {
--					dst, src = unescapeEntity(b, dst, src, attribute)
--				} else {
--					b[dst] = c
--					dst, src = dst+1, src+1
--				}
--			}
--			return b[0:dst]
--		}
--	}
--	return b
--}
--
--// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
--func lower(b []byte) []byte {
--	for i, c := range b {
--		if 'A' <= c && c <= 'Z' {
--			b[i] = c + 'a' - 'A'
--		}
--	}
--	return b
--}
--
--const escapedChars = "&'<>\"\r"
--
--func escape(w writer, s string) error {
--	i := strings.IndexAny(s, escapedChars)
--	for i != -1 {
--		if _, err := w.WriteString(s[:i]); err != nil {
--			return err
--		}
--		var esc string
--		switch s[i] {
--		case '&':
--			esc = "&amp;"
--		case '\'':
--			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
--			esc = "&#39;"
--		case '<':
--			esc = "&lt;"
--		case '>':
--			esc = "&gt;"
--		case '"':
--			// "&#34;" is shorter than "&quot;".
--			esc = "&#34;"
--		case '\r':
--			esc = "&#13;"
--		default:
--			panic("unrecognized escape character")
--		}
--		s = s[i+1:]
--		if _, err := w.WriteString(esc); err != nil {
--			return err
--		}
--		i = strings.IndexAny(s, escapedChars)
--	}
--	_, err := w.WriteString(s)
--	return err
--}
--
--// EscapeString escapes special characters like "<" to become "&lt;". It
--// escapes only five such characters: <, >, &, ' and ".
--// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
--// always true.
--func EscapeString(s string) string {
--	if strings.IndexAny(s, escapedChars) == -1 {
--		return s
--	}
--	var buf bytes.Buffer
--	escape(&buf, s)
--	return buf.String()
--}
--
--// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
--// larger range of entities than EscapeString escapes. For example, "&aacute;"
--// unescapes to "á", as does "&#225;" and "&xE1;".
--// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
--// always true.
--func UnescapeString(s string) string {
--	for _, c := range s {
--		if c == '&' {
--			return string(unescape([]byte(s), false))
--		}
--	}
--	return s
--}
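
The unescapeEntity routine deleted above also covers numeric character references: values 0x80 through 0x9F are remapped through replacementTable to their Windows-1252 meanings, NUL, surrogates and out-of-range values collapse to U+FFFD, and a missing trailing semicolon is tolerated. A short sketch of that behaviour through the exported API, under the same golang.org/x/net/html import-path assumption as above:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// Numeric reference in the 0x80-0x9F range: remapped via replacementTable
	// to its Windows-1252 equivalent (0x80 becomes U+20AC, the euro sign).
	fmt.Println(html.UnescapeString("&#x80;")) // €

	// NUL, surrogate and out-of-range values become the replacement character.
	fmt.Printf("%U\n", []rune(html.UnescapeString("&#xD800;"))) // [U+FFFD]

	// A numeric reference without a closing semicolon is still consumed.
	fmt.Println(html.UnescapeString("&#169 reserved")) // © reserved
}
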
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/escape_test.go b/Godeps/_workspace/src/golang.org/x/net/html/escape_test.go
-deleted file mode 100644
-index b405d4b..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/escape_test.go
-+++ /dev/null
-@@ -1,97 +0,0 @@
--// Copyright 2013 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import "testing"
--
--type unescapeTest struct {
--	// A short description of the test case.
--	desc string
--	// The HTML text.
--	html string
--	// The unescaped text.
--	unescaped string
--}
--
--var unescapeTests = []unescapeTest{
--	// Handle no entities.
--	{
--		"copy",
--		"A\ttext\nstring",
--		"A\ttext\nstring",
--	},
--	// Handle simple named entities.
--	{
--		"simple",
--		"&amp; &gt; &lt;",
--		"& > <",
--	},
--	// Handle hitting the end of the string.
--	{
--		"stringEnd",
--		"&amp &amp",
--		"& &",
--	},
--	// Handle entities with two codepoints.
--	{
--		"multiCodepoint",
--		"text &gesl; blah",
--		"text \u22db\ufe00 blah",
--	},
--	// Handle decimal numeric entities.
--	{
--		"decimalEntity",
--		"Delta = &#916; ",
--		"Delta = Δ ",
--	},
--	// Handle hexadecimal numeric entities.
--	{
--		"hexadecimalEntity",
--		"Lambda = &#x3bb; = &#X3Bb ",
--		"Lambda = λ = λ ",
--	},
--	// Handle numeric early termination.
--	{
--		"numericEnds",
--		"&# &#x &#128;43 &copy = &#169f = &#xa9",
--		"&# &#x €43 © = ©f = ©",
--	},
--	// Handle numeric ISO-8859-1 entity replacements.
--	{
--		"numericReplacements",
--		"Footnote&#x87;",
--		"Footnote‡",
--	},
--}
--
--func TestUnescape(t *testing.T) {
--	for _, tt := range unescapeTests {
--		unescaped := UnescapeString(tt.html)
--		if unescaped != tt.unescaped {
--			t.Errorf("TestUnescape %s: want %q, got %q", tt.desc, tt.unescaped, unescaped)
--		}
--	}
--}
--
--func TestUnescapeEscape(t *testing.T) {
--	ss := []string{
--		``,
--		`abc def`,
--		`a & b`,
--		`a&amp;b`,
--		`a &amp b`,
--		`&quot;`,
--		`"`,
--		`"<&>"`,
--		`&quot;&lt;&amp;&gt;&quot;`,
--		`3&5==1 && 0<1, "0&lt;1", a+acute=&aacute;`,
--		`The special characters are: <, >, &, ' and "`,
--	}
--	for _, s := range ss {
--		if got := UnescapeString(EscapeString(s)); got != s {
--			t.Errorf("got %q want %q", got, s)
--		}
--	}
--}
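
The deleted test exercises the round-trip property documented in escape.go: UnescapeString(EscapeString(s)) == s always holds, while the converse does not, because UnescapeString recognises far more entities than EscapeString ever produces. A standalone illustration of the same property, again only a sketch assuming the upstream import path:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	s := `3&5==1 && 0<1, "0&lt;1", a+acute=&aacute;`

	// EscapeString rewrites only the characters in escapedChars
	// (&, ', <, >, " and carriage return) ...
	escaped := html.EscapeString(s)
	fmt.Println(escaped)

	// ... so unescaping the result always returns the original string.
	fmt.Println(html.UnescapeString(escaped) == s) // true

	// The converse fails: "&aacute;" unescapes to "á", which EscapeString
	// leaves untouched rather than turning back into a named entity.
	fmt.Println(html.EscapeString(html.UnescapeString("&aacute;")) == "&aacute;") // false
}
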
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/example_test.go b/Godeps/_workspace/src/golang.org/x/net/html/example_test.go
-deleted file mode 100644
-index 0b06ed7..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/example_test.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// This example demonstrates parsing HTML data and walking the resulting tree.
--package html_test
--
--import (
--	"fmt"
--	"log"
--	"strings"
--
--	"golang.org/x/net/html"
--)
--
--func ExampleParse() {
--	s := `<p>Links:</p><ul><li><a href="foo">Foo</a><li><a href="/bar/baz">BarBaz</a></ul>`
--	doc, err := html.Parse(strings.NewReader(s))
--	if err != nil {
--		log.Fatal(err)
--	}
--	var f func(*html.Node)
--	f = func(n *html.Node) {
--		if n.Type == html.ElementNode && n.Data == "a" {
--			for _, a := range n.Attr {
--				if a.Key == "href" {
--					fmt.Println(a.Val)
--					break
--				}
--			}
--		}
--		for c := n.FirstChild; c != nil; c = c.NextSibling {
--			f(c)
--		}
--	}
--	f(doc)
--	// Output:
--	// foo
--	// /bar/baz
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/foreign.go b/Godeps/_workspace/src/golang.org/x/net/html/foreign.go
-deleted file mode 100644
-index d3b3844..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/foreign.go
-+++ /dev/null
-@@ -1,226 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"strings"
--)
--
--func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
--	for i := range aa {
--		if newName, ok := nameMap[aa[i].Key]; ok {
--			aa[i].Key = newName
--		}
--	}
--}
--
--func adjustForeignAttributes(aa []Attribute) {
--	for i, a := range aa {
--		if a.Key == "" || a.Key[0] != 'x' {
--			continue
--		}
--		switch a.Key {
--		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
--			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
--			j := strings.Index(a.Key, ":")
--			aa[i].Namespace = a.Key[:j]
--			aa[i].Key = a.Key[j+1:]
--		}
--	}
--}
--
--func htmlIntegrationPoint(n *Node) bool {
--	if n.Type != ElementNode {
--		return false
--	}
--	switch n.Namespace {
--	case "math":
--		if n.Data == "annotation-xml" {
--			for _, a := range n.Attr {
--				if a.Key == "encoding" {
--					val := strings.ToLower(a.Val)
--					if val == "text/html" || val == "application/xhtml+xml" {
--						return true
--					}
--				}
--			}
--		}
--	case "svg":
--		switch n.Data {
--		case "desc", "foreignObject", "title":
--			return true
--		}
--	}
--	return false
--}
--
--func mathMLTextIntegrationPoint(n *Node) bool {
--	if n.Namespace != "math" {
--		return false
--	}
--	switch n.Data {
--	case "mi", "mo", "mn", "ms", "mtext":
--		return true
--	}
--	return false
--}
--
--// Section 12.2.5.5.
--var breakout = map[string]bool{
--	"b":          true,
--	"big":        true,
--	"blockquote": true,
--	"body":       true,
--	"br":         true,
--	"center":     true,
--	"code":       true,
--	"dd":         true,
--	"div":        true,
--	"dl":         true,
--	"dt":         true,
--	"em":         true,
--	"embed":      true,
--	"h1":         true,
--	"h2":         true,
--	"h3":         true,
--	"h4":         true,
--	"h5":         true,
--	"h6":         true,
--	"head":       true,
--	"hr":         true,
--	"i":          true,
--	"img":        true,
--	"li":         true,
--	"listing":    true,
--	"menu":       true,
--	"meta":       true,
--	"nobr":       true,
--	"ol":         true,
--	"p":          true,
--	"pre":        true,
--	"ruby":       true,
--	"s":          true,
--	"small":      true,
--	"span":       true,
--	"strong":     true,
--	"strike":     true,
--	"sub":        true,
--	"sup":        true,
--	"table":      true,
--	"tt":         true,
--	"u":          true,
--	"ul":         true,
--	"var":        true,
--}
--
--// Section 12.2.5.5.
--var svgTagNameAdjustments = map[string]string{
--	"altglyph":            "altGlyph",
--	"altglyphdef":         "altGlyphDef",
--	"altglyphitem":        "altGlyphItem",
--	"animatecolor":        "animateColor",
--	"animatemotion":       "animateMotion",
--	"animatetransform":    "animateTransform",
--	"clippath":            "clipPath",
--	"feblend":             "feBlend",
--	"fecolormatrix":       "feColorMatrix",
--	"fecomponenttransfer": "feComponentTransfer",
--	"fecomposite":         "feComposite",
--	"feconvolvematrix":    "feConvolveMatrix",
--	"fediffuselighting":   "feDiffuseLighting",
--	"fedisplacementmap":   "feDisplacementMap",
--	"fedistantlight":      "feDistantLight",
--	"feflood":             "feFlood",
--	"fefunca":             "feFuncA",
--	"fefuncb":             "feFuncB",
--	"fefuncg":             "feFuncG",
--	"fefuncr":             "feFuncR",
--	"fegaussianblur":      "feGaussianBlur",
--	"feimage":             "feImage",
--	"femerge":             "feMerge",
--	"femergenode":         "feMergeNode",
--	"femorphology":        "feMorphology",
--	"feoffset":            "feOffset",
--	"fepointlight":        "fePointLight",
--	"fespecularlighting":  "feSpecularLighting",
--	"fespotlight":         "feSpotLight",
--	"fetile":              "feTile",
--	"feturbulence":        "feTurbulence",
--	"foreignobject":       "foreignObject",
--	"glyphref":            "glyphRef",
--	"lineargradient":      "linearGradient",
--	"radialgradient":      "radialGradient",
--	"textpath":            "textPath",
--}
--
--// Section 12.2.5.1
--var mathMLAttributeAdjustments = map[string]string{
--	"definitionurl": "definitionURL",
--}
--
--var svgAttributeAdjustments = map[string]string{
--	"attributename":             "attributeName",
--	"attributetype":             "attributeType",
--	"basefrequency":             "baseFrequency",
--	"baseprofile":               "baseProfile",
--	"calcmode":                  "calcMode",
--	"clippathunits":             "clipPathUnits",
--	"contentscripttype":         "contentScriptType",
--	"contentstyletype":          "contentStyleType",
--	"diffuseconstant":           "diffuseConstant",
--	"edgemode":                  "edgeMode",
--	"externalresourcesrequired": "externalResourcesRequired",
--	"filterres":                 "filterRes",
--	"filterunits":               "filterUnits",
--	"glyphref":                  "glyphRef",
--	"gradienttransform":         "gradientTransform",
--	"gradientunits":             "gradientUnits",
--	"kernelmatrix":              "kernelMatrix",
--	"kernelunitlength":          "kernelUnitLength",
--	"keypoints":                 "keyPoints",
--	"keysplines":                "keySplines",
--	"keytimes":                  "keyTimes",
--	"lengthadjust":              "lengthAdjust",
--	"limitingconeangle":         "limitingConeAngle",
--	"markerheight":              "markerHeight",
--	"markerunits":               "markerUnits",
--	"markerwidth":               "markerWidth",
--	"maskcontentunits":          "maskContentUnits",
--	"maskunits":                 "maskUnits",
--	"numoctaves":                "numOctaves",
--	"pathlength":                "pathLength",
--	"patterncontentunits":       "patternContentUnits",
--	"patterntransform":          "patternTransform",
--	"patternunits":              "patternUnits",
--	"pointsatx":                 "pointsAtX",
--	"pointsaty":                 "pointsAtY",
--	"pointsatz":                 "pointsAtZ",
--	"preservealpha":             "preserveAlpha",
--	"preserveaspectratio":       "preserveAspectRatio",
--	"primitiveunits":            "primitiveUnits",
--	"refx":                      "refX",
--	"refy":                      "refY",
--	"repeatcount":               "repeatCount",
--	"repeatdur":                 "repeatDur",
--	"requiredextensions":        "requiredExtensions",
--	"requiredfeatures":          "requiredFeatures",
--	"specularconstant":          "specularConstant",
--	"specularexponent":          "specularExponent",
--	"spreadmethod":              "spreadMethod",
--	"startoffset":               "startOffset",
--	"stddeviation":              "stdDeviation",
--	"stitchtiles":               "stitchTiles",
--	"surfacescale":              "surfaceScale",
--	"systemlanguage":            "systemLanguage",
--	"tablevalues":               "tableValues",
--	"targetx":                   "targetX",
--	"targety":                   "targetY",
--	"textlength":                "textLength",
--	"viewbox":                   "viewBox",
--	"viewtarget":                "viewTarget",
--	"xchannelselector":          "xChannelSelector",
--	"ychannelselector":          "yChannelSelector",
--	"zoomandpan":                "zoomAndPan",
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/node.go b/Godeps/_workspace/src/golang.org/x/net/html/node.go
-deleted file mode 100644
-index 26b657a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/node.go
-+++ /dev/null
-@@ -1,193 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"golang.org/x/net/html/atom"
--)
--
--// A NodeType is the type of a Node.
--type NodeType uint32
--
--const (
--	ErrorNode NodeType = iota
--	TextNode
--	DocumentNode
--	ElementNode
--	CommentNode
--	DoctypeNode
--	scopeMarkerNode
--)
--
--// Section 12.2.3.3 says "scope markers are inserted when entering applet
--// elements, buttons, object elements, marquees, table cells, and table
--// captions, and are used to prevent formatting from 'leaking'".
--var scopeMarker = Node{Type: scopeMarkerNode}
--
--// A Node consists of a NodeType and some Data (tag name for element nodes,
--// content for text) and are part of a tree of Nodes. Element nodes may also
--// have a Namespace and contain a slice of Attributes. Data is unescaped, so
--// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
--// is the atom for Data, or zero if Data is not a known tag name.
--//
--// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
--// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
--// "svg" is short for "http://www.w3.org/2000/svg".
--type Node struct {
--	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
--
--	Type      NodeType
--	DataAtom  atom.Atom
--	Data      string
--	Namespace string
--	Attr      []Attribute
--}
--
--// InsertBefore inserts newChild as a child of n, immediately before oldChild
--// in the sequence of n's children. oldChild may be nil, in which case newChild
--// is appended to the end of n's children.
--//
--// It will panic if newChild already has a parent or siblings.
--func (n *Node) InsertBefore(newChild, oldChild *Node) {
--	if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
--		panic("html: InsertBefore called for an attached child Node")
--	}
--	var prev, next *Node
--	if oldChild != nil {
--		prev, next = oldChild.PrevSibling, oldChild
--	} else {
--		prev = n.LastChild
--	}
--	if prev != nil {
--		prev.NextSibling = newChild
--	} else {
--		n.FirstChild = newChild
--	}
--	if next != nil {
--		next.PrevSibling = newChild
--	} else {
--		n.LastChild = newChild
--	}
--	newChild.Parent = n
--	newChild.PrevSibling = prev
--	newChild.NextSibling = next
--}
--
--// AppendChild adds a node c as a child of n.
--//
--// It will panic if c already has a parent or siblings.
--func (n *Node) AppendChild(c *Node) {
--	if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
--		panic("html: AppendChild called for an attached child Node")
--	}
--	last := n.LastChild
--	if last != nil {
--		last.NextSibling = c
--	} else {
--		n.FirstChild = c
--	}
--	n.LastChild = c
--	c.Parent = n
--	c.PrevSibling = last
--}
--
--// RemoveChild removes a node c that is a child of n. Afterwards, c will have
--// no parent and no siblings.
--//
--// It will panic if c's parent is not n.
--func (n *Node) RemoveChild(c *Node) {
--	if c.Parent != n {
--		panic("html: RemoveChild called for a non-child Node")
--	}
--	if n.FirstChild == c {
--		n.FirstChild = c.NextSibling
--	}
--	if c.NextSibling != nil {
--		c.NextSibling.PrevSibling = c.PrevSibling
--	}
--	if n.LastChild == c {
--		n.LastChild = c.PrevSibling
--	}
--	if c.PrevSibling != nil {
--		c.PrevSibling.NextSibling = c.NextSibling
--	}
--	c.Parent = nil
--	c.PrevSibling = nil
--	c.NextSibling = nil
--}
--
--// reparentChildren reparents all of src's child nodes to dst.
--func reparentChildren(dst, src *Node) {
--	for {
--		child := src.FirstChild
--		if child == nil {
--			break
--		}
--		src.RemoveChild(child)
--		dst.AppendChild(child)
--	}
--}
--
--// clone returns a new node with the same type, data and attributes.
--// The clone has no parent, no siblings and no children.
--func (n *Node) clone() *Node {
--	m := &Node{
--		Type:     n.Type,
--		DataAtom: n.DataAtom,
--		Data:     n.Data,
--		Attr:     make([]Attribute, len(n.Attr)),
--	}
--	copy(m.Attr, n.Attr)
--	return m
--}
--
--// nodeStack is a stack of nodes.
--type nodeStack []*Node
--
--// pop pops the stack. It will panic if s is empty.
--func (s *nodeStack) pop() *Node {
--	i := len(*s)
--	n := (*s)[i-1]
--	*s = (*s)[:i-1]
--	return n
--}
--
--// top returns the most recently pushed node, or nil if s is empty.
--func (s *nodeStack) top() *Node {
--	if i := len(*s); i > 0 {
--		return (*s)[i-1]
--	}
--	return nil
--}
--
--// index returns the index of the top-most occurrence of n in the stack, or -1
--// if n is not present.
--func (s *nodeStack) index(n *Node) int {
--	for i := len(*s) - 1; i >= 0; i-- {
--		if (*s)[i] == n {
--			return i
--		}
--	}
--	return -1
--}
--
--// insert inserts a node at the given index.
--func (s *nodeStack) insert(i int, n *Node) {
--	(*s) = append(*s, nil)
--	copy((*s)[i+1:], (*s)[i:])
--	(*s)[i] = n
--}
--
--// remove removes a node from the stack. It is a no-op if n is not present.
--func (s *nodeStack) remove(n *Node) {
--	i := s.index(n)
--	if i == -1 {
--		return
--	}
--	copy((*s)[i:], (*s)[i+1:])
--	j := len(*s) - 1
--	(*s)[j] = nil
--	*s = (*s)[:j]
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/node_test.go b/Godeps/_workspace/src/golang.org/x/net/html/node_test.go
-deleted file mode 100644
-index 471102f..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/node_test.go
-+++ /dev/null
-@@ -1,146 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"fmt"
--)
--
--// checkTreeConsistency checks that a node and its descendants are all
--// consistent in their parent/child/sibling relationships.
--func checkTreeConsistency(n *Node) error {
--	return checkTreeConsistency1(n, 0)
--}
--
--func checkTreeConsistency1(n *Node, depth int) error {
--	if depth == 1e4 {
--		return fmt.Errorf("html: tree looks like it contains a cycle")
--	}
--	if err := checkNodeConsistency(n); err != nil {
--		return err
--	}
--	for c := n.FirstChild; c != nil; c = c.NextSibling {
--		if err := checkTreeConsistency1(c, depth+1); err != nil {
--			return err
--		}
--	}
--	return nil
--}
--
--// checkNodeConsistency checks that a node's parent/child/sibling relationships
--// are consistent.
--func checkNodeConsistency(n *Node) error {
--	if n == nil {
--		return nil
--	}
--
--	nParent := 0
--	for p := n.Parent; p != nil; p = p.Parent {
--		nParent++
--		if nParent == 1e4 {
--			return fmt.Errorf("html: parent list looks like an infinite loop")
--		}
--	}
--
--	nForward := 0
--	for c := n.FirstChild; c != nil; c = c.NextSibling {
--		nForward++
--		if nForward == 1e6 {
--			return fmt.Errorf("html: forward list of children looks like an infinite loop")
--		}
--		if c.Parent != n {
--			return fmt.Errorf("html: inconsistent child/parent relationship")
--		}
--	}
--
--	nBackward := 0
--	for c := n.LastChild; c != nil; c = c.PrevSibling {
--		nBackward++
--		if nBackward == 1e6 {
--			return fmt.Errorf("html: backward list of children looks like an infinite loop")
--		}
--		if c.Parent != n {
--			return fmt.Errorf("html: inconsistent child/parent relationship")
--		}
--	}
--
--	if n.Parent != nil {
--		if n.Parent == n {
--			return fmt.Errorf("html: inconsistent parent relationship")
--		}
--		if n.Parent == n.FirstChild {
--			return fmt.Errorf("html: inconsistent parent/first relationship")
--		}
--		if n.Parent == n.LastChild {
--			return fmt.Errorf("html: inconsistent parent/last relationship")
--		}
--		if n.Parent == n.PrevSibling {
--			return fmt.Errorf("html: inconsistent parent/prev relationship")
--		}
--		if n.Parent == n.NextSibling {
--			return fmt.Errorf("html: inconsistent parent/next relationship")
--		}
--
--		parentHasNAsAChild := false
--		for c := n.Parent.FirstChild; c != nil; c = c.NextSibling {
--			if c == n {
--				parentHasNAsAChild = true
--				break
--			}
--		}
--		if !parentHasNAsAChild {
--			return fmt.Errorf("html: inconsistent parent/child relationship")
--		}
--	}
--
--	if n.PrevSibling != nil && n.PrevSibling.NextSibling != n {
--		return fmt.Errorf("html: inconsistent prev/next relationship")
--	}
--	if n.NextSibling != nil && n.NextSibling.PrevSibling != n {
--		return fmt.Errorf("html: inconsistent next/prev relationship")
--	}
--
--	if (n.FirstChild == nil) != (n.LastChild == nil) {
--		return fmt.Errorf("html: inconsistent first/last relationship")
--	}
--	if n.FirstChild != nil && n.FirstChild == n.LastChild {
--		// We have a sole child.
--		if n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil {
--			return fmt.Errorf("html: inconsistent sole child's sibling relationship")
--		}
--	}
--
--	seen := map[*Node]bool{}
--
--	var last *Node
--	for c := n.FirstChild; c != nil; c = c.NextSibling {
--		if seen[c] {
--			return fmt.Errorf("html: inconsistent repeated child")
--		}
--		seen[c] = true
--		last = c
--	}
--	if last != n.LastChild {
--		return fmt.Errorf("html: inconsistent last relationship")
--	}
--
--	var first *Node
--	for c := n.LastChild; c != nil; c = c.PrevSibling {
--		if !seen[c] {
--			return fmt.Errorf("html: inconsistent missing child")
--		}
--		delete(seen, c)
--		first = c
--	}
--	if first != n.FirstChild {
--		return fmt.Errorf("html: inconsistent first relationship")
--	}
--
--	if len(seen) != 0 {
--		return fmt.Errorf("html: inconsistent forwards/backwards child list")
--	}
--
--	return nil
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/parse.go b/Godeps/_workspace/src/golang.org/x/net/html/parse.go
-deleted file mode 100644
-index b42a323..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/parse.go
-+++ /dev/null
-@@ -1,2092 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"errors"
--	"fmt"
--	"io"
--	"strings"
--
--	a "golang.org/x/net/html/atom"
--)
--
--// A parser implements the HTML5 parsing algorithm:
--// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#tree-construction
--type parser struct {
--	// tokenizer provides the tokens for the parser.
--	tokenizer *Tokenizer
--	// tok is the most recently read token.
--	tok Token
--	// Self-closing tags like <hr/> are treated as start tags, except that
--	// hasSelfClosingToken is set while they are being processed.
--	hasSelfClosingToken bool
--	// doc is the document root element.
--	doc *Node
--	// The stack of open elements (section 12.2.3.2) and active formatting
--	// elements (section 12.2.3.3).
--	oe, afe nodeStack
--	// Element pointers (section 12.2.3.4).
--	head, form *Node
--	// Other parsing state flags (section 12.2.3.5).
--	scripting, framesetOK bool
--	// im is the current insertion mode.
--	im insertionMode
--	// originalIM is the insertion mode to go back to after completing a text
--	// or inTableText insertion mode.
--	originalIM insertionMode
--	// fosterParenting is whether new elements should be inserted according to
--	// the foster parenting rules (section 12.2.5.3).
--	fosterParenting bool
--	// quirks is whether the parser is operating in "quirks mode."
--	quirks bool
--	// fragment is whether the parser is parsing an HTML fragment.
--	fragment bool
--	// context is the context element when parsing an HTML fragment
--	// (section 12.4).
--	context *Node
--}
--
--func (p *parser) top() *Node {
--	if n := p.oe.top(); n != nil {
--		return n
--	}
--	return p.doc
--}
--
--// Stop tags for use in popUntil. These come from section 12.2.3.2.
--var (
--	defaultScopeStopTags = map[string][]a.Atom{
--		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object},
--		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
--		"svg":  {a.Desc, a.ForeignObject, a.Title},
--	}
--)
--
--type scope int
--
--const (
--	defaultScope scope = iota
--	listItemScope
--	buttonScope
--	tableScope
--	tableRowScope
--	tableBodyScope
--	selectScope
--)
--
--// popUntil pops the stack of open elements at the highest element whose tag
--// is in matchTags, provided there is no higher element in the scope's stop
--// tags (as defined in section 12.2.3.2). It returns whether or not there was
--// such an element. If there was not, popUntil leaves the stack unchanged.
--//
--// For example, the set of stop tags for table scope is: "html", "table". If
--// the stack was:
--// ["html", "body", "font", "table", "b", "i", "u"]
--// then popUntil(tableScope, "font") would return false, but
--// popUntil(tableScope, "i") would return true and the stack would become:
--// ["html", "body", "font", "table", "b"]
--//
--// If an element's tag is in both the stop tags and matchTags, then the stack
--// will be popped and the function returns true (provided, of course, there was
--// no higher element in the stack that was also in the stop tags). For example,
--// popUntil(tableScope, "table") returns true and leaves:
--// ["html", "body", "font"]
--func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
--	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
--		p.oe = p.oe[:i]
--		return true
--	}
--	return false
--}
--
--// indexOfElementInScope returns the index in p.oe of the highest element whose
--// tag is in matchTags that is in scope. If no matching element is in scope, it
--// returns -1.
--func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
--	for i := len(p.oe) - 1; i >= 0; i-- {
--		tagAtom := p.oe[i].DataAtom
--		if p.oe[i].Namespace == "" {
--			for _, t := range matchTags {
--				if t == tagAtom {
--					return i
--				}
--			}
--			switch s {
--			case defaultScope:
--				// No-op.
--			case listItemScope:
--				if tagAtom == a.Ol || tagAtom == a.Ul {
--					return -1
--				}
--			case buttonScope:
--				if tagAtom == a.Button {
--					return -1
--				}
--			case tableScope:
--				if tagAtom == a.Html || tagAtom == a.Table {
--					return -1
--				}
--			case selectScope:
--				if tagAtom != a.Optgroup && tagAtom != a.Option {
--					return -1
--				}
--			default:
--				panic("unreachable")
--			}
--		}
--		switch s {
--		case defaultScope, listItemScope, buttonScope:
--			for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
--				if t == tagAtom {
--					return -1
--				}
--			}
--		}
--	}
--	return -1
--}
--
--// elementInScope is like popUntil, except that it doesn't modify the stack of
--// open elements.
--func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
--	return p.indexOfElementInScope(s, matchTags...) != -1
--}
--
--// clearStackToContext pops elements off the stack of open elements until a
--// scope-defined element is found.
--func (p *parser) clearStackToContext(s scope) {
--	for i := len(p.oe) - 1; i >= 0; i-- {
--		tagAtom := p.oe[i].DataAtom
--		switch s {
--		case tableScope:
--			if tagAtom == a.Html || tagAtom == a.Table {
--				p.oe = p.oe[:i+1]
--				return
--			}
--		case tableRowScope:
--			if tagAtom == a.Html || tagAtom == a.Tr {
--				p.oe = p.oe[:i+1]
--				return
--			}
--		case tableBodyScope:
--			if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead {
--				p.oe = p.oe[:i+1]
--				return
--			}
--		default:
--			panic("unreachable")
--		}
--	}
--}
--
--// generateImpliedEndTags pops nodes off the stack of open elements as long as
--// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt.
--// If exceptions are specified, nodes with that name will not be popped off.
--func (p *parser) generateImpliedEndTags(exceptions ...string) {
--	var i int
--loop:
--	for i = len(p.oe) - 1; i >= 0; i-- {
--		n := p.oe[i]
--		if n.Type == ElementNode {
--			switch n.DataAtom {
--			case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt:
--				for _, except := range exceptions {
--					if n.Data == except {
--						break loop
--					}
--				}
--				continue
--			}
--		}
--		break
--	}
--
--	p.oe = p.oe[:i+1]
--}
--
--// addChild adds a child node n to the top element, and pushes n onto the stack
--// of open elements if it is an element node.
--func (p *parser) addChild(n *Node) {
--	if p.shouldFosterParent() {
--		p.fosterParent(n)
--	} else {
--		p.top().AppendChild(n)
--	}
--
--	if n.Type == ElementNode {
--		p.oe = append(p.oe, n)
--	}
--}
--
--// shouldFosterParent returns whether the next node to be added should be
--// foster parented.
--func (p *parser) shouldFosterParent() bool {
--	if p.fosterParenting {
--		switch p.top().DataAtom {
--		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--			return true
--		}
--	}
--	return false
--}
--
--// fosterParent adds a child node according to the foster parenting rules.
--// Section 12.2.5.3, "foster parenting".
--func (p *parser) fosterParent(n *Node) {
--	var table, parent, prev *Node
--	var i int
--	for i = len(p.oe) - 1; i >= 0; i-- {
--		if p.oe[i].DataAtom == a.Table {
--			table = p.oe[i]
--			break
--		}
--	}
--
--	if table == nil {
--		// The foster parent is the html element.
--		parent = p.oe[0]
--	} else {
--		parent = table.Parent
--	}
--	if parent == nil {
--		parent = p.oe[i-1]
--	}
--
--	if table != nil {
--		prev = table.PrevSibling
--	} else {
--		prev = parent.LastChild
--	}
--	if prev != nil && prev.Type == TextNode && n.Type == TextNode {
--		prev.Data += n.Data
--		return
--	}
--
--	parent.InsertBefore(n, table)
--}
--
--// addText adds text to the preceding node if it is a text node, or else it
--// calls addChild with a new text node.
--func (p *parser) addText(text string) {
--	if text == "" {
--		return
--	}
--
--	if p.shouldFosterParent() {
--		p.fosterParent(&Node{
--			Type: TextNode,
--			Data: text,
--		})
--		return
--	}
--
--	t := p.top()
--	if n := t.LastChild; n != nil && n.Type == TextNode {
--		n.Data += text
--		return
--	}
--	p.addChild(&Node{
--		Type: TextNode,
--		Data: text,
--	})
--}
--
--// addElement adds a child element based on the current token.
--func (p *parser) addElement() {
--	p.addChild(&Node{
--		Type:     ElementNode,
--		DataAtom: p.tok.DataAtom,
--		Data:     p.tok.Data,
--		Attr:     p.tok.Attr,
--	})
--}
--
--// Section 12.2.3.3.
--func (p *parser) addFormattingElement() {
--	tagAtom, attr := p.tok.DataAtom, p.tok.Attr
--	p.addElement()
--
--	// Implement the Noah's Ark clause, but with three per family instead of two.
--	identicalElements := 0
--findIdenticalElements:
--	for i := len(p.afe) - 1; i >= 0; i-- {
--		n := p.afe[i]
--		if n.Type == scopeMarkerNode {
--			break
--		}
--		if n.Type != ElementNode {
--			continue
--		}
--		if n.Namespace != "" {
--			continue
--		}
--		if n.DataAtom != tagAtom {
--			continue
--		}
--		if len(n.Attr) != len(attr) {
--			continue
--		}
--	compareAttributes:
--		for _, t0 := range n.Attr {
--			for _, t1 := range attr {
--				if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
--					// Found a match for this attribute, continue with the next attribute.
--					continue compareAttributes
--				}
--			}
--			// If we get here, there is no attribute that matches a.
--			// Therefore the element is not identical to the new one.
--			continue findIdenticalElements
--		}
--
--		identicalElements++
--		if identicalElements >= 3 {
--			p.afe.remove(n)
--		}
--	}
--
--	p.afe = append(p.afe, p.top())
--}
--
--// Section 12.2.3.3.
--func (p *parser) clearActiveFormattingElements() {
--	for {
--		n := p.afe.pop()
--		if len(p.afe) == 0 || n.Type == scopeMarkerNode {
--			return
--		}
--	}
--}
--
--// Section 12.2.3.3.
--func (p *parser) reconstructActiveFormattingElements() {
--	n := p.afe.top()
--	if n == nil {
--		return
--	}
--	if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
--		return
--	}
--	i := len(p.afe) - 1
--	for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
--		if i == 0 {
--			i = -1
--			break
--		}
--		i--
--		n = p.afe[i]
--	}
--	for {
--		i++
--		clone := p.afe[i].clone()
--		p.addChild(clone)
--		p.afe[i] = clone
--		if i == len(p.afe)-1 {
--			break
--		}
--	}
--}
--
--// Section 12.2.4.
--func (p *parser) acknowledgeSelfClosingTag() {
--	p.hasSelfClosingToken = false
--}
--
--// An insertion mode (section 12.2.3.1) is the state transition function from
--// a particular state in the HTML5 parser's state machine. It updates the
--// parser's fields depending on parser.tok (where ErrorToken means EOF).
--// It returns whether the token was consumed.
--type insertionMode func(*parser) bool
--
--// setOriginalIM sets the insertion mode to return to after completing a text or
--// inTableText insertion mode.
--// Section 12.2.3.1, "using the rules for".
--func (p *parser) setOriginalIM() {
--	if p.originalIM != nil {
--		panic("html: bad parser state: originalIM was set twice")
--	}
--	p.originalIM = p.im
--}
--
--// Section 12.2.3.1, "reset the insertion mode".
--func (p *parser) resetInsertionMode() {
--	for i := len(p.oe) - 1; i >= 0; i-- {
--		n := p.oe[i]
--		if i == 0 && p.context != nil {
--			n = p.context
--		}
--
--		switch n.DataAtom {
--		case a.Select:
--			p.im = inSelectIM
--		case a.Td, a.Th:
--			p.im = inCellIM
--		case a.Tr:
--			p.im = inRowIM
--		case a.Tbody, a.Thead, a.Tfoot:
--			p.im = inTableBodyIM
--		case a.Caption:
--			p.im = inCaptionIM
--		case a.Colgroup:
--			p.im = inColumnGroupIM
--		case a.Table:
--			p.im = inTableIM
--		case a.Head:
--			p.im = inBodyIM
--		case a.Body:
--			p.im = inBodyIM
--		case a.Frameset:
--			p.im = inFramesetIM
--		case a.Html:
--			p.im = beforeHeadIM
--		default:
--			continue
--		}
--		return
--	}
--	p.im = inBodyIM
--}
--
--const whitespace = " \t\r\n\f"
--
--// Section 12.2.5.4.1.
--func initialIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
--		if len(p.tok.Data) == 0 {
--			// It was all whitespace, so ignore it.
--			return true
--		}
--	case CommentToken:
--		p.doc.AppendChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		n, quirks := parseDoctype(p.tok.Data)
--		p.doc.AppendChild(n)
--		p.quirks = quirks
--		p.im = beforeHTMLIM
--		return true
--	}
--	p.quirks = true
--	p.im = beforeHTMLIM
--	return false
--}
--
--// Section 12.2.5.4.2.
--func beforeHTMLIM(p *parser) bool {
--	switch p.tok.Type {
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	case TextToken:
--		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
--		if len(p.tok.Data) == 0 {
--			// It was all whitespace, so ignore it.
--			return true
--		}
--	case StartTagToken:
--		if p.tok.DataAtom == a.Html {
--			p.addElement()
--			p.im = beforeHeadIM
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Head, a.Body, a.Html, a.Br:
--			p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
--			return false
--		default:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.doc.AppendChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	}
--	p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
--	return false
--}
--
--// Section 12.2.5.4.3.
--func beforeHeadIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
--		if len(p.tok.Data) == 0 {
--			// It was all whitespace, so ignore it.
--			return true
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Head:
--			p.addElement()
--			p.head = p.top()
--			p.im = inHeadIM
--			return true
--		case a.Html:
--			return inBodyIM(p)
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Head, a.Body, a.Html, a.Br:
--			p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
--			return false
--		default:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	}
--
--	p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
--	return false
--}
--
--// Section 12.2.5.4.4.
--func inHeadIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		s := strings.TrimLeft(p.tok.Data, whitespace)
--		if len(s) < len(p.tok.Data) {
--			// Add the initial whitespace to the current node.
--			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
--			if s == "" {
--				return true
--			}
--			p.tok.Data = s
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--			return true
--		case a.Script, a.Title, a.Noscript, a.Noframes, a.Style:
--			p.addElement()
--			p.setOriginalIM()
--			p.im = textIM
--			return true
--		case a.Head:
--			// Ignore the token.
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Head:
--			n := p.oe.pop()
--			if n.DataAtom != a.Head {
--				panic("html: bad parser state: <head> element not found, in the in-head insertion mode")
--			}
--			p.im = afterHeadIM
--			return true
--		case a.Body, a.Html, a.Br:
--			p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
--			return false
--		default:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	}
--
--	p.parseImpliedToken(EndTagToken, a.Head, a.Head.String())
--	return false
--}
--
--// Section 12.2.5.4.6.
--func afterHeadIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		s := strings.TrimLeft(p.tok.Data, whitespace)
--		if len(s) < len(p.tok.Data) {
--			// Add the initial whitespace to the current node.
--			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
--			if s == "" {
--				return true
--			}
--			p.tok.Data = s
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Body:
--			p.addElement()
--			p.framesetOK = false
--			p.im = inBodyIM
--			return true
--		case a.Frameset:
--			p.addElement()
--			p.im = inFramesetIM
--			return true
--		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
--			p.oe = append(p.oe, p.head)
--			defer p.oe.remove(p.head)
--			return inHeadIM(p)
--		case a.Head:
--			// Ignore the token.
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Body, a.Html, a.Br:
--			// Drop down to creating an implied <body> tag.
--		default:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	}
--
--	p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
--	p.framesetOK = true
--	return false
--}
--
--// copyAttributes copies attributes of src not found on dst to dst.
--func copyAttributes(dst *Node, src Token) {
--	if len(src.Attr) == 0 {
--		return
--	}
--	attr := map[string]string{}
--	for _, t := range dst.Attr {
--		attr[t.Key] = t.Val
--	}
--	for _, t := range src.Attr {
--		if _, ok := attr[t.Key]; !ok {
--			dst.Attr = append(dst.Attr, t)
--			attr[t.Key] = t.Val
--		}
--	}
--}
--
--// Section 12.2.5.4.7.
--func inBodyIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		d := p.tok.Data
--		switch n := p.oe.top(); n.DataAtom {
--		case a.Pre, a.Listing:
--			if n.FirstChild == nil {
--				// Ignore a newline at the start of a <pre> block.
--				if d != "" && d[0] == '\r' {
--					d = d[1:]
--				}
--				if d != "" && d[0] == '\n' {
--					d = d[1:]
--				}
--			}
--		}
--		d = strings.Replace(d, "\x00", "", -1)
--		if d == "" {
--			return true
--		}
--		p.reconstructActiveFormattingElements()
--		p.addText(d)
--		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
--			// There were non-whitespace characters inserted.
--			p.framesetOK = false
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			copyAttributes(p.oe[0], p.tok)
--		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
--			return inHeadIM(p)
--		case a.Body:
--			if len(p.oe) >= 2 {
--				body := p.oe[1]
--				if body.Type == ElementNode && body.DataAtom == a.Body {
--					p.framesetOK = false
--					copyAttributes(body, p.tok)
--				}
--			}
--		case a.Frameset:
--			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
--				// Ignore the token.
--				return true
--			}
--			body := p.oe[1]
--			if body.Parent != nil {
--				body.Parent.RemoveChild(body)
--			}
--			p.oe = p.oe[:1]
--			p.addElement()
--			p.im = inFramesetIM
--			return true
--		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
--			p.popUntil(buttonScope, a.P)
--			switch n := p.top(); n.DataAtom {
--			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
--				p.oe.pop()
--			}
--			p.addElement()
--		case a.Pre, a.Listing:
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--			// The newline, if any, will be dealt with by the TextToken case.
--			p.framesetOK = false
--		case a.Form:
--			if p.form == nil {
--				p.popUntil(buttonScope, a.P)
--				p.addElement()
--				p.form = p.top()
--			}
--		case a.Li:
--			p.framesetOK = false
--			for i := len(p.oe) - 1; i >= 0; i-- {
--				node := p.oe[i]
--				switch node.DataAtom {
--				case a.Li:
--					p.oe = p.oe[:i]
--				case a.Address, a.Div, a.P:
--					continue
--				default:
--					if !isSpecialElement(node) {
--						continue
--					}
--				}
--				break
--			}
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--		case a.Dd, a.Dt:
--			p.framesetOK = false
--			for i := len(p.oe) - 1; i >= 0; i-- {
--				node := p.oe[i]
--				switch node.DataAtom {
--				case a.Dd, a.Dt:
--					p.oe = p.oe[:i]
--				case a.Address, a.Div, a.P:
--					continue
--				default:
--					if !isSpecialElement(node) {
--						continue
--					}
--				}
--				break
--			}
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--		case a.Plaintext:
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--		case a.Button:
--			p.popUntil(defaultScope, a.Button)
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.framesetOK = false
--		case a.A:
--			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
--				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
--					p.inBodyEndTagFormatting(a.A)
--					p.oe.remove(n)
--					p.afe.remove(n)
--					break
--				}
--			}
--			p.reconstructActiveFormattingElements()
--			p.addFormattingElement()
--		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
--			p.reconstructActiveFormattingElements()
--			p.addFormattingElement()
--		case a.Nobr:
--			p.reconstructActiveFormattingElements()
--			if p.elementInScope(defaultScope, a.Nobr) {
--				p.inBodyEndTagFormatting(a.Nobr)
--				p.reconstructActiveFormattingElements()
--			}
--			p.addFormattingElement()
--		case a.Applet, a.Marquee, a.Object:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.afe = append(p.afe, &scopeMarker)
--			p.framesetOK = false
--		case a.Table:
--			if !p.quirks {
--				p.popUntil(buttonScope, a.P)
--			}
--			p.addElement()
--			p.framesetOK = false
--			p.im = inTableIM
--			return true
--		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--			if p.tok.DataAtom == a.Input {
--				for _, t := range p.tok.Attr {
--					if t.Key == "type" {
--						if strings.ToLower(t.Val) == "hidden" {
--							// Skip setting framesetOK = false
--							return true
--						}
--					}
--				}
--			}
--			p.framesetOK = false
--		case a.Param, a.Source, a.Track:
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--		case a.Hr:
--			p.popUntil(buttonScope, a.P)
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--			p.framesetOK = false
--		case a.Image:
--			p.tok.DataAtom = a.Img
--			p.tok.Data = a.Img.String()
--			return false
--		case a.Isindex:
--			if p.form != nil {
--				// Ignore the token.
--				return true
--			}
--			action := ""
--			prompt := "This is a searchable index. Enter search keywords: "
--			attr := []Attribute{{Key: "name", Val: "isindex"}}
--			for _, t := range p.tok.Attr {
--				switch t.Key {
--				case "action":
--					action = t.Val
--				case "name":
--					// Ignore the attribute.
--				case "prompt":
--					prompt = t.Val
--				default:
--					attr = append(attr, t)
--				}
--			}
--			p.acknowledgeSelfClosingTag()
--			p.popUntil(buttonScope, a.P)
--			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
--			if action != "" {
--				p.form.Attr = []Attribute{{Key: "action", Val: action}}
--			}
--			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
--			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
--			p.addText(prompt)
--			p.addChild(&Node{
--				Type:     ElementNode,
--				DataAtom: a.Input,
--				Data:     a.Input.String(),
--				Attr:     attr,
--			})
--			p.oe.pop()
--			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
--			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
--			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
--		case a.Textarea:
--			p.addElement()
--			p.setOriginalIM()
--			p.framesetOK = false
--			p.im = textIM
--		case a.Xmp:
--			p.popUntil(buttonScope, a.P)
--			p.reconstructActiveFormattingElements()
--			p.framesetOK = false
--			p.addElement()
--			p.setOriginalIM()
--			p.im = textIM
--		case a.Iframe:
--			p.framesetOK = false
--			p.addElement()
--			p.setOriginalIM()
--			p.im = textIM
--		case a.Noembed, a.Noscript:
--			p.addElement()
--			p.setOriginalIM()
--			p.im = textIM
--		case a.Select:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.framesetOK = false
--			p.im = inSelectIM
--			return true
--		case a.Optgroup, a.Option:
--			if p.top().DataAtom == a.Option {
--				p.oe.pop()
--			}
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--		case a.Rp, a.Rt:
--			if p.elementInScope(defaultScope, a.Ruby) {
--				p.generateImpliedEndTags()
--			}
--			p.addElement()
--		case a.Math, a.Svg:
--			p.reconstructActiveFormattingElements()
--			if p.tok.DataAtom == a.Math {
--				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
--			} else {
--				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
--			}
--			adjustForeignAttributes(p.tok.Attr)
--			p.addElement()
--			p.top().Namespace = p.tok.Data
--			if p.hasSelfClosingToken {
--				p.oe.pop()
--				p.acknowledgeSelfClosingTag()
--			}
--			return true
--		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
--			// Ignore the token.
--		default:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Body:
--			if p.elementInScope(defaultScope, a.Body) {
--				p.im = afterBodyIM
--			}
--		case a.Html:
--			if p.elementInScope(defaultScope, a.Body) {
--				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
--				return false
--			}
--			return true
--		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
--			p.popUntil(defaultScope, p.tok.DataAtom)
--		case a.Form:
--			node := p.form
--			p.form = nil
--			i := p.indexOfElementInScope(defaultScope, a.Form)
--			if node == nil || i == -1 || p.oe[i] != node {
--				// Ignore the token.
--				return true
--			}
--			p.generateImpliedEndTags()
--			p.oe.remove(node)
--		case a.P:
--			if !p.elementInScope(buttonScope, a.P) {
--				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
--			}
--			p.popUntil(buttonScope, a.P)
--		case a.Li:
--			p.popUntil(listItemScope, a.Li)
--		case a.Dd, a.Dt:
--			p.popUntil(defaultScope, p.tok.DataAtom)
--		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
--			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
--		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
--			p.inBodyEndTagFormatting(p.tok.DataAtom)
--		case a.Applet, a.Marquee, a.Object:
--			if p.popUntil(defaultScope, p.tok.DataAtom) {
--				p.clearActiveFormattingElements()
--			}
--		case a.Br:
--			p.tok.Type = StartTagToken
--			return false
--		default:
--			p.inBodyEndTagOther(p.tok.DataAtom)
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	}
--
--	return true
--}
--
--func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
--	// This is the "adoption agency" algorithm, described at
--	// http://www.whatwg.org/specs/web-apps/current-work/multipage/tokenization.html#adoptionAgency
--
--	// TODO: this is a fairly literal line-by-line translation of that algorithm.
--	// Once the code successfully parses the comprehensive test suite, we should
--	// refactor this code to be more idiomatic.
--
--	// Steps 1-3. The outer loop.
--	for i := 0; i < 8; i++ {
--		// Step 4. Find the formatting element.
--		var formattingElement *Node
--		for j := len(p.afe) - 1; j >= 0; j-- {
--			if p.afe[j].Type == scopeMarkerNode {
--				break
--			}
--			if p.afe[j].DataAtom == tagAtom {
--				formattingElement = p.afe[j]
--				break
--			}
--		}
--		if formattingElement == nil {
--			p.inBodyEndTagOther(tagAtom)
--			return
--		}
--		feIndex := p.oe.index(formattingElement)
--		if feIndex == -1 {
--			p.afe.remove(formattingElement)
--			return
--		}
--		if !p.elementInScope(defaultScope, tagAtom) {
--			// Ignore the tag.
--			return
--		}
--
--		// Steps 5-6. Find the furthest block.
--		var furthestBlock *Node
--		for _, e := range p.oe[feIndex:] {
--			if isSpecialElement(e) {
--				furthestBlock = e
--				break
--			}
--		}
--		if furthestBlock == nil {
--			e := p.oe.pop()
--			for e != formattingElement {
--				e = p.oe.pop()
--			}
--			p.afe.remove(e)
--			return
--		}
--
--		// Steps 7-8. Find the common ancestor and bookmark node.
--		commonAncestor := p.oe[feIndex-1]
--		bookmark := p.afe.index(formattingElement)
--
--		// Step 9. The inner loop. Find the lastNode to reparent.
--		lastNode := furthestBlock
--		node := furthestBlock
--		x := p.oe.index(node)
--		// Steps 9.1-9.3.
--		for j := 0; j < 3; j++ {
--			// Step 9.4.
--			x--
--			node = p.oe[x]
--			// Step 9.5.
--			if p.afe.index(node) == -1 {
--				p.oe.remove(node)
--				continue
--			}
--			// Step 9.6.
--			if node == formattingElement {
--				break
--			}
--			// Step 9.7.
--			clone := node.clone()
--			p.afe[p.afe.index(node)] = clone
--			p.oe[p.oe.index(node)] = clone
--			node = clone
--			// Step 9.8.
--			if lastNode == furthestBlock {
--				bookmark = p.afe.index(node) + 1
--			}
--			// Step 9.9.
--			if lastNode.Parent != nil {
--				lastNode.Parent.RemoveChild(lastNode)
--			}
--			node.AppendChild(lastNode)
--			// Step 9.10.
--			lastNode = node
--		}
--
--		// Step 10. Reparent lastNode to the common ancestor,
--		// or for misnested table nodes, to the foster parent.
--		if lastNode.Parent != nil {
--			lastNode.Parent.RemoveChild(lastNode)
--		}
--		switch commonAncestor.DataAtom {
--		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--			p.fosterParent(lastNode)
--		default:
--			commonAncestor.AppendChild(lastNode)
--		}
--
--		// Steps 11-13. Reparent nodes from the furthest block's children
--		// to a clone of the formatting element.
--		clone := formattingElement.clone()
--		reparentChildren(clone, furthestBlock)
--		furthestBlock.AppendChild(clone)
--
--		// Step 14. Fix up the list of active formatting elements.
--		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
--			// Move the bookmark with the rest of the list.
--			bookmark--
--		}
--		p.afe.remove(formattingElement)
--		p.afe.insert(bookmark, clone)
--
--		// Step 15. Fix up the stack of open elements.
--		p.oe.remove(formattingElement)
--		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
--	}
--}
--
--// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
--func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
--	for i := len(p.oe) - 1; i >= 0; i-- {
--		if p.oe[i].DataAtom == tagAtom {
--			p.oe = p.oe[:i]
--			break
--		}
--		if isSpecialElement(p.oe[i]) {
--			break
--		}
--	}
--}
--
--// Section 12.2.5.4.8.
--func textIM(p *parser) bool {
--	switch p.tok.Type {
--	case ErrorToken:
--		p.oe.pop()
--	case TextToken:
--		d := p.tok.Data
--		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
--			// Ignore a newline at the start of a <textarea> block.
--			if d != "" && d[0] == '\r' {
--				d = d[1:]
--			}
--			if d != "" && d[0] == '\n' {
--				d = d[1:]
--			}
--		}
--		if d == "" {
--			return true
--		}
--		p.addText(d)
--		return true
--	case EndTagToken:
--		p.oe.pop()
--	}
--	p.im = p.originalIM
--	p.originalIM = nil
--	return p.tok.Type == EndTagToken
--}
--
--// Section 12.2.5.4.9.
--func inTableIM(p *parser) bool {
--	switch p.tok.Type {
--	case ErrorToken:
--		// Stop parsing.
--		return true
--	case TextToken:
--		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
--		switch p.oe.top().DataAtom {
--		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--			if strings.Trim(p.tok.Data, whitespace) == "" {
--				p.addText(p.tok.Data)
--				return true
--			}
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Caption:
--			p.clearStackToContext(tableScope)
--			p.afe = append(p.afe, &scopeMarker)
--			p.addElement()
--			p.im = inCaptionIM
--			return true
--		case a.Colgroup:
--			p.clearStackToContext(tableScope)
--			p.addElement()
--			p.im = inColumnGroupIM
--			return true
--		case a.Col:
--			p.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())
--			return false
--		case a.Tbody, a.Tfoot, a.Thead:
--			p.clearStackToContext(tableScope)
--			p.addElement()
--			p.im = inTableBodyIM
--			return true
--		case a.Td, a.Th, a.Tr:
--			p.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())
--			return false
--		case a.Table:
--			if p.popUntil(tableScope, a.Table) {
--				p.resetInsertionMode()
--				return false
--			}
--			// Ignore the token.
--			return true
--		case a.Style, a.Script:
--			return inHeadIM(p)
--		case a.Input:
--			for _, t := range p.tok.Attr {
--				if t.Key == "type" && strings.ToLower(t.Val) == "hidden" {
--					p.addElement()
--					p.oe.pop()
--					return true
--				}
--			}
--			// Otherwise drop down to the default action.
--		case a.Form:
--			if p.form != nil {
--				// Ignore the token.
--				return true
--			}
--			p.addElement()
--			p.form = p.oe.pop()
--		case a.Select:
--			p.reconstructActiveFormattingElements()
--			switch p.top().DataAtom {
--			case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--				p.fosterParenting = true
--			}
--			p.addElement()
--			p.fosterParenting = false
--			p.framesetOK = false
--			p.im = inSelectInTableIM
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Table:
--			if p.popUntil(tableScope, a.Table) {
--				p.resetInsertionMode()
--				return true
--			}
--			// Ignore the token.
--			return true
--		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	}
--
--	p.fosterParenting = true
--	defer func() { p.fosterParenting = false }()
--
--	return inBodyIM(p)
--}
--
--// Section 12.2.5.4.11.
--func inCaptionIM(p *parser) bool {
--	switch p.tok.Type {
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
--			if p.popUntil(tableScope, a.Caption) {
--				p.clearActiveFormattingElements()
--				p.im = inTableIM
--				return false
--			} else {
--				// Ignore the token.
--				return true
--			}
--		case a.Select:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.framesetOK = false
--			p.im = inSelectInTableIM
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Caption:
--			if p.popUntil(tableScope, a.Caption) {
--				p.clearActiveFormattingElements()
--				p.im = inTableIM
--			}
--			return true
--		case a.Table:
--			if p.popUntil(tableScope, a.Caption) {
--				p.clearActiveFormattingElements()
--				p.im = inTableIM
--				return false
--			} else {
--				// Ignore the token.
--				return true
--			}
--		case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
--			// Ignore the token.
--			return true
--		}
--	}
--	return inBodyIM(p)
--}
--
--// Section 12.2.5.4.12.
--func inColumnGroupIM(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		s := strings.TrimLeft(p.tok.Data, whitespace)
--		if len(s) < len(p.tok.Data) {
--			// Add the initial whitespace to the current node.
--			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
--			if s == "" {
--				return true
--			}
--			p.tok.Data = s
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Col:
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Colgroup:
--			if p.oe.top().DataAtom != a.Html {
--				p.oe.pop()
--				p.im = inTableIM
--			}
--			return true
--		case a.Col:
--			// Ignore the token.
--			return true
--		}
--	}
--	if p.oe.top().DataAtom != a.Html {
--		p.oe.pop()
--		p.im = inTableIM
--		return false
--	}
--	return true
--}
--
--// Section 12.2.5.4.13.
--func inTableBodyIM(p *parser) bool {
--	switch p.tok.Type {
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Tr:
--			p.clearStackToContext(tableBodyScope)
--			p.addElement()
--			p.im = inRowIM
--			return true
--		case a.Td, a.Th:
--			p.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())
--			return false
--		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:
--			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
--				p.im = inTableIM
--				return false
--			}
--			// Ignore the token.
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Tbody, a.Tfoot, a.Thead:
--			if p.elementInScope(tableScope, p.tok.DataAtom) {
--				p.clearStackToContext(tableBodyScope)
--				p.oe.pop()
--				p.im = inTableIM
--			}
--			return true
--		case a.Table:
--			if p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {
--				p.im = inTableIM
--				return false
--			}
--			// Ignore the token.
--			return true
--		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:
--			// Ignore the token.
--			return true
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	}
--
--	return inTableIM(p)
--}
--
--// Section 12.2.5.4.14.
--func inRowIM(p *parser) bool {
--	switch p.tok.Type {
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Td, a.Th:
--			p.clearStackToContext(tableRowScope)
--			p.addElement()
--			p.afe = append(p.afe, &scopeMarker)
--			p.im = inCellIM
--			return true
--		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--			if p.popUntil(tableScope, a.Tr) {
--				p.im = inTableBodyIM
--				return false
--			}
--			// Ignore the token.
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Tr:
--			if p.popUntil(tableScope, a.Tr) {
--				p.im = inTableBodyIM
--				return true
--			}
--			// Ignore the token.
--			return true
--		case a.Table:
--			if p.popUntil(tableScope, a.Tr) {
--				p.im = inTableBodyIM
--				return false
--			}
--			// Ignore the token.
--			return true
--		case a.Tbody, a.Tfoot, a.Thead:
--			if p.elementInScope(tableScope, p.tok.DataAtom) {
--				p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
--				return false
--			}
--			// Ignore the token.
--			return true
--		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:
--			// Ignore the token.
--			return true
--		}
--	}
--
--	return inTableIM(p)
--}
--
--// Section 12.2.5.4.15.
--func inCellIM(p *parser) bool {
--	switch p.tok.Type {
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
--			if p.popUntil(tableScope, a.Td, a.Th) {
--				// Close the cell and reprocess.
--				p.clearActiveFormattingElements()
--				p.im = inRowIM
--				return false
--			}
--			// Ignore the token.
--			return true
--		case a.Select:
--			p.reconstructActiveFormattingElements()
--			p.addElement()
--			p.framesetOK = false
--			p.im = inSelectInTableIM
--			return true
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Td, a.Th:
--			if !p.popUntil(tableScope, p.tok.DataAtom) {
--				// Ignore the token.
--				return true
--			}
--			p.clearActiveFormattingElements()
--			p.im = inRowIM
--			return true
--		case a.Body, a.Caption, a.Col, a.Colgroup, a.Html:
--			// Ignore the token.
--			return true
--		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
--			if !p.elementInScope(tableScope, p.tok.DataAtom) {
--				// Ignore the token.
--				return true
--			}
--			// Close the cell and reprocess.
--			p.popUntil(tableScope, a.Td, a.Th)
--			p.clearActiveFormattingElements()
--			p.im = inRowIM
--			return false
--		}
--	}
--	return inBodyIM(p)
--}
--
--// Section 12.2.5.4.16.
--func inSelectIM(p *parser) bool {
--	switch p.tok.Type {
--	case ErrorToken:
--		// Stop parsing.
--		return true
--	case TextToken:
--		p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Option:
--			if p.top().DataAtom == a.Option {
--				p.oe.pop()
--			}
--			p.addElement()
--		case a.Optgroup:
--			if p.top().DataAtom == a.Option {
--				p.oe.pop()
--			}
--			if p.top().DataAtom == a.Optgroup {
--				p.oe.pop()
--			}
--			p.addElement()
--		case a.Select:
--			p.tok.Type = EndTagToken
--			return false
--		case a.Input, a.Keygen, a.Textarea:
--			if p.elementInScope(selectScope, a.Select) {
--				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
--				return false
--			}
--			// In order to properly ignore <textarea>, we need to change the tokenizer mode.
--			p.tokenizer.NextIsNotRawText()
--			// Ignore the token.
--			return true
--		case a.Script:
--			return inHeadIM(p)
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Option:
--			if p.top().DataAtom == a.Option {
--				p.oe.pop()
--			}
--		case a.Optgroup:
--			i := len(p.oe) - 1
--			if p.oe[i].DataAtom == a.Option {
--				i--
--			}
--			if p.oe[i].DataAtom == a.Optgroup {
--				p.oe = p.oe[:i]
--			}
--		case a.Select:
--			if p.popUntil(selectScope, a.Select) {
--				p.resetInsertionMode()
--			}
--		}
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	case DoctypeToken:
--		// Ignore the token.
--		return true
--	}
--
--	return true
--}
--
--// Section 12.2.5.4.17.
--func inSelectInTableIM(p *parser) bool {
--	switch p.tok.Type {
--	case StartTagToken, EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:
--			if p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {
--				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
--				return false
--			} else {
--				// Ignore the token.
--				return true
--			}
--		}
--	}
--	return inSelectIM(p)
--}
--
--// Section 12.2.5.4.18.
--func afterBodyIM(p *parser) bool {
--	switch p.tok.Type {
--	case ErrorToken:
--		// Stop parsing.
--		return true
--	case TextToken:
--		s := strings.TrimLeft(p.tok.Data, whitespace)
--		if len(s) == 0 {
--			// It was all whitespace.
--			return inBodyIM(p)
--		}
--	case StartTagToken:
--		if p.tok.DataAtom == a.Html {
--			return inBodyIM(p)
--		}
--	case EndTagToken:
--		if p.tok.DataAtom == a.Html {
--			if !p.fragment {
--				p.im = afterAfterBodyIM
--			}
--			return true
--		}
--	case CommentToken:
--		// The comment is attached to the <html> element.
--		if len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {
--			panic("html: bad parser state: <html> element not found, in the after-body insertion mode")
--		}
--		p.oe[0].AppendChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	}
--	p.im = inBodyIM
--	return false
--}
--
--// Section 12.2.5.4.19.
--func inFramesetIM(p *parser) bool {
--	switch p.tok.Type {
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	case TextToken:
--		// Ignore all text but whitespace.
--		s := strings.Map(func(c rune) rune {
--			switch c {
--			case ' ', '\t', '\n', '\f', '\r':
--				return c
--			}
--			return -1
--		}, p.tok.Data)
--		if s != "" {
--			p.addText(s)
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Frameset:
--			p.addElement()
--		case a.Frame:
--			p.addElement()
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--		case a.Noframes:
--			return inHeadIM(p)
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Frameset:
--			if p.oe.top().DataAtom != a.Html {
--				p.oe.pop()
--				if p.oe.top().DataAtom != a.Frameset {
--					p.im = afterFramesetIM
--					return true
--				}
--			}
--		}
--	default:
--		// Ignore the token.
--	}
--	return true
--}
--
--// Section 12.2.5.4.20.
--func afterFramesetIM(p *parser) bool {
--	switch p.tok.Type {
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	case TextToken:
--		// Ignore all text but whitespace.
--		s := strings.Map(func(c rune) rune {
--			switch c {
--			case ' ', '\t', '\n', '\f', '\r':
--				return c
--			}
--			return -1
--		}, p.tok.Data)
--		if s != "" {
--			p.addText(s)
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Noframes:
--			return inHeadIM(p)
--		}
--	case EndTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			p.im = afterAfterFramesetIM
--			return true
--		}
--	default:
--		// Ignore the token.
--	}
--	return true
--}
--
--// Section 12.2.5.4.21.
--func afterAfterBodyIM(p *parser) bool {
--	switch p.tok.Type {
--	case ErrorToken:
--		// Stop parsing.
--		return true
--	case TextToken:
--		s := strings.TrimLeft(p.tok.Data, whitespace)
--		if len(s) == 0 {
--			// It was all whitespace.
--			return inBodyIM(p)
--		}
--	case StartTagToken:
--		if p.tok.DataAtom == a.Html {
--			return inBodyIM(p)
--		}
--	case CommentToken:
--		p.doc.AppendChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--		return true
--	case DoctypeToken:
--		return inBodyIM(p)
--	}
--	p.im = inBodyIM
--	return false
--}
--
--// Section 12.2.5.4.22.
--func afterAfterFramesetIM(p *parser) bool {
--	switch p.tok.Type {
--	case CommentToken:
--		p.doc.AppendChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	case TextToken:
--		// Ignore all text but whitespace.
--		s := strings.Map(func(c rune) rune {
--			switch c {
--			case ' ', '\t', '\n', '\f', '\r':
--				return c
--			}
--			return -1
--		}, p.tok.Data)
--		if s != "" {
--			p.tok.Data = s
--			return inBodyIM(p)
--		}
--	case StartTagToken:
--		switch p.tok.DataAtom {
--		case a.Html:
--			return inBodyIM(p)
--		case a.Noframes:
--			return inHeadIM(p)
--		}
--	case DoctypeToken:
--		return inBodyIM(p)
--	default:
--		// Ignore the token.
--	}
--	return true
--}
--
--const whitespaceOrNUL = whitespace + "\x00"
--
--// Section 12.2.5.5.
--func parseForeignContent(p *parser) bool {
--	switch p.tok.Type {
--	case TextToken:
--		if p.framesetOK {
--			p.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == ""
--		}
--		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "\ufffd", -1)
--		p.addText(p.tok.Data)
--	case CommentToken:
--		p.addChild(&Node{
--			Type: CommentNode,
--			Data: p.tok.Data,
--		})
--	case StartTagToken:
--		b := breakout[p.tok.Data]
--		if p.tok.DataAtom == a.Font {
--		loop:
--			for _, attr := range p.tok.Attr {
--				switch attr.Key {
--				case "color", "face", "size":
--					b = true
--					break loop
--				}
--			}
--		}
--		if b {
--			for i := len(p.oe) - 1; i >= 0; i-- {
--				n := p.oe[i]
--				if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
--					p.oe = p.oe[:i+1]
--					break
--				}
--			}
--			return false
--		}
--		switch p.top().Namespace {
--		case "math":
--			adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
--		case "svg":
--			// Adjust SVG tag names. The tokenizer lower-cases tag names, but
--			// SVG wants e.g. "foreignObject" with a capital second "O".
--			if x := svgTagNameAdjustments[p.tok.Data]; x != "" {
--				p.tok.DataAtom = a.Lookup([]byte(x))
--				p.tok.Data = x
--			}
--			adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
--		default:
--			panic("html: bad parser state: unexpected namespace")
--		}
--		adjustForeignAttributes(p.tok.Attr)
--		namespace := p.top().Namespace
--		p.addElement()
--		p.top().Namespace = namespace
--		if namespace != "" {
--			// Don't let the tokenizer go into raw text mode in foreign content
--			// (e.g. in an SVG <title> tag).
--			p.tokenizer.NextIsNotRawText()
--		}
--		if p.hasSelfClosingToken {
--			p.oe.pop()
--			p.acknowledgeSelfClosingTag()
--		}
--	case EndTagToken:
--		for i := len(p.oe) - 1; i >= 0; i-- {
--			if p.oe[i].Namespace == "" {
--				return p.im(p)
--			}
--			if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
--				p.oe = p.oe[:i]
--				break
--			}
--		}
--		return true
--	default:
--		// Ignore the token.
--	}
--	return true
--}
--
--// Section 12.2.5.
--func (p *parser) inForeignContent() bool {
--	if len(p.oe) == 0 {
--		return false
--	}
--	n := p.oe[len(p.oe)-1]
--	if n.Namespace == "" {
--		return false
--	}
--	if mathMLTextIntegrationPoint(n) {
--		if p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {
--			return false
--		}
--		if p.tok.Type == TextToken {
--			return false
--		}
--	}
--	if n.Namespace == "math" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {
--		return false
--	}
--	if htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {
--		return false
--	}
--	if p.tok.Type == ErrorToken {
--		return false
--	}
--	return true
--}
--
--// parseImpliedToken parses a token as though it had appeared in the parser's
--// input.
--func (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {
--	realToken, selfClosing := p.tok, p.hasSelfClosingToken
--	p.tok = Token{
--		Type:     t,
--		DataAtom: dataAtom,
--		Data:     data,
--	}
--	p.hasSelfClosingToken = false
--	p.parseCurrentToken()
--	p.tok, p.hasSelfClosingToken = realToken, selfClosing
--}
--
--// parseCurrentToken runs the current token through the parsing routines
--// until it is consumed.
--func (p *parser) parseCurrentToken() {
--	if p.tok.Type == SelfClosingTagToken {
--		p.hasSelfClosingToken = true
--		p.tok.Type = StartTagToken
--	}
--
--	consumed := false
--	for !consumed {
--		if p.inForeignContent() {
--			consumed = parseForeignContent(p)
--		} else {
--			consumed = p.im(p)
--		}
--	}
--
--	if p.hasSelfClosingToken {
--		// This is a parse error, but ignore it.
--		p.hasSelfClosingToken = false
--	}
--}
--
--func (p *parser) parse() error {
--	// Iterate until EOF. Any other error will cause an early return.
--	var err error
--	for err != io.EOF {
--		// CDATA sections are allowed only in foreign content.
--		n := p.oe.top()
--		p.tokenizer.AllowCDATA(n != nil && n.Namespace != "")
--		// Read and parse the next token.
--		p.tokenizer.Next()
--		p.tok = p.tokenizer.Token()
--		if p.tok.Type == ErrorToken {
--			err = p.tokenizer.Err()
--			if err != nil && err != io.EOF {
--				return err
--			}
--		}
--		p.parseCurrentToken()
--	}
--	return nil
--}
--
--// Parse returns the parse tree for the HTML from the given Reader.
--// The input is assumed to be UTF-8 encoded.
--func Parse(r io.Reader) (*Node, error) {
--	p := &parser{
--		tokenizer: NewTokenizer(r),
--		doc: &Node{
--			Type: DocumentNode,
--		},
--		scripting:  true,
--		framesetOK: true,
--		im:         initialIM,
--	}
--	err := p.parse()
--	if err != nil {
--		return nil, err
--	}
--	return p.doc, nil
--}
--
--// ParseFragment parses a fragment of HTML and returns the nodes that were
--// found. If the fragment is the InnerHTML for an existing element, pass that
--// element in context.
--func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
--	contextTag := ""
--	if context != nil {
--		if context.Type != ElementNode {
--			return nil, errors.New("html: ParseFragment of non-element Node")
--		}
--		// The next check isn't just context.DataAtom.String() == context.Data because
--		// it is valid to pass an element whose tag isn't a known atom. For example,
--		// DataAtom == 0 and Data = "tagfromthefuture" is perfectly consistent.
--		if context.DataAtom != a.Lookup([]byte(context.Data)) {
--			return nil, fmt.Errorf("html: inconsistent Node: DataAtom=%q, Data=%q", context.DataAtom, context.Data)
--		}
--		contextTag = context.DataAtom.String()
--	}
--	p := &parser{
--		tokenizer: NewTokenizerFragment(r, contextTag),
--		doc: &Node{
--			Type: DocumentNode,
--		},
--		scripting: true,
--		fragment:  true,
--		context:   context,
--	}
--
--	root := &Node{
--		Type:     ElementNode,
--		DataAtom: a.Html,
--		Data:     a.Html.String(),
--	}
--	p.doc.AppendChild(root)
--	p.oe = nodeStack{root}
--	p.resetInsertionMode()
--
--	for n := context; n != nil; n = n.Parent {
--		if n.Type == ElementNode && n.DataAtom == a.Form {
--			p.form = n
--			break
--		}
--	}
--
--	err := p.parse()
--	if err != nil {
--		return nil, err
--	}
--
--	parent := p.doc
--	if context != nil {
--		parent = root
--	}
--
--	var result []*Node
--	for c := parent.FirstChild; c != nil; {
--		next := c.NextSibling
--		parent.RemoveChild(c)
--		result = append(result, c)
--		c = next
--	}
--	return result, nil
--}
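
Aside on the code being removed above: Parse and ParseFragment, whose definitions end here, are the public entry points of the vendored golang.org/x/net/html package. A minimal usage sketch of that API (illustrative only, not part of this patch; the sample input string is invented):

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Parse assumes UTF-8 input and returns the root *html.Node.
	doc, err := html.Parse(strings.NewReader("<p>Hello, <b>world</b></p>"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Walk the tree depth-first and print element names.
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode {
			fmt.Println(n.Data)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
}
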
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/parse_test.go b/Godeps/_workspace/src/golang.org/x/net/html/parse_test.go
-deleted file mode 100644
-index 7e47d11..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/parse_test.go
-+++ /dev/null
-@@ -1,388 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bufio"
--	"bytes"
--	"errors"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"os"
--	"path/filepath"
--	"runtime"
--	"sort"
--	"strings"
--	"testing"
--
--	"golang.org/x/net/html/atom"
--)
--
--// readParseTest reads a single test case from r.
--func readParseTest(r *bufio.Reader) (text, want, context string, err error) {
--	line, err := r.ReadSlice('\n')
--	if err != nil {
--		return "", "", "", err
--	}
--	var b []byte
--
--	// Read the HTML.
--	if string(line) != "#data\n" {
--		return "", "", "", fmt.Errorf(`got %q want "#data\n"`, line)
--	}
--	for {
--		line, err = r.ReadSlice('\n')
--		if err != nil {
--			return "", "", "", err
--		}
--		if line[0] == '#' {
--			break
--		}
--		b = append(b, line...)
--	}
--	text = strings.TrimSuffix(string(b), "\n")
--	b = b[:0]
--
--	// Skip the error list.
--	if string(line) != "#errors\n" {
--		return "", "", "", fmt.Errorf(`got %q want "#errors\n"`, line)
--	}
--	for {
--		line, err = r.ReadSlice('\n')
--		if err != nil {
--			return "", "", "", err
--		}
--		if line[0] == '#' {
--			break
--		}
--	}
--
--	if string(line) == "#document-fragment\n" {
--		line, err = r.ReadSlice('\n')
--		if err != nil {
--			return "", "", "", err
--		}
--		context = strings.TrimSpace(string(line))
--		line, err = r.ReadSlice('\n')
--		if err != nil {
--			return "", "", "", err
--		}
--	}
--
--	// Read the dump of what the parse tree should be.
--	if string(line) != "#document\n" {
--		return "", "", "", fmt.Errorf(`got %q want "#document\n"`, line)
--	}
--	inQuote := false
--	for {
--		line, err = r.ReadSlice('\n')
--		if err != nil && err != io.EOF {
--			return "", "", "", err
--		}
--		trimmed := bytes.Trim(line, "| \n")
--		if len(trimmed) > 0 {
--			if line[0] == '|' && trimmed[0] == '"' {
--				inQuote = true
--			}
--			if trimmed[len(trimmed)-1] == '"' && !(line[0] == '|' && len(trimmed) == 1) {
--				inQuote = false
--			}
--		}
--		if len(line) == 0 || len(line) == 1 && line[0] == '\n' && !inQuote {
--			break
--		}
--		b = append(b, line...)
--	}
--	return text, string(b), context, nil
--}
--
--func dumpIndent(w io.Writer, level int) {
--	io.WriteString(w, "| ")
--	for i := 0; i < level; i++ {
--		io.WriteString(w, "  ")
--	}
--}
--
--type sortedAttributes []Attribute
--
--func (a sortedAttributes) Len() int {
--	return len(a)
--}
--
--func (a sortedAttributes) Less(i, j int) bool {
--	if a[i].Namespace != a[j].Namespace {
--		return a[i].Namespace < a[j].Namespace
--	}
--	return a[i].Key < a[j].Key
--}
--
--func (a sortedAttributes) Swap(i, j int) {
--	a[i], a[j] = a[j], a[i]
--}
--
--func dumpLevel(w io.Writer, n *Node, level int) error {
--	dumpIndent(w, level)
--	switch n.Type {
--	case ErrorNode:
--		return errors.New("unexpected ErrorNode")
--	case DocumentNode:
--		return errors.New("unexpected DocumentNode")
--	case ElementNode:
--		if n.Namespace != "" {
--			fmt.Fprintf(w, "<%s %s>", n.Namespace, n.Data)
--		} else {
--			fmt.Fprintf(w, "<%s>", n.Data)
--		}
--		attr := sortedAttributes(n.Attr)
--		sort.Sort(attr)
--		for _, a := range attr {
--			io.WriteString(w, "\n")
--			dumpIndent(w, level+1)
--			if a.Namespace != "" {
--				fmt.Fprintf(w, `%s %s="%s"`, a.Namespace, a.Key, a.Val)
--			} else {
--				fmt.Fprintf(w, `%s="%s"`, a.Key, a.Val)
--			}
--		}
--	case TextNode:
--		fmt.Fprintf(w, `"%s"`, n.Data)
--	case CommentNode:
--		fmt.Fprintf(w, "<!-- %s -->", n.Data)
--	case DoctypeNode:
--		fmt.Fprintf(w, "<!DOCTYPE %s", n.Data)
--		if n.Attr != nil {
--			var p, s string
--			for _, a := range n.Attr {
--				switch a.Key {
--				case "public":
--					p = a.Val
--				case "system":
--					s = a.Val
--				}
--			}
--			if p != "" || s != "" {
--				fmt.Fprintf(w, ` "%s"`, p)
--				fmt.Fprintf(w, ` "%s"`, s)
--			}
--		}
--		io.WriteString(w, ">")
--	case scopeMarkerNode:
--		return errors.New("unexpected scopeMarkerNode")
--	default:
--		return errors.New("unknown node type")
--	}
--	io.WriteString(w, "\n")
--	for c := n.FirstChild; c != nil; c = c.NextSibling {
--		if err := dumpLevel(w, c, level+1); err != nil {
--			return err
--		}
--	}
--	return nil
--}
--
--func dump(n *Node) (string, error) {
--	if n == nil || n.FirstChild == nil {
--		return "", nil
--	}
--	var b bytes.Buffer
--	for c := n.FirstChild; c != nil; c = c.NextSibling {
--		if err := dumpLevel(&b, c, 0); err != nil {
--			return "", err
--		}
--	}
--	return b.String(), nil
--}
--
--const testDataDir = "testdata/webkit/"
--
--func TestParser(t *testing.T) {
--	testFiles, err := filepath.Glob(testDataDir + "*.dat")
--	if err != nil {
--		t.Fatal(err)
--	}
--	for _, tf := range testFiles {
--		f, err := os.Open(tf)
--		if err != nil {
--			t.Fatal(err)
--		}
--		defer f.Close()
--		r := bufio.NewReader(f)
--
--		for i := 0; ; i++ {
--			text, want, context, err := readParseTest(r)
--			if err == io.EOF {
--				break
--			}
--			if err != nil {
--				t.Fatal(err)
--			}
--
--			err = testParseCase(text, want, context)
--
--			if err != nil {
--				t.Errorf("%s test #%d %q, %s", tf, i, text, err)
--			}
--		}
--	}
--}
--
--// testParseCase tests one test case from the test files. If the test does not
--// pass, it returns an error that explains the failure.
--// text is the HTML to be parsed, want is a dump of the correct parse tree,
--// and context is the name of the context node, if any.
--func testParseCase(text, want, context string) (err error) {
--	defer func() {
--		if x := recover(); x != nil {
--			switch e := x.(type) {
--			case error:
--				err = e
--			default:
--				err = fmt.Errorf("%v", e)
--			}
--		}
--	}()
--
--	var doc *Node
--	if context == "" {
--		doc, err = Parse(strings.NewReader(text))
--		if err != nil {
--			return err
--		}
--	} else {
--		contextNode := &Node{
--			Type:     ElementNode,
--			DataAtom: atom.Lookup([]byte(context)),
--			Data:     context,
--		}
--		nodes, err := ParseFragment(strings.NewReader(text), contextNode)
--		if err != nil {
--			return err
--		}
--		doc = &Node{
--			Type: DocumentNode,
--		}
--		for _, n := range nodes {
--			doc.AppendChild(n)
--		}
--	}
--
--	if err := checkTreeConsistency(doc); err != nil {
--		return err
--	}
--
--	got, err := dump(doc)
--	if err != nil {
--		return err
--	}
--	// Compare the parsed tree to the #document section.
--	if got != want {
--		return fmt.Errorf("got vs want:\n----\n%s----\n%s----", got, want)
--	}
--
--	if renderTestBlacklist[text] || context != "" {
--		return nil
--	}
--
--	// Check that rendering and re-parsing results in an identical tree.
--	pr, pw := io.Pipe()
--	go func() {
--		pw.CloseWithError(Render(pw, doc))
--	}()
--	doc1, err := Parse(pr)
--	if err != nil {
--		return err
--	}
--	got1, err := dump(doc1)
--	if err != nil {
--		return err
--	}
--	if got != got1 {
--		return fmt.Errorf("got vs got1:\n----\n%s----\n%s----", got, got1)
--	}
--
--	return nil
--}
--
--// Some test inputs result in parse trees that are not 'well-formed' despite
--// following the HTML5 recovery algorithms. Rendering and re-parsing such a
--// tree will not result in an exact clone of that tree. We blacklist such
--// inputs from the render test.
--var renderTestBlacklist = map[string]bool{
--	// The second <a> will be reparented to the first <table>'s parent. This
--	// results in an <a> whose parent is an <a>, which is not 'well-formed'.
--	`<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,
--	// The same thing with a <p>:
--	`<p><table></p>`: true,
--	// More cases of <a> being reparented:
--	`<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe`: true,
--	`<a><table><a></table><p><a><div><a>`:                                     true,
--	`<a><table><td><a><table></table><a></tr><a></table><a>`:                  true,
--	// A similar reparenting situation involving <nobr>:
--	`<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3`: true,
--	// A <plaintext> element is reparented, putting it before a table.
--	// A <plaintext> element can't have anything after it in HTML.
--	`<table><plaintext><td>`:                                   true,
--	`<!doctype html><table><plaintext></plaintext>`:            true,
--	`<!doctype html><table><tbody><plaintext></plaintext>`:     true,
--	`<!doctype html><table><tbody><tr><plaintext></plaintext>`: true,
--	// A form inside a table inside a form doesn't work either.
--	`<!doctype html><form><table></form><form></table></form>`: true,
--	// A script that ends at EOF may escape its own closing tag when rendered.
--	`<!doctype html><script><!--<script `:          true,
--	`<!doctype html><script><!--<script <`:         true,
--	`<!doctype html><script><!--<script <a`:        true,
--	`<!doctype html><script><!--<script </`:        true,
--	`<!doctype html><script><!--<script </s`:       true,
--	`<!doctype html><script><!--<script </script`:  true,
--	`<!doctype html><script><!--<script </scripta`: true,
--	`<!doctype html><script><!--<script -`:         true,
--	`<!doctype html><script><!--<script -a`:        true,
--	`<!doctype html><script><!--<script -<`:        true,
--	`<!doctype html><script><!--<script --`:        true,
--	`<!doctype html><script><!--<script --a`:       true,
--	`<!doctype html><script><!--<script --<`:       true,
--	`<script><!--<script `:                         true,
--	`<script><!--<script <a`:                       true,
--	`<script><!--<script </script`:                 true,
--	`<script><!--<script </scripta`:                true,
--	`<script><!--<script -`:                        true,
--	`<script><!--<script -a`:                       true,
--	`<script><!--<script --`:                       true,
--	`<script><!--<script --a`:                      true,
--	`<script><!--<script <`:                        true,
--	`<script><!--<script </`:                       true,
--	`<script><!--<script </s`:                      true,
--	// Reconstructing the active formatting elements results in a <plaintext>
--	// element that contains an <a> element.
--	`<!doctype html><p><a><plaintext>b`: true,
--}
--
--func TestNodeConsistency(t *testing.T) {
--	// inconsistentNode is a Node whose DataAtom and Data do not agree.
--	inconsistentNode := &Node{
--		Type:     ElementNode,
--		DataAtom: atom.Frameset,
--		Data:     "table",
--	}
--	_, err := ParseFragment(strings.NewReader("<p>hello</p>"), inconsistentNode)
--	if err == nil {
--		t.Errorf("got nil error, want non-nil")
--	}
--}
--
--func BenchmarkParser(b *testing.B) {
--	buf, err := ioutil.ReadFile("testdata/go1.html")
--	if err != nil {
--		b.Fatalf("could not read testdata/go1.html: %v", err)
--	}
--	b.SetBytes(int64(len(buf)))
--	runtime.GC()
--	b.ReportAllocs()
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		Parse(bytes.NewBuffer(buf))
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/render.go b/Godeps/_workspace/src/golang.org/x/net/html/render.go
-deleted file mode 100644
-index 4a833b4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/render.go
-+++ /dev/null
-@@ -1,271 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bufio"
--	"errors"
--	"fmt"
--	"io"
--	"strings"
--)
--
--type writer interface {
--	io.Writer
--	WriteByte(c byte) error // in Go 1.1, use io.ByteWriter
--	WriteString(string) (int, error)
--}
--
--// Render renders the parse tree n to the given writer.
--//
--// Rendering is done on a 'best effort' basis: calling Parse on the output of
--// Render will always result in something similar to the original tree, but it
--// is not necessarily an exact clone unless the original tree was 'well-formed'.
--// 'Well-formed' is not easily specified; the HTML5 specification is
--// complicated.
--//
--// Calling Parse on arbitrary input typically results in a 'well-formed' parse
--// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
--// For example, in a 'well-formed' parse tree, no <a> element is a child of
--// another <a> element: parsing "<a><a>" results in two sibling elements.
--// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
--// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
--// children; the <a> is reparented to the <table>'s parent. However, calling
--// Parse on "<a><table><a>" does not return an error, but the result has an <a>
--// element with an <a> child, and is therefore not 'well-formed'.
--//
--// Programmatically constructed trees are typically also 'well-formed', but it
--// is possible to construct a tree that looks innocuous but, when rendered and
--// re-parsed, results in a different tree. A simple example is that a solitary
--// text node would become a tree containing <html>, <head> and <body> elements.
--// Another example is that the programmatic equivalent of "a<head>b</head>c"
--// becomes "<html><head><head/><body>abc</body></html>".
--func Render(w io.Writer, n *Node) error {
--	if x, ok := w.(writer); ok {
--		return render(x, n)
--	}
--	buf := bufio.NewWriter(w)
--	if err := render(buf, n); err != nil {
--		return err
--	}
--	return buf.Flush()
--}
--
--// plaintextAbort is returned from render1 when a <plaintext> element
--// has been rendered. No more end tags should be rendered after that.
--var plaintextAbort = errors.New("html: internal error (plaintext abort)")
--
--func render(w writer, n *Node) error {
--	err := render1(w, n)
--	if err == plaintextAbort {
--		err = nil
--	}
--	return err
--}
--
--func render1(w writer, n *Node) error {
--	// Render non-element nodes; these are the easy cases.
--	switch n.Type {
--	case ErrorNode:
--		return errors.New("html: cannot render an ErrorNode node")
--	case TextNode:
--		return escape(w, n.Data)
--	case DocumentNode:
--		for c := n.FirstChild; c != nil; c = c.NextSibling {
--			if err := render1(w, c); err != nil {
--				return err
--			}
--		}
--		return nil
--	case ElementNode:
--		// No-op.
--	case CommentNode:
--		if _, err := w.WriteString("<!--"); err != nil {
--			return err
--		}
--		if _, err := w.WriteString(n.Data); err != nil {
--			return err
--		}
--		if _, err := w.WriteString("-->"); err != nil {
--			return err
--		}
--		return nil
--	case DoctypeNode:
--		if _, err := w.WriteString("<!DOCTYPE "); err != nil {
--			return err
--		}
--		if _, err := w.WriteString(n.Data); err != nil {
--			return err
--		}
--		if n.Attr != nil {
--			var p, s string
--			for _, a := range n.Attr {
--				switch a.Key {
--				case "public":
--					p = a.Val
--				case "system":
--					s = a.Val
--				}
--			}
--			if p != "" {
--				if _, err := w.WriteString(" PUBLIC "); err != nil {
--					return err
--				}
--				if err := writeQuoted(w, p); err != nil {
--					return err
--				}
--				if s != "" {
--					if err := w.WriteByte(' '); err != nil {
--						return err
--					}
--					if err := writeQuoted(w, s); err != nil {
--						return err
--					}
--				}
--			} else if s != "" {
--				if _, err := w.WriteString(" SYSTEM "); err != nil {
--					return err
--				}
--				if err := writeQuoted(w, s); err != nil {
--					return err
--				}
--			}
--		}
--		return w.WriteByte('>')
--	default:
--		return errors.New("html: unknown node type")
--	}
--
--	// Render the <xxx> opening tag.
--	if err := w.WriteByte('<'); err != nil {
--		return err
--	}
--	if _, err := w.WriteString(n.Data); err != nil {
--		return err
--	}
--	for _, a := range n.Attr {
--		if err := w.WriteByte(' '); err != nil {
--			return err
--		}
--		if a.Namespace != "" {
--			if _, err := w.WriteString(a.Namespace); err != nil {
--				return err
--			}
--			if err := w.WriteByte(':'); err != nil {
--				return err
--			}
--		}
--		if _, err := w.WriteString(a.Key); err != nil {
--			return err
--		}
--		if _, err := w.WriteString(`="`); err != nil {
--			return err
--		}
--		if err := escape(w, a.Val); err != nil {
--			return err
--		}
--		if err := w.WriteByte('"'); err != nil {
--			return err
--		}
--	}
--	if voidElements[n.Data] {
--		if n.FirstChild != nil {
--			return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
--		}
--		_, err := w.WriteString("/>")
--		return err
--	}
--	if err := w.WriteByte('>'); err != nil {
--		return err
--	}
--
--	// Add initial newline where there is danger of a newline being ignored.
--	if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
--		switch n.Data {
--		case "pre", "listing", "textarea":
--			if err := w.WriteByte('\n'); err != nil {
--				return err
--			}
--		}
--	}
--
--	// Render any child nodes.
--	switch n.Data {
--	case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
--		for c := n.FirstChild; c != nil; c = c.NextSibling {
--			if c.Type == TextNode {
--				if _, err := w.WriteString(c.Data); err != nil {
--					return err
--				}
--			} else {
--				if err := render1(w, c); err != nil {
--					return err
--				}
--			}
--		}
--		if n.Data == "plaintext" {
--			// Don't render anything else. <plaintext> must be the
--			// last element in the file, with no closing tag.
--			return plaintextAbort
--		}
--	default:
--		for c := n.FirstChild; c != nil; c = c.NextSibling {
--			if err := render1(w, c); err != nil {
--				return err
--			}
--		}
--	}
--
--	// Render the </xxx> closing tag.
--	if _, err := w.WriteString("</"); err != nil {
--		return err
--	}
--	if _, err := w.WriteString(n.Data); err != nil {
--		return err
--	}
--	return w.WriteByte('>')
--}
--
--// writeQuoted writes s to w surrounded by quotes. Normally it will use double
--// quotes, but if s contains a double quote, it will use single quotes.
--// It is used for writing the identifiers in a doctype declaration.
--// In valid HTML, they can't contain both types of quotes.
--func writeQuoted(w writer, s string) error {
--	var q byte = '"'
--	if strings.Contains(s, `"`) {
--		q = '\''
--	}
--	if err := w.WriteByte(q); err != nil {
--		return err
--	}
--	if _, err := w.WriteString(s); err != nil {
--		return err
--	}
--	if err := w.WriteByte(q); err != nil {
--		return err
--	}
--	return nil
--}
--
--// Section 12.1.2, "Elements", gives this list of void elements. Void elements
--// are those that can't have any contents.
--var voidElements = map[string]bool{
--	"area":    true,
--	"base":    true,
--	"br":      true,
--	"col":     true,
--	"command": true,
--	"embed":   true,
--	"hr":      true,
--	"img":     true,
--	"input":   true,
--	"keygen":  true,
--	"link":    true,
--	"meta":    true,
--	"param":   true,
--	"source":  true,
--	"track":   true,
--	"wbr":     true,
--}
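
The Render function removed above is documented as best effort: parsing the output of Render yields a similar, though not always identical, tree. A short illustrative round-trip sketch (not part of this patch; the input markup is invented):

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// "<a><table><a>" parses into a tree that is not 'well-formed',
	// so the rendered output need not match the input byte for byte.
	doc, err := html.Parse(strings.NewReader("<a><table><a>"))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
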
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/render_test.go b/Godeps/_workspace/src/golang.org/x/net/html/render_test.go
-deleted file mode 100644
-index 11da54b..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/render_test.go
-+++ /dev/null
-@@ -1,156 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bytes"
--	"testing"
--)
--
--func TestRenderer(t *testing.T) {
--	nodes := [...]*Node{
--		0: {
--			Type: ElementNode,
--			Data: "html",
--		},
--		1: {
--			Type: ElementNode,
--			Data: "head",
--		},
--		2: {
--			Type: ElementNode,
--			Data: "body",
--		},
--		3: {
--			Type: TextNode,
--			Data: "0<1",
--		},
--		4: {
--			Type: ElementNode,
--			Data: "p",
--			Attr: []Attribute{
--				{
--					Key: "id",
--					Val: "A",
--				},
--				{
--					Key: "foo",
--					Val: `abc"def`,
--				},
--			},
--		},
--		5: {
--			Type: TextNode,
--			Data: "2",
--		},
--		6: {
--			Type: ElementNode,
--			Data: "b",
--			Attr: []Attribute{
--				{
--					Key: "empty",
--					Val: "",
--				},
--			},
--		},
--		7: {
--			Type: TextNode,
--			Data: "3",
--		},
--		8: {
--			Type: ElementNode,
--			Data: "i",
--			Attr: []Attribute{
--				{
--					Key: "backslash",
--					Val: `\`,
--				},
--			},
--		},
--		9: {
--			Type: TextNode,
--			Data: "&4",
--		},
--		10: {
--			Type: TextNode,
--			Data: "5",
--		},
--		11: {
--			Type: ElementNode,
--			Data: "blockquote",
--		},
--		12: {
--			Type: ElementNode,
--			Data: "br",
--		},
--		13: {
--			Type: TextNode,
--			Data: "6",
--		},
--	}
--
--	// Build a tree out of those nodes, based on a textual representation.
--	// Only the ".\t"s are significant. The trailing HTML-like text is
--	// just commentary. The "0:" prefixes are for easy cross-reference with
--	// the nodes array.
--	treeAsText := [...]string{
--		0: `<html>`,
--		1: `.	<head>`,
--		2: `.	<body>`,
--		3: `.	.	"0&lt;1"`,
--		4: `.	.	<p id="A" foo="abc&#34;def">`,
--		5: `.	.	.	"2"`,
--		6: `.	.	.	<b empty="">`,
--		7: `.	.	.	.	"3"`,
--		8: `.	.	.	<i backslash="\">`,
--		9: `.	.	.	.	"&amp;4"`,
--		10: `.	.	"5"`,
--		11: `.	.	<blockquote>`,
--		12: `.	.	<br>`,
--		13: `.	.	"6"`,
--	}
--	if len(nodes) != len(treeAsText) {
--		t.Fatal("len(nodes) != len(treeAsText)")
--	}
--	var stack [8]*Node
--	for i, line := range treeAsText {
--		level := 0
--		for line[0] == '.' {
--			// Strip a leading ".\t".
--			line = line[2:]
--			level++
--		}
--		n := nodes[i]
--		if level == 0 {
--			if stack[0] != nil {
--				t.Fatal("multiple root nodes")
--			}
--			stack[0] = n
--		} else {
--			stack[level-1].AppendChild(n)
--			stack[level] = n
--			for i := level + 1; i < len(stack); i++ {
--				stack[i] = nil
--			}
--		}
--		// At each stage of tree construction, we check all nodes for consistency.
--		for j, m := range nodes {
--			if err := checkNodeConsistency(m); err != nil {
--				t.Fatalf("i=%d, j=%d: %v", i, j, err)
--			}
--		}
--	}
--
--	want := `<html><head></head><body>0&lt;1<p id="A" foo="abc&#34;def">` +
--		`2<b empty="">3</b><i backslash="\">&amp;4</i></p>` +
--		`5<blockquote></blockquote><br/>6</body></html>`
--	b := new(bytes.Buffer)
--	if err := Render(b, nodes[0]); err != nil {
--		t.Fatal(err)
--	}
--	if got := b.String(); got != want {
--		t.Errorf("got vs want:\n%s\n%s\n", got, want)
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/go1.html b/Godeps/_workspace/src/golang.org/x/net/html/testdata/go1.html
-deleted file mode 100644
-index a782cc7..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/go1.html
-+++ /dev/null
-@@ -1,2237 +0,0 @@
--<!DOCTYPE html>
--<html>
--<head>
--<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
--
--  <title>Go 1 Release Notes - The Go Programming Language</title>
--
--<link type="text/css" rel="stylesheet" href="/doc/style.css">
--<script type="text/javascript" src="/doc/godocs.js"></script>
--
--<link rel="search" type="application/opensearchdescription+xml" title="godoc" href="/opensearch.xml" />
--
--<script type="text/javascript">
--var _gaq = _gaq || [];
--_gaq.push(["_setAccount", "UA-11222381-2"]);
--_gaq.push(["_trackPageview"]);
--</script>
--</head>
--<body>
--
--<div id="topbar"><div class="container wide">
--
--<form method="GET" action="/search">
--<div id="menu">
--<a href="/doc/">Documents</a>
--<a href="/ref/">References</a>
--<a href="/pkg/">Packages</a>
--<a href="/project/">The Project</a>
--<a href="/help/">Help</a>
--<input type="text" id="search" name="q" class="inactive" value="Search">
--</div>
--<div id="heading"><a href="/">The Go Programming Language</a></div>
--</form>
--
--</div></div>
--
--<div id="page" class="wide">
--
--
--  <div id="plusone"><g:plusone size="small" annotation="none"></g:plusone></div>
--  <h1>Go 1 Release Notes</h1>
--
--
--
--
--<div id="nav"></div>
--
--
--
--
--<h2 id="introduction">Introduction to Go 1</h2>
--
--<p>
--Go version 1, Go 1 for short, defines a language and a set of core libraries
--that provide a stable foundation for creating reliable products, projects, and
--publications.
--</p>
--
--<p>
--The driving motivation for Go 1 is stability for its users. People should be able to
--write Go programs and expect that they will continue to compile and run without
--change, on a time scale of years, including in production environments such as
--Google App Engine. Similarly, people should be able to write books about Go, be
--able to say which version of Go the book is describing, and have that version
--number still be meaningful much later.
--</p>
--
--<p>
--Code that compiles in Go 1 should, with few exceptions, continue to compile and
--run throughout the lifetime of that version, even as we issue updates and bug
--fixes such as Go version 1.1, 1.2, and so on. Other than critical fixes, changes
--made to the language and library for subsequent releases of Go 1 may
--add functionality but will not break existing Go 1 programs.
--<a href="go1compat.html">The Go 1 compatibility document</a>
--explains the compatibility guidelines in more detail.
--</p>
--
--<p>
--Go 1 is a representation of Go as it is used today, not a wholesale rethinking of
--the language. We avoided designing new features and instead focused on cleaning
--up problems and inconsistencies and improving portability. There are a number of
--changes to the Go language and packages that we had considered for some time and
--prototyped but not released primarily because they are significant and
--backwards-incompatible. Go 1 was an opportunity to get them out, which is
--helpful for the long term, but also means that Go 1 introduces incompatibilities
--for old programs. Fortunately, the <code>go</code> <code>fix</code> tool can
--automate much of the work needed to bring programs up to the Go 1 standard.
--</p>
--
--<p>
--This document outlines the major changes in Go 1 that will affect programmers
--updating existing code; its reference point is the prior release, r60 (tagged as
--r60.3). It also explains how to update code from r60 to run under Go 1.
--</p>
--
--<h2 id="language">Changes to the language</h2>
--
--<h3 id="append">Append</h3>
--
--<p>
--The <code>append</code> predeclared variadic function makes it easy to grow a slice
--by adding elements to the end.
--A common use is to add bytes to the end of a byte slice when generating output.
--However, <code>append</code> did not provide a way to append a string to a <code>[]byte</code>,
--which is another common case.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/greeting := ..byte/` `/append.*hello/`}}
---->    greeting := []byte{}
--    greeting = append(greeting, []byte(&#34;hello &#34;)...)</pre>
--
--<p>
--By analogy with the similar property of <code>copy</code>, Go 1
--permits a string to be appended (byte-wise) directly to a byte
--slice, reducing the friction between strings and byte slices.
--The conversion is no longer necessary:
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/append.*world/`}}
---->    greeting = append(greeting, &#34;world&#34;...)</pre>
--
--<p>
--<em>Updating</em>:
--This is a new feature, so existing code needs no changes.
--</p>
--
--<h3 id="close">Close</h3>
--
--<p>
--The <code>close</code> predeclared function provides a mechanism
--for a sender to signal that no more values will be sent.
--It is important to the implementation of <code>for</code> <code>range</code>
--loops over channels and is helpful in other situations.
--Partly by design and partly because of race conditions that can occur otherwise,
--it is intended for use only by the goroutine sending on the channel,
--not by the goroutine receiving data.
--However, before Go 1 there was no compile-time checking that <code>close</code>
--was being used correctly.
--</p>
--
--<p>
--To close this gap, at least in part, Go 1 disallows <code>close</code> on receive-only channels.
--Attempting to close such a channel is a compile-time error.
--</p>
--
--<pre>
--    var c chan int
--    var csend chan&lt;- int = c
--    var crecv &lt;-chan int = c
--    close(c)     // legal
--    close(csend) // legal
--    close(crecv) // illegal
--</pre>
--
--<p>
--<em>Updating</em>:
--Existing code that attempts to close a receive-only channel was
--erroneous even before Go 1 and should be fixed.  The compiler will
--now reject such code.
--</p>
--
--<h3 id="literals">Composite literals</h3>
--
--<p>
--In Go 1, a composite literal of array, slice, or map type can elide the
--type specification for the elements' initializers if they are of pointer type.
--All four of the initializations in this example are legal; the last one was illegal before Go 1.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/type Date struct/` `/STOP/`}}
---->    type Date struct {
--        month string
--        day   int
--    }
--    <span class="comment">// Struct values, fully qualified; always legal.</span>
--    holiday1 := []Date{
--        Date{&#34;Feb&#34;, 14},
--        Date{&#34;Nov&#34;, 11},
--        Date{&#34;Dec&#34;, 25},
--    }
--    <span class="comment">// Struct values, type name elided; always legal.</span>
--    holiday2 := []Date{
--        {&#34;Feb&#34;, 14},
--        {&#34;Nov&#34;, 11},
--        {&#34;Dec&#34;, 25},
--    }
--    <span class="comment">// Pointers, fully qualified, always legal.</span>
--    holiday3 := []*Date{
--        &amp;Date{&#34;Feb&#34;, 14},
--        &amp;Date{&#34;Nov&#34;, 11},
--        &amp;Date{&#34;Dec&#34;, 25},
--    }
--    <span class="comment">// Pointers, type name elided; legal in Go 1.</span>
--    holiday4 := []*Date{
--        {&#34;Feb&#34;, 14},
--        {&#34;Nov&#34;, 11},
--        {&#34;Dec&#34;, 25},
--    }</pre>
--
--<p>
--<em>Updating</em>:
--This change has no effect on existing code, but the command
--<code>gofmt</code> <code>-s</code> applied to existing source
--will, among other things, elide explicit element types wherever permitted.
--</p>
--
--
--<h3 id="init">Goroutines during init</h3>
--
--<p>
--The old language defined that <code>go</code> statements executed during initialization created goroutines but that they did not begin to run until initialization of the entire program was complete.
--This introduced clumsiness in many places and, in effect, limited the utility
--of the <code>init</code> construct:
--if it was possible for another package to use the library during initialization, the library
--was forced to avoid goroutines.
--This design was done for reasons of simplicity and safety but,
--as our confidence in the language grew, it seemed unnecessary.
--Running goroutines during initialization is no more complex or unsafe than running them during normal execution.
--</p>
--
--<p>
--In Go 1, code that uses goroutines can be called from
--<code>init</code> routines and global initialization expressions
--without introducing a deadlock.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/PackageGlobal/` `/^}/`}}
---->var PackageGlobal int
--
--func init() {
--    c := make(chan int)
--    go initializationFunction(c)
--    PackageGlobal = &lt;-c
--}</pre>
--
--<p>
--<em>Updating</em>:
--This is a new feature, so existing code needs no changes,
--although it's possible that code that depends on goroutines not starting before <code>main</code> will break.
--There was no such code in the standard repository.
--</p>
--
--<h3 id="rune">The rune type</h3>
--
--<p>
--The language spec allows the <code>int</code> type to be 32 or 64 bits wide, but current implementations set <code>int</code> to 32 bits even on 64-bit platforms.
--It would be preferable to have <code>int</code> be 64 bits on 64-bit platforms.
--(There are important consequences for indexing large slices.)
--However, this change would waste space when processing Unicode characters with
--the old language because the <code>int</code> type was also used to hold Unicode code points: each code point would waste an extra 32 bits of storage if <code>int</code> grew from 32 bits to 64.
--</p>
--
--<p>
--To make changing to 64-bit <code>int</code> feasible,
--Go 1 introduces a new basic type, <code>rune</code>, to represent
--individual Unicode code points.
--It is an alias for <code>int32</code>, analogous to <code>byte</code>
--as an alias for <code>uint8</code>.
--</p>
--
--<p>
--Character literals such as <code>'a'</code>, <code>'語'</code>, and <code>'\u0345'</code>
--now have default type <code>rune</code>,
--analogous to <code>1.0</code> having default type <code>float64</code>.
--A variable initialized to a character constant will therefore
--have type <code>rune</code> unless otherwise specified.
--</p>
--
--<p>
--Libraries have been updated to use <code>rune</code> rather than <code>int</code>
--when appropriate. For instance, the functions <code>unicode.ToLower</code> and
--relatives now take and return a <code>rune</code>.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/STARTRUNE/` `/ENDRUNE/`}}
---->    delta := &#39;δ&#39; <span class="comment">// delta has type rune.</span>
--    var DELTA rune
--    DELTA = unicode.ToUpper(delta)
--    epsilon := unicode.ToLower(DELTA + 1)
--    if epsilon != &#39;δ&#39;+1 {
--        log.Fatal(&#34;inconsistent casing for Greek&#34;)
--    }</pre>
--
--<p>
--<em>Updating</em>:
--Most source code will be unaffected by this because the type inference from
--<code>:=</code> initializers introduces the new type silently, and it propagates
--from there.
--Some code may get type errors that a trivial conversion will resolve.
--</p>
--
--<h3 id="error">The error type</h3>
--
--<p>
--Go 1 introduces a new built-in type, <code>error</code>, which has the following definition:
--</p>
--
--<pre>
--    type error interface {
--        Error() string
--    }
--</pre>
--
--<p>
--Since the consequences of this type are all in the package library,
--it is discussed <a href="#errors">below</a>.
--</p>
--
--<h3 id="delete">Deleting from maps</h3>
--
--<p>
--In the old language, to delete the entry with key <code>k</code> from map <code>m</code>, one wrote the statement,
--</p>
--
--<pre>
--    m[k] = value, false
--</pre>
--
--<p>
--This syntax was a peculiar special case, the only two-to-one assignment.
--It required passing a value (usually ignored) that is evaluated but discarded,
--plus a boolean that was nearly always the constant <code>false</code>.
--It did the job but was odd and a point of contention.
--</p>
--
--<p>
--In Go 1, that syntax has gone; instead there is a new built-in
--function, <code>delete</code>.  The call
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/delete\(m, k\)/`}}
---->    delete(m, k)</pre>
--
--<p>
--will delete the map entry retrieved by the expression <code>m[k]</code>.
--There is no return value. Deleting a non-existent entry is a no-op.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will convert expressions of the form <code>m[k] = value,
--false</code> into <code>delete(m, k)</code> when it is clear that
--the ignored value can be safely discarded from the program and
--<code>false</code> refers to the predefined boolean constant.
--The fix tool
--will flag other uses of the syntax for inspection by the programmer.
--</p>
--
--<h3 id="iteration">Iterating in maps</h3>
--
--<p>
--The old language specification did not define the order of iteration for maps,
--and in practice it differed across hardware platforms.
--This caused tests that iterated over maps to be fragile and non-portable, with the
--unpleasant property that a test might always pass on one machine but break on another.
--</p>
--
--<p>
--In Go 1, the order in which elements are visited when iterating
--over a map using a <code>for</code> <code>range</code> statement
--is defined to be unpredictable, even if the same loop is run multiple
--times with the same map.
--Code should not assume that the elements are visited in any particular order.
--</p>
--
--<p>
--This change means that code that depends on iteration order is very likely to break early and be fixed long before it becomes a problem.
--Just as important, it allows the map implementation to ensure better map balancing even when programs are using range loops to select an element from a map.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/Sunday/` `/^	}/`}}
---->    m := map[string]int{&#34;Sunday&#34;: 0, &#34;Monday&#34;: 1}
--    for name, value := range m {
--        <span class="comment">// This loop should not assume Sunday will be visited first.</span>
--        f(name, value)
--    }</pre>
--
--<p>
--<em>Updating</em>:
--This is one change where tools cannot help.  Most existing code
--will be unaffected, but some programs may break or misbehave; we
--recommend manual checking of all range statements over maps to
--verify they do not depend on iteration order. There were a few such
--examples in the standard repository; they have been fixed.
--Note that it was already incorrect to depend on the iteration order, which
--was unspecified. This change codifies the unpredictability.
--</p>
--
--<h3 id="multiple_assignment">Multiple assignment</h3>
--
--<p>
--The language specification has long guaranteed that in assignments
--the right-hand-side expressions are all evaluated before any left-hand-side expressions are assigned.
--To guarantee predictable behavior,
--Go 1 refines the specification further.
--</p>
--
--<p>
--If the left-hand side of the assignment
--statement contains expressions that require evaluation, such as
--function calls or array indexing operations, these will all be done
--using the usual left-to-right rule before any variables are assigned
--their value.  Once everything is evaluated, the actual assignments
--proceed in left-to-right order.
--</p>
--
--<p>
--These examples illustrate the behavior.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/sa :=/` `/then sc.0. = 2/`}}
---->    sa := []int{1, 2, 3}
--    i := 0
--    i, sa[i] = 1, 2 <span class="comment">// sets i = 1, sa[0] = 2</span>
--
--    sb := []int{1, 2, 3}
--    j := 0
--    sb[j], j = 2, 1 <span class="comment">// sets sb[0] = 2, j = 1</span>
--
--    sc := []int{1, 2, 3}
--    sc[0], sc[0] = 1, 2 <span class="comment">// sets sc[0] = 1, then sc[0] = 2 (so sc[0] = 2 at end)</span></pre>
--
--<p>
--<em>Updating</em>:
--This is one change where tools cannot help, but breakage is unlikely.
--No code in the standard repository was broken by this change, and code
--that depended on the previous unspecified behavior was already incorrect.
--</p>
--
--<h3 id="shadowing">Returns and shadowed variables</h3>
--
--<p>
--A common mistake is to use <code>return</code> (without arguments) after an assignment to a variable that has the same name as a result variable but is not the same variable.
--This situation is called <em>shadowing</em>: the result variable has been shadowed by another variable with the same name declared in an inner scope.
--</p>
--
--<p>
--In functions with named return values,
--the Go 1 compilers disallow return statements without arguments if any of the named return values is shadowed at the point of the return statement.
--(It isn't part of the specification, because this is one area we are still exploring;
--the situation is analogous to the compilers rejecting functions that do not end with an explicit return statement.)
--</p>
--
--<p>
--This function implicitly returns a shadowed return value and will be rejected by the compiler:
--</p>
--
--<pre>
--    func Bug() (i, j, k int) {
--        for i = 0; i &lt; 5; i++ {
--            for j := 0; j &lt; 5; j++ { // Redeclares j.
--                k += i*j
--                if k > 100 {
--                    return // Rejected: j is shadowed here.
--                }
--            }
--        }
--        return // OK: j is not shadowed here.
--    }
--</pre>
--
--<p>
--<em>Updating</em>:
--Code that shadows return values in this way will be rejected by the compiler and will need to be fixed by hand.
--The few cases that arose in the standard repository were mostly bugs.
--</p>
--
--<h3 id="unexported">Copying structs with unexported fields</h3>
--
--<p>
--The old language did not allow a package to make a copy of a struct value containing unexported fields belonging to a different package.
--There was, however, a required exception for a method receiver;
--also, the implementations of <code>copy</code> and <code>append</code> have never honored the restriction.
--</p>
--
--<p>
--Go 1 will allow packages to copy struct values containing unexported fields from other packages.
--Besides resolving the inconsistency,
--this change admits a new kind of API: a package can return an opaque value without resorting to a pointer or interface.
--The new implementations of <code>time.Time</code> and
--<code>reflect.Value</code> are examples of types taking advantage of this new property.
--</p>
--
--<p>
--As an example, if package <code>p</code> includes the definitions,
--</p>
--
--<pre>
--    type Struct struct {
--        Public int
--        secret int
--    }
--    func NewStruct(a int) Struct {  // Note: not a pointer.
--        return Struct{a, f(a)}
--    }
--    func (s Struct) String() string {
--        return fmt.Sprintf("{%d (secret %d)}", s.Public, s.secret)
--    }
--</pre>
--
--<p>
--a package that imports <code>p</code> can assign and copy values of type
--<code>p.Struct</code> at will.
--Behind the scenes the unexported fields will be assigned and copied just
--as if they were exported,
--but the client code will never be aware of them. The code
--</p>
--
--<pre>
--    import "p"
--
--    myStruct := p.NewStruct(23)
--    copyOfMyStruct := myStruct
--    fmt.Println(myStruct, copyOfMyStruct)
--</pre>
--
--<p>
--will show that the secret field of the struct has been copied to the new value.
--</p>
--
--<p>
--<em>Updating</em>:
--This is a new feature, so existing code needs no changes.
--</p>
--
--<h3 id="equality">Equality</h3>
--
--<p>
--Before Go 1, the language did not define equality on struct and array values.
--This meant,
--among other things, that structs and arrays could not be used as map keys.
--On the other hand, Go did define equality on function and map values.
--Function equality was problematic in the presence of closures
--(when are two closures equal?)
--while map equality compared pointers, not the maps' content, which was usually
--not what the user would want.
--</p>
--
--<p>
--Go 1 addressed these issues.
--First, structs and arrays can be compared for equality and inequality
--(<code>==</code> and <code>!=</code>),
--and therefore be used as map keys,
--provided they are composed from elements for which equality is also defined,
--using element-wise comparison.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/type Day struct/` `/Printf/`}}
---->    type Day struct {
--        long  string
--        short string
--    }
--    Christmas := Day{&#34;Christmas&#34;, &#34;XMas&#34;}
--    Thanksgiving := Day{&#34;Thanksgiving&#34;, &#34;Turkey&#34;}
--    holiday := map[Day]bool{
--        Christmas:    true,
--        Thanksgiving: true,
--    }
--    fmt.Printf(&#34;Christmas is a holiday: %t\n&#34;, holiday[Christmas])</pre>
--
--<p>
--Second, Go 1 removes the definition of equality for function values,
--except for comparison with <code>nil</code>.
--Finally, map equality is gone too, also except for comparison with <code>nil</code>.
--</p>
--
--<p>
--Note that equality is still undefined for slices, for which the
--calculation is in general infeasible.  Also note that the ordered
--comparison operators (<code>&lt;</code> <code>&lt;=</code>
--<code>&gt;</code> <code>&gt;=</code>) are still undefined for
--structs and arrays.
--
--<p>
--<em>Updating</em>:
--Struct and array equality is a new feature, so existing code needs no changes.
--Existing code that depends on function or map equality will be
--rejected by the compiler and will need to be fixed by hand.
--Few programs will be affected, but the fix may require some
--redesign.
--</p>
--
--<h2 id="packages">The package hierarchy</h2>
--
--<p>
--Go 1 addresses many deficiencies in the old standard library and
--cleans up a number of packages, making them more internally consistent
--and portable.
--</p>
--
--<p>
--This section describes how the packages have been rearranged in Go 1.
--Some have moved, some have been renamed, some have been deleted.
--New packages are described in later sections.
--</p>
--
--<h3 id="hierarchy">The package hierarchy</h3>
--
--<p>
--Go 1 has a rearranged package hierarchy that groups related items
--into subdirectories. For instance, <code>utf8</code> and
--<code>utf16</code> now occupy subdirectories of <code>unicode</code>.
--Also, <a href="#subrepo">some packages</a> have moved into
--subrepositories of
--<a href="http://code.google.com/p/go"><code>code.google.com/p/go</code></a>
--while <a href="#deleted">others</a> have been deleted outright.
--</p>
--
--<table class="codetable" frame="border" summary="Moved packages">
--<colgroup align="left" width="60%"></colgroup>
--<colgroup align="left" width="40%"></colgroup>
--<tr>
--<th align="left">Old path</th>
--<th align="left">New path</th>
--</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>asn1</td> <td>encoding/asn1</td></tr>
--<tr><td>csv</td> <td>encoding/csv</td></tr>
--<tr><td>gob</td> <td>encoding/gob</td></tr>
--<tr><td>json</td> <td>encoding/json</td></tr>
--<tr><td>xml</td> <td>encoding/xml</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>exp/template/html</td> <td>html/template</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>big</td> <td>math/big</td></tr>
--<tr><td>cmath</td> <td>math/cmplx</td></tr>
--<tr><td>rand</td> <td>math/rand</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>http</td> <td>net/http</td></tr>
--<tr><td>http/cgi</td> <td>net/http/cgi</td></tr>
--<tr><td>http/fcgi</td> <td>net/http/fcgi</td></tr>
--<tr><td>http/httptest</td> <td>net/http/httptest</td></tr>
--<tr><td>http/pprof</td> <td>net/http/pprof</td></tr>
--<tr><td>mail</td> <td>net/mail</td></tr>
--<tr><td>rpc</td> <td>net/rpc</td></tr>
--<tr><td>rpc/jsonrpc</td> <td>net/rpc/jsonrpc</td></tr>
--<tr><td>smtp</td> <td>net/smtp</td></tr>
--<tr><td>url</td> <td>net/url</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>exec</td> <td>os/exec</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>scanner</td> <td>text/scanner</td></tr>
--<tr><td>tabwriter</td> <td>text/tabwriter</td></tr>
--<tr><td>template</td> <td>text/template</td></tr>
--<tr><td>template/parse</td> <td>text/template/parse</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>utf8</td> <td>unicode/utf8</td></tr>
--<tr><td>utf16</td> <td>unicode/utf16</td></tr>
--</table>
--
--<p>
--Note that the package names for the old <code>cmath</code> and
--<code>exp/template/html</code> packages have changed to <code>cmplx</code>
--and <code>template</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update all imports and package renames for packages that
--remain inside the standard repository.  Programs that import packages
--that are no longer in the standard repository will need to be edited
--by hand.
--</p>
--
--<h3 id="exp">The package tree exp</h3>
--
--<p>
--Because they are not standardized, the packages under the <code>exp</code> directory will not be available in the
--standard Go 1 release distributions, although they will be available in source code form
--in <a href="http://code.google.com/p/go/">the repository</a> for
--developers who wish to use them.
--</p>
--
--<p>
--Several packages have moved under <code>exp</code> at the time of Go 1's release:
--</p>
--
--<ul>
--<li><code>ebnf</code></li>
--<li><code>html</code><sup>&#8224;</sup></li>
--<li><code>go/types</code></li>
--</ul>
--
--<p>
--(<sup>&#8224;</sup>The <code>EscapeString</code> and <code>UnescapeString</code> functions remain
--in package <code>html</code>.)
--</p>
--
--<p>
--All these packages are available under the same names, with the prefix <code>exp/</code>: <code>exp/ebnf</code> etc.
--</p>
--
--<p>
--Also, the <code>utf8.String</code> type has been moved to its own package, <code>exp/utf8string</code>.
--</p>
--
--<p>
--Finally, the <code>gotype</code> command now resides in <code>exp/gotype</code>, while
--<code>ebnflint</code> is now in <code>exp/ebnflint</code>.
--If they are installed, they now reside in <code>$GOROOT/bin/tool</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code that uses packages in <code>exp</code> will need to be updated by hand,
--or else compiled from an installation that has <code>exp</code> available.
--The <code>go</code> <code>fix</code> tool or the compiler will complain about such uses.
--</p>
--
--<h3 id="old">The package tree old</h3>
--
--<p>
--Because they are deprecated, the packages under the <code>old</code> directory will not be available in the
--standard Go 1 release distributions, although they will be available in source code form for
--developers who wish to use them.
--</p>
--
--<p>
--The packages in their new locations are:
--</p>
--
--<ul>
--<li><code>old/netchan</code></li>
--<li><code>old/regexp</code></li>
--<li><code>old/template</code></li>
--</ul>
--
--<p>
--<em>Updating</em>:
--Code that uses packages now in <code>old</code> will need to be updated by hand,
--or else compiled from an installation that has <code>old</code> available.
--The <code>go</code> <code>fix</code> tool will warn about such uses.
--</p>
--
--<h3 id="deleted">Deleted packages</h3>
--
--<p>
--Go 1 deletes several packages outright:
--</p>
--
--<ul>
--<li><code>container/vector</code></li>
--<li><code>exp/datafmt</code></li>
--<li><code>go/typechecker</code></li>
--<li><code>try</code></li>
--</ul>
--
--<p>
--and also the command <code>gotry</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code that uses <code>container/vector</code> should be updated to use
--slices directly.  See
--<a href="http://code.google.com/p/go-wiki/wiki/SliceTricks">the Go
--Language Community Wiki</a> for some suggestions.
--Code that uses the other packages (there should be almost zero) will need to be rethought.
--</p>
--
--<h3 id="subrepo">Packages moving to subrepositories</h3>
--
--<p>
--Go 1 has moved a number of packages into other repositories, usually sub-repositories of
--<a href="http://code.google.com/p/go/">the main Go repository</a>.
--This table lists the old and new import paths:
--</p>
--
--<table class="codetable" frame="border" summary="Sub-repositories">
--<colgroup align="left" width="40%"></colgroup>
--<colgroup align="left" width="60%"></colgroup>
--<tr>
--<th align="left">Old</th>
--<th align="left">New</th>
--</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>crypto/bcrypt</td> <td>code.google.com/p/go.crypto/bcrypt</tr>
--<tr><td>crypto/blowfish</td> <td>code.google.com/p/go.crypto/blowfish</tr>
--<tr><td>crypto/cast5</td> <td>code.google.com/p/go.crypto/cast5</tr>
--<tr><td>crypto/md4</td> <td>code.google.com/p/go.crypto/md4</tr>
--<tr><td>crypto/ocsp</td> <td>code.google.com/p/go.crypto/ocsp</tr>
--<tr><td>crypto/openpgp</td> <td>code.google.com/p/go.crypto/openpgp</tr>
--<tr><td>crypto/openpgp/armor</td> <td>code.google.com/p/go.crypto/openpgp/armor</tr>
--<tr><td>crypto/openpgp/elgamal</td> <td>code.google.com/p/go.crypto/openpgp/elgamal</tr>
--<tr><td>crypto/openpgp/errors</td> <td>code.google.com/p/go.crypto/openpgp/errors</tr>
--<tr><td>crypto/openpgp/packet</td> <td>code.google.com/p/go.crypto/openpgp/packet</tr>
--<tr><td>crypto/openpgp/s2k</td> <td>code.google.com/p/go.crypto/openpgp/s2k</tr>
--<tr><td>crypto/ripemd160</td> <td>code.google.com/p/go.crypto/ripemd160</tr>
--<tr><td>crypto/twofish</td> <td>code.google.com/p/go.crypto/twofish</tr>
--<tr><td>crypto/xtea</td> <td>code.google.com/p/go.crypto/xtea</tr>
--<tr><td>exp/ssh</td> <td>code.google.com/p/go.crypto/ssh</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>image/bmp</td> <td>code.google.com/p/go.image/bmp</tr>
--<tr><td>image/tiff</td> <td>code.google.com/p/go.image/tiff</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>net/dict</td> <td>code.google.com/p/go.net/dict</tr>
--<tr><td>net/websocket</td> <td>code.google.com/p/go.net/websocket</tr>
--<tr><td>exp/spdy</td> <td>code.google.com/p/go.net/spdy</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>encoding/git85</td> <td>code.google.com/p/go.codereview/git85</tr>
--<tr><td>patch</td> <td>code.google.com/p/go.codereview/patch</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>exp/wingui</td> <td>code.google.com/p/gowingui</tr>
--</table>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update imports of these packages to use the new import paths.
--Installations that depend on these packages will need to install them using
--a <code>go get</code> command.
--</p>
--
--<h2 id="major">Major changes to the library</h2>
--
--<p>
--This section describes significant changes to the core libraries, the ones that
--affect the most programs.
--</p>
--
--<h3 id="errors">The error type and errors package</h3>
--
--<p>
--The placement of <code>os.Error</code> in package <code>os</code> is mostly historical: errors first came up when implementing package <code>os</code>, and they seemed system-related at the time.
--Since then it has become clear that errors are more fundamental than the operating system.  For example, it would be nice to use <code>Error</code> values in packages that <code>os</code> depends on, like <code>syscall</code>.
--Also, having <code>Error</code> in <code>os</code> introduces many dependencies on <code>os</code> that would otherwise not exist.
--</p>
--
--<p>
--Go 1 solves these problems by introducing a built-in <code>error</code> interface type and a separate <code>errors</code> package (analogous to <code>bytes</code> and <code>strings</code>) that contains utility functions.
--It replaces <code>os.NewError</code> with
--<a href="/pkg/errors/#New"><code>errors.New</code></a>,
--giving errors a more central place in the environment.
--</p>
--
--<p>
--So that the widely used <code>String</code> method does not cause accidental satisfaction
--of the <code>error</code> interface, the <code>error</code> interface instead uses
--the name <code>Error</code> for that method:
--</p>
--
--<pre>
--    type error interface {
--        Error() string
--    }
--</pre>
--
--<p>
--The <code>fmt</code> library automatically invokes <code>Error</code>, as it already
--does for <code>String</code>, for easy printing of error values.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/START ERROR EXAMPLE/` `/END ERROR EXAMPLE/`}}
---->type SyntaxError struct {
--    File    string
--    Line    int
--    Message string
--}
--
--func (se *SyntaxError) Error() string {
--    return fmt.Sprintf(&#34;%s:%d: %s&#34;, se.File, se.Line, se.Message)
--}</pre>
--
--<p>
--All standard packages have been updated to use the new interface; the old <code>os.Error</code> is gone.
--</p>
--
--<p>
--A new package, <a href="/pkg/errors/"><code>errors</code></a>, contains the function
--</p>
--
--<pre>
--func New(text string) error
--</pre>
--
--<p>
--to turn a string into an error. It replaces the old <code>os.NewError</code>.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/ErrSyntax/`}}
---->    var ErrSyntax = errors.New(&#34;syntax error&#34;)</pre>
--		
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
--Code that defines error types with a <code>String</code> method will need to be updated
--by hand to rename the methods to <code>Error</code>.
--</p>
--
--<h3 id="errno">System call errors</h3>
--
--<p>
--The old <code>syscall</code> package, which predated <code>os.Error</code>
--(and just about everything else),
--returned errors as <code>int</code> values.
--In turn, the <code>os</code> package forwarded many of these errors, such
--as <code>EINVAL</code>, but using a different set of errors on each platform.
--This behavior was unpleasant and unportable.
--</p>
--
--<p>
--In Go 1, the
--<a href="/pkg/syscall/"><code>syscall</code></a>
--package instead returns an <code>error</code> for system call errors.
--On Unix, the implementation is done by a
--<a href="/pkg/syscall/#Errno"><code>syscall.Errno</code></a> type
--that satisfies <code>error</code> and replaces the old <code>os.Errno</code>.
--</p>
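--
--<p>
--A minimal sketch (assuming a Unix system; the path is hypothetical) of testing for a
--specific <code>syscall.Errno</code> value:
--</p>
--
--<pre>
--    // On Unix, syscall functions now return error values backed by syscall.Errno.
--    err := syscall.Chmod("/no/such/file", 0644)
--    errno, ok := err.(syscall.Errno)
--    if ok {
--        // ENOENT, EINVAL and friends are now constants of type syscall.Errno.
--        fmt.Println("missing file:", errno == syscall.ENOENT)
--    }
--</pre>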
--
--<p>
--The changes affecting <code>os.EINVAL</code> and relatives are
--described <a href="#os">elsewhere</a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
--Regardless, most code should use the <code>os</code> package
--rather than <code>syscall</code> and so will be unaffected.
--</p>
--
--<h3 id="time">Time</h3>
--
--<p>
--Time is always a challenge to support well in a programming language.
--The old Go <code>time</code> package had <code>int64</code> units, no
--real type safety,
--and no distinction between absolute times and durations.
--</p>
--
--<p>
--One of the most sweeping changes in the Go 1 library is therefore a
--complete redesign of the
--<a href="/pkg/time/"><code>time</code></a> package.
--Instead of an integer number of nanoseconds as an <code>int64</code>,
--and a separate <code>*time.Time</code> type to deal with human
--units such as hours and years,
--there are now two fundamental types:
--<a href="/pkg/time/#Time"><code>time.Time</code></a>
--(a value, so the <code>*</code> is gone), which represents a moment in time;
--and <a href="/pkg/time/#Duration"><code>time.Duration</code></a>,
--which represents an interval.
--Both have nanosecond resolution.
--A <code>Time</code> can represent any time into the ancient
--past and remote future, while a <code>Duration</code> can
--span plus or minus only about 290 years.
--There are methods on these types, plus a number of helpful
--predefined constant durations such as <code>time.Second</code>.
--</p>
--
--<p>
--Among the new methods are things like
--<a href="/pkg/time/#Time.Add"><code>Time.Add</code></a>,
--which adds a <code>Duration</code> to a <code>Time</code>, and
--<a href="/pkg/time/#Time.Sub"><code>Time.Sub</code></a>,
--which subtracts two <code>Times</code> to yield a <code>Duration</code>.
--</p>
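--
--<p>
--A small sketch (the values are arbitrary) of the new arithmetic:
--</p>
--
--<pre>
--    start := time.Now()
--    deadline := start.Add(90 * time.Second) // Time.Add takes a Duration.
--    remaining := deadline.Sub(time.Now())   // Time.Sub yields a Duration.
--    fmt.Printf("about %v left\n", remaining)
--</pre>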
--
--<p>
--The most important semantic change is that the Unix epoch (Jan 1, 1970) is now
--relevant only for those functions and methods that mention Unix:
--<a href="/pkg/time/#Unix"><code>time.Unix</code></a>
--and the <a href="/pkg/time/#Time.Unix"><code>Unix</code></a>
--and <a href="/pkg/time/#Time.UnixNano"><code>UnixNano</code></a> methods
--of the <code>Time</code> type.
--In particular,
--<a href="/pkg/time/#Now"><code>time.Now</code></a>
--returns a <code>time.Time</code> value rather than, in the old
--API, an integer nanosecond count since the Unix epoch.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/sleepUntil/` `/^}/`}}
----><span class="comment">// sleepUntil sleeps until the specified time. It returns immediately if it&#39;s too late.</span>
--func sleepUntil(wakeup time.Time) {
--    now := time.Now() <span class="comment">// A Time.</span>
--    if !wakeup.After(now) {
--        return
--    }
--    delta := wakeup.Sub(now) <span class="comment">// A Duration.</span>
--    fmt.Printf(&#34;Sleeping for %.3fs\n&#34;, delta.Seconds())
--    time.Sleep(delta)
--}</pre>
--
--<p>
--The new types, methods, and constants have been propagated through
--all the standard packages that use time, such as <code>os</code> and
--its representation of file time stamps.
--</p>
--
--<p>
--<em>Updating</em>:
--The <code>go</code> <code>fix</code> tool will update many uses of the old <code>time</code> package to use the new
--types and methods, although it does not replace values such as <code>1e9</code>
--representing nanoseconds per second.
--Also, because of type changes in some of the values that arise,
--some of the expressions rewritten by the fix tool may require
--further hand editing; in such cases the rewrite will include
--the correct function or method for the old functionality, but
--may have the wrong type or require further analysis.
--</p>
--
--<h2 id="minor">Minor changes to the library</h2>
--
--<p>
--This section describes smaller changes, such as those to less commonly
--used packages or that affect
--few programs beyond the need to run <code>go</code> <code>fix</code>.
--This category includes packages that are new in Go 1.
--Collectively they improve portability, regularize behavior, and
--make the interfaces more modern and Go-like.
--</p>
--
--<h3 id="archive_zip">The archive/zip package</h3>
--
--<p>
--In Go 1, <a href="/pkg/archive/zip/#Writer"><code>*zip.Writer</code></a> no
--longer has a <code>Write</code> method. Its presence was a mistake.
--</p>
--
--<p>
--<em>Updating</em>:
--What little code is affected will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="bufio">The bufio package</h3>
--
--<p>
--In Go 1, <a href="/pkg/bufio/#NewReaderSize"><code>bufio.NewReaderSize</code></a>
--and
--<a href="/pkg/bufio/#NewWriterSize"><code>bufio.NewWriterSize</code></a>
--functions no longer return an error for invalid sizes.
--If the argument size is too small or invalid, it is adjusted.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update calls that assign the error to _.
--Calls that aren't fixed will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="compress">The compress/flate, compress/gzip and compress/zlib packages</h3>
--
--<p>
--In Go 1, the <code>NewWriterXxx</code> functions in
--<a href="/pkg/compress/flate"><code>compress/flate</code></a>,
--<a href="/pkg/compress/gzip"><code>compress/gzip</code></a> and
--<a href="/pkg/compress/zlib"><code>compress/zlib</code></a>
--all return <code>(*Writer, error)</code> if they take a compression level,
--and <code>*Writer</code> otherwise. Package <code>gzip</code>'s
--<code>Compressor</code> and <code>Decompressor</code> types have been renamed
--to <code>Writer</code> and <code>Reader</code>. Package <code>flate</code>'s
--<code>WrongValueError</code> type has been removed.
--</p>
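--
--<p>
--A brief sketch (the output writers are chosen only for illustration) of the two forms:
--</p>
--
--<pre>
--    zw := zlib.NewWriter(os.Stdout) // no compression level, so no error result
--    defer zw.Close()
--
--    fw, err := flate.NewWriter(os.Stdout, flate.BestCompression) // level given, error returned too
--    if err != nil {
--        log.Fatal(err)
--    }
--    defer fw.Close()
--</pre>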
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update old names and calls that assign the error to _.
--Calls that aren't fixed will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="crypto_aes_des">The crypto/aes and crypto/des packages</h3>
--
--<p>
--In Go 1, the <code>Reset</code> method has been removed. Go does not guarantee
--that memory is not copied and therefore this method was misleading.
--</p>
--
--<p>
--The cipher-specific types <code>*aes.Cipher</code>, <code>*des.Cipher</code>,
--and <code>*des.TripleDESCipher</code> have been removed in favor of
--<code>cipher.Block</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--Remove the calls to <code>Reset</code>. Replace uses of the specific cipher types with
--<code>cipher.Block</code>.
--</p>
--
--<h3 id="crypto_elliptic">The crypto/elliptic package</h3>
--
--<p>
--In Go 1, <a href="/pkg/crypto/elliptic/#Curve"><code>elliptic.Curve</code></a>
--has been made an interface to permit alternative implementations. The curve
--parameters have been moved to the
--<a href="/pkg/crypto/elliptic/#CurveParams"><code>elliptic.CurveParams</code></a>
--structure.
--</p>
--
--<p>
--<em>Updating</em>:
--Existing users of <code>*elliptic.Curve</code> will need to change to
--simply <code>elliptic.Curve</code>. Calls to <code>Marshal</code>,
--<code>Unmarshal</code> and <code>GenerateKey</code> are now functions
--in <code>crypto/elliptic</code> that take an <code>elliptic.Curve</code>
--as their first argument.
--</p>
--
--<h3 id="crypto_hmac">The crypto/hmac package</h3>
--
--<p>
--In Go 1, the hash-specific functions, such as <code>hmac.NewMD5</code>, have
--been removed from <code>crypto/hmac</code>. Instead, <code>hmac.New</code> takes
--a function that returns a <code>hash.Hash</code>, such as <code>md5.New</code>.
--</p>
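--
--<p>
--A short sketch (the key and message are placeholders) using <code>md5.New</code> as
--the hash constructor:
--</p>
--
--<pre>
--    mac := hmac.New(md5.New, []byte("example key"))
--    io.WriteString(mac, "example message")
--    fmt.Printf("%x\n", mac.Sum(nil))
--</pre>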
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will perform the needed changes.
--</p>
--
--<h3 id="crypto_x509">The crypto/x509 package</h3>
--
--<p>
--In Go 1, the
--<a href="/pkg/crypto/x509/#CreateCertificate"><code>CreateCertificate</code></a>
--and
--<a href="/pkg/crypto/x509/#CreateCRL"><code>CreateCRL</code></a>
--functions in <code>crypto/x509</code> have been altered to take an
--<code>interface{}</code> where they previously took a <code>*rsa.PublicKey</code>
--or <code>*rsa.PrivateKey</code>. This will allow other public key algorithms
--to be implemented in the future.
--</p>
--
--<p>
--<em>Updating</em>:
--No changes will be needed.
--</p>
--
--<h3 id="encoding_binary">The encoding/binary package</h3>
--
--<p>
--In Go 1, the <code>binary.TotalSize</code> function has been replaced by
--<a href="/pkg/encoding/binary/#Size"><code>Size</code></a>,
--which takes an <code>interface{}</code> argument rather than
--a <code>reflect.Value</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--What little code is affected will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="encoding_xml">The encoding/xml package</h3>
--
--<p>
--In Go 1, the <a href="/pkg/encoding/xml/"><code>xml</code></a> package
--has been brought closer in design to the other marshaling packages such
--as <a href="/pkg/encoding/gob/"><code>encoding/gob</code></a>.
--</p>
--
--<p>
--The old <code>Parser</code> type is renamed
--<a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> and has a new
--<a href="/pkg/encoding/xml/#Decoder.Decode"><code>Decode</code></a> method. An
--<a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a> type was also introduced.
--</p>
--
--<p>
--The functions <a href="/pkg/encoding/xml/#Marshal"><code>Marshal</code></a>
--and <a href="/pkg/encoding/xml/#Unmarshal"><code>Unmarshal</code></a>
--work with <code>[]byte</code> values now. To work with streams,
--use the new <a href="/pkg/encoding/xml/#Encoder"><code>Encoder</code></a>
--and <a href="/pkg/encoding/xml/#Decoder"><code>Decoder</code></a> types.
--</p>
--
--<p>
--When marshaling or unmarshaling values, the format of supported flags in
--field tags has changed to be closer to the
--<a href="/pkg/encoding/json"><code>json</code></a> package
--(<code>`xml:"name,flag"`</code>). The matching done between field tags, field
--names, and the XML attribute and element names is now case-sensitive.
--The <code>XMLName</code> field tag, if present, must also match the name
--of the XML element being marshaled.
--</p>
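--
--<p>
--A compact sketch (the <code>Person</code> type and its tags are invented for
--illustration) of the Go 1 API:
--</p>
--
--<pre>
--    type Person struct {
--        Name string `xml:"name"`
--        Age  int    `xml:"age,attr"`
--    }
--
--    b, err := xml.Marshal(Person{Name: "Alice", Age: 30})
--    if err != nil {
--        log.Fatal(err)
--    }
--    fmt.Println(string(b)) // the encoded element, with age as an attribute
--
--    p := new(Person)
--    if err := xml.Unmarshal(b, p); err != nil {
--        log.Fatal(err)
--    }
--</pre>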
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update most uses of the package except for some calls to
--<code>Unmarshal</code>. Special care must be taken with field tags,
--since the fix tool will not update them and if not fixed by hand they will
--misbehave silently in some cases. For example, the old
--<code>"attr"</code> is now written <code>",attr"</code> while plain
--<code>"attr"</code> remains valid but with a different meaning.
--</p>
--
--<h3 id="expvar">The expvar package</h3>
--
--<p>
--In Go 1, the <code>RemoveAll</code> function has been removed.
--The <code>Iter</code> function and Iter method on <code>*Map</code> have
--been replaced by
--<a href="/pkg/expvar/#Do"><code>Do</code></a>
--and
--<a href="/pkg/expvar/#Map.Do"><code>(*Map).Do</code></a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Most code using <code>expvar</code> will not need changing. The rare code that used
--<code>Iter</code> can be updated to pass a closure to <code>Do</code> to achieve the same effect.
--</p>
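--
--<p>
--For instance, a sketch of the closure-based replacement for <code>Iter</code>:
--</p>
--
--<pre>
--    expvar.Do(func(kv expvar.KeyValue) {
--        fmt.Println(kv.Key, kv.Value)
--    })
--</pre>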
--
--<h3 id="flag">The flag package</h3>
--
--<p>
--In Go 1, the interface <a href="/pkg/flag/#Value"><code>flag.Value</code></a> has changed slightly.
--The <code>Set</code> method now returns an <code>error</code> instead of
--a <code>bool</code> to indicate success or failure.
--</p>
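--
--<p>
--A sketch of a user-defined flag under the revised interface (the <code>celsius</code>
--type is invented for illustration):
--</p>
--
--<pre>
--    type celsius float64
--
--    func (c *celsius) String() string { return strconv.FormatFloat(float64(*c), 'f', -1, 64) }
--
--    // Set now reports failure with an error rather than a bool.
--    func (c *celsius) Set(s string) error {
--        v, err := strconv.ParseFloat(s, 64)
--        if err != nil {
--            return fmt.Errorf("invalid temperature %q", s)
--        }
--        *c = celsius(v)
--        return nil
--    }
--</pre>
--
--<p>
--A value of this type can then be registered with <code>flag.Var</code>.
--</p>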
--
--<p>
--There is also a new kind of flag, <code>Duration</code>, to support argument
--values specifying time intervals.
--Values for such flags must be given units, just as <code>time.Duration</code>
--formats them: <code>10s</code>, <code>1h30m</code>, etc.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/timeout/`}}
---->var timeout = flag.Duration(&#34;timeout&#34;, 30*time.Second, &#34;how long to wait for completion&#34;)</pre>
--
--<p>
--<em>Updating</em>:
--Programs that implement their own flags will need minor manual fixes to update their
--<code>Set</code> methods.
--The <code>Duration</code> flag is new and affects no existing code.
--</p>
--
--
--<h3 id="go">The go/* packages</h3>
--
--<p>
--Several packages under <code>go</code> have slightly revised APIs.
--</p>
--
--<p>
--A concrete <code>Mode</code> type was introduced for configuration mode flags
--in the packages
--<a href="/pkg/go/scanner/"><code>go/scanner</code></a>,
--<a href="/pkg/go/parser/"><code>go/parser</code></a>,
--<a href="/pkg/go/printer/"><code>go/printer</code></a>, and
--<a href="/pkg/go/doc/"><code>go/doc</code></a>.
--</p>
--
--<p>
--The modes <code>AllowIllegalChars</code> and <code>InsertSemis</code> have been removed
--from the <a href="/pkg/go/scanner/"><code>go/scanner</code></a> package. They were mostly
--useful for scanning text other than Go source files. Instead, the
--<a href="/pkg/text/scanner/"><code>text/scanner</code></a> package should be used
--for that purpose.
--</p>
--
--<p>
--The <a href="/pkg/go/scanner/#ErrorHandler"><code>ErrorHandler</code></a> provided
--to the scanner's <a href="/pkg/go/scanner/#Scanner.Init"><code>Init</code></a> method is
--now simply a function rather than an interface. The <code>ErrorVector</code> type has
--been removed in favor of the (existing) <a href="/pkg/go/scanner/#ErrorList"><code>ErrorList</code></a>
--type, and the <code>ErrorVector</code> methods have been migrated. Instead of embedding
--an <code>ErrorVector</code> in a client of the scanner, now a client should maintain
--an <code>ErrorList</code>.
--</p>
--
--<p>
--The set of parse functions provided by the <a href="/pkg/go/parser/"><code>go/parser</code></a>
--package has been reduced to the primary parse function
--<a href="/pkg/go/parser/#ParseFile"><code>ParseFile</code></a>, and a couple of
--convenience functions <a href="/pkg/go/parser/#ParseDir"><code>ParseDir</code></a>
--and <a href="/pkg/go/parser/#ParseExpr"><code>ParseExpr</code></a>.
--</p>
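--
--<p>
--A minimal sketch (the file name is a placeholder) of parsing a single file with the
--reduced API:
--</p>
--
--<pre>
--    fset := token.NewFileSet()
--    f, err := parser.ParseFile(fset, "hello.go", nil, parser.ParseComments)
--    if err != nil {
--        log.Fatal(err)
--    }
--    ast.Print(fset, f) // dump the resulting syntax tree
--</pre>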
--
--<p>
--The <a href="/pkg/go/printer/"><code>go/printer</code></a> package supports an additional
--configuration mode <a href="/pkg/go/printer/#Mode"><code>SourcePos</code></a>;
--if set, the printer will emit <code>//line</code> comments such that the generated
--output contains the original source code position information. The new type
--<a href="/pkg/go/printer/#CommentedNode"><code>CommentedNode</code></a> can be
--used to provide comments associated with an arbitrary
--<a href="/pkg/go/ast/#Node"><code>ast.Node</code></a> (until now only
--<a href="/pkg/go/ast/#File"><code>ast.File</code></a> carried comment information).
--</p>
--
--<p>
--The type names of the <a href="/pkg/go/doc/"><code>go/doc</code></a> package have been
--streamlined by removing the <code>Doc</code> suffix: <code>PackageDoc</code>
--is now <code>Package</code>, <code>ValueDoc</code> is <code>Value</code>, etc.
--Also, all types now consistently have a <code>Name</code> field (or <code>Names</code>,
--in the case of type <code>Value</code>) and <code>Type.Factories</code> has become
--<code>Type.Funcs</code>.
--Instead of calling <code>doc.NewPackageDoc(pkg, importpath)</code>,
--documentation for a package is created with:
--</p>
--
--<pre>
--    doc.New(pkg, importpath, mode)
--</pre>
--
--<p>
--where the new <code>mode</code> parameter specifies the operation mode:
--if set to <a href="/pkg/go/doc/#AllDecls"><code>AllDecls</code></a>, all declarations
--(not just exported ones) are considered.
--The function <code>NewFileDoc</code> was removed, and the function
--<code>CommentText</code> has become the method
--<a href="/pkg/go/ast/#Text"><code>Text</code></a> of
--<a href="/pkg/go/ast/#CommentGroup"><code>ast.CommentGroup</code></a>.
--</p>
--
--<p>
--In package <a href="/pkg/go/token/"><code>go/token</code></a>, the
--<a href="/pkg/go/token/#FileSet"><code>token.FileSet</code></a> method <code>Files</code>
--(which originally returned a channel of <code>*token.File</code>s) has been replaced
--with the iterator <a href="/pkg/go/token/#FileSet.Iterate"><code>Iterate</code></a> that
--accepts a function argument instead.
--</p>
--
--<p>
--In package <a href="/pkg/go/build/"><code>go/build</code></a>, the API
--has been nearly completely replaced.
--The package still computes Go package information
--but it does not run the build: the <code>Cmd</code> and <code>Script</code>
--types are gone.
--(To build code, use the new
--<a href="/cmd/go/"><code>go</code></a> command instead.)
--The <code>DirInfo</code> type is now named
--<a href="/pkg/go/build/#Package"><code>Package</code></a>.
--<code>FindTree</code> and <code>ScanDir</code> are replaced by
--<a href="/pkg/go/build/#Import"><code>Import</code></a>
--and
--<a href="/pkg/go/build/#ImportDir"><code>ImportDir</code></a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code that uses packages in <code>go</code> will have to be updated by hand; the
--compiler will reject incorrect uses. Templates used in conjunction with any of the
--<code>go/doc</code> types may need manual fixes; the renamed fields will lead
--to run-time errors.
--</p>
--
--<h3 id="hash">The hash package</h3>
--
--<p>
--In Go 1, the definition of <a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> includes
--a new method, <code>BlockSize</code>.  This new method is used primarily in the
--cryptographic libraries.
--</p>
--
--<p>
--The <code>Sum</code> method of the
--<a href="/pkg/hash/#Hash"><code>hash.Hash</code></a> interface now takes a
--<code>[]byte</code> argument, to which the hash value will be appended.
--The previous behavior can be recreated by adding a <code>nil</code> argument to the call.
--</p>
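--
--<p>
--A short sketch of the new <code>Sum</code> signature, using <code>sha1</code>:
--</p>
--
--<pre>
--    h := sha1.New()
--    io.WriteString(h, "hello, world")
--    digest := h.Sum(nil) // pass nil to obtain just the checksum, as before
--    fmt.Printf("%x\n", digest)
--</pre>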
--
--<p>
--<em>Updating</em>:
--Existing implementations of <code>hash.Hash</code> will need to add a
--<code>BlockSize</code> method.  Hashes that process the input one byte at
--a time can implement <code>BlockSize</code> to return 1.
--Running <code>go</code> <code>fix</code> will update calls to the <code>Sum</code> methods of the various
--implementations of <code>hash.Hash</code>.
--</p>
--
--<h3 id="http">The http package</h3>
--
--<p>
--In Go 1 the <a href="/pkg/net/http/"><code>http</code></a> package is refactored,
--putting some of the utilities into a
--<a href="/pkg/net/http/httputil/"><code>httputil</code></a> subdirectory.
--These pieces are only rarely needed by HTTP clients.
--The affected items are:
--</p>
--
--<ul>
--<li>ClientConn</li>
--<li>DumpRequest</li>
--<li>DumpRequestOut</li>
--<li>DumpResponse</li>
--<li>NewChunkedReader</li>
--<li>NewChunkedWriter</li>
--<li>NewClientConn</li>
--<li>NewProxyClientConn</li>
--<li>NewServerConn</li>
--<li>NewSingleHostReverseProxy</li>
--<li>ReverseProxy</li>
--<li>ServerConn</li>
--</ul>
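--
--<p>
--As a sketch (the addresses are placeholders), a reverse proxy now comes from the
--<code>httputil</code> subdirectory while ordinary serving stays in <code>http</code>:
--</p>
--
--<pre>
--    target, err := url.Parse("http://localhost:8080")
--    if err != nil {
--        log.Fatal(err)
--    }
--    proxy := httputil.NewSingleHostReverseProxy(target)
--    http.Handle("/", proxy)
--    log.Fatal(http.ListenAndServe(":9090", nil))
--</pre>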
--
--<p>
--The <code>Request.RawURL</code> field has been removed; it was a
--historical artifact.
--</p>
--
--<p>
--The <code>Handle</code> and <code>HandleFunc</code>
--functions, and the similarly-named methods of <code>ServeMux</code>,
--now panic if an attempt is made to register the same pattern twice.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update the few programs that are affected except for
--uses of <code>RawURL</code>, which must be fixed by hand.
--</p>
--
--<h3 id="image">The image package</h3>
--
--<p>
--The <a href="/pkg/image/"><code>image</code></a> package has had a number of
--minor changes, rearrangements and renamings.
--</p>
--
--<p>
--Most of the color handling code has been moved into its own package,
--<a href="/pkg/image/color/"><code>image/color</code></a>.
--For the elements that moved, a symmetry arises; for instance,
--each pixel of an
--<a href="/pkg/image/#RGBA"><code>image.RGBA</code></a>
--is a
--<a href="/pkg/image/color/#RGBA"><code>color.RGBA</code></a>.
--</p>
--
--<p>
--The old <code>image/ycbcr</code> package has been folded, with some
--renamings, into the
--<a href="/pkg/image/"><code>image</code></a>
--and
--<a href="/pkg/image/color/"><code>image/color</code></a>
--packages.
--</p>
--
--<p>
--The old <code>image.ColorImage</code> type is still in the <code>image</code>
--package but has been renamed
--<a href="/pkg/image/#Uniform"><code>image.Uniform</code></a>,
--while <code>image.Tiled</code> has been removed.
--</p>
--
--<p>
--This table lists the renamings.
--</p>
--
--<table class="codetable" frame="border" summary="image renames">
--<colgroup align="left" width="50%"></colgroup>
--<colgroup align="left" width="50%"></colgroup>
--<tr>
--<th align="left">Old</th>
--<th align="left">New</th>
--</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>image.Color</td> <td>color.Color</td></tr>
--<tr><td>image.ColorModel</td> <td>color.Model</td></tr>
--<tr><td>image.ColorModelFunc</td> <td>color.ModelFunc</td></tr>
--<tr><td>image.PalettedColorModel</td> <td>color.Palette</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>image.RGBAColor</td> <td>color.RGBA</td></tr>
--<tr><td>image.RGBA64Color</td> <td>color.RGBA64</td></tr>
--<tr><td>image.NRGBAColor</td> <td>color.NRGBA</td></tr>
--<tr><td>image.NRGBA64Color</td> <td>color.NRGBA64</td></tr>
--<tr><td>image.AlphaColor</td> <td>color.Alpha</td></tr>
--<tr><td>image.Alpha16Color</td> <td>color.Alpha16</td></tr>
--<tr><td>image.GrayColor</td> <td>color.Gray</td></tr>
--<tr><td>image.Gray16Color</td> <td>color.Gray16</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>image.RGBAColorModel</td> <td>color.RGBAModel</td></tr>
--<tr><td>image.RGBA64ColorModel</td> <td>color.RGBA64Model</td></tr>
--<tr><td>image.NRGBAColorModel</td> <td>color.NRGBAModel</td></tr>
--<tr><td>image.NRGBA64ColorModel</td> <td>color.NRGBA64Model</td></tr>
--<tr><td>image.AlphaColorModel</td> <td>color.AlphaModel</td></tr>
--<tr><td>image.Alpha16ColorModel</td> <td>color.Alpha16Model</td></tr>
--<tr><td>image.GrayColorModel</td> <td>color.GrayModel</td></tr>
--<tr><td>image.Gray16ColorModel</td> <td>color.Gray16Model</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>ycbcr.RGBToYCbCr</td> <td>color.RGBToYCbCr</td></tr>
--<tr><td>ycbcr.YCbCrToRGB</td> <td>color.YCbCrToRGB</td></tr>
--<tr><td>ycbcr.YCbCrColorModel</td> <td>color.YCbCrModel</td></tr>
--<tr><td>ycbcr.YCbCrColor</td> <td>color.YCbCr</td></tr>
--<tr><td>ycbcr.YCbCr</td> <td>image.YCbCr</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>ycbcr.SubsampleRatio444</td> <td>image.YCbCrSubsampleRatio444</td></tr>
--<tr><td>ycbcr.SubsampleRatio422</td> <td>image.YCbCrSubsampleRatio422</td></tr>
--<tr><td>ycbcr.SubsampleRatio420</td> <td>image.YCbCrSubsampleRatio420</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>image.ColorImage</td> <td>image.Uniform</td></tr>
--</table>
--
--<p>
--The image package's <code>New</code> functions
--(<a href="/pkg/image/#NewRGBA"><code>NewRGBA</code></a>,
--<a href="/pkg/image/#NewRGBA64"><code>NewRGBA64</code></a>, etc.)
--take an <a href="/pkg/image/#Rectangle"><code>image.Rectangle</code></a> as an argument
--instead of four integers.
--</p>
--
--<p>
--Finally, there are new predefined <code>color.Color</code> variables
--<a href="/pkg/image/color/#Black"><code>color.Black</code></a>,
--<a href="/pkg/image/color/#White"><code>color.White</code></a>,
--<a href="/pkg/image/color/#Opaque"><code>color.Opaque</code></a>
--and
--<a href="/pkg/image/color/#Transparent"><code>color.Transparent</code></a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
--</p>
--
--<h3 id="log_syslog">The log/syslog package</h3>
--
--<p>
--In Go 1, the <a href="/pkg/log/syslog/#NewLogger"><code>syslog.NewLogger</code></a>
--function returns an error as well as a <code>log.Logger</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--What little code is affected will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="mime">The mime package</h3>
--
--<p>
--In Go 1, the <a href="/pkg/mime/#FormatMediaType"><code>FormatMediaType</code></a> function
--of the <code>mime</code> package has  been simplified to make it
--consistent with
--<a href="/pkg/mime/#ParseMediaType"><code>ParseMediaType</code></a>.
--It now takes <code>"text/html"</code> rather than <code>"text"</code> and <code>"html"</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--What little code is affected will be caught by the compiler and must be updated by hand.
--</p>
--
--<h3 id="net">The net package</h3>
--
--<p>
--In Go 1, the various <code>SetTimeout</code>,
--<code>SetReadTimeout</code>, and <code>SetWriteTimeout</code> methods
--have been replaced with
--<a href="/pkg/net/#IPConn.SetDeadline"><code>SetDeadline</code></a>,
--<a href="/pkg/net/#IPConn.SetReadDeadline"><code>SetReadDeadline</code></a>, and
--<a href="/pkg/net/#IPConn.SetWriteDeadline"><code>SetWriteDeadline</code></a>,
--respectively.  Rather than taking a timeout value in nanoseconds that
--applies to any activity on the connection, the new methods set an
--absolute deadline (as a <code>time.Time</code> value) after which
--reads and writes will time out and no longer block.
--</p>
--
--<p>
--There are also new functions
--<a href="/pkg/net/#DialTimeout"><code>net.DialTimeout</code></a>
--to simplify timing out dialing a network address and
--<a href="/pkg/net/#ListenMulticastUDP"><code>net.ListenMulticastUDP</code></a>
--to allow multicast UDP to listen concurrently across multiple listeners.
--The <code>net.ListenMulticastUDP</code> function replaces the old
--<code>JoinGroup</code> and <code>LeaveGroup</code> methods.
--</p>
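--
--<p>
--A sketch (the host and durations are arbitrary) combining the new dial helper with an
--absolute deadline:
--</p>
--
--<pre>
--    conn, err := net.DialTimeout("tcp", "example.com:80", 5*time.Second)
--    if err != nil {
--        log.Fatal(err)
--    }
--    defer conn.Close()
--    conn.SetDeadline(time.Now().Add(10 * time.Second)) // an absolute time, not a timeout
--</pre>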
--
--<p>
--<em>Updating</em>:
--Code that uses the old methods will fail to compile and must be updated by hand.
--The semantic change makes it difficult for the fix tool to update automatically.
--</p>
--
--<h3 id="os">The os package</h3>
--
--<p>
--The <code>Time</code> function has been removed; callers should use
--the <a href="/pkg/time/#Time"><code>Time</code></a> type from the
--<code>time</code> package.
--</p>
--
--<p>
--The <code>Exec</code> function has been removed; callers should use
--<code>Exec</code> from the <code>syscall</code> package, where available.
--</p>
--
--<p>
--The <code>ShellExpand</code> function has been renamed to <a
--href="/pkg/os/#ExpandEnv"><code>ExpandEnv</code></a>.
--</p>
--
--<p>
--The <a href="/pkg/os/#NewFile"><code>NewFile</code></a> function
--now takes a <code>uintptr</code> fd, instead of an <code>int</code>.
--The <a href="/pkg/os/#File.Fd"><code>Fd</code></a> method on files now
--also returns a <code>uintptr</code>.
--</p>
--
--<p>
--There are no longer error constants such as <code>EINVAL</code>
--in the <code>os</code> package, since the set of values varied with
--the underlying operating system. There are new portable functions like
--<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>
--to test common error properties, plus a few new error values
--with more Go-like names, such as
--<a href="/pkg/os/#ErrPermission"><code>ErrPermission</code></a>
--and
--<a href="/pkg/os/#ErrNoEnv"><code>ErrNoEnv</code></a>.
--</p>
--
--<p>
--The <code>Getenverror</code> function has been removed. To distinguish
--between a non-existent environment variable and an empty string,
--use <a href="/pkg/os/#Environ"><code>os.Environ</code></a> or
--<a href="/pkg/syscall/#Getenv"><code>syscall.Getenv</code></a>.
--</p>
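--
--<p>
--A short sketch (the variable name is arbitrary) of distinguishing an unset variable
--from an empty one without <code>Getenverror</code>:
--</p>
--
--<pre>
--    val, found := syscall.Getenv("SOME_VAR")
--    if !found {
--        fmt.Println("SOME_VAR is not set")
--    } else {
--        fmt.Printf("SOME_VAR=%q\n", val)
--    }
--</pre>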
--
--
--<p>
--The <a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a> method has
--dropped its option argument and the associated constants are gone
--from the package.
--Also, the function <code>Wait</code> is gone; only the method of
--the <code>Process</code> type persists.
--</p>
--
--<p>
--The <code>Waitmsg</code> type returned by
--<a href="/pkg/os/#Process.Wait"><code>Process.Wait</code></a>
--has been replaced with a more portable
--<a href="/pkg/os/#ProcessState"><code>ProcessState</code></a>
--type with accessor methods to recover information about the
--process.
--Because of changes to <code>Wait</code>, the <code>ProcessState</code>
--value always describes an exited process.
--Portability concerns simplified the interface in other ways, but the values returned by the
--<a href="/pkg/os/#ProcessState.Sys"><code>ProcessState.Sys</code></a> and
--<a href="/pkg/os/#ProcessState.SysUsage"><code>ProcessState.SysUsage</code></a>
--methods can be type-asserted to underlying system-specific data structures such as
--<a href="/pkg/syscall/#WaitStatus"><code>syscall.WaitStatus</code></a> and
--<a href="/pkg/syscall/#Rusage"><code>syscall.Rusage</code></a> on Unix.
--</p>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will drop a zero argument to <code>Process.Wait</code>.
--All other changes will be caught by the compiler and must be updated by hand.
--</p>
--
--<h4 id="os_fileinfo">The os.FileInfo type</h4>
--
--<p>
--Go 1 redefines the <a href="/pkg/os/#FileInfo"><code>os.FileInfo</code></a> type,
--changing it from a struct to an interface:
--</p>
--
--<pre>
--    type FileInfo interface {
--        Name() string       // base name of the file
--        Size() int64        // length in bytes
--        Mode() FileMode     // file mode bits
--        ModTime() time.Time // modification time
--        IsDir() bool        // abbreviation for Mode().IsDir()
--        Sys() interface{}   // underlying data source (can return nil)
--    }
--</pre>
--
--<p>
--The file mode information has been moved into a subtype called
--<a href="/pkg/os/#FileMode"><code>os.FileMode</code></a>,
--a simple integer type with <code>IsDir</code>, <code>Perm</code>, and <code>String</code>
--methods.
--</p>
--
--<p>
--The system-specific details of file modes and properties such as (on Unix)
--i-number have been removed from <code>FileInfo</code> altogether.
--Instead, each operating system's <code>os</code> package provides an
--implementation of the <code>FileInfo</code> interface, which
--has a <code>Sys</code> method that returns the
--system-specific representation of file metadata.
--For instance, to discover the i-number of a file on a Unix system, unpack
--the <code>FileInfo</code> like this:
--</p>
--
--<pre>
--    fi, err := os.Stat("hello.go")
--    if err != nil {
--        log.Fatal(err)
--    }
--    // Check that it's a Unix file.
--    unixStat, ok := fi.Sys().(*syscall.Stat_t)
--    if !ok {
--        log.Fatal("hello.go: not a Unix file")
--    }
--    fmt.Printf("file i-number: %d\n", unixStat.Ino)
--</pre>
--
--<p>
--Assuming (which is unwise) that <code>"hello.go"</code> is a Unix file,
--the i-number expression could be contracted to
--</p>
--
--<pre>
--    fi.Sys().(*syscall.Stat_t).Ino
--</pre>
--
--<p>
--The vast majority of uses of <code>FileInfo</code> need only the methods
--of the standard interface.
--</p>
--
--<p>
--The <code>os</code> package no longer contains wrappers for the POSIX errors
--such as <code>ENOENT</code>.
--For the few programs that need to verify particular error conditions, there are
--now the boolean functions
--<a href="/pkg/os/#IsExist"><code>IsExist</code></a>,
--<a href="/pkg/os/#IsNotExist"><code>IsNotExist</code></a>
--and
--<a href="/pkg/os/#IsPermission"><code>IsPermission</code></a>.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/os\.Open/` `/}/`}}
---->    f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
--    if os.IsExist(err) {
--        log.Printf(&#34;%s already exists&#34;, name)
--    }</pre>
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update code that uses the old equivalent of the current <code>os.FileInfo</code>
--and <code>os.FileMode</code> API.
--Code that needs system-specific file details will need to be updated by hand.
--Code that uses the old POSIX error values from the <code>os</code> package
--will fail to compile and will also need to be updated by hand.
--</p>
--
--<h3 id="os_signal">The os/signal package</h3>
--
--<p>
--The <code>os/signal</code> package in Go 1 replaces the
--<code>Incoming</code> function, which returned a channel
--that received all incoming signals,
--with the selective <code>Notify</code> function, which asks
--for delivery of specific signals on an existing channel.
--</p>
--
--<p>
--<em>Updating</em>:
--Code must be updated by hand.
--A literal translation of
--</p>
--<pre>
--c := signal.Incoming()
--</pre>
--<p>
--is
--</p>
--<pre>
--c := make(chan os.Signal)
--signal.Notify(c) // ask for all signals
--</pre>
--<p>
--but most code should list the specific signals it wants to handle instead:
--</p>
--<pre>
--c := make(chan os.Signal)
--signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT)
--</pre>
--
--<h3 id="path_filepath">The path/filepath package</h3>
--
--<p>
--In Go 1, the <a href="/pkg/path/filepath/#Walk"><code>Walk</code></a> function of the
--<code>path/filepath</code> package
--has been changed to take a function value of type
--<a href="/pkg/path/filepath/#WalkFunc"><code>WalkFunc</code></a>
--instead of a <code>Visitor</code> interface value.
--<code>WalkFunc</code> unifies the handling of both files and directories.
--</p>
--
--<pre>
--    type WalkFunc func(path string, info os.FileInfo, err error) error
--</pre>
--
--<p>
--The <code>WalkFunc</code> function will be called even for files or directories that could not be opened;
--in such cases the error argument will describe the failure.
--If a directory's contents are to be skipped,
--the function should return the value <a href="/pkg/path/filepath/#variables"><code>filepath.SkipDir</code></a>.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/STARTWALK/` `/ENDWALK/`}}
---->    markFn := func(path string, info os.FileInfo, err error) error {
--        if path == &#34;pictures&#34; { <span class="comment">// Will skip walking of directory pictures and its contents.</span>
--            return filepath.SkipDir
--        }
--        if err != nil {
--            return err
--        }
--        log.Println(path)
--        return nil
--    }
--    err := filepath.Walk(&#34;.&#34;, markFn)
--    if err != nil {
--        log.Fatal(err)
--    }</pre>
--
--<p>
--<em>Updating</em>:
--The change simplifies most code but has subtle consequences, so affected programs
--will need to be updated by hand.
--The compiler will catch code using the old interface.
--</p>
--
--<h3 id="regexp">The regexp package</h3>
--
--<p>
--The <a href="/pkg/regexp/"><code>regexp</code></a> package has been rewritten.
--It has the same interface but the specification of the regular expressions
--it supports has changed from the old "egrep" form to that of
--<a href="http://code.google.com/p/re2/">RE2</a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code that uses the package should have its regular expressions checked by hand.
--</p>
--
--<h3 id="runtime">The runtime package</h3>
--
--<p>
--In Go 1, much of the API exported by package
--<code>runtime</code> has been removed in favor of
--functionality provided by other packages.
--Code using the <code>runtime.Type</code> interface
--or its specific concrete type implementations should
--now use package <a href="/pkg/reflect/"><code>reflect</code></a>.
--Code using <code>runtime.Semacquire</code> or <code>runtime.Semrelease</code>
--should use channels or the abstractions in package <a href="/pkg/sync/"><code>sync</code></a>.
--The <code>runtime.Alloc</code>, <code>runtime.Free</code>,
--and <code>runtime.Lookup</code> functions, an unsafe API created for
--debugging the memory allocator, have no replacement.
--</p>
--
--<p>
--Before, <code>runtime.MemStats</code> was a global variable holding
--statistics about memory allocation, and calls to <code>runtime.UpdateMemStats</code>
--ensured that it was up to date.
--In Go 1, <code>runtime.MemStats</code> is a struct type, and code should use
--<a href="/pkg/runtime/#ReadMemStats"><code>runtime.ReadMemStats</code></a>
--to obtain the current statistics.
--</p>
--
--<p>
--The package adds a new function,
--<a href="/pkg/runtime/#NumCPU"><code>runtime.NumCPU</code></a>, that returns the number of CPUs available
--for parallel execution, as reported by the operating system kernel.
--Its value can inform the setting of <code>GOMAXPROCS</code>.
--The <code>runtime.Cgocalls</code> and <code>runtime.Goroutines</code> functions
--have been renamed to <code>runtime.NumCgoCall</code> and <code>runtime.NumGoroutine</code>.
--</p>
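--
--<p>
--A brief sketch of the new calls:
--</p>
--
--<pre>
--    ms := new(runtime.MemStats)
--    runtime.ReadMemStats(ms) // MemStats is now a plain struct type
--    fmt.Println("allocated bytes:", ms.Alloc)
--
--    runtime.GOMAXPROCS(runtime.NumCPU()) // one way to use the new NumCPU value
--</pre>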
--
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update code for the function renamings.
--Other code will need to be updated by hand.
--</p>
--
--<h3 id="strconv">The strconv package</h3>
--
--<p>
--In Go 1, the
--<a href="/pkg/strconv/"><code>strconv</code></a>
--package has been significantly reworked to make it more Go-like and less C-like,
--although <code>Atoi</code> lives on (it's similar to
--<code>int(ParseInt(x, 10, 0))</code>), as does
--<code>Itoa(x)</code> (similar to <code>FormatInt(int64(x), 10)</code>).
--There are also new variants of some of the functions that append to byte slices rather than
--return strings, to allow control over allocation.
--</p>
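--
--<p>
--A few representative calls (the values are arbitrary), including one of the new
--append variants:
--</p>
--
--<pre>
--    i, err := strconv.ParseInt("-42", 10, 64)
--    if err != nil {
--        log.Fatal(err)
--    }
--    s := strconv.FormatInt(i, 10)
--    buf := strconv.AppendInt([]byte("value="), i, 10) // appends the digits to a byte slice
--    fmt.Println(s, string(buf))
--</pre>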
--
--<p>
--This table summarizes the renamings; see the
--<a href="/pkg/strconv/">package documentation</a>
--for full details.
--</p>
--
--<table class="codetable" frame="border" summary="strconv renames">
--<colgroup align="left" width="50%"></colgroup>
--<colgroup align="left" width="50%"></colgroup>
--<tr>
--<th align="left">Old call</th>
--<th align="left">New call</th>
--</tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Atob(x)</td> <td>ParseBool(x)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Atof32(x)</td> <td>ParseFloat(x, 32)§</td></tr>
--<tr><td>Atof64(x)</td> <td>ParseFloat(x, 64)</td></tr>
--<tr><td>AtofN(x, n)</td> <td>ParseFloat(x, n)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Atoi(x)</td> <td>Atoi(x)</td></tr>
--<tr><td>Atoi(x)</td> <td>ParseInt(x, 10, 0)§</td></tr>
--<tr><td>Atoi64(x)</td> <td>ParseInt(x, 10, 64)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Atoui(x)</td> <td>ParseUint(x, 10, 0)§</td></tr>
--<tr><td>Atoui64(x)</td> <td>ParseUint(x, 10, 64)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Btoi64(x, b)</td> <td>ParseInt(x, b, 64)</td></tr>
--<tr><td>Btoui64(x, b)</td> <td>ParseUint(x, b, 64)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Btoa(x)</td> <td>FormatBool(x)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Ftoa32(x, f, p)</td> <td>FormatFloat(float64(x), f, p, 32)</td></tr>
--<tr><td>Ftoa64(x, f, p)</td> <td>FormatFloat(x, f, p, 64)</td></tr>
--<tr><td>FtoaN(x, f, p, n)</td> <td>FormatFloat(x, f, p, n)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Itoa(x)</td> <td>Itoa(x)</td></tr>
--<tr><td>Itoa(x)</td> <td>FormatInt(int64(x), 10)</td></tr>
--<tr><td>Itoa64(x)</td> <td>FormatInt(x, 10)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Itob(x, b)</td> <td>FormatInt(int64(x), b)</td></tr>
--<tr><td>Itob64(x, b)</td> <td>FormatInt(x, b)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Uitoa(x)</td> <td>FormatUint(uint64(x), 10)</td></tr>
--<tr><td>Uitoa64(x)</td> <td>FormatUint(x, 10)</td></tr>
--<tr>
--<td colspan="2"><hr></td>
--</tr>
--<tr><td>Uitob(x, b)</td> <td>FormatUint(uint64(x), b)</td></tr>
--<tr><td>Uitob64(x, b)</td> <td>FormatUint(x, b)</td></tr>
--</table>
--		
--<p>
--<em>Updating</em>:
--Running <code>go</code> <code>fix</code> will update almost all code affected by the change.
--<br>
--§ <code>Atoi</code> persists but <code>Atoui</code> and <code>Atof32</code> do not, so
--they may require
--a cast that must be added by hand; the <code>go</code> <code>fix</code> tool will warn about it.
--</p>
--
--
--<h3 id="templates">The template packages</h3>
--
--<p>
--The <code>template</code> and <code>exp/template/html</code> packages have moved to 
--<a href="/pkg/text/template/"><code>text/template</code></a> and
--<a href="/pkg/html/template/"><code>html/template</code></a>.
--More significant, the interface to these packages has been simplified.
--The template language is the same, but the concept of "template set" is gone
--and the functions and methods of the packages have changed accordingly,
--often by elimination.
--</p>
--
--<p>
--Instead of sets, a <code>Template</code> object
--may contain multiple named template definitions,
--in effect constructing
--name spaces for template invocation.
--A template can invoke any other template associated with it, but only those
--templates associated with it.
--The simplest way to associate templates is to parse them together, something
--made easier with the new structure of the packages.
--</p>
--
--<p>
--<em>Updating</em>:
--The imports will be updated by the fix tool.
--Single-template uses will otherwise be largely unaffected.
--Code that uses multiple templates in concert will need to be updated by hand.
--The <a href="/pkg/text/template/#examples">examples</a> in
--the documentation for <code>text/template</code> can provide guidance.
--</p>
--
--<h3 id="testing">The testing package</h3>
--
--<p>
--The testing package has a type, <code>B</code>, passed as an argument to benchmark functions.
--In Go 1, <code>B</code> has new methods, analogous to those of <code>T</code>, enabling
--logging and failure reporting.
--</p>
--
--<pre><!--{{code "/doc/progs/go1.go" `/func.*Benchmark/` `/^}/`}}
---->func BenchmarkSprintf(b *testing.B) {
--    <span class="comment">// Verify correctness before running benchmark.</span>
--    b.StopTimer()
--    got := fmt.Sprintf(&#34;%x&#34;, 23)
--    const expect = &#34;17&#34;
--    if expect != got {
--        b.Fatalf(&#34;expected %q; got %q&#34;, expect, got)
--    }
--    b.StartTimer()
--    for i := 0; i &lt; b.N; i++ {
--        fmt.Sprintf(&#34;%x&#34;, 23)
--    }
--}</pre>
--
--<p>
--<em>Updating</em>:
--Existing code is unaffected, although benchmarks that use <code>println</code>
--or <code>panic</code> should be updated to use the new methods.
--</p>
--
--<h3 id="testing_script">The testing/script package</h3>
--
--<p>
--The testing/script package has been deleted. It was a dreg.
--</p>
--
--<p>
--<em>Updating</em>:
--No code is likely to be affected.
--</p>
--
--<h3 id="unsafe">The unsafe package</h3>
--
--<p>
--In Go 1, the functions
--<code>unsafe.Typeof</code>, <code>unsafe.Reflect</code>,
--<code>unsafe.Unreflect</code>, <code>unsafe.New</code>, and
--<code>unsafe.NewArray</code> have been removed;
--they duplicated safer functionality provided by
--package <a href="/pkg/reflect/"><code>reflect</code></a>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code using these functions must be rewritten to use
--package <a href="/pkg/reflect/"><code>reflect</code></a>.
--The changes to <a href="http://code.google.com/p/go/source/detail?r=2646dc956207">encoding/gob</a> and the <a href="http://code.google.com/p/goprotobuf/source/detail?r=5340ad310031">protocol buffer library</a>
--may be helpful as examples.
--</p>
--
--<h3 id="url">The url package</h3>
--
--<p>
--In Go 1 several fields from the <a href="/pkg/net/url/#URL"><code>url.URL</code></a> type
--were removed or replaced.
--</p>
--
--<p>
--The <a href="/pkg/net/url/#URL.String"><code>String</code></a> method now
--predictably rebuilds an encoded URL string using all of <code>URL</code>'s
--fields as necessary. The resulting string will also no longer have
--passwords escaped.
--</p>
--
--<p>
--The <code>Raw</code> field has been removed. In most cases the <code>String</code>
--method may be used in its place.
--</p>
--
--<p>
--The old <code>RawUserinfo</code> field is replaced by the <code>User</code>
--field, of type <a href="/pkg/net/url/#Userinfo"><code>*url.Userinfo</code></a>.
--Values of this type may be created using the new <a href="/pkg/net/url/#User"><code>url.User</code></a>
--and <a href="/pkg/net/url/#UserPassword"><code>url.UserPassword</code></a>
--functions. The <code>EscapeUserinfo</code> and <code>UnescapeUserinfo</code>
--functions are also gone.
--</p>
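--
--<p>
--A sketch (the credentials and host are placeholders) of building a URL with the new
--<code>Userinfo</code> helpers:
--</p>
--
--<pre>
--    u := url.URL{
--        Scheme: "http",
--        User:   url.UserPassword("alice", "secret"),
--        Host:   "example.com",
--        Path:   "/dashboard",
--    }
--    fmt.Println(u.String())
--</pre>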
--
--<p>
--The <code>RawAuthority</code> field has been removed. The same information is
--available in the <code>Host</code> and <code>User</code> fields.
--</p>
--
--<p>
--The <code>RawPath</code> field and the <code>EncodedPath</code> method have
--been removed. The path information in rooted URLs (with a slash following the
--schema) is now available only in decoded form in the <code>Path</code> field.
--Occasionally, the encoded data may be required to obtain information that
--was lost in the decoding process. These cases must be handled by accessing
--the data the URL was built from.
--</p>
--
--<p>
--URLs with non-rooted paths, such as <code>"mailto:dev@golang.org?subject=Hi"</code>,
--are also handled differently. The <code>OpaquePath</code> boolean field has been
--removed and a new <code>Opaque</code> string field introduced to hold the encoded
--path for such URLs. In Go 1, the cited URL parses as:
--</p>
--
--<pre>
--    URL{
--        Scheme: "mailto",
--        Opaque: "dev@golang.org",
--        RawQuery: "subject=Hi",
--    }
--</pre>
--
--<p>
--A new <a href="/pkg/net/url/#URL.RequestURI"><code>RequestURI</code></a> method was
--added to <code>URL</code>.
--</p>
--
--<p>
--The <code>ParseWithReference</code> function has been renamed to <code>ParseWithFragment</code>.
--</p>
--
--<p>
--<em>Updating</em>:
--Code that uses the old fields will fail to compile and must be updated by hand.
--The semantic changes make it difficult for the fix tool to update automatically.
--</p>
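
A minimal sketch against the net/url package as it ships today, where the userinfo constructors live as url.User and url.UserPassword; it shows the Opaque field for the mailto example above and a URL built with the User field (the host and credentials are made up for illustration):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Non-rooted URL: the encoded path lands in Opaque, not Path.
        u, err := url.Parse("mailto:dev@golang.org?subject=Hi")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q %q %q\n", u.Scheme, u.Opaque, u.RawQuery) // "mailto" "dev@golang.org" "subject=Hi"

        // The User field (type *url.Userinfo) replaces the old RawUserinfo string.
        v := &url.URL{
            Scheme: "https",
            User:   url.UserPassword("alice", "secret"),
            Host:   "example.com",
            Path:   "/inbox",
        }
        fmt.Println(v.String()) // https://alice:secret@example.com/inbox
    }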
--
--<h2 id="cmd_go">The go command</h2>
--
--<p>
--Go 1 introduces the <a href="/cmd/go/">go command</a>, a tool for fetching,
--building, and installing Go packages and commands. The <code>go</code> command
--does away with makefiles, instead using Go source code to find dependencies and
--determine build conditions. Most existing Go programs will no longer require
--makefiles to be built.
--</p>
--
--<p>
--See <a href="/doc/code.html">How to Write Go Code</a> for a primer on the
--<code>go</code> command and the <a href="/cmd/go/">go command documentation</a>
--for the full details.
--</p>
--
--<p>
--<em>Updating</em>:
--Projects that depend on the Go project's old makefile-based build
--infrastructure (<code>Make.pkg</code>, <code>Make.cmd</code>, and so on) should
--switch to using the <code>go</code> command for building Go code and, if
--necessary, rewrite their makefiles to perform any auxiliary build tasks.
--</p>
--
--<h2 id="cmd_cgo">The cgo command</h2>
--
--<p>
--In Go 1, the <a href="/cmd/cgo">cgo command</a>
--uses a different <code>_cgo_export.h</code>
--file, which is generated for packages containing <code>//export</code> lines.
--The <code>_cgo_export.h</code> file now begins with the C preamble comment,
--so that exported function definitions can use types defined there.
--This has the effect of compiling the preamble multiple times, so a
--package using <code>//export</code> must not put function definitions
--or variable initializations in the C preamble.
--</p>
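
A minimal cgo sketch, assuming a working C toolchain, of a preamble that sticks to declarations while the file still uses //export; the identifiers are illustrative:

    package main

    /*
    // Declarations only: with //export below, this preamble also lands in
    // _cgo_export.h and is compiled more than once, so a function definition
    // or an initialized variable here would be duplicated at link time.
    extern int shared_counter;
    */
    import "C"

    import "fmt"

    //export Greet
    func Greet() {
        fmt.Println("hello from an exported Go function")
    }

    func main() {
        Greet()
    }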
--
--<h2 id="releases">Packaged releases</h2>
--
--<p>
--One of the most significant changes associated with Go 1 is the availability
--of prepackaged, downloadable distributions.
--They are available for many combinations of architecture and operating system
--(including Windows) and the list will grow.
--Installation details are described on the
--<a href="/doc/install">Getting Started</a> page, while
--the distributions themselves are listed on the
--<a href="http://code.google.com/p/go/downloads/list">downloads page</a>.
--
--
--</div>
--
--<div id="footer">
--Build version go1.0.1.<br>
--Except as <a href="http://code.google.com/policies.html#restrictions">noted</a>,
--the content of this page is licensed under the
--Creative Commons Attribution 3.0 License,
--and code is licensed under a <a href="/LICENSE">BSD license</a>.<br>
--<a href="/doc/tos.html">Terms of Service</a> | 
--<a href="http://www.google.com/intl/en/privacy/privacy-policy.html">Privacy Policy</a>
--</div>
--
--<script type="text/javascript">
--(function() {
--  var ga = document.createElement("script"); ga.type = "text/javascript"; ga.async = true;
--  ga.src = ("https:" == document.location.protocol ? "https://ssl" : "http://www") + ".google-analytics.com/ga.js";
--  var s = document.getElementsByTagName("script")[0]; s.parentNode.insertBefore(ga, s);
--})();
--</script>
--</body>
--<script type="text/javascript">
--  (function() {
--    var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;
--    po.src = 'https://apis.google.com/js/plusone.js';
--    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);
--  })();
--</script>
--</html>
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/README b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/README
-deleted file mode 100644
-index 9b4c2d8..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/README
-+++ /dev/null
-@@ -1,28 +0,0 @@
--The *.dat files in this directory are copied from The WebKit Open Source
--Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources.
--WebKit is licensed under a BSD style license.
--http://webkit.org/coding/bsd-license.html says:
--
--Copyright (C) 2009 Apple Inc. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are met:
--
--1. Redistributions of source code must retain the above copyright notice,
--this list of conditions and the following disclaimer.
--
--2. Redistributions in binary form must reproduce the above copyright notice,
--this list of conditions and the following disclaimer in the documentation
--and/or other materials provided with the distribution.
--
--THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY
--EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
--WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
--DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
--DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
--(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
--LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
--ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
--SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption01.dat
-deleted file mode 100644
-index 787e1b0..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption01.dat
-+++ /dev/null
-@@ -1,194 +0,0 @@
--#data
--<a><p></a></p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <p>
--|       <a>
--
--#data
--<a>1<p>2</a>3</p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|     <p>
--|       <a>
--|         "2"
--|       "3"
--
--#data
--<a>1<button>2</a>3</button>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|     <button>
--|       <a>
--|         "2"
--|       "3"
--
--#data
--<a>1<b>2</a>3</b>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|       <b>
--|         "2"
--|     <b>
--|       "3"
--
--#data
--<a>1<div>2<div>3</a>4</div>5</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|     <div>
--|       <a>
--|         "2"
--|       <div>
--|         <a>
--|           "3"
--|         "4"
--|       "5"
--
--#data
--<table><a>1<p>2</a>3</p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|     <p>
--|       <a>
--|         "2"
--|       "3"
--|     <table>
--
--#data
--<b><b><a><p></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <b>
--|         <a>
--|         <p>
--|           <a>
--
--#data
--<b><a><b><p></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <a>
--|         <b>
--|       <b>
--|         <p>
--|           <a>
--
--#data
--<a><b><b><p></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|         <b>
--|     <b>
--|       <b>
--|         <p>
--|           <a>
--
--#data
--<p>1<s id="A">2<b id="B">3</p>4</s>5</b>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "1"
--|       <s>
--|         id="A"
--|         "2"
--|         <b>
--|           id="B"
--|           "3"
--|     <s>
--|       id="A"
--|       <b>
--|         id="B"
--|         "4"
--|     <b>
--|       id="B"
--|       "5"
--
--#data
--<table><a>1<td>2</td>3</table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "1"
--|     <a>
--|       "3"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "2"
--
--#data
--<table>A<td>B</td>C</table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "AC"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "B"
--
--#data
--<a><svg><tr><input></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <svg svg>
--|         <svg tr>
--|           <svg input>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption02.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption02.dat
-deleted file mode 100644
-index d18151b..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/adoption02.dat
-+++ /dev/null
-@@ -1,31 +0,0 @@
--#data
--<b>1<i>2<p>3</b>4
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "1"
--|       <i>
--|         "2"
--|     <i>
--|       <p>
--|         <b>
--|           "3"
--|         "4"
--
--#data
--<a><div><style></style><address><a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <div>
--|       <a>
--|         <style>
--|       <address>
--|         <a>
--|         <a>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/comments01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/comments01.dat
-deleted file mode 100644
-index 44f1876..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/comments01.dat
-+++ /dev/null
-@@ -1,135 +0,0 @@
--#data
--FOO<!-- BAR -->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR  -->
--|     "BAZ"
--
--#data
--FOO<!-- BAR --!>BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR  -->
--|     "BAZ"
--
--#data
--FOO<!-- BAR --   >BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR --   >BAZ -->
--
--#data
--FOO<!-- BAR -- <QUX> -- MUX -->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR -- <QUX> -- MUX  -->
--|     "BAZ"
--
--#data
--FOO<!-- BAR -- <QUX> -- MUX --!>BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR -- <QUX> -- MUX  -->
--|     "BAZ"
--
--#data
--FOO<!-- BAR -- <QUX> -- MUX -- >BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  BAR -- <QUX> -- MUX -- >BAZ -->
--
--#data
--FOO<!---->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  -->
--|     "BAZ"
--
--#data
--FOO<!--->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  -->
--|     "BAZ"
--
--#data
--FOO<!-->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!--  -->
--|     "BAZ"
--
--#data
--<?xml version="1.0">Hi
--#errors
--#document
--| <!-- ?xml version="1.0" -->
--| <html>
--|   <head>
--|   <body>
--|     "Hi"
--
--#data
--<?xml version="1.0">
--#errors
--#document
--| <!-- ?xml version="1.0" -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<?xml version
--#errors
--#document
--| <!-- ?xml version -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--FOO<!----->BAZ
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <!-- - -->
--|     "BAZ"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/doctype01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/doctype01.dat
-deleted file mode 100644
-index ae45732..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/doctype01.dat
-+++ /dev/null
-@@ -1,370 +0,0 @@
--#data
--<!DOCTYPE html>Hello
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!dOctYpE HtMl>Hello
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPEhtml>Hello
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE>Hello
--#errors
--#document
--| <!DOCTYPE >
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE >Hello
--#errors
--#document
--| <!DOCTYPE >
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato >Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato taco>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato taco "ddd>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato sYstEM>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato sYstEM    >Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE   potato       sYstEM  ggg>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato SYSTEM taco  >Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato SYSTEM 'taco"'>Hello
--#errors
--#document
--| <!DOCTYPE potato "" "taco"">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato SYSTEM "taco">Hello
--#errors
--#document
--| <!DOCTYPE potato "" "taco">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato SYSTEM "tai'co">Hello
--#errors
--#document
--| <!DOCTYPE potato "" "tai'co">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato SYSTEMtaco "ddd">Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato grass SYSTEM taco>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato pUbLIc>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato pUbLIc >Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato pUbLIcgoof>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato PUBLIC goof>Hello
--#errors
--#document
--| <!DOCTYPE potato>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato PUBLIC "go'of">Hello
--#errors
--#document
--| <!DOCTYPE potato "go'of" "">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato PUBLIC 'go'of'>Hello
--#errors
--#document
--| <!DOCTYPE potato "go" "">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato PUBLIC 'go:hh   of' >Hello
--#errors
--#document
--| <!DOCTYPE potato "go:hh   of" "">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE potato PUBLIC "W3C-//dfdf" SYSTEM ggg>Hello
--#errors
--#document
--| <!DOCTYPE potato "W3C-//dfdf" "">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
--   "http://www.w3.org/TR/html4/strict.dtd">Hello
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE ...>Hello
--#errors
--#document
--| <!DOCTYPE ...>
--| <html>
--|   <head>
--|   <body>
--|     "Hello"
--
--#data
--<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
--"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN"
--"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE root-element [SYSTEM OR PUBLIC FPI] "uri" [ 
--<!-- internal declarations -->
--]>
--#errors
--#document
--| <!DOCTYPE root-element>
--| <html>
--|   <head>
--|   <body>
--|     "]>"
--
--#data
--<!DOCTYPE html PUBLIC
--  "-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
--    "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
--#errors
--#document
--| <!DOCTYPE html "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" "http://www.wapforum.org/DTD/xhtml-mobile10.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE HTML SYSTEM "http://www.w3.org/DTD/HTML4-strict.dtd"><body><b>Mine!</b></body>
--#errors
--#document
--| <!DOCTYPE html "" "http://www.w3.org/DTD/HTML4-strict.dtd">
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "Mine!"
--
--#data
--<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN""http://www.w3.org/TR/html4/strict.dtd">
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE HTML PUBLIC"-//W3C//DTD HTML 4.01//EN"'http://www.w3.org/TR/html4/strict.dtd'>
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE HTML PUBLIC'-//W3C//DTD HTML 4.01//EN''http://www.w3.org/TR/html4/strict.dtd'>
--#errors
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--| <html>
--|   <head>
--|   <body>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities01.dat
-deleted file mode 100644
-index c8073b7..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities01.dat
-+++ /dev/null
-@@ -1,603 +0,0 @@
--#data
--FOO&gt;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO>BAR"
--
--#data
--FOO&gtBAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO>BAR"
--
--#data
--FOO&gt BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO> BAR"
--
--#data
--FOO&gt;;;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO>;;BAR"
--
--#data
--I'm &notit; I tell you
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "I'm ¬it; I tell you"
--
--#data
--I'm &notin; I tell you
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "I'm ∉ I tell you"
--
--#data
--FOO& BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO& BAR"
--
--#data
--FOO&<BAR>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&"
--|     <bar>
--
--#data
--FOO&&&&gt;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&&&>BAR"
--
--#data
--FOO&#41;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO)BAR"
--
--#data
--FOO&#x41;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOABAR"
--
--#data
--FOO&#X41;BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOABAR"
--
--#data
--FOO&#BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&#BAR"
--
--#data
--FOO&#ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&#ZOO"
--
--#data
--FOO&#xBAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOºR"
--
--#data
--FOO&#xZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&#xZOO"
--
--#data
--FOO&#XZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO&#XZOO"
--
--#data
--FOO&#41BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO)BAR"
--
--#data
--FOO&#x41BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO䆺R"
--
--#data
--FOO&#x41ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOAZOO"
--
--#data
--FOO&#x0000;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#x0078;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOxZOO"
--
--#data
--FOO&#x0079;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOyZOO"
--
--#data
--FOO&#x0080;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO€ZOO"
--
--#data
--FOO&#x0081;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x0082;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO‚ZOO"
--
--#data
--FOO&#x0083;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOƒZOO"
--
--#data
--FOO&#x0084;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO„ZOO"
--
--#data
--FOO&#x0085;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO…ZOO"
--
--#data
--FOO&#x0086;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO†ZOO"
--
--#data
--FOO&#x0087;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO‡ZOO"
--
--#data
--FOO&#x0088;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOˆZOO"
--
--#data
--FOO&#x0089;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO‰ZOO"
--
--#data
--FOO&#x008A;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOŠZOO"
--
--#data
--FOO&#x008B;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO‹ZOO"
--
--#data
--FOO&#x008C;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOŒZOO"
--
--#data
--FOO&#x008D;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x008E;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOŽZOO"
--
--#data
--FOO&#x008F;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x0090;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x0091;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO‘ZOO"
--
--#data
--FOO&#x0092;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO’ZOO"
--
--#data
--FOO&#x0093;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO“ZOO"
--
--#data
--FOO&#x0094;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO”ZOO"
--
--#data
--FOO&#x0095;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO•ZOO"
--
--#data
--FOO&#x0096;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO–ZOO"
--
--#data
--FOO&#x0097;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO—ZOO"
--
--#data
--FOO&#x0098;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO˜ZOO"
--
--#data
--FOO&#x0099;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO™ZOO"
--
--#data
--FOO&#x009A;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOšZOO"
--
--#data
--FOO&#x009B;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO›ZOO"
--
--#data
--FOO&#x009C;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOœZOO"
--
--#data
--FOO&#x009D;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x009E;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOžZOO"
--
--#data
--FOO&#x009F;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOŸZOO"
--
--#data
--FOO&#x00A0;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO ZOO"
--
--#data
--FOO&#xD7FF;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO퟿ZOO"
--
--#data
--FOO&#xD800;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#xD801;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#xDFFE;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#xDFFF;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#xE000;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOOZOO"
--
--#data
--FOO&#x10FFFE;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO􏿾ZOO"
--
--#data
--FOO&#x1087D4;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO􈟔ZOO"
--
--#data
--FOO&#x10FFFF;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO􏿿ZOO"
--
--#data
--FOO&#x110000;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
--
--#data
--FOO&#xFFFFFF;ZOO
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO�ZOO"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities02.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities02.dat
-deleted file mode 100644
-index e2fb42a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/entities02.dat
-+++ /dev/null
-@@ -1,249 +0,0 @@
--#data
--<div bar="ZZ&gt;YY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ>YY"
--
--#data
--<div bar="ZZ&"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&"
--
--#data
--<div bar='ZZ&'></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&"
--
--#data
--<div bar=ZZ&></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&"
--
--#data
--<div bar="ZZ&gt=YY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&gt=YY"
--
--#data
--<div bar="ZZ&gt0YY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&gt0YY"
--
--#data
--<div bar="ZZ&gt9YY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&gt9YY"
--
--#data
--<div bar="ZZ&gtaYY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&gtaYY"
--
--#data
--<div bar="ZZ&gtZYY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&gtZYY"
--
--#data
--<div bar="ZZ&gt YY"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ> YY"
--
--#data
--<div bar="ZZ&gt"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ>"
--
--#data
--<div bar='ZZ&gt'></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ>"
--
--#data
--<div bar=ZZ&gt></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ>"
--
--#data
--<div bar="ZZ&pound_id=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ£_id=23"
--
--#data
--<div bar="ZZ&prod_id=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&prod_id=23"
--
--#data
--<div bar="ZZ&pound;_id=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ£_id=23"
--
--#data
--<div bar="ZZ&prod;_id=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ∏_id=23"
--
--#data
--<div bar="ZZ&pound=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&pound=23"
--
--#data
--<div bar="ZZ&prod=23"></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       bar="ZZ&prod=23"
--
--#data
--<div>ZZ&pound_id=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ£_id=23"
--
--#data
--<div>ZZ&prod_id=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ&prod_id=23"
--
--#data
--<div>ZZ&pound;_id=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ£_id=23"
--
--#data
--<div>ZZ&prod;_id=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ∏_id=23"
--
--#data
--<div>ZZ&pound=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ£=23"
--
--#data
--<div>ZZ&prod=23</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "ZZ&prod=23"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/html5test-com.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/html5test-com.dat
-deleted file mode 100644
-index d7cb71d..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/html5test-com.dat
-+++ /dev/null
-@@ -1,246 +0,0 @@
--#data
--<div<div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div<div>
--
--#data
--<div foo<bar=''>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       foo<bar=""
--
--#data
--<div foo=`bar`>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       foo="`bar`"
--
--#data
--<div \"foo=''>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       \"foo=""
--
--#data
--<a href='\nbar'></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="\nbar"
--
--#data
--<!DOCTYPE html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--&lang;&rang;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "⟨⟩"
--
--#data
--&apos;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "'"
--
--#data
--&ImaginaryI;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "ⅈ"
--
--#data
--&Kopf;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "𝕂"
--
--#data
--&notinva;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "∉"
--
--#data
--<?import namespace="foo" implementation="#bar">
--#errors
--#document
--| <!-- ?import namespace="foo" implementation="#bar" -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!--foo--bar-->
--#errors
--#document
--| <!-- foo--bar -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<![CDATA[x]]>
--#errors
--#document
--| <!-- [CDATA[x]] -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<textarea><!--</textarea>--></textarea>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<!--"
--|     "-->"
--
--#data
--<textarea><!--</textarea>-->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<!--"
--|     "-->"
--
--#data
--<style><!--</style>--></style>
--#errors
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--"
--|   <body>
--|     "-->"
--
--#data
--<style><!--</style>-->
--#errors
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--"
--|   <body>
--|     "-->"
--
--#data
--<ul><li>A </li> <li>B</li></ul>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ul>
--|       <li>
--|         "A "
--|       " "
--|       <li>
--|         "B"
--
--#data
--<table><form><input type=hidden><input></form><div></div></table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <input>
--|     <div>
--|     <table>
--|       <form>
--|       <input>
--|         type="hidden"
--
--#data
--<i>A<b>B<p></i>C</b>D
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "A"
--|       <b>
--|         "B"
--|     <b>
--|     <p>
--|       <b>
--|         <i>
--|         "C"
--|       "D"
--
--#data
--<div></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--
--#data
--<svg></svg>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<math></math>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/inbody01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/inbody01.dat
-deleted file mode 100644
-index 3f2bd37..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/inbody01.dat
-+++ /dev/null
-@@ -1,43 +0,0 @@
--#data
--<button>1</foo>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <button>
--|       "1"
--
--#data
--<foo>1<p>2</foo>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       "1"
--|       <p>
--|         "2"
--
--#data
--<dd>1</foo>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <dd>
--|       "1"
--
--#data
--<foo>1<dd>2</foo>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       "1"
--|       <dd>
--|         "2"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/isindex.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/isindex.dat
-deleted file mode 100644
-index 88325ff..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/isindex.dat
-+++ /dev/null
-@@ -1,40 +0,0 @@
--#data
--<isindex>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <hr>
--|       <label>
--|         "This is a searchable index. Enter search keywords: "
--|         <input>
--|           name="isindex"
--|       <hr>
--
--#data
--<isindex name="A" action="B" prompt="C" foo="D">
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       action="B"
--|       <hr>
--|       <label>
--|         "C"
--|         <input>
--|           foo="D"
--|           name="isindex"
--|       <hr>
--
--#data
--<form><isindex>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <form>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes-plain-text-unsafe.dat
-deleted file mode 100644
-index a5ebb1eb285116af391137bc94beac0c8a6834b4..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 115
-zcmXZUQ3`+{41i&ucZ#9c5brYEqF^T2f`Sg8m2W@)!xxy0Am++fibh!_xp`HU=1fj=
-l5Tv!*b_iUjqsV4(V_d9g>VZ9lc;ttC7t#O7YxuDS4-Zl&BR>ED
-
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
-deleted file mode 100644
-index 5a92084..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/pending-spec-changes.dat
-+++ /dev/null
-@@ -1,52 +0,0 @@
--#data
--<input type="hidden"><frameset>
--#errors
--21: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--31: “frameset” start tag seen.
--31: End of file seen and there were open elements.
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><table><caption><svg>foo</table>bar
--#errors
--47: End tag “table” did not match the name of the current open element (“svg”).
--47: “table” closed but “caption” was still open.
--47: End tag “table” seen, but there were open elements.
--36: Unclosed element “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <svg svg>
--|           "foo"
--|     "bar"
--
--#data
--<table><tr><td><svg><desc><td></desc><circle>
--#errors
--7: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--30: A table cell was implicitly closed, but there were open elements.
--26: Unclosed element “desc”.
--20: Unclosed element “svg”.
--37: Stray end tag “desc”.
--45: End of file seen and there were open elements.
--45: Unclosed element “circle”.
--7: Unclosed element “table”.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg desc>
--|           <td>
--|             <circle>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/plain-text-unsafe.dat
-deleted file mode 100644
-index 04cc11fb9d458ea32dca02e2f3bf39221196ab8e..0000000000000000000000000000000000000000
-GIT binary patch
-literal 0
-HcmV?d00001
-
-literal 4166
-zcmds4&1%~~5Y}1XcNojiOL5|Zdr6iB6Q@@dnjYGa!^&DaD*8j(rYZE*N*}4O(Ai!6
-zt?Ym#%MQd~yz+X#nfd0M+40P0g4rKk_ucGyu~@9HzqzhG<5`wuxjplf&5wx3!u}29
-zQA8od1>ll1zgT*S|4T0c9E6$RdB?_+5>}tF$TnjU&$*!FvRd{rQXeva!GcpkGm9M!
-zZBWBlo0XH%V%aC7h2%Ws8$qo;*=zDp0+b3F08~mq!5(ow4OtKi{*2LVgD~WoB_9R=
-z%96mMsPR;h$nTtgfB$G~Tu5~MsAZ5p?I at Yv->g at 6t9!$ThX*>G;HMo(<c>}#7Rl5a
-zZg4uE1I7jOIW4nFO4J6iM;kDRJYY at HxlJ-2>|)pZu4LM<KOUh3ErDsMA{%qAZOUw$
-zscvR?JZJVK)-qamv2krWRmhr-vcp#rkm+dl=W)$LH~WnyKCXT2=DO^$@Rb}6$4 at S`
-zDy!WdH|zeTS5P`WCOgJYv%MecK8>qS(UCIoh@(L9F*ZXarNczOPxy50-rRltbPH<s
-zA!)|x#GcqI^c|On6=j}5m2?=K6mlgf$6nP%Y{F?5Ca>+lsqMcUzhGX-DG?dIeM%yw
-zq)6Z5tV+moc?F- at Px$g4N7 at AhG2|lSEV{6lAFkjw_958<_GvD+SlP=VmQ!lVHXJsI
-znhY+?3E0d<$JA<%tK<^VtQR#nU at +CT{-PMJ<%52yKtV-o{8a81dy0d-O}vj9)n^7k
-zT4d@%G%wJ%%qhlePD%yYMMpP?=tpcJ%YZV=t3+x1nKCnh=v}&mgl&nSNPf^%ki)ze
-l`$yqfayHMBo}R^L^DOS^S$;Op@}8cl(m$8f+I>c;?LSw^)ujLc
-
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scriptdata01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
-deleted file mode 100644
-index 76b67f4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scriptdata01.dat
-+++ /dev/null
-@@ -1,308 +0,0 @@
--#data
--FOO<script>'Hello'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'Hello'"
--|     "BAR"
--
--#data
--FOO<script></script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|     "BAR"
--
--#data
--FOO<script></script >BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|     "BAR"
--
--#data
--FOO<script></script/>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|     "BAR"
--
--#data
--FOO<script></script/ >BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|     "BAR"
--
--#data
--FOO<script type="text/plain"></scriptx>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "</scriptx>BAR"
--
--#data
--FOO<script></script foo=">" dd>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|     "BAR"
--
--#data
--FOO<script>'<'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<'"
--|     "BAR"
--
--#data
--FOO<script>'<!'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!'"
--|     "BAR"
--
--#data
--FOO<script>'<!-'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-'"
--|     "BAR"
--
--#data
--FOO<script>'<!--'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!--'"
--|     "BAR"
--
--#data
--FOO<script>'<!---'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!---'"
--|     "BAR"
--
--#data
--FOO<script>'<!-->'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-->'"
--|     "BAR"
--
--#data
--FOO<script>'<!-->'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-->'"
--|     "BAR"
--
--#data
--FOO<script>'<!-- potato'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-- potato'"
--|     "BAR"
--
--#data
--FOO<script>'<!-- <sCrIpt'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-- <sCrIpt'"
--|     "BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt>'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt>'</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt> -'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt> -'</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt> --'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt> --'</script>BAR"
--
--#data
--FOO<script>'<!-- <sCrIpt> -->'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       "'<!-- <sCrIpt> -->'"
--|     "BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt> --!>'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt> --!>'</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt> -- >'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt> -- >'</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt '</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt '</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt/'</script>BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt\'</script>BAR
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt\'"
--|     "BAR"
--
--#data
--FOO<script type="text/plain">'<!-- <sCrIpt/'</script>BAR</script>QUX
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "FOO"
--|     <script>
--|       type="text/plain"
--|       "'<!-- <sCrIpt/'</script>BAR"
--|     "QUX"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
-deleted file mode 100644
-index 4e08d0e..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/adoption01.dat
-+++ /dev/null
-@@ -1,15 +0,0 @@
--#data
--<p><b id="A"><script>document.getElementById("A").id = "B"</script></p>TEXT</b>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|         id="B"
--|         <script>
--|           "document.getElementById("A").id = "B""
--|     <b>
--|       id="A"
--|       "TEXT"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
-deleted file mode 100644
-index ef4a41c..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/scripted/webkit01.dat
-+++ /dev/null
-@@ -1,28 +0,0 @@
--#data
--1<script>document.write("2")</script>3
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "1"
--|     <script>
--|       "document.write("2")"
--|     "23"
--
--#data
--1<script>document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")</script>4
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "1"
--|     <script>
--|       "document.write("<script>document.write('2')</scr"+ "ipt><script>document.write('3')</scr" + "ipt>")"
--|     <script>
--|       "document.write('2')"
--|     "2"
--|     <script>
--|       "document.write('3')"
--|     "34"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tables01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tables01.dat
-deleted file mode 100644
-index c4b47e4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tables01.dat
-+++ /dev/null
-@@ -1,212 +0,0 @@
--#data
--<table><th>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <th>
--
--#data
--<table><td>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><col foo='bar'>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <colgroup>
--|         <col>
--|           foo="bar"
--
--#data
--<table><colgroup></html>foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "foo"
--|     <table>
--|       <colgroup>
--
--#data
--<table></table><p>foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|     <p>
--|       "foo"
--
--#data
--<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr><td>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><select><option>3</select></table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|         "3"
--|     <table>
--
--#data
--<table><select><table></table></select></table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <table>
--|     <table>
--
--#data
--<table><select></table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <table>
--
--#data
--<table><select><option>A<tr><td>B</td></tr></table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|         "A"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "B"
--
--#data
--<table><td></body></caption></col></colgroup></html>foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "foo"
--
--#data
--<table><td>A</table>B
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "A"
--|     "B"
--
--#data
--<table><tr><caption>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|       <caption>
--
--#data
--<table><tr></body></caption></col></colgroup></html></td></th><td>foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "foo"
--
--#data
--<table><td><tr>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|         <tr>
--
--#data
--<table><td><button><td>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <button>
--|           <td>
--
--#data
--<table><tr><td><svg><desc><td>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg desc>
--|           <td>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests1.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests1.dat
-deleted file mode 100644
-index cbf8bdd..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests1.dat
-+++ /dev/null
-@@ -1,1952 +0,0 @@
--#data
--Test
--#errors
--Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "Test"
--
--#data
--<p>One<p>Two
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "One"
--|     <p>
--|       "Two"
--
--#data
--Line1<br>Line2<br>Line3<br>Line4
--#errors
--Line: 1 Col: 5 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "Line1"
--|     <br>
--|     "Line2"
--|     <br>
--|     "Line3"
--|     <br>
--|     "Line4"
--
--#data
--<html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<head>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<body>
--#errors
--Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head></head>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head></head><body>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head></head><body></body>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head><body></body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head></body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--Line: 1 Col: 19 Unexpected end tag (body).
--Line: 1 Col: 26 Unexpected end tag (html).
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><head><body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<html><body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<head></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end tag (html). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--</head>
--#errors
--Line: 1 Col: 7 Unexpected end tag (head). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--</body>
--#errors
--Line: 1 Col: 7 Unexpected end tag (body). Expected DOCTYPE.
--Line: 1 Col: 7 Unexpected end tag (body) after the (implied) root element.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--</html>
--#errors
--Line: 1 Col: 7 Unexpected end tag (html). Expected DOCTYPE.
--Line: 1 Col: 7 Unexpected end tag (html) after the (implied) root element.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<b><table><td><i></table>
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <i>
--
--#data
--<b><table><td></b><i></table>X
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 30 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <i>
--|       "X"
--
--#data
--<h1>Hello<h2>World
--#errors
--4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--13: Heading cannot be a child of another heading.
--18: End of file seen and there were open elements.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <h1>
--|       "Hello"
--|     <h2>
--|       "World"
--
--#data
--<a><p>X<a>Y</a>Z</p></a>
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 10 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 10 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 24 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <p>
--|       <a>
--|         "X"
--|       <a>
--|         "Y"
--|       "Z"
--
--#data
--<b><button>foo</b>bar
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|     <button>
--|       <b>
--|         "foo"
--|       "bar"
--
--#data
--<!DOCTYPE html><span><button>foo</span>bar
--#errors
--39: End tag “span” seen but there were unclosed elements.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <span>
--|       <button>
--|         "foobar"
--
--#data
--<p><b><div><marquee></p></b></div>X
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end tag (p). Ignored.
--Line: 1 Col: 24 Unexpected end tag (p). Ignored.
--Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
--Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|     <div>
--|       <b>
--|         <marquee>
--|           <p>
--|           "X"
--
--#data
--<script><div></script></div><title><p></title><p><p>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 28 Unexpected end tag (div). Ignored.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<div>"
--|     <title>
--|       "<p>"
--|   <body>
--|     <p>
--|     <p>
--
--#data
--<!--><div>--<!-->
--#errors
--Line: 1 Col: 5 Incorrect comment.
--Line: 1 Col: 10 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 17 Incorrect comment.
--Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
--#document
--| <!--  -->
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "--"
--|       <!--  -->
--
--#data
--<p><hr></p>
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end tag (p). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <hr>
--|     <p>
--
--#data
--<select><b><option><select><option></b></select>X
--#errors
--Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
--Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
--Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 48 Unexpected end tag (select). Ignored.
--Line: 1 Col: 49 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|     <option>
--|       "X"
--
--#data
--<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 63 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 64 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <a>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <a>
--|                 <table>
--|               <a>
--|     <a>
--|       <b>
--|         "X"
--|       "C"
--|     <a>
--|       "Y"
--
--#data
--<a X>0<b>1<a Y>2
--#errors
--Line: 1 Col: 5 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 15 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       x=""
--|       "0"
--|       <b>
--|         "1"
--|     <b>
--|       <a>
--|         y=""
--|         "2"
--
--#data
--<!-----><font><div>hello<table>excite!<b>me!<th><i>please!</tr><!--X-->
--#errors
--Line: 1 Col: 7 Unexpected '-' after '--' found in comment.
--Line: 1 Col: 14 Unexpected start tag (font). Expected DOCTYPE.
--Line: 1 Col: 38 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 41 Unexpected start tag (b) in table context caused voodoo mode.
--Line: 1 Col: 48 Unexpected implied end tag (b) in the table phase.
--Line: 1 Col: 48 Unexpected table cell start tag (th) in the table body phase.
--Line: 1 Col: 63 Got table cell end tag (th) while required end tags are missing.
--Line: 1 Col: 71 Unexpected end of file. Expected table content.
--#document
--| <!-- - -->
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|       <div>
--|         "helloexcite!"
--|         <b>
--|           "me!"
--|         <table>
--|           <tbody>
--|             <tr>
--|               <th>
--|                 <i>
--|                   "please!"
--|             <!-- X -->
--
--#data
--<!DOCTYPE html><li>hello<li>world<ul>how<li>do</ul>you</body><!--do-->
--#errors
--Line: 1 Col: 61 Unexpected end tag (li). Missing end tag (body).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <li>
--|       "hello"
--|     <li>
--|       "world"
--|       <ul>
--|         "how"
--|         <li>
--|           "do"
--|       "you"
--|   <!-- do -->
--
--#data
--<!DOCTYPE html>A<option>B<optgroup>C<select>D</option>E
--#errors
--Line: 1 Col: 54 Unexpected end tag (option) in the select phase. Ignored.
--Line: 1 Col: 55 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "A"
--|     <option>
--|       "B"
--|     <optgroup>
--|       "C"
--|       <select>
--|         "DE"
--
--#data
--<
--#errors
--Line: 1 Col: 1 Expected tag name. Got something else instead
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "<"
--
--#data
--<#
--#errors
--Line: 1 Col: 1 Expected tag name. Got something else instead
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "<#"
--
--#data
--</
--#errors
--Line: 1 Col: 2 Expected closing tag. Unexpected end of file.
--Line: 1 Col: 2 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "</"
--
--#data
--</#
--#errors
--Line: 1 Col: 2 Expected closing tag. Unexpected character '#' found.
--Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- # -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<?
--#errors
--Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
--Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- ? -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<?#
--#errors
--Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
--Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- ?# -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!
--#errors
--Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
--Line: 1 Col: 2 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!--  -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!#
--#errors
--Line: 1 Col: 3 Expected '--' or 'DOCTYPE'. Not found.
--Line: 1 Col: 3 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- # -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<?COMMENT?>
--#errors
--Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
--Line: 1 Col: 11 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- ?COMMENT? -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!COMMENT>
--#errors
--Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
--Line: 1 Col: 10 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- COMMENT -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--</ COMMENT >
--#errors
--Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
--Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!--  COMMENT  -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<?COM--MENT?>
--#errors
--Line: 1 Col: 1 Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)
--Line: 1 Col: 13 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- ?COM--MENT? -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!COM--MENT>
--#errors
--Line: 1 Col: 2 Expected '--' or 'DOCTYPE'. Not found.
--Line: 1 Col: 12 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- COM--MENT -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--</ COM--MENT >
--#errors
--Line: 1 Col: 2 Expected closing tag. Unexpected character ' ' found.
--Line: 1 Col: 14 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!--  COM--MENT  -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><style> EOF
--#errors
--Line: 1 Col: 26 Unexpected end of file. Expected end tag (style).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       " EOF"
--|   <body>
--
--#data
--<!DOCTYPE html><script> <!-- </script> --> </script> EOF
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       " <!-- "
--|     " "
--|   <body>
--|     "-->  EOF"
--
--#data
--<b><p></b>TEST
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 10 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|     <p>
--|       <b>
--|       "TEST"
--
--#data
--<p id=a><b><p id=b></b>TEST
--#errors
--Line: 1 Col: 8 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 19 Unexpected end tag (p). Ignored.
--Line: 1 Col: 23 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       id="a"
--|       <b>
--|     <p>
--|       id="b"
--|       "TEST"
--
--#data
--<b id=a><p><b id=b></p></b>TEST
--#errors
--Line: 1 Col: 8 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end tag (p). Ignored.
--Line: 1 Col: 27 End tag (b) violates step 1, paragraph 2 of the adoption agency algorithm.
--Line: 1 Col: 31 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       id="a"
--|       <p>
--|         <b>
--|           id="b"
--|       "TEST"
--
--#data
--<!DOCTYPE html><title>U-test</title><body><div><p>Test<u></p></div></body>
--#errors
--Line: 1 Col: 61 Unexpected end tag (p). Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "U-test"
--|   <body>
--|     <div>
--|       <p>
--|         "Test"
--|         <u>
--
--#data
--<!DOCTYPE html><font><table></font></table></font>
--#errors
--Line: 1 Col: 35 Unexpected end tag (font) in table context caused voodoo mode.
--Line: 1 Col: 35 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|       <table>
--
--#data
--<font><p>hello<b>cruel</font>world
--#errors
--Line: 1 Col: 6 Unexpected start tag (font). Expected DOCTYPE.
--Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 29 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|     <p>
--|       <font>
--|         "hello"
--|         <b>
--|           "cruel"
--|       <b>
--|         "world"
--
--#data
--<b>Test</i>Test
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 11 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "TestTest"
--
--#data
--<b>A<cite>B<div>C
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "A"
--|       <cite>
--|         "B"
--|         <div>
--|           "C"
--
--#data
--<b>A<cite>B<div>C</cite>D
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 24 Unexpected end tag (cite). Ignored.
--Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "A"
--|       <cite>
--|         "B"
--|         <div>
--|           "CD"
--
--#data
--<b>A<cite>B<div>C</b>D
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 21 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 22 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "A"
--|       <cite>
--|         "B"
--|     <div>
--|       <b>
--|         "C"
--|       "D"
--
--#data
--
--#errors
--Line: 1 Col: 0 Unexpected End of file. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<DIV>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 5 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--
--#data
--<DIV> abc
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 9 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc"
--
--#data
--<DIV> abc <B>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 13 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--
--#data
--<DIV> abc <B> def
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 17 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def"
--
--#data
--<DIV> abc <B> def <I>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 21 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--
--#data
--<DIV> abc <B> def <I> ghi
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi"
--
--#data
--<DIV> abc <B> def <I> ghi <P>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|           <p>
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|           <p>
--|             " jkl"
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 38 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|         <p>
--|           <b>
--|             " jkl "
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B> mno
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 42 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|         <p>
--|           <b>
--|             " jkl "
--|           " mno"
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|       <p>
--|         <i>
--|           <b>
--|             " jkl "
--|           " mno "
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|       <p>
--|         <i>
--|           <b>
--|             " jkl "
--|           " mno "
--|         " pqr"
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P>
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 56 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|       <p>
--|         <i>
--|           <b>
--|             " jkl "
--|           " mno "
--|         " pqr "
--
--#data
--<DIV> abc <B> def <I> ghi <P> jkl </B> mno </I> pqr </P> stu
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 38 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 47 End tag (i) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 60 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       " abc "
--|       <b>
--|         " def "
--|         <i>
--|           " ghi "
--|       <i>
--|       <p>
--|         <i>
--|           <b>
--|             " jkl "
--|           " mno "
--|         " pqr "
--|       " stu"
--
--#data
--<test attribute---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------->
--#errors
--Line: 1 Col: 1040 Unexpected start tag (test). Expected DOCTYPE.
--Line: 1 Col: 1040 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <test>
--|       attribute----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------=""
--
--#data
--<a href="blah">aba<table><a href="foo">br<tr><td></td></tr>x</table>aoe
--#errors
--Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 39 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 39 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 39 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 45 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
--
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="blah"
--|       "aba"
--|       <a>
--|         href="foo"
--|         "br"
--|       <a>
--|         href="foo"
--|         "x"
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|     <a>
--|       href="foo"
--|       "aoe"
--
--#data
--<a href="blah">aba<table><tr><td><a href="foo">br</td></tr>x</table>aoe
--#errors
--Line: 1 Col: 15 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 60 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="blah"
--|       "abax"
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <a>
--|                 href="foo"
--|                 "br"
--|       "aoe"
--
--#data
--<table><a href="blah">aba<tr><td><a href="foo">br</td></tr>x</table>aoe
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 29 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 54 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 68 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 71 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="blah"
--|       "aba"
--|     <a>
--|       href="blah"
--|       "x"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <a>
--|               href="foo"
--|               "br"
--|     <a>
--|       href="blah"
--|       "aoe"
--
--#data
--<a href=a>aa<marquee>aa<a href=b>bb</marquee>aa
--#errors
--Line: 1 Col: 10 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 45 End tag (marquee) seen too early. Expected other end tag.
--Line: 1 Col: 47 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="a"
--|       "aa"
--|       <marquee>
--|         "aa"
--|         <a>
--|           href="b"
--|           "bb"
--|       "aa"
--
--#data
--<wbr><strike><code></strike><code><strike></code>
--#errors
--Line: 1 Col: 5 Unexpected start tag (wbr). Expected DOCTYPE.
--Line: 1 Col: 28 End tag (strike) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 49 Unexpected end tag (code). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <wbr>
--|     <strike>
--|       <code>
--|     <code>
--|       <code>
--|         <strike>
--
--#data
--<!DOCTYPE html><spacer>foo
--#errors
--26: End of file seen and there were open elements.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <spacer>
--|       "foo"
--
--#data
--<title><meta></title><link><title><meta></title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|       "<meta>"
--|     <link>
--|     <title>
--|       "<meta>"
--|   <body>
--
--#data
--<style><!--</style><meta><script>--><link></script>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 51 Unexpected end of file. Expected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--"
--|     <meta>
--|     <script>
--|       "--><link>"
--|   <body>
--
--#data
--<head><meta></head><link>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 25 Unexpected start tag (link) that can be in head. Moved.
--#document
--| <html>
--|   <head>
--|     <meta>
--|     <link>
--|   <body>
--
--#data
--<table><tr><tr><td><td><span><th><span>X</table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 33 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 48 Got table cell end tag (th) while required end tags are missing.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|         <tr>
--|           <td>
--|           <td>
--|             <span>
--|           <th>
--|             <span>
--|               "X"
--
--#data
--<body><body><base><link><meta><title><p></title><body><p></body>
--#errors
--Line: 1 Col: 6 Unexpected start tag (body). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected start tag (body).
--Line: 1 Col: 54 Unexpected start tag (body).
--Line: 1 Col: 64 Unexpected end tag (p). Missing end tag (body).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <base>
--|     <link>
--|     <meta>
--|     <title>
--|       "<p>"
--|     <p>
--
--#data
--<textarea><p></textarea>
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<p>"
--
--#data
--<p><image></p>
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 10 Unexpected start tag (image). Treated as img.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <img>
--
--#data
--<a><table><a></table><p><a><div><a>
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 13 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 13 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 21 Unexpected end tag (table). Expected end tag (a).
--Line: 1 Col: 27 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 27 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
--Line: 1 Col: 32 Unexpected end tag (p). Ignored.
--Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 35 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
--Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <a>
--|       <table>
--|     <p>
--|       <a>
--|     <div>
--|       <a>
--
--#data
--<head></p><meta><p>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 10 Unexpected end tag (p). Ignored.
--#document
--| <html>
--|   <head>
--|     <meta>
--|   <body>
--|     <p>
--
--#data
--<head></html><meta><p>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 19 Unexpected start tag (meta).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <meta>
--|     <p>
--
--#data
--<b><table><td><i></table>
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 25 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <i>
--
--#data
--<b><table><td></b><i></table>
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 18 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 29 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <i>
--
--#data
--<h1><h2>
--#errors
--4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--8: Heading cannot be a child of another heading.
--8: End of file seen and there were open elements.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <h1>
--|     <h2>
--
--#data
--<a><p><a></a></p></a>
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 9 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 9 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 21 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <p>
--|       <a>
--|       <a>
--
--#data
--<b><button></b></button></b>
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 15 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|     <button>
--|       <b>
--
--#data
--<p><b><div><marquee></p></b></div>
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end tag (p). Ignored.
--Line: 1 Col: 24 Unexpected end tag (p). Ignored.
--Line: 1 Col: 28 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 34 End tag (div) seen too early. Expected other end tag.
--Line: 1 Col: 34 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|     <div>
--|       <b>
--|         <marquee>
--|           <p>
--
--#data
--<script></script></div><title></title><p><p>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end tag (div). Ignored.
--#document
--| <html>
--|   <head>
--|     <script>
--|     <title>
--|   <body>
--|     <p>
--|     <p>
--
--#data
--<p><hr></p>
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end tag (p). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <hr>
--|     <p>
--
--#data
--<select><b><option><select><option></b></select>
--#errors
--Line: 1 Col: 8 Unexpected start tag (select). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected start tag token (b) in the select phase. Ignored.
--Line: 1 Col: 27 Unexpected select start tag in the select phase treated as select end tag.
--Line: 1 Col: 39 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 48 Unexpected end tag (select). Ignored.
--Line: 1 Col: 48 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|     <option>
--
--#data
--<html><head><title></title><body></body></html>
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|   <body>
--
--#data
--<a><table><td><a><table></table><a></tr><a></table><a>
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 35 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 40 Got table cell end tag (td) while required end tags are missing.
--Line: 1 Col: 43 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 43 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 43 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 51 Unexpected implied end tag (a) in the table phase.
--Line: 1 Col: 54 Unexpected start tag (a) implies end tag (a).
--Line: 1 Col: 54 End tag (a) violates step 1, paragraph 2 of the adoption agency algorithm.
--Line: 1 Col: 54 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <a>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <a>
--|                 <table>
--|               <a>
--|     <a>
--
--#data
--<ul><li></li><div><li></div><li><li><div><li><address><li><b><em></b><li></ul>
--#errors
--Line: 1 Col: 4 Unexpected start tag (ul). Expected DOCTYPE.
--Line: 1 Col: 45 Missing end tag (div, li).
--Line: 1 Col: 58 Missing end tag (address, li).
--Line: 1 Col: 69 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ul>
--|       <li>
--|       <div>
--|         <li>
--|       <li>
--|       <li>
--|         <div>
--|       <li>
--|         <address>
--|       <li>
--|         <b>
--|           <em>
--|       <li>
--
--#data
--<ul><li><ul></li><li>a</li></ul></li></ul>
--#errors
--XXX: fix me
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ul>
--|       <li>
--|         <ul>
--|           <li>
--|             "a"
--
--#data
--<frameset><frame><frameset><frame></frameset><noframes></noframes></frameset>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--|     <frameset>
--|       <frame>
--|     <noframes>
--
--#data
--<h1><table><td><h3></table><h3></h1>
--#errors
--4: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--15: “td” start tag in table body.
--27: Unclosed elements.
--31: Heading cannot be a child of another heading.
--36: End tag “h1” seen but there were unclosed elements.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <h1>
--|       <table>
--|         <tbody>
--|           <tr>
--|             <td>
--|               <h3>
--|     <h3>
--
--#data
--<table><colgroup><col><colgroup><col><col><col><colgroup><col><col><thead><tr><td></table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <colgroup>
--|         <col>
--|       <colgroup>
--|         <col>
--|         <col>
--|         <col>
--|       <colgroup>
--|         <col>
--|         <col>
--|       <thead>
--|         <tr>
--|           <td>
--
--#data
--<table><col><tbody><col><tr><col><td><col></table><col>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 37 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 55 Unexpected start tag col. Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <colgroup>
--|         <col>
--|       <tbody>
--|       <colgroup>
--|         <col>
--|       <tbody>
--|         <tr>
--|       <colgroup>
--|         <col>
--|       <tbody>
--|         <tr>
--|           <td>
--|       <colgroup>
--|         <col>
--
--#data
--<table><colgroup><tbody><colgroup><tr><colgroup><td><colgroup></table><colgroup>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 52 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 80 Unexpected start tag colgroup. Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <colgroup>
--|       <tbody>
--|       <colgroup>
--|       <tbody>
--|         <tr>
--|       <colgroup>
--|       <tbody>
--|         <tr>
--|           <td>
--|       <colgroup>
--
--#data
--</strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
--#errors
--Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
--Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
--Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
--Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
--Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
--Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
--Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
--Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
--Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
--Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
--Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
--Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
--Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
--Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
--Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
--Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
--Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
--Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element.
--Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element.
--Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element.
--Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element.
--Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element.
--Line: 1 Col: 130 Unexpected end tag (br). Treated as br element.
--Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 140 This element (img) has no end tag.
--Line: 1 Col: 148 Unexpected end tag (title). Ignored.
--Line: 1 Col: 155 Unexpected end tag (span). Ignored.
--Line: 1 Col: 163 Unexpected end tag (style). Ignored.
--Line: 1 Col: 172 Unexpected end tag (script). Ignored.
--Line: 1 Col: 180 Unexpected end tag (table). Ignored.
--Line: 1 Col: 185 Unexpected end tag (th). Ignored.
--Line: 1 Col: 190 Unexpected end tag (td). Ignored.
--Line: 1 Col: 195 Unexpected end tag (tr). Ignored.
--Line: 1 Col: 203 This element (frame) has no end tag.
--Line: 1 Col: 210 This element (area) has no end tag.
--Line: 1 Col: 217 Unexpected end tag (link). Ignored.
--Line: 1 Col: 225 This element (param) has no end tag.
--Line: 1 Col: 230 This element (hr) has no end tag.
--Line: 1 Col: 238 This element (input) has no end tag.
--Line: 1 Col: 244 Unexpected end tag (col). Ignored.
--Line: 1 Col: 251 Unexpected end tag (base). Ignored.
--Line: 1 Col: 258 Unexpected end tag (meta). Ignored.
--Line: 1 Col: 269 This element (basefont) has no end tag.
--Line: 1 Col: 279 This element (bgsound) has no end tag.
--Line: 1 Col: 287 This element (embed) has no end tag.
--Line: 1 Col: 296 This element (spacer) has no end tag.
--Line: 1 Col: 300 Unexpected end tag (p). Ignored.
--Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag.
--Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag.
--Line: 1 Col: 320 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored.
--Line: 1 Col: 339 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 355 Unexpected end tag (thead). Ignored.
--Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag.
--Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag.
--Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag.
--Line: 1 Col: 393 Unexpected end tag (dir). Ignored.
--Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag.
--Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag.
--Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag.
--Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag.
--Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag.
--Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag.
--Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag.
--Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag.
--Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 460 This element (wbr) has no end tag.
--Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag.
--Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag.
--Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag.
--Line: 1 Col: 513 Unexpected end tag (html). Ignored.
--Line: 1 Col: 513 Unexpected end tag (frameset). Ignored.
--Line: 1 Col: 520 Unexpected end tag (head). Ignored.
--Line: 1 Col: 529 Unexpected end tag (iframe). Ignored.
--Line: 1 Col: 537 This element (image) has no end tag.
--Line: 1 Col: 547 This element (isindex) has no end tag.
--Line: 1 Col: 557 Unexpected end tag (noembed). Ignored.
--Line: 1 Col: 568 Unexpected end tag (noframes). Ignored.
--Line: 1 Col: 579 Unexpected end tag (noscript). Ignored.
--Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored.
--Line: 1 Col: 599 Unexpected end tag (option). Ignored.
--Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored.
--Line: 1 Col: 622 Unexpected end tag (textarea). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <br>
--|     <p>
--
--#data
--<table><tr></strong></b></em></i></u></strike></s></blink></tt></pre></big></small></font></select></h1></h2></h3></h4></h5></h6></body></br></a></img></title></span></style></script></table></th></td></tr></frame></area></link></param></hr></input></col></base></meta></basefont></bgsound></embed></spacer></p></dd></dt></caption></colgroup></tbody></tfoot></thead></address></blockquote></center></dir></div></dl></fieldset></listing></menu></ol></ul></li></nobr></wbr></form></button></marquee></object></html></frameset></head></iframe></image></isindex></noembed></noframes></noscript></optgroup></option></plaintext></textarea>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode.
--Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode.
--Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode.
--Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode.
--Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode.
--Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode.
--Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode.
--Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode.
--Line: 1 Col: 58 Unexpected end tag (blink). Ignored.
--Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode.
--Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode.
--Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag.
--Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode.
--Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode.
--Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode.
--Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode.
--Line: 1 Col: 99 Unexpected end tag (select). Ignored.
--Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode.
--Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag.
--Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode.
--Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag.
--Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode.
--Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag.
--Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode.
--Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag.
--Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode.
--Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag.
--Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode.
--Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag.
--Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored.
--Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode.
--Line: 1 Col: 141 Unexpected end tag (br). Treated as br element.
--Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode.
--Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode.
--Line: 1 Col: 151 This element (img) has no end tag.
--Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode.
--Line: 1 Col: 159 Unexpected end tag (title). Ignored.
--Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode.
--Line: 1 Col: 166 Unexpected end tag (span). Ignored.
--Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode.
--Line: 1 Col: 174 Unexpected end tag (style). Ignored.
--Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode.
--Line: 1 Col: 183 Unexpected end tag (script). Ignored.
--Line: 1 Col: 196 Unexpected end tag (th). Ignored.
--Line: 1 Col: 201 Unexpected end tag (td). Ignored.
--Line: 1 Col: 206 Unexpected end tag (tr). Ignored.
--Line: 1 Col: 214 This element (frame) has no end tag.
--Line: 1 Col: 221 This element (area) has no end tag.
--Line: 1 Col: 228 Unexpected end tag (link). Ignored.
--Line: 1 Col: 236 This element (param) has no end tag.
--Line: 1 Col: 241 This element (hr) has no end tag.
--Line: 1 Col: 249 This element (input) has no end tag.
--Line: 1 Col: 255 Unexpected end tag (col). Ignored.
--Line: 1 Col: 262 Unexpected end tag (base). Ignored.
--Line: 1 Col: 269 Unexpected end tag (meta). Ignored.
--Line: 1 Col: 280 This element (basefont) has no end tag.
--Line: 1 Col: 290 This element (bgsound) has no end tag.
--Line: 1 Col: 298 This element (embed) has no end tag.
--Line: 1 Col: 307 This element (spacer) has no end tag.
--Line: 1 Col: 311 Unexpected end tag (p). Ignored.
--Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag.
--Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag.
--Line: 1 Col: 331 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored.
--Line: 1 Col: 350 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 366 Unexpected end tag (thead). Ignored.
--Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag.
--Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag.
--Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag.
--Line: 1 Col: 404 Unexpected end tag (dir). Ignored.
--Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag.
--Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag.
--Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag.
--Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag.
--Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag.
--Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag.
--Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag.
--Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag.
--Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm.
--Line: 1 Col: 471 This element (wbr) has no end tag.
--Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag.
--Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag.
--Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag.
--Line: 1 Col: 524 Unexpected end tag (html). Ignored.
--Line: 1 Col: 524 Unexpected end tag (frameset). Ignored.
--Line: 1 Col: 531 Unexpected end tag (head). Ignored.
--Line: 1 Col: 540 Unexpected end tag (iframe). Ignored.
--Line: 1 Col: 548 This element (image) has no end tag.
--Line: 1 Col: 558 This element (isindex) has no end tag.
--Line: 1 Col: 568 Unexpected end tag (noembed). Ignored.
--Line: 1 Col: 579 Unexpected end tag (noframes). Ignored.
--Line: 1 Col: 590 Unexpected end tag (noscript). Ignored.
--Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored.
--Line: 1 Col: 610 Unexpected end tag (option). Ignored.
--Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored.
--Line: 1 Col: 633 Unexpected end tag (textarea). Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <br>
--|     <table>
--|       <tbody>
--|         <tr>
--|     <p>
--
--#data
--<frameset>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 1 Col: 10 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <frameset>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests10.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests10.dat
-deleted file mode 100644
-index 4f8df86..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests10.dat
-+++ /dev/null
-@@ -1,799 +0,0 @@
--#data
--<!DOCTYPE html><svg></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<!DOCTYPE html><svg></svg><![CDATA[a]]>
--#errors
--29: Bogus comment
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|     <!-- [CDATA[a]] -->
--
--#data
--<!DOCTYPE html><body><svg></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<!DOCTYPE html><body><select><svg></svg></select>
--#errors
--35: Stray “svg” start tag.
--42: Stray end tag “svg”
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!DOCTYPE html><body><select><option><svg></svg></option></select>
--#errors
--43: Stray “svg” start tag.
--50: Stray end tag “svg”
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--
--#data
--<!DOCTYPE html><body><table><svg></svg></table>
--#errors
--34: Start tag “svg” seen in “table”.
--41: Stray end tag “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><svg><g>foo</g></svg></table>
--#errors
--34: Start tag “svg” seen in “table”.
--46: Stray end tag “g”.
--53: Stray end tag “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><svg><g>foo</g><g>bar</g></svg></table>
--#errors
--34: Start tag “svg” seen in “table”.
--46: Stray end tag “g”.
--58: Stray end tag “g”.
--65: Stray end tag “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><tbody><svg><g>foo</g><g>bar</g></svg></tbody></table>
--#errors
--41: Start tag “svg” seen in “table”.
--53: Stray end tag “g”.
--65: Stray end tag “g”.
--72: Stray end tag “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <table>
--|       <tbody>
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><svg><g>foo</g><g>bar</g></svg></tr></tbody></table>
--#errors
--45: Start tag “svg” seen in “table”.
--57: Stray end tag “g”.
--69: Stray end tag “g”.
--76: Stray end tag “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg></td></tr></tbody></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg g>
--|                 "foo"
--|               <svg g>
--|                 "bar"
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><td><svg><g>foo</g><g>bar</g></svg><p>baz</td></tr></tbody></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg g>
--|                 "foo"
--|               <svg g>
--|                 "bar"
--|             <p>
--|               "baz"
--
--#data
--<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g></svg><p>baz</caption></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <svg svg>
--|           <svg g>
--|             "foo"
--|           <svg g>
--|             "bar"
--|         <p>
--|           "baz"
--
--#data
--<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
--#errors
--70: HTML start tag “p” in a foreign namespace context.
--81: “table” closed but “caption” was still open.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <svg svg>
--|           <svg g>
--|             "foo"
--|           <svg g>
--|             "bar"
--|         <p>
--|           "baz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><caption><svg><g>foo</g><g>bar</g>baz</table><p>quux
--#errors
--78: “table” closed but “caption” was still open.
--78: Unclosed elements on stack.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <svg svg>
--|           <svg g>
--|             "foo"
--|           <svg g>
--|             "bar"
--|           "baz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><colgroup><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
--#errors
--44: Start tag “svg” seen in “table”.
--56: Stray end tag “g”.
--68: Stray end tag “g”.
--71: HTML start tag “p” in a foreign namespace context.
--71: Start tag “p” seen in “table”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <p>
--|       "baz"
--|     <table>
--|       <colgroup>
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><tr><td><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
--#errors
--50: Stray “svg” start tag.
--54: Stray “g” start tag.
--62: Stray end tag “g”
--66: Stray “g” start tag.
--74: Stray end tag “g”
--77: Stray “p” start tag.
--88: “table” end tag with “select” open.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <select>
--|               "foobarbaz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><select><svg><g>foo</g><g>bar</g><p>baz</table><p>quux
--#errors
--36: Start tag “select” seen in “table”.
--42: Stray “svg” start tag.
--46: Stray “g” start tag.
--54: Stray end tag “g”
--58: Stray “g” start tag.
--66: Stray end tag “g”
--69: Stray “p” start tag.
--80: “table” end tag with “select” open.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       "foobarbaz"
--|     <table>
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body></body></html><svg><g>foo</g><g>bar</g><p>baz
--#errors
--41: Stray “svg” start tag.
--68: HTML start tag “p” in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <p>
--|       "baz"
--
--#data
--<!DOCTYPE html><body></body><svg><g>foo</g><g>bar</g><p>baz
--#errors
--34: Stray “svg” start tag.
--61: HTML start tag “p” in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg g>
--|         "foo"
--|       <svg g>
--|         "bar"
--|     <p>
--|       "baz"
--
--#data
--<!DOCTYPE html><frameset><svg><g></g><g></g><p><span>
--#errors
--31: Stray “svg” start tag.
--35: Stray “g” start tag.
--40: Stray end tag “g”
--44: Stray “g” start tag.
--49: Stray end tag “g”
--52: Stray “p” start tag.
--58: Stray “span” start tag.
--58: End of file seen and there were open elements.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><frameset></frameset><svg><g></g><g></g><p><span>
--#errors
--42: Stray “svg” start tag.
--46: Stray “g” start tag.
--51: Stray end tag “g”
--55: Stray “g” start tag.
--60: Stray end tag “g”
--63: Stray “p” start tag.
--69: Stray “span” start tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><body xlink:href=foo><svg xlink:href=foo></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     <svg svg>
--|       xlink href="foo"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo></g></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <svg svg>
--|       <svg g>
--|         xlink href="foo"
--|         xml lang="en"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo /></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <svg svg>
--|       <svg g>
--|         xlink href="foo"
--|         xml lang="en"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><svg><g xml:lang=en xlink:href=foo />bar</svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <svg svg>
--|       <svg g>
--|         xlink href="foo"
--|         xml lang="en"
--|       "bar"
--
--#data
--<svg></path>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<div><svg></div>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|     "a"
--
--#data
--<div><svg><path></div>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|         <svg path>
--|     "a"
--
--#data
--<div><svg><path></svg><path>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|         <svg path>
--|       <path>
--
--#data
--<div><svg><path><foreignObject><math></div>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|         <svg path>
--|           <svg foreignObject>
--|             <math math>
--|               "a"
--
--#data
--<div><svg><path><foreignObject><p></div>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|         <svg path>
--|           <svg foreignObject>
--|             <p>
--|               "a"
--
--#data
--<!DOCTYPE html><svg><desc><div><svg><ul>a
--#errors
--40: HTML start tag “ul” in a foreign namespace context.
--41: End of file in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg desc>
--|         <div>
--|           <svg svg>
--|           <ul>
--|             "a"
--
--#data
--<!DOCTYPE html><svg><desc><svg><ul>a
--#errors
--35: HTML start tag “ul” in a foreign namespace context.
--36: End of file in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg desc>
--|         <svg svg>
--|         <ul>
--|           "a"
--
--#data
--<!DOCTYPE html><p><svg><desc><p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <svg svg>
--|         <svg desc>
--|           <p>
--
--#data
--<!DOCTYPE html><p><svg><title><p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <svg svg>
--|         <svg title>
--|           <p>
--
--#data
--<div><svg><path><foreignObject><p></foreignObject><p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <svg svg>
--|         <svg path>
--|           <svg foreignObject>
--|             <p>
--|             <p>
--
--#data
--<math><mi><div><object><div><span></span></div></object></div></mi><mi>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         <div>
--|           <object>
--|             <div>
--|               <span>
--|       <math mi>
--
--#data
--<math><mi><svg><foreignObject><div><div></div></div></foreignObject></svg></mi><mi>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         <svg svg>
--|           <svg foreignObject>
--|             <div>
--|               <div>
--|       <math mi>
--
--#data
--<svg><script></script><path>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg script>
--|       <svg path>
--
--#data
--<table><svg></svg><tr>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<math><mi><mglyph>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         <math mglyph>
--
--#data
--<math><mi><malignmark>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         <math malignmark>
--
--#data
--<math><mo><mglyph>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mo>
--|         <math mglyph>
--
--#data
--<math><mo><malignmark>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mo>
--|         <math malignmark>
--
--#data
--<math><mn><mglyph>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mn>
--|         <math mglyph>
--
--#data
--<math><mn><malignmark>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mn>
--|         <math malignmark>
--
--#data
--<math><ms><mglyph>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math ms>
--|         <math mglyph>
--
--#data
--<math><ms><malignmark>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math ms>
--|         <math malignmark>
--
--#data
--<math><mtext><mglyph>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mtext>
--|         <math mglyph>
--
--#data
--<math><mtext><malignmark>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mtext>
--|         <math malignmark>
--
--#data
--<math><annotation-xml><svg></svg></annotation-xml><mi>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         <svg svg>
--|       <math mi>
--
--#data
--<math><annotation-xml><svg><foreignObject><div><math><mi></mi></math><span></span></div></foreignObject><path></path></svg></annotation-xml><mi>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         <svg svg>
--|           <svg foreignObject>
--|             <div>
--|               <math math>
--|                 <math mi>
--|               <span>
--|           <svg path>
--|       <math mi>
--
--#data
--<math><annotation-xml><svg><foreignObject><math><mi><svg></svg></mi><mo></mo></math><span></span></foreignObject><path></path></svg></annotation-xml><mi>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         <svg svg>
--|           <svg foreignObject>
--|             <math math>
--|               <math mi>
--|                 <svg svg>
--|               <math mo>
--|             <span>
--|           <svg path>
--|       <math mi>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests11.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests11.dat
-deleted file mode 100644
-index 638cde4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests11.dat
-+++ /dev/null
-@@ -1,482 +0,0 @@
--#data
--<!DOCTYPE html><body><svg attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       attributeName=""
--|       attributeType=""
--|       baseFrequency=""
--|       baseProfile=""
--|       calcMode=""
--|       clipPathUnits=""
--|       contentScriptType=""
--|       contentStyleType=""
--|       diffuseConstant=""
--|       edgeMode=""
--|       externalResourcesRequired=""
--|       filterRes=""
--|       filterUnits=""
--|       glyphRef=""
--|       gradientTransform=""
--|       gradientUnits=""
--|       kernelMatrix=""
--|       kernelUnitLength=""
--|       keyPoints=""
--|       keySplines=""
--|       keyTimes=""
--|       lengthAdjust=""
--|       limitingConeAngle=""
--|       markerHeight=""
--|       markerUnits=""
--|       markerWidth=""
--|       maskContentUnits=""
--|       maskUnits=""
--|       numOctaves=""
--|       pathLength=""
--|       patternContentUnits=""
--|       patternTransform=""
--|       patternUnits=""
--|       pointsAtX=""
--|       pointsAtY=""
--|       pointsAtZ=""
--|       preserveAlpha=""
--|       preserveAspectRatio=""
--|       primitiveUnits=""
--|       refX=""
--|       refY=""
--|       repeatCount=""
--|       repeatDur=""
--|       requiredExtensions=""
--|       requiredFeatures=""
--|       specularConstant=""
--|       specularExponent=""
--|       spreadMethod=""
--|       startOffset=""
--|       stdDeviation=""
--|       stitchTiles=""
--|       surfaceScale=""
--|       systemLanguage=""
--|       tableValues=""
--|       targetX=""
--|       targetY=""
--|       textLength=""
--|       viewBox=""
--|       viewTarget=""
--|       xChannelSelector=""
--|       yChannelSelector=""
--|       zoomAndPan=""
--
--#data
--<!DOCTYPE html><BODY><SVG ATTRIBUTENAME='' ATTRIBUTETYPE='' BASEFREQUENCY='' BASEPROFILE='' CALCMODE='' CLIPPATHUNITS='' CONTENTSCRIPTTYPE='' CONTENTSTYLETYPE='' DIFFUSECONSTANT='' EDGEMODE='' EXTERNALRESOURCESREQUIRED='' FILTERRES='' FILTERUNITS='' GLYPHREF='' GRADIENTTRANSFORM='' GRADIENTUNITS='' KERNELMATRIX='' KERNELUNITLENGTH='' KEYPOINTS='' KEYSPLINES='' KEYTIMES='' LENGTHADJUST='' LIMITINGCONEANGLE='' MARKERHEIGHT='' MARKERUNITS='' MARKERWIDTH='' MASKCONTENTUNITS='' MASKUNITS='' NUMOCTAVES='' PATHLENGTH='' PATTERNCONTENTUNITS='' PATTERNTRANSFORM='' PATTERNUNITS='' POINTSATX='' POINTSATY='' POINTSATZ='' PRESERVEALPHA='' PRESERVEASPECTRATIO='' PRIMITIVEUNITS='' REFX='' REFY='' REPEATCOUNT='' REPEATDUR='' REQUIREDEXTENSIONS='' REQUIREDFEATURES='' SPECULARCONSTANT='' SPECULAREXPONENT='' SPREADMETHOD='' STARTOFFSET='' STDDEVIATION='' STITCHTILES='' SURFACESCALE='' SYSTEMLANGUAGE='' TABLEVALUES='' TARGETX='' TARGETY='' TEXTLENGTH='' VIEWBOX='' VIEWTARGET='' XCHANNELSELECTOR='' YCHANNELSELECTOR='' ZOOMANDPAN=''></SVG>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       attributeName=""
--|       attributeType=""
--|       baseFrequency=""
--|       baseProfile=""
--|       calcMode=""
--|       clipPathUnits=""
--|       contentScriptType=""
--|       contentStyleType=""
--|       diffuseConstant=""
--|       edgeMode=""
--|       externalResourcesRequired=""
--|       filterRes=""
--|       filterUnits=""
--|       glyphRef=""
--|       gradientTransform=""
--|       gradientUnits=""
--|       kernelMatrix=""
--|       kernelUnitLength=""
--|       keyPoints=""
--|       keySplines=""
--|       keyTimes=""
--|       lengthAdjust=""
--|       limitingConeAngle=""
--|       markerHeight=""
--|       markerUnits=""
--|       markerWidth=""
--|       maskContentUnits=""
--|       maskUnits=""
--|       numOctaves=""
--|       pathLength=""
--|       patternContentUnits=""
--|       patternTransform=""
--|       patternUnits=""
--|       pointsAtX=""
--|       pointsAtY=""
--|       pointsAtZ=""
--|       preserveAlpha=""
--|       preserveAspectRatio=""
--|       primitiveUnits=""
--|       refX=""
--|       refY=""
--|       repeatCount=""
--|       repeatDur=""
--|       requiredExtensions=""
--|       requiredFeatures=""
--|       specularConstant=""
--|       specularExponent=""
--|       spreadMethod=""
--|       startOffset=""
--|       stdDeviation=""
--|       stitchTiles=""
--|       surfaceScale=""
--|       systemLanguage=""
--|       tableValues=""
--|       targetX=""
--|       targetY=""
--|       textLength=""
--|       viewBox=""
--|       viewTarget=""
--|       xChannelSelector=""
--|       yChannelSelector=""
--|       zoomAndPan=""
--
--#data
--<!DOCTYPE html><body><svg attributename='' attributetype='' basefrequency='' baseprofile='' calcmode='' clippathunits='' contentscripttype='' contentstyletype='' diffuseconstant='' edgemode='' externalresourcesrequired='' filterres='' filterunits='' glyphref='' gradienttransform='' gradientunits='' kernelmatrix='' kernelunitlength='' keypoints='' keysplines='' keytimes='' lengthadjust='' limitingconeangle='' markerheight='' markerunits='' markerwidth='' maskcontentunits='' maskunits='' numoctaves='' pathlength='' patterncontentunits='' patterntransform='' patternunits='' pointsatx='' pointsaty='' pointsatz='' preservealpha='' preserveaspectratio='' primitiveunits='' refx='' refy='' repeatcount='' repeatdur='' requiredextensions='' requiredfeatures='' specularconstant='' specularexponent='' spreadmethod='' startoffset='' stddeviation='' stitchtiles='' surfacescale='' systemlanguage='' tablevalues='' targetx='' targety='' textlength='' viewbox='' viewtarget='' xchannelselector='' ychannelselector='' zoomandpan=''></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       attributeName=""
--|       attributeType=""
--|       baseFrequency=""
--|       baseProfile=""
--|       calcMode=""
--|       clipPathUnits=""
--|       contentScriptType=""
--|       contentStyleType=""
--|       diffuseConstant=""
--|       edgeMode=""
--|       externalResourcesRequired=""
--|       filterRes=""
--|       filterUnits=""
--|       glyphRef=""
--|       gradientTransform=""
--|       gradientUnits=""
--|       kernelMatrix=""
--|       kernelUnitLength=""
--|       keyPoints=""
--|       keySplines=""
--|       keyTimes=""
--|       lengthAdjust=""
--|       limitingConeAngle=""
--|       markerHeight=""
--|       markerUnits=""
--|       markerWidth=""
--|       maskContentUnits=""
--|       maskUnits=""
--|       numOctaves=""
--|       pathLength=""
--|       patternContentUnits=""
--|       patternTransform=""
--|       patternUnits=""
--|       pointsAtX=""
--|       pointsAtY=""
--|       pointsAtZ=""
--|       preserveAlpha=""
--|       preserveAspectRatio=""
--|       primitiveUnits=""
--|       refX=""
--|       refY=""
--|       repeatCount=""
--|       repeatDur=""
--|       requiredExtensions=""
--|       requiredFeatures=""
--|       specularConstant=""
--|       specularExponent=""
--|       spreadMethod=""
--|       startOffset=""
--|       stdDeviation=""
--|       stitchTiles=""
--|       surfaceScale=""
--|       systemLanguage=""
--|       tableValues=""
--|       targetX=""
--|       targetY=""
--|       textLength=""
--|       viewBox=""
--|       viewTarget=""
--|       xChannelSelector=""
--|       yChannelSelector=""
--|       zoomAndPan=""
--
--#data
--<!DOCTYPE html><body><math attributeName='' attributeType='' baseFrequency='' baseProfile='' calcMode='' clipPathUnits='' contentScriptType='' contentStyleType='' diffuseConstant='' edgeMode='' externalResourcesRequired='' filterRes='' filterUnits='' glyphRef='' gradientTransform='' gradientUnits='' kernelMatrix='' kernelUnitLength='' keyPoints='' keySplines='' keyTimes='' lengthAdjust='' limitingConeAngle='' markerHeight='' markerUnits='' markerWidth='' maskContentUnits='' maskUnits='' numOctaves='' pathLength='' patternContentUnits='' patternTransform='' patternUnits='' pointsAtX='' pointsAtY='' pointsAtZ='' preserveAlpha='' preserveAspectRatio='' primitiveUnits='' refX='' refY='' repeatCount='' repeatDur='' requiredExtensions='' requiredFeatures='' specularConstant='' specularExponent='' spreadMethod='' startOffset='' stdDeviation='' stitchTiles='' surfaceScale='' systemLanguage='' tableValues='' targetX='' targetY='' textLength='' viewBox='' viewTarget='' xChannelSelector='' yChannelSelector='' zoomAndPan=''></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       attributename=""
--|       attributetype=""
--|       basefrequency=""
--|       baseprofile=""
--|       calcmode=""
--|       clippathunits=""
--|       contentscripttype=""
--|       contentstyletype=""
--|       diffuseconstant=""
--|       edgemode=""
--|       externalresourcesrequired=""
--|       filterres=""
--|       filterunits=""
--|       glyphref=""
--|       gradienttransform=""
--|       gradientunits=""
--|       kernelmatrix=""
--|       kernelunitlength=""
--|       keypoints=""
--|       keysplines=""
--|       keytimes=""
--|       lengthadjust=""
--|       limitingconeangle=""
--|       markerheight=""
--|       markerunits=""
--|       markerwidth=""
--|       maskcontentunits=""
--|       maskunits=""
--|       numoctaves=""
--|       pathlength=""
--|       patterncontentunits=""
--|       patterntransform=""
--|       patternunits=""
--|       pointsatx=""
--|       pointsaty=""
--|       pointsatz=""
--|       preservealpha=""
--|       preserveaspectratio=""
--|       primitiveunits=""
--|       refx=""
--|       refy=""
--|       repeatcount=""
--|       repeatdur=""
--|       requiredextensions=""
--|       requiredfeatures=""
--|       specularconstant=""
--|       specularexponent=""
--|       spreadmethod=""
--|       startoffset=""
--|       stddeviation=""
--|       stitchtiles=""
--|       surfacescale=""
--|       systemlanguage=""
--|       tablevalues=""
--|       targetx=""
--|       targety=""
--|       textlength=""
--|       viewbox=""
--|       viewtarget=""
--|       xchannelselector=""
--|       ychannelselector=""
--|       zoomandpan=""
--
--#data
--<!DOCTYPE html><body><svg><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg altGlyph>
--|       <svg altGlyphDef>
--|       <svg altGlyphItem>
--|       <svg animateColor>
--|       <svg animateMotion>
--|       <svg animateTransform>
--|       <svg clipPath>
--|       <svg feBlend>
--|       <svg feColorMatrix>
--|       <svg feComponentTransfer>
--|       <svg feComposite>
--|       <svg feConvolveMatrix>
--|       <svg feDiffuseLighting>
--|       <svg feDisplacementMap>
--|       <svg feDistantLight>
--|       <svg feFlood>
--|       <svg feFuncA>
--|       <svg feFuncB>
--|       <svg feFuncG>
--|       <svg feFuncR>
--|       <svg feGaussianBlur>
--|       <svg feImage>
--|       <svg feMerge>
--|       <svg feMergeNode>
--|       <svg feMorphology>
--|       <svg feOffset>
--|       <svg fePointLight>
--|       <svg feSpecularLighting>
--|       <svg feSpotLight>
--|       <svg feTile>
--|       <svg feTurbulence>
--|       <svg foreignObject>
--|       <svg glyphRef>
--|       <svg linearGradient>
--|       <svg radialGradient>
--|       <svg textPath>
--
--#data
--<!DOCTYPE html><body><svg><altglyph /><altglyphdef /><altglyphitem /><animatecolor /><animatemotion /><animatetransform /><clippath /><feblend /><fecolormatrix /><fecomponenttransfer /><fecomposite /><feconvolvematrix /><fediffuselighting /><fedisplacementmap /><fedistantlight /><feflood /><fefunca /><fefuncb /><fefuncg /><fefuncr /><fegaussianblur /><feimage /><femerge /><femergenode /><femorphology /><feoffset /><fepointlight /><fespecularlighting /><fespotlight /><fetile /><feturbulence /><foreignobject /><glyphref /><lineargradient /><radialgradient /><textpath /></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg altGlyph>
--|       <svg altGlyphDef>
--|       <svg altGlyphItem>
--|       <svg animateColor>
--|       <svg animateMotion>
--|       <svg animateTransform>
--|       <svg clipPath>
--|       <svg feBlend>
--|       <svg feColorMatrix>
--|       <svg feComponentTransfer>
--|       <svg feComposite>
--|       <svg feConvolveMatrix>
--|       <svg feDiffuseLighting>
--|       <svg feDisplacementMap>
--|       <svg feDistantLight>
--|       <svg feFlood>
--|       <svg feFuncA>
--|       <svg feFuncB>
--|       <svg feFuncG>
--|       <svg feFuncR>
--|       <svg feGaussianBlur>
--|       <svg feImage>
--|       <svg feMerge>
--|       <svg feMergeNode>
--|       <svg feMorphology>
--|       <svg feOffset>
--|       <svg fePointLight>
--|       <svg feSpecularLighting>
--|       <svg feSpotLight>
--|       <svg feTile>
--|       <svg feTurbulence>
--|       <svg foreignObject>
--|       <svg glyphRef>
--|       <svg linearGradient>
--|       <svg radialGradient>
--|       <svg textPath>
--
--#data
--<!DOCTYPE html><BODY><SVG><ALTGLYPH /><ALTGLYPHDEF /><ALTGLYPHITEM /><ANIMATECOLOR /><ANIMATEMOTION /><ANIMATETRANSFORM /><CLIPPATH /><FEBLEND /><FECOLORMATRIX /><FECOMPONENTTRANSFER /><FECOMPOSITE /><FECONVOLVEMATRIX /><FEDIFFUSELIGHTING /><FEDISPLACEMENTMAP /><FEDISTANTLIGHT /><FEFLOOD /><FEFUNCA /><FEFUNCB /><FEFUNCG /><FEFUNCR /><FEGAUSSIANBLUR /><FEIMAGE /><FEMERGE /><FEMERGENODE /><FEMORPHOLOGY /><FEOFFSET /><FEPOINTLIGHT /><FESPECULARLIGHTING /><FESPOTLIGHT /><FETILE /><FETURBULENCE /><FOREIGNOBJECT /><GLYPHREF /><LINEARGRADIENT /><RADIALGRADIENT /><TEXTPATH /></SVG>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg altGlyph>
--|       <svg altGlyphDef>
--|       <svg altGlyphItem>
--|       <svg animateColor>
--|       <svg animateMotion>
--|       <svg animateTransform>
--|       <svg clipPath>
--|       <svg feBlend>
--|       <svg feColorMatrix>
--|       <svg feComponentTransfer>
--|       <svg feComposite>
--|       <svg feConvolveMatrix>
--|       <svg feDiffuseLighting>
--|       <svg feDisplacementMap>
--|       <svg feDistantLight>
--|       <svg feFlood>
--|       <svg feFuncA>
--|       <svg feFuncB>
--|       <svg feFuncG>
--|       <svg feFuncR>
--|       <svg feGaussianBlur>
--|       <svg feImage>
--|       <svg feMerge>
--|       <svg feMergeNode>
--|       <svg feMorphology>
--|       <svg feOffset>
--|       <svg fePointLight>
--|       <svg feSpecularLighting>
--|       <svg feSpotLight>
--|       <svg feTile>
--|       <svg feTurbulence>
--|       <svg foreignObject>
--|       <svg glyphRef>
--|       <svg linearGradient>
--|       <svg radialGradient>
--|       <svg textPath>
--
--#data
--<!DOCTYPE html><body><math><altGlyph /><altGlyphDef /><altGlyphItem /><animateColor /><animateMotion /><animateTransform /><clipPath /><feBlend /><feColorMatrix /><feComponentTransfer /><feComposite /><feConvolveMatrix /><feDiffuseLighting /><feDisplacementMap /><feDistantLight /><feFlood /><feFuncA /><feFuncB /><feFuncG /><feFuncR /><feGaussianBlur /><feImage /><feMerge /><feMergeNode /><feMorphology /><feOffset /><fePointLight /><feSpecularLighting /><feSpotLight /><feTile /><feTurbulence /><foreignObject /><glyphRef /><linearGradient /><radialGradient /><textPath /></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math altglyph>
--|       <math altglyphdef>
--|       <math altglyphitem>
--|       <math animatecolor>
--|       <math animatemotion>
--|       <math animatetransform>
--|       <math clippath>
--|       <math feblend>
--|       <math fecolormatrix>
--|       <math fecomponenttransfer>
--|       <math fecomposite>
--|       <math feconvolvematrix>
--|       <math fediffuselighting>
--|       <math fedisplacementmap>
--|       <math fedistantlight>
--|       <math feflood>
--|       <math fefunca>
--|       <math fefuncb>
--|       <math fefuncg>
--|       <math fefuncr>
--|       <math fegaussianblur>
--|       <math feimage>
--|       <math femerge>
--|       <math femergenode>
--|       <math femorphology>
--|       <math feoffset>
--|       <math fepointlight>
--|       <math fespecularlighting>
--|       <math fespotlight>
--|       <math fetile>
--|       <math feturbulence>
--|       <math foreignobject>
--|       <math glyphref>
--|       <math lineargradient>
--|       <math radialgradient>
--|       <math textpath>
--
--#data
--<!DOCTYPE html><body><svg><solidColor /></svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg solidcolor>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests12.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests12.dat
-deleted file mode 100644
-index 63107d2..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests12.dat
-+++ /dev/null
-@@ -1,62 +0,0 @@
--#data
--<!DOCTYPE html><body><p>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "foo"
--|       <math math>
--|         <math mtext>
--|           <i>
--|             "baz"
--|         <math annotation-xml>
--|           <svg svg>
--|             <svg desc>
--|               <b>
--|                 "eggs"
--|             <svg g>
--|               <svg foreignObject>
--|                 <p>
--|                   "spam"
--|                 <table>
--|                   <tbody>
--|                     <tr>
--|                       <td>
--|                         <img>
--|             <svg g>
--|               "quux"
--|       "bar"
--
--#data
--<!DOCTYPE html><body>foo<math><mtext><i>baz</i></mtext><annotation-xml><svg><desc><b>eggs</b></desc><g><foreignObject><P>spam<TABLE><tr><td><img></td></table></foreignObject></g><g>quux</g></svg></annotation-xml></math>bar
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "foo"
--|     <math math>
--|       <math mtext>
--|         <i>
--|           "baz"
--|       <math annotation-xml>
--|         <svg svg>
--|           <svg desc>
--|             <b>
--|               "eggs"
--|           <svg g>
--|             <svg foreignObject>
--|               <p>
--|                 "spam"
--|               <table>
--|                 <tbody>
--|                   <tr>
--|                     <td>
--|                       <img>
--|           <svg g>
--|             "quux"
--|     "bar"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests14.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests14.dat
-deleted file mode 100644
-index b8713f8..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests14.dat
-+++ /dev/null
-@@ -1,74 +0,0 @@
--#data
--<!DOCTYPE html><html><body><xyz:abc></xyz:abc>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <xyz:abc>
--
--#data
--<!DOCTYPE html><html><body><xyz:abc></xyz:abc><span></span>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <xyz:abc>
--|     <span>
--
--#data
--<!DOCTYPE html><html><html abc:def=gh><xyz:abc></xyz:abc>
--#errors
--15: Unexpected start tag html
--#document
--| <!DOCTYPE html>
--| <html>
--|   abc:def="gh"
--|   <head>
--|   <body>
--|     <xyz:abc>
--
--#data
--<!DOCTYPE html><html xml:lang=bar><html xml:lang=foo>
--#errors
--15: Unexpected start tag html
--#document
--| <!DOCTYPE html>
--| <html>
--|   xml:lang="bar"
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><html 123=456>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   123="456"
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><html 123=456><html 789=012>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   123="456"
--|   789="012"
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><html><body 789=012>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     789="012"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests15.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests15.dat
-deleted file mode 100644
-index 6ce1c0d..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests15.dat
-+++ /dev/null
-@@ -1,208 +0,0 @@
--#data
--<!DOCTYPE html><p><b><i><u></p> <p>X
--#errors
--Line: 1 Col: 31 Unexpected end tag (p). Ignored.
--Line: 1 Col: 36 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|         <i>
--|           <u>
--|     <b>
--|       <i>
--|         <u>
--|           " "
--|           <p>
--|             "X"
--
--#data
--<p><b><i><u></p>
--<p>X
--#errors
--Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected end tag (p). Ignored.
--Line: 2 Col: 4 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|         <i>
--|           <u>
--|     <b>
--|       <i>
--|         <u>
--|           "
--"
--|           <p>
--|             "X"
--
--#data
--<!doctype html></html> <head>
--#errors
--Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " "
--
--#data
--<!doctype html></body><meta>
--#errors
--Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <meta>
--
--#data
--<html></html><!-- foo -->
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element.
--#document
--| <html>
--|   <head>
--|   <body>
--| <!--  foo  -->
--
--#data
--<!doctype html></body><title>X</title>
--#errors
--Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <title>
--|       "X"
--
--#data
--<!doctype html><table> X<meta></table>
--#errors
--Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " X"
--|     <meta>
--|     <table>
--
--#data
--<!doctype html><table> x</table>
--#errors
--Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " x"
--|     <table>
--
--#data
--<!doctype html><table> x </table>
--#errors
--Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " x "
--|     <table>
--
--#data
--<!doctype html><table><tr> x</table>
--#errors
--Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " x"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><table>X<style> <tr>x </style> </table>
--#errors
--Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--|     <table>
--|       <style>
--|         " <tr>x "
--|       " "
--
--#data
--<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div>
--#errors
--Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode.
--Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <a>
--|         "foo"
--|       <table>
--|         " "
--|         <tbody>
--|           <tr>
--|             <td>
--|               "bar"
--|             " "
--
--#data
--<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes>
--#errors
--6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--13: Stray start tag “frame”.
--21: Stray end tag “frame”.
--29: Stray end tag “frame”.
--39: “frameset” start tag after “body” already open.
--105: End of file seen inside an [R]CDATA element.
--105: End of file seen and there were open elements.
--XXX: These errors are wrong, please fix me!
--#document
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--|     <frameset>
--|       <frame>
--|     <noframes>
--|       "</frameset><noframes>"
--
--#data
--<!DOCTYPE html><object></html>
--#errors
--1: Expected closing tag. Unexpected end of file
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <object>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests16.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests16.dat
-deleted file mode 100644
-index c8ef66f..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests16.dat
-+++ /dev/null
-@@ -1,2299 +0,0 @@
--#data
--<!doctype html><script>
--#errors
--Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<!doctype html><script>a
--#errors
--Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "a"
--|   <body>
--
--#data
--<!doctype html><script><
--#errors
--Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<"
--|   <body>
--
--#data
--<!doctype html><script></
--#errors
--Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</"
--|   <body>
--
--#data
--<!doctype html><script></S
--#errors
--Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</S"
--|   <body>
--
--#data
--<!doctype html><script></SC
--#errors
--Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</SC"
--|   <body>
--
--#data
--<!doctype html><script></SCR
--#errors
--Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</SCR"
--|   <body>
--
--#data
--<!doctype html><script></SCRI
--#errors
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</SCRI"
--|   <body>
--
--#data
--<!doctype html><script></SCRIP
--#errors
--Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</SCRIP"
--|   <body>
--
--#data
--<!doctype html><script></SCRIPT
--#errors
--Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</SCRIPT"
--|   <body>
--
--#data
--<!doctype html><script></SCRIPT 
--#errors
--Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<!doctype html><script></s
--#errors
--Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</s"
--|   <body>
--
--#data
--<!doctype html><script></sc
--#errors
--Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</sc"
--|   <body>
--
--#data
--<!doctype html><script></scr
--#errors
--Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</scr"
--|   <body>
--
--#data
--<!doctype html><script></scri
--#errors
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</scri"
--|   <body>
--
--#data
--<!doctype html><script></scrip
--#errors
--Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</scrip"
--|   <body>
--
--#data
--<!doctype html><script></script
--#errors
--Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "</script"
--|   <body>
--
--#data
--<!doctype html><script></script 
--#errors
--Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<!doctype html><script><!
--#errors
--Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!"
--|   <body>
--
--#data
--<!doctype html><script><!a
--#errors
--Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!a"
--|   <body>
--
--#data
--<!doctype html><script><!-
--#errors
--Line: 1 Col: 26 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!-"
--|   <body>
--
--#data
--<!doctype html><script><!-a
--#errors
--Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!-a"
--|   <body>
--
--#data
--<!doctype html><script><!--
--#errors
--Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--"
--|   <body>
--
--#data
--<!doctype html><script><!--a
--#errors
--Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--a"
--|   <body>
--
--#data
--<!doctype html><script><!--<
--#errors
--Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<"
--|   <body>
--
--#data
--<!doctype html><script><!--<a
--#errors
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<a"
--|   <body>
--
--#data
--<!doctype html><script><!--</
--#errors
--Line: 1 Col: 27 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--</"
--|   <body>
--
--#data
--<!doctype html><script><!--</script
--#errors
--Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--</script"
--|   <body>
--
--#data
--<!doctype html><script><!--</script 
--#errors
--Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--"
--|   <body>
--
--#data
--<!doctype html><script><!--<s
--#errors
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<s"
--|   <body>
--
--#data
--<!doctype html><script><!--<script
--#errors
--Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script"
--|   <body>
--
--#data
--<!doctype html><script><!--<script 
--#errors
--Line: 1 Col: 35 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script "
--|   <body>
--
--#data
--<!doctype html><script><!--<script <
--#errors
--Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script <"
--|   <body>
--
--#data
--<!doctype html><script><!--<script <a
--#errors
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script <a"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </
--#errors
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </s
--#errors
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </s"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script
--#errors
--Line: 1 Col: 43 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </scripta
--#errors
--Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </scripta"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script 
--#errors
--Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script>
--#errors
--Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script>"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script/
--#errors
--Line: 1 Col: 44 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script/"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script <
--#errors
--Line: 1 Col: 45 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script <"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script <a
--#errors
--Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script <a"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script </
--#errors
--Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script </"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script </script
--#errors
--Line: 1 Col: 52 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script </script"
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script </script 
--#errors
--Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script </script/
--#errors
--Line: 1 Col: 53 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<!doctype html><script><!--<script </script </script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<!doctype html><script><!--<script -
--#errors
--Line: 1 Col: 36 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -"
--|   <body>
--
--#data
--<!doctype html><script><!--<script -a
--#errors
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -a"
--|   <body>
--
--#data
--<!doctype html><script><!--<script -<
--#errors
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -<"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --
--#errors
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --a
--#errors
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --a"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --<
--#errors
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --<"
--|   <body>
--
--#data
--<!doctype html><script><!--<script -->
--#errors
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --><
--#errors
--Line: 1 Col: 39 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --><"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --></
--#errors
--Line: 1 Col: 40 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --></"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --></script
--#errors
--Line: 1 Col: 46 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --></script"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --></script 
--#errors
--Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --></script/
--#errors
--Line: 1 Col: 47 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script --></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script><\/script>--></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script><\/script>-->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></scr'+'ipt>--></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></scr'+'ipt>-->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script>--><!--</script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>--><!--"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script>-- ></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>-- >"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script>- -></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>- ->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script>- - ></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>- - >"
--|   <body>
--
--#data
--<!doctype html><script><!--<script></script><script></script>-></script>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>->"
--|   <body>
--
--#data
--<!doctype html><script><!--<script>--!></script>X
--#errors
--Line: 1 Col: 49 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script>--!></script>X"
--|   <body>
--
--#data
--<!doctype html><script><!--<scr'+'ipt></script>--></script>
--#errors
--Line: 1 Col: 59 Unexpected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<scr'+'ipt>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><script><!--<script></scr'+'ipt></script>X
--#errors
--Line: 1 Col: 57 Unexpected end of file. Expected end tag (script).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></scr'+'ipt></script>X"
--|   <body>
--
--#data
--<!doctype html><style><!--<style></style>--></style>
--#errors
--Line: 1 Col: 52 Unexpected end tag (style).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--<style>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><style><!--</style>X
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--"
--|   <body>
--|     "X"
--
--#data
--<!doctype html><style><!--...</style>...--></style>
--#errors
--Line: 1 Col: 51 Unexpected end tag (style).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--..."
--|   <body>
--|     "...-->"
--
--#data
--<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
--|   <body>
--|     "X"
--
--#data
--<!doctype html><style><!--...<style><!--...--!></style>--></style>
--#errors
--Line: 1 Col: 66 Unexpected end tag (style).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--...<style><!--...--!>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><style><!--...</style><!-- --><style>@import ...</style>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "<!--..."
--|     <!--   -->
--|     <style>
--|       "@import ..."
--|   <body>
--
--#data
--<!doctype html><style>...<style><!--...</style><!-- --></style>
--#errors
--Line: 1 Col: 63 Unexpected end tag (style).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "...<style><!--..."
--|     <!--   -->
--|   <body>
--
--#data
--<!doctype html><style>...<!--[if IE]><style>...</style>X
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <style>
--|       "...<!--[if IE]><style>..."
--|   <body>
--|     "X"
--
--#data
--<!doctype html><title><!--<title></title>--></title>
--#errors
--Line: 1 Col: 52 Unexpected end tag (title).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "<!--<title>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><title>&lt;/title></title>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "</title>"
--|   <body>
--
--#data
--<!doctype html><title>foo/title><link></head><body>X
--#errors
--Line: 1 Col: 52 Unexpected end of file. Expected end tag (title).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "foo/title><link></head><body>X"
--|   <body>
--
--#data
--<!doctype html><noscript><!--<noscript></noscript>--></noscript>
--#errors
--Line: 1 Col: 64 Unexpected end tag (noscript).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <noscript>
--|       "<!--<noscript>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><noscript><!--</noscript>X<noscript>--></noscript>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <noscript>
--|       "<!--"
--|   <body>
--|     "X"
--|     <noscript>
--|       "-->"
--
--#data
--<!doctype html><noscript><iframe></noscript>X
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <noscript>
--|       "<iframe>"
--|   <body>
--|     "X"
--
--#data
--<!doctype html><noframes><!--<noframes></noframes>--></noframes>
--#errors
--Line: 1 Col: 64 Unexpected end tag (noframes).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <noframes>
--|       "<!--<noframes>"
--|   <body>
--|     "-->"
--
--#data
--<!doctype html><noframes><body><script><!--...</script></body></noframes></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <noframes>
--|       "<body><script><!--...</script></body>"
--|   <body>
--
--#data
--<!doctype html><textarea><!--<textarea></textarea>--></textarea>
--#errors
--Line: 1 Col: 64 Unexpected end tag (textarea).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<!--<textarea>"
--|     "-->"
--
--#data
--<!doctype html><textarea>&lt;/textarea></textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "</textarea>"
--
--#data
--<!doctype html><textarea>&lt;</textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<"
--
--#data
--<!doctype html><textarea>a&lt;b</textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "a<b"
--
--#data
--<!doctype html><iframe><!--<iframe></iframe>--></iframe>
--#errors
--Line: 1 Col: 56 Unexpected end tag (iframe).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       "<!--<iframe>"
--|     "-->"
--
--#data
--<!doctype html><iframe>...<!--X->...<!--/X->...</iframe>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       "...<!--X->...<!--/X->..."
--
--#data
--<!doctype html><xmp><!--<xmp></xmp>--></xmp>
--#errors
--Line: 1 Col: 44 Unexpected end tag (xmp).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <xmp>
--|       "<!--<xmp>"
--|     "-->"
--
--#data
--<!doctype html><noembed><!--<noembed></noembed>--></noembed>
--#errors
--Line: 1 Col: 60 Unexpected end tag (noembed).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <noembed>
--|       "<!--<noembed>"
--|     "-->"
--
--#data
--<script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 8 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<script>a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "a"
--|   <body>
--
--#data
--<script><
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 9 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<"
--|   <body>
--
--#data
--<script></
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</"
--|   <body>
--
--#data
--<script></S
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</S"
--|   <body>
--
--#data
--<script></SC
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</SC"
--|   <body>
--
--#data
--<script></SCR
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</SCR"
--|   <body>
--
--#data
--<script></SCRI
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</SCRI"
--|   <body>
--
--#data
--<script></SCRIP
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</SCRIP"
--|   <body>
--
--#data
--<script></SCRIPT
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</SCRIPT"
--|   <body>
--
--#data
--<script></SCRIPT 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<script></s
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</s"
--|   <body>
--
--#data
--<script></sc
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</sc"
--|   <body>
--
--#data
--<script></scr
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</scr"
--|   <body>
--
--#data
--<script></scri
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</scri"
--|   <body>
--
--#data
--<script></scrip
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 15 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</scrip"
--|   <body>
--
--#data
--<script></script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</script"
--|   <body>
--
--#data
--<script></script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 17 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<script><!
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 10 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!"
--|   <body>
--
--#data
--<script><!a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!a"
--|   <body>
--
--#data
--<script><!-
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!-"
--|   <body>
--
--#data
--<script><!-a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!-a"
--|   <body>
--
--#data
--<script><!--
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--"
--|   <body>
--
--#data
--<script><!--a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--a"
--|   <body>
--
--#data
--<script><!--<
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<"
--|   <body>
--
--#data
--<script><!--<a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<a"
--|   <body>
--
--#data
--<script><!--</
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--</"
--|   <body>
--
--#data
--<script><!--</script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--</script"
--|   <body>
--
--#data
--<script><!--</script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--"
--|   <body>
--
--#data
--<script><!--<s
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<s"
--|   <body>
--
--#data
--<script><!--<script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 19 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script"
--|   <body>
--
--#data
--<script><!--<script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script "
--|   <body>
--
--#data
--<script><!--<script <
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script <"
--|   <body>
--
--#data
--<script><!--<script <a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script <a"
--|   <body>
--
--#data
--<script><!--<script </
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </"
--|   <body>
--
--#data
--<script><!--<script </s
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </s"
--|   <body>
--
--#data
--<script><!--<script </script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 28 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script"
--|   <body>
--
--#data
--<script><!--<script </scripta
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </scripta"
--|   <body>
--
--#data
--<script><!--<script </script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<script><!--<script </script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script>"
--|   <body>
--
--#data
--<script><!--<script </script/
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 29 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script/"
--|   <body>
--
--#data
--<script><!--<script </script <
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 30 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script <"
--|   <body>
--
--#data
--<script><!--<script </script <a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script <a"
--|   <body>
--
--#data
--<script><!--<script </script </
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script </"
--|   <body>
--
--#data
--<script><!--<script </script </script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script </script"
--|   <body>
--
--#data
--<script><!--<script </script </script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<script><!--<script </script </script/
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 38 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<script><!--<script </script </script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script </script "
--|   <body>
--
--#data
--<script><!--<script -
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 21 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -"
--|   <body>
--
--#data
--<script><!--<script -a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -a"
--|   <body>
--
--#data
--<script><!--<script --
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --"
--|   <body>
--
--#data
--<script><!--<script --a
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --a"
--|   <body>
--
--#data
--<script><!--<script -->
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<script><!--<script --><
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 24 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --><"
--|   <body>
--
--#data
--<script><!--<script --></
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 25 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --></"
--|   <body>
--
--#data
--<script><!--<script --></script
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 31 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script --></script"
--|   <body>
--
--#data
--<script><!--<script --></script 
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<script><!--<script --></script/
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 32 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<script><!--<script --></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script -->"
--|   <body>
--
--#data
--<script><!--<script><\/script>--></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script><\/script>-->"
--|   <body>
--
--#data
--<script><!--<script></scr'+'ipt>--></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></scr'+'ipt>-->"
--|   <body>
--
--#data
--<script><!--<script></script><script></script></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>"
--|   <body>
--
--#data
--<script><!--<script></script><script></script>--><!--</script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>--><!--"
--|   <body>
--
--#data
--<script><!--<script></script><script></script>-- ></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>-- >"
--|   <body>
--
--#data
--<script><!--<script></script><script></script>- -></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>- ->"
--|   <body>
--
--#data
--<script><!--<script></script><script></script>- - ></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>- - >"
--|   <body>
--
--#data
--<script><!--<script></script><script></script>-></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></script><script></script>->"
--|   <body>
--
--#data
--<script><!--<script>--!></script>X
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 34 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script>--!></script>X"
--|   <body>
--
--#data
--<script><!--<scr'+'ipt></script>--></script>
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 44 Unexpected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<scr'+'ipt>"
--|   <body>
--|     "-->"
--
--#data
--<script><!--<script></scr'+'ipt></script>X
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 42 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "<!--<script></scr'+'ipt></script>X"
--|   <body>
--
--#data
--<style><!--<style></style>--></style>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 37 Unexpected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--<style>"
--|   <body>
--|     "-->"
--
--#data
--<style><!--</style>X
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--"
--|   <body>
--|     "X"
--
--#data
--<style><!--...</style>...--></style>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 36 Unexpected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--..."
--|   <body>
--|     "...-->"
--
--#data
--<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>"
--|   <body>
--|     "X"
--
--#data
--<style><!--...<style><!--...--!></style>--></style>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 51 Unexpected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--...<style><!--...--!>"
--|   <body>
--|     "-->"
--
--#data
--<style><!--...</style><!-- --><style>@import ...</style>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       "<!--..."
--|     <!--   -->
--|     <style>
--|       "@import ..."
--|   <body>
--
--#data
--<style>...<style><!--...</style><!-- --></style>
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 48 Unexpected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       "...<style><!--..."
--|     <!--   -->
--|   <body>
--
--#data
--<style>...<!--[if IE]><style>...</style>X
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       "...<!--[if IE]><style>..."
--|   <body>
--|     "X"
--
--#data
--<title><!--<title></title>--></title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--Line: 1 Col: 37 Unexpected end tag (title).
--#document
--| <html>
--|   <head>
--|     <title>
--|       "<!--<title>"
--|   <body>
--|     "-->"
--
--#data
--<title>&lt;/title></title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|       "</title>"
--|   <body>
--
--#data
--<title>foo/title><link></head><body>X
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--Line: 1 Col: 37 Unexpected end of file. Expected end tag (title).
--#document
--| <html>
--|   <head>
--|     <title>
--|       "foo/title><link></head><body>X"
--|   <body>
--
--#data
--<noscript><!--<noscript></noscript>--></noscript>
--#errors
--Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
--Line: 1 Col: 49 Unexpected end tag (noscript).
--#document
--| <html>
--|   <head>
--|     <noscript>
--|       "<!--<noscript>"
--|   <body>
--|     "-->"
--
--#data
--<noscript><!--</noscript>X<noscript>--></noscript>
--#errors
--Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <noscript>
--|       "<!--"
--|   <body>
--|     "X"
--|     <noscript>
--|       "-->"
--
--#data
--<noscript><iframe></noscript>X
--#errors
--Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <noscript>
--|       "<iframe>"
--|   <body>
--|     "X"
--
--#data
--<noframes><!--<noframes></noframes>--></noframes>
--#errors
--Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
--Line: 1 Col: 49 Unexpected end tag (noframes).
--#document
--| <html>
--|   <head>
--|     <noframes>
--|       "<!--<noframes>"
--|   <body>
--|     "-->"
--
--#data
--<noframes><body><script><!--...</script></body></noframes></html>
--#errors
--Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <noframes>
--|       "<body><script><!--...</script></body>"
--|   <body>
--
--#data
--<textarea><!--<textarea></textarea>--></textarea>
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--Line: 1 Col: 49 Unexpected end tag (textarea).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "<!--<textarea>"
--|     "-->"
--
--#data
--<textarea>&lt;/textarea></textarea>
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "</textarea>"
--
--#data
--<iframe><!--<iframe></iframe>--></iframe>
--#errors
--Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
--Line: 1 Col: 41 Unexpected end tag (iframe).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       "<!--<iframe>"
--|     "-->"
--
--#data
--<iframe>...<!--X->...<!--/X->...</iframe>
--#errors
--Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       "...<!--X->...<!--/X->..."
--
--#data
--<xmp><!--<xmp></xmp>--></xmp>
--#errors
--Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
--Line: 1 Col: 29 Unexpected end tag (xmp).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <xmp>
--|       "<!--<xmp>"
--|     "-->"
--
--#data
--<noembed><!--<noembed></noembed>--></noembed>
--#errors
--Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE.
--Line: 1 Col: 45 Unexpected end tag (noembed).
--#document
--| <html>
--|   <head>
--|   <body>
--|     <noembed>
--|       "<!--<noembed>"
--|     "-->"
--
--#data
--<!doctype html><table>
--
--#errors
--Line 2 Col 0 Unexpected end of file. Expected table content.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       "
--"
--
--#data
--<!doctype html><table><td><span><font></span><span>
--#errors
--Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase.
--Line 1 Col 45 Unexpected end tag (span).
--Line 1 Col 51 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <span>
--|               <font>
--|             <font>
--|               <span>
--
--#data
--<!doctype html><form><table></form><form></table></form>
--#errors
--35: Stray end tag “form”.
--41: Start tag “form” seen in “table”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <table>
--|         <form>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests17.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests17.dat
-deleted file mode 100644
-index 7b555f8..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests17.dat
-+++ /dev/null
-@@ -1,153 +0,0 @@
--#data
--<!doctype html><table><tbody><select><tr>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><table><tr><select><td>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<!doctype html><table><tr><td><select><td>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <select>
--|           <td>
--
--#data
--<!doctype html><table><tr><th><select><td>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <th>
--|             <select>
--|           <td>
--
--#data
--<!doctype html><table><caption><select><tr>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <select>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><select><tr>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><td>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><th>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><tbody>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><thead>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><tfoot>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><select><caption>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><table><tr></table>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|     "a"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests18.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests18.dat
-deleted file mode 100644
-index 680e1f0..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests18.dat
-+++ /dev/null
-@@ -1,269 +0,0 @@
--#data
--<!doctype html><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--
--#data
--<!doctype html><table><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--|     <table>
--
--#data
--<!doctype html><table><tbody><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--|     <table>
--|       <tbody>
--
--#data
--<!doctype html><table><tbody><tr><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><table><tbody><tr><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><table><td><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <plaintext>
--|               "</plaintext>"
--
--#data
--<!doctype html><table><caption><plaintext></plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <plaintext>
--|           "</plaintext>"
--
--#data
--<!doctype html><table><tr><style></script></style>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "abc"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <style>
--|             "</script>"
--
--#data
--<!doctype html><table><tr><script></style></script>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "abc"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <script>
--|             "</style>"
--
--#data
--<!doctype html><table><caption><style></script></style>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <style>
--|           "</script>"
--|         "abc"
--
--#data
--<!doctype html><table><td><style></script></style>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <style>
--|               "</script>"
--|             "abc"
--
--#data
--<!doctype html><select><script></style></script>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <script>
--|         "</style>"
--|       "abc"
--
--#data
--<!doctype html><table><select><script></style></script>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <script>
--|         "</style>"
--|       "abc"
--|     <table>
--
--#data
--<!doctype html><table><tr><select><script></style></script>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <script>
--|         "</style>"
--|       "abc"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><frameset></frameset><noframes>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|   <noframes>
--|     "abc"
--
--#data
--<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|   <noframes>
--|     "abc"
--|   <!-- abc -->
--
--#data
--<!doctype html><frameset></frameset></html><noframes>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|   <noframes>
--|     "abc"
--
--#data
--<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|   <noframes>
--|     "abc"
--| <!-- abc -->
--
--#data
--<!doctype html><table><tr></tbody><tfoot>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|       <tfoot>
--
--#data
--<!doctype html><table><td><svg></svg>abc<td>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|             "abc"
--|           <td>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests19.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests19.dat
-deleted file mode 100644
-index 0d62f5a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests19.dat
-+++ /dev/null
-@@ -1,1237 +0,0 @@
--#data
--<!doctype html><math><mn DefinitionUrl="foo">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mn>
--|         definitionURL="foo"
--
--#data
--<!doctype html><html></p><!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <!-- foo -->
--|   <head>
--|   <body>
--
--#data
--<!doctype html><head></head></p><!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <!-- foo -->
--|   <body>
--
--#data
--<!doctype html><body><p><pre>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <pre>
--
--#data
--<!doctype html><body><p><listing>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <listing>
--
--#data
--<!doctype html><p><plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <plaintext>
--
--#data
--<!doctype html><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <h1>
--
--#data
--<!doctype html><form><isindex>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--
--#data
--<!doctype html><isindex action="POST">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       action="POST"
--|       <hr>
--|       <label>
--|         "This is a searchable index. Enter search keywords: "
--|         <input>
--|           name="isindex"
--|       <hr>
--
--#data
--<!doctype html><isindex prompt="this is isindex">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <hr>
--|       <label>
--|         "this is isindex"
--|         <input>
--|           name="isindex"
--|       <hr>
--
--#data
--<!doctype html><isindex type="hidden">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <hr>
--|       <label>
--|         "This is a searchable index. Enter search keywords: "
--|         <input>
--|           name="isindex"
--|           type="hidden"
--|       <hr>
--
--#data
--<!doctype html><isindex name="foo">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <hr>
--|       <label>
--|         "This is a searchable index. Enter search keywords: "
--|         <input>
--|           name="isindex"
--|       <hr>
--
--#data
--<!doctype html><ruby><p><rp>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <p>
--|       <rp>
--
--#data
--<!doctype html><ruby><div><span><rp>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <span>
--|           <rp>
--
--#data
--<!doctype html><ruby><div><p><rp>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <p>
--|         <rp>
--
--#data
--<!doctype html><ruby><p><rt>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <p>
--|       <rt>
--
--#data
--<!doctype html><ruby><div><span><rt>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <span>
--|           <rt>
--
--#data
--<!doctype html><ruby><div><p><rt>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <p>
--|         <rt>
--
--#data
--<!doctype html><math/><foo>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|     <foo>
--
--#data
--<!doctype html><svg/><foo>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|     <foo>
--
--#data
--<!doctype html><div></body><!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|   <!-- foo -->
--
--#data
--<!doctype html><h1><div><h3><span></h1>foo
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <h1>
--|       <div>
--|         <h3>
--|           <span>
--|         "foo"
--
--#data
--<!doctype html><p></h3>foo
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "foo"
--
--#data
--<!doctype html><h3><li>abc</h2>foo
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <h3>
--|       <li>
--|         "abc"
--|     "foo"
--
--#data
--<!doctype html><table>abc<!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "abc"
--|     <table>
--|       <!-- foo -->
--
--#data
--<!doctype html><table>  <!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       "  "
--|       <!-- foo -->
--
--#data
--<!doctype html><table> b <!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     " b "
--|     <table>
--|       <!-- foo -->
--
--#data
--<!doctype html><select><option><option>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|       <option>
--
--#data
--<!doctype html><select><option></optgroup>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--
--#data
--<!doctype html><select><option></optgroup>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--
--#data
--<!doctype html><p><math><mi><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math mi>
--|           <p>
--|           <h1>
--
--#data
--<!doctype html><p><math><mo><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math mo>
--|           <p>
--|           <h1>
--
--#data
--<!doctype html><p><math><mn><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math mn>
--|           <p>
--|           <h1>
--
--#data
--<!doctype html><p><math><ms><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math ms>
--|           <p>
--|           <h1>
--
--#data
--<!doctype html><p><math><mtext><p><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math mtext>
--|           <p>
--|           <h1>
--
--#data
--<!doctype html><frameset></noframes>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><html c=d><body></html><html a=b>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   a="b"
--|   c="d"
--|   <head>
--|   <body>
--
--#data
--<!doctype html><html c=d><frameset></frameset></html><html a=b>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   a="b"
--|   c="d"
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><html><frameset></frameset></html><!--foo-->
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--| <!-- foo -->
--
--#data
--<!doctype html><html><frameset></frameset></html>  
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|   "  "
--
--#data
--<!doctype html><html><frameset></frameset></html>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><html><frameset></frameset></html><p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><html><frameset></frameset></html></p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<html><frameset></frameset></html><!doctype html>
--#errors
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><body><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!doctype html><p><frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<!doctype html><p>a<frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "a"
--
--#data
--<!doctype html><p> <frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<!doctype html><pre><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--
--#data
--<!doctype html><listing><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <listing>
--
--#data
--<!doctype html><li><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <li>
--
--#data
--<!doctype html><dd><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <dd>
--
--#data
--<!doctype html><dt><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <dt>
--
--#data
--<!doctype html><button><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <button>
--
--#data
--<!doctype html><applet><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <applet>
--
--#data
--<!doctype html><marquee><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <marquee>
--
--#data
--<!doctype html><object><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <object>
--
--#data
--<!doctype html><table><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--
--#data
--<!doctype html><area><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <area>
--
--#data
--<!doctype html><basefont><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <basefont>
--|   <frameset>
--
--#data
--<!doctype html><bgsound><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <bgsound>
--|   <frameset>
--
--#data
--<!doctype html><br><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <br>
--
--#data
--<!doctype html><embed><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <embed>
--
--#data
--<!doctype html><img><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <img>
--
--#data
--<!doctype html><input><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <input>
--
--#data
--<!doctype html><keygen><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <keygen>
--
--#data
--<!doctype html><wbr><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <wbr>
--
--#data
--<!doctype html><hr><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <hr>
--
--#data
--<!doctype html><textarea></textarea><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--
--#data
--<!doctype html><xmp></xmp><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <xmp>
--
--#data
--<!doctype html><iframe></iframe><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--
--#data
--<!doctype html><select></select><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!doctype html><svg></svg><frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<!doctype html><math></math><frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<!doctype html><svg><foreignObject><div> <frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<!doctype html><svg>a</svg><frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "a"
--
--#data
--<!doctype html><svg> </svg><frameset><frame>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--|     <frame>
--
--#data
--<html>aaa<frameset></frameset>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "aaa"
--
--#data
--<html> a <frameset></frameset>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "a "
--
--#data
--<!doctype html><div><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><div><body><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--
--#data
--<!doctype html><p><math></p>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|     "a"
--
--#data
--<!doctype html><p><math><mn><span></p>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <math math>
--|         <math mn>
--|           <span>
--|             <p>
--|             "a"
--
--#data
--<!doctype html><math></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--
--#data
--<!doctype html><meta charset="ascii">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <meta>
--|       charset="ascii"
--|   <body>
--
--#data
--<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <meta>
--|       content="text/html;charset=ascii"
--|       http-equiv="content-type"
--|   <body>
--
--#data
--<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8">
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -->
--|     <meta>
--|       charset="utf8"
--|   <body>
--
--#data
--<!doctype html><html a=b><head></head><html c=d>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   a="b"
--|   c="d"
--|   <head>
--|   <body>
--
--#data
--<!doctype html><image/>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <img>
--
--#data
--<!doctype html>a<i>b<table>c<b>d</i>e</b>f
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "a"
--|     <i>
--|       "bc"
--|       <b>
--|         "de"
--|       "f"
--|       <table>
--
--#data
--<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "a"
--|       <b>
--|         "b"
--|     <b>
--|     <div>
--|       <b>
--|         <i>
--|           "c"
--|           <a>
--|             "d"
--|         <a>
--|           "e"
--|       <a>
--|         "f"
--|     <table>
--
--#data
--<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "a"
--|       <b>
--|         "b"
--|     <b>
--|     <div>
--|       <b>
--|         <i>
--|           "c"
--|           <a>
--|             "d"
--|         <a>
--|           "e"
--|       <a>
--|         "f"
--
--#data
--<!doctype html><table><i>a<b>b<div>c</i>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "a"
--|       <b>
--|         "b"
--|     <b>
--|       <div>
--|         <i>
--|           "c"
--|     <table>
--
--#data
--<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "a"
--|       <b>
--|         "b"
--|     <b>
--|     <div>
--|       <b>
--|         <i>
--|           "c"
--|           <a>
--|             "d"
--|         <a>
--|           "e"
--|       <a>
--|         "f"
--|     <table>
--
--#data
--<!doctype html><table><i>a<div>b<tr>c<b>d</i>e
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <i>
--|       "a"
--|       <div>
--|         "b"
--|     <i>
--|       "c"
--|       <b>
--|         "d"
--|     <b>
--|       "e"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><table><td><table><i>a<div>b<b>c</i>d
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <i>
--|               "a"
--|             <div>
--|               <i>
--|                 "b"
--|                 <b>
--|                   "c"
--|               <b>
--|                 "d"
--|             <table>
--
--#data
--<!doctype html><body><bgsound>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <bgsound>
--
--#data
--<!doctype html><body><basefont>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <basefont>
--
--#data
--<!doctype html><a><b></a><basefont>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|     <basefont>
--
--#data
--<!doctype html><a><b></a><bgsound>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|     <bgsound>
--
--#data
--<!doctype html><figcaption><article></figcaption>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <figcaption>
--|       <article>
--|     "a"
--
--#data
--<!doctype html><summary><article></summary>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <summary>
--|       <article>
--|     "a"
--
--#data
--<!doctype html><p><a><plaintext>b
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <a>
--|     <plaintext>
--|       <a>
--|         "b"
--
--#data
--<!DOCTYPE html><div>a<a></div>b<p>c</p>d
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "a"
--|       <a>
--|     <a>
--|       "b"
--|       <p>
--|         "c"
--|       "d"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests2.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests2.dat
-deleted file mode 100644
-index 60d8592..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests2.dat
-+++ /dev/null
-@@ -1,763 +0,0 @@
--#data
--<!DOCTYPE html>Test
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "Test"
--
--#data
--<textarea>test</div>test
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--Line: 1 Col: 24 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "test</div>test"
--
--#data
--<table><td>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 11 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><td>test</tbody></table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "test"
--
--#data
--<frame>test
--#errors
--Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE.
--Line: 1 Col: 7 Unexpected start tag frame. Ignored.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "test"
--
--#data
--<!DOCTYPE html><frameset>test
--#errors
--Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored.
--Line: 1 Col: 29 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><frameset><!DOCTYPE html>
--#errors
--Line: 1 Col: 40 Unexpected DOCTYPE. Ignored.
--Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><font><p><b>test</font>
--#errors
--Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|     <p>
--|       <font>
--|         <b>
--|           "test"
--
--#data
--<!DOCTYPE html><dt><div><dd>
--#errors
--Line: 1 Col: 28 Missing end tag (div, dt).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <dt>
--|       <div>
--|     <dd>
--
--#data
--<script></x
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--Line: 1 Col: 11 Unexpected end of file. Expected end tag (script).
--#document
--| <html>
--|   <head>
--|     <script>
--|       "</x"
--|   <body>
--
--#data
--<table><plaintext><td>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode.
--Line: 1 Col: 22 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "<td>"
--|     <table>
--
--#data
--<plaintext></plaintext>
--#errors
--Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE.
--Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <plaintext>
--|       "</plaintext>"
--
--#data
--<!DOCTYPE html><table><tr>TEST
--#errors
--Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 30 Unexpected end of file. Expected table content.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "TEST"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4>
--#errors
--Line: 1 Col: 37 Unexpected start tag (body).
--Line: 1 Col: 53 Unexpected start tag (body).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     t1="1"
--|     t2="2"
--|     t3="3"
--|     t4="4"
--
--#data
--</b test
--#errors
--Line: 1 Col: 8 Unexpected end of file in attribute name.
--Line: 1 Col: 8 End tag contains unexpected attributes.
--Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE.
--Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html></b test<b &=&amp>X
--#errors
--Line: 1 Col: 32 Named entity didn't end with ';'.
--Line: 1 Col: 33 End tag contains unexpected attributes.
--Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--
--#data
--<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--Line: 1 Col: 54 Unexpected end of file in the tag name.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       type="text/x-foobar;baz"
--|       "X</SCRipt"
--|   <body>
--
--#data
--&
--#errors
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&"
--
--#data
--&#
--#errors
--Line: 1 Col: 1 Numeric entity expected. Got end of file instead.
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&#"
--
--#data
--&#X
--#errors
--Line: 1 Col: 3 Numeric entity expected but none found.
--Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&#X"
--
--#data
--&#x
--#errors
--Line: 1 Col: 3 Numeric entity expected but none found.
--Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&#x"
--
--#data
--&#45
--#errors
--Line: 1 Col: 4 Numeric entity didn't end with ';'.
--Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "-"
--
--#data
--&x-test
--#errors
--Line: 1 Col: 1 Named entity expected. Got none.
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&x-test"
--
--#data
--<!doctypehtml><p><li>
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <li>
--
--#data
--<!doctypehtml><p><dt>
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <dt>
--
--#data
--<!doctypehtml><p><dd>
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <dd>
--
--#data
--<!doctypehtml><p><form>
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <form>
--
--#data
--<!DOCTYPE html><p></P>X
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     "X"
--
--#data
--&AMP
--#errors
--Line: 1 Col: 4 Named entity didn't end with ';'.
--Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&"
--
--#data
--&AMp;
--#errors
--Line: 1 Col: 1 Named entity expected. Got none.
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "&AMp;"
--
--#data
--<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY>
--#errors
--Line: 1 Col: 110 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly>
--
--#data
--<!DOCTYPE html>X</body>X
--#errors
--Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "XX"
--
--#data
--<!DOCTYPE html><!-- X
--#errors
--Line: 1 Col: 21 Unexpected end of file in comment.
--#document
--| <!DOCTYPE html>
--| <!--  X -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><table><caption>test TEST</caption><td>test
--#errors
--Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 58 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         "test TEST"
--|       <tbody>
--|         <tr>
--|           <td>
--|             "test"
--
--#data
--<!DOCTYPE html><select><option><optgroup>
--#errors
--Line: 1 Col: 41 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|       <optgroup>
--
--#data
--<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option>
--#errors
--Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag.
--Line: 1 Col: 76 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <optgroup>
--|         <option>
--|       <option>
--|     <option>
--
--#data
--<!DOCTYPE html><select><optgroup><option><optgroup>
--#errors
--Line: 1 Col: 51 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <optgroup>
--|         <option>
--|       <optgroup>
--
--#data
--<!DOCTYPE html><datalist><option>foo</datalist>bar
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <datalist>
--|       <option>
--|         "foo"
--|     "bar"
--
--#data
--<!DOCTYPE html><font><input><input></font>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|       <input>
--|       <input>
--
--#data
--<!DOCTYPE html><!-- XXX - XXX -->
--#errors
--#document
--| <!DOCTYPE html>
--| <!--  XXX - XXX  -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><!-- XXX - XXX
--#errors
--Line: 1 Col: 29 Unexpected end of file in comment (-)
--#document
--| <!DOCTYPE html>
--| <!--  XXX - XXX -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><!-- XXX - XXX - XXX -->
--#errors
--#document
--| <!DOCTYPE html>
--| <!--  XXX - XXX - XXX  -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<isindex test=x name=x>
--#errors
--Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected start tag isindex. Don't use it!
--#document
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <hr>
--|       <label>
--|         "This is a searchable index. Enter search keywords: "
--|         <input>
--|           name="isindex"
--|           test="x"
--|       <hr>
--
--#data
--test
--test
--#errors
--Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "test
--test"
--
--#data
--<!DOCTYPE html><body><title>test</body></title>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <title>
--|       "test</body>"
--
--#data
--<!DOCTYPE html><body><title>X</title><meta name=z><link rel=foo><style>
--x { content:"</style" } </style>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <title>
--|       "X"
--|     <meta>
--|       name="z"
--|     <link>
--|       rel="foo"
--|     <style>
--|       "
--x { content:"</style" } "
--
--#data
--<!DOCTYPE html><select><optgroup></optgroup></select>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <optgroup>
--
--#data
-- 
-- 
--#errors
--Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html>  <html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><script>
--</script>  <title>x</title>  </head>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <script>
--|       "
--"
--|     "  "
--|     <title>
--|       "x"
--|     "  "
--|   <body>
--
--#data
--<!DOCTYPE html><html><body><html id=x>
--#errors
--Line: 1 Col: 38 html needs to be the first start tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   id="x"
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html>X</body><html id="x">
--#errors
--Line: 1 Col: 36 Unexpected start tag token (html) in the after body phase.
--Line: 1 Col: 36 html needs to be the first start tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   id="x"
--|   <head>
--|   <body>
--|     "X"
--
--#data
--<!DOCTYPE html><head><html id=x>
--#errors
--Line: 1 Col: 32 html needs to be the first start tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   id="x"
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html>X</html>X
--#errors
--Line: 1 Col: 24 Unexpected non-space characters in the after body phase.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "XX"
--
--#data
--<!DOCTYPE html>X</html> 
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X "
--
--#data
--<!DOCTYPE html>X</html><p>X
--#errors
--Line: 1 Col: 26 Unexpected start tag (p).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--|     <p>
--|       "X"
--
--#data
--<!DOCTYPE html>X<p/x/y/z>
--#errors
--Line: 1 Col: 19 Expected a > after the /.
--Line: 1 Col: 21 Solidus (/) incorrectly placed in tag.
--Line: 1 Col: 23 Solidus (/) incorrectly placed in tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--|     <p>
--|       x=""
--|       y=""
--|       z=""
--
--#data
--<!DOCTYPE html><!--x--
--#errors
--Line: 1 Col: 22 Unexpected end of file in comment (--).
--#document
--| <!DOCTYPE html>
--| <!-- x -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE html><table><tr><td></p></table>
--#errors
--Line: 1 Col: 34 Unexpected end tag (p). Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <p>
--
--#data
--<!DOCTYPE <!DOCTYPE HTML>><!--<!--x-->-->
--#errors
--Line: 1 Col: 20 Expected space or '>'. Got ''
--Line: 1 Col: 25 Erroneous DOCTYPE.
--Line: 1 Col: 35 Unexpected character in comment found.
--#document
--| <!DOCTYPE <!doctype>
--| <html>
--|   <head>
--|   <body>
--|     ">"
--|     <!-- <!--x -->
--|     "-->"
--
--#data
--<!doctype html><div><form></form><div></div></div>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <form>
--|       <div>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests20.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests20.dat
-deleted file mode 100644
-index 6bd8256..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests20.dat
-+++ /dev/null
-@@ -1,455 +0,0 @@
--#data
--<!doctype html><p><button><button>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|       <button>
--
--#data
--<!doctype html><p><button><address>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <address>
--
--#data
--<!doctype html><p><button><blockquote>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <blockquote>
--
--#data
--<!doctype html><p><button><menu>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <menu>
--
--#data
--<!doctype html><p><button><p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <p>
--
--#data
--<!doctype html><p><button><ul>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <ul>
--
--#data
--<!doctype html><p><button><h1>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <h1>
--
--#data
--<!doctype html><p><button><h6>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <h6>
--
--#data
--<!doctype html><p><button><listing>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <listing>
--
--#data
--<!doctype html><p><button><pre>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <pre>
--
--#data
--<!doctype html><p><button><form>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <form>
--
--#data
--<!doctype html><p><button><li>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <li>
--
--#data
--<!doctype html><p><button><dd>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <dd>
--
--#data
--<!doctype html><p><button><dt>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <dt>
--
--#data
--<!doctype html><p><button><plaintext>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <plaintext>
--
--#data
--<!doctype html><p><button><table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <table>
--
--#data
--<!doctype html><p><button><hr>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <hr>
--
--#data
--<!doctype html><p><button><xmp>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <xmp>
--
--#data
--<!doctype html><p><button></p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <button>
--|         <p>
--
--#data
--<!doctype html><address><button></address>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <address>
--|       <button>
--|     "a"
--
--#data
--<!doctype html><address><button></address>a
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <address>
--|       <button>
--|     "a"
--
--#data
--<p><table></p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <p>
--|       <table>
--
--#data
--<!doctype html><svg>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<!doctype html><p><figcaption>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <figcaption>
--
--#data
--<!doctype html><p><summary>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <summary>
--
--#data
--<!doctype html><form><table><form>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <table>
--
--#data
--<!doctype html><table><form><form>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <form>
--
--#data
--<!doctype html><table><form></table><form>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <form>
--
--#data
--<!doctype html><svg><foreignObject><p>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg foreignObject>
--|         <p>
--
--#data
--<!doctype html><svg><title>abc
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg title>
--|         "abc"
--
--#data
--<option><span><option>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <option>
--|       <span>
--|         <option>
--
--#data
--<option><option>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <option>
--|     <option>
--
--#data
--<math><annotation-xml><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|     <div>
--
--#data
--<math><annotation-xml encoding="application/svg+xml"><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding="application/svg+xml"
--|     <div>
--
--#data
--<math><annotation-xml encoding="application/xhtml+xml"><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding="application/xhtml+xml"
--|         <div>
--
--#data
--<math><annotation-xml encoding="aPPlication/xhtmL+xMl"><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding="aPPlication/xhtmL+xMl"
--|         <div>
--
--#data
--<math><annotation-xml encoding="text/html"><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding="text/html"
--|         <div>
--
--#data
--<math><annotation-xml encoding="Text/htmL"><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding="Text/htmL"
--|         <div>
--
--#data
--<math><annotation-xml encoding=" text/html "><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         encoding=" text/html "
--|     <div>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests21.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests21.dat
-deleted file mode 100644
-index 1260ec0..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests21.dat
-+++ /dev/null
-@@ -1,221 +0,0 @@
--#data
--<svg><![CDATA[foo]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "foo"
--
--#data
--<math><![CDATA[foo]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       "foo"
--
--#data
--<div><![CDATA[foo]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <!-- [CDATA[foo]] -->
--
--#data
--<svg><![CDATA[foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "foo"
--
--#data
--<svg><![CDATA[foo
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "foo"
--
--#data
--<svg><![CDATA[
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<svg><![CDATA[]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--
--#data
--<svg><![CDATA[]] >]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "]] >"
--
--#data
--<svg><![CDATA[]] >]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "]] >"
--
--#data
--<svg><![CDATA[]]
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "]]"
--
--#data
--<svg><![CDATA[]
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "]"
--
--#data
--<svg><![CDATA[]>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "]>a"
--
--#data
--<svg><foreignObject><div><![CDATA[foo]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg foreignObject>
--|         <div>
--|           <!-- [CDATA[foo]] -->
--
--#data
--<svg><![CDATA[<svg>]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>"
--
--#data
--<svg><![CDATA[</svg>a]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "</svg>a"
--
--#data
--<svg><![CDATA[<svg>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>a"
--
--#data
--<svg><![CDATA[</svg>a
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "</svg>a"
--
--#data
--<svg><![CDATA[<svg>]]><path>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>"
--|       <svg path>
--
--#data
--<svg><![CDATA[<svg>]]></path>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>"
--
--#data
--<svg><![CDATA[<svg>]]><!--path-->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>"
--|       <!-- path -->
--
--#data
--<svg><![CDATA[<svg>]]>path
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<svg>path"
--
--#data
--<svg><![CDATA[<!--svg-->]]>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       "<!--svg-->"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests22.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests22.dat
-deleted file mode 100644
-index aab27b2..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests22.dat
-+++ /dev/null
-@@ -1,157 +0,0 @@
--#data
--<a><b><big><em><strong><div>X</a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|         <big>
--|           <em>
--|             <strong>
--|     <big>
--|       <em>
--|         <strong>
--|           <div>
--|             <a>
--|               "X"
--
--#data
--<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8>A</a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|     <b>
--|       <div>
--|         id="1"
--|         <a>
--|         <div>
--|           id="2"
--|           <a>
--|           <div>
--|             id="3"
--|             <a>
--|             <div>
--|               id="4"
--|               <a>
--|               <div>
--|                 id="5"
--|                 <a>
--|                 <div>
--|                   id="6"
--|                   <a>
--|                   <div>
--|                     id="7"
--|                     <a>
--|                     <div>
--|                       id="8"
--|                       <a>
--|                         "A"
--
--#data
--<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9>A</a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|     <b>
--|       <div>
--|         id="1"
--|         <a>
--|         <div>
--|           id="2"
--|           <a>
--|           <div>
--|             id="3"
--|             <a>
--|             <div>
--|               id="4"
--|               <a>
--|               <div>
--|                 id="5"
--|                 <a>
--|                 <div>
--|                   id="6"
--|                   <a>
--|                   <div>
--|                     id="7"
--|                     <a>
--|                     <div>
--|                       id="8"
--|                       <a>
--|                         <div>
--|                           id="9"
--|                           "A"
--
--#data
--<a><b><div id=1><div id=2><div id=3><div id=4><div id=5><div id=6><div id=7><div id=8><div id=9><div id=10>A</a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       <b>
--|     <b>
--|       <div>
--|         id="1"
--|         <a>
--|         <div>
--|           id="2"
--|           <a>
--|           <div>
--|             id="3"
--|             <a>
--|             <div>
--|               id="4"
--|               <a>
--|               <div>
--|                 id="5"
--|                 <a>
--|                 <div>
--|                   id="6"
--|                   <a>
--|                   <div>
--|                     id="7"
--|                     <a>
--|                     <div>
--|                       id="8"
--|                       <a>
--|                         <div>
--|                           id="9"
--|                           <div>
--|                             id="10"
--|                             "A"
--
--#data
--<cite><b><cite><i><cite><i><cite><i><div>X</b>TEST
--#errors
--Line: 1 Col: 6 Unexpected start tag (cite). Expected DOCTYPE.
--Line: 1 Col: 46 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 50 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <cite>
--|       <b>
--|         <cite>
--|           <i>
--|             <cite>
--|               <i>
--|                 <cite>
--|                   <i>
--|       <i>
--|         <i>
--|           <div>
--|             <b>
--|               "X"
--|             "TEST"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests23.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests23.dat
-deleted file mode 100644
-index 34d2a73..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests23.dat
-+++ /dev/null
-@@ -1,155 +0,0 @@
--#data
--<p><font size=4><font color=red><font size=4><font size=4><font size=4><font size=4><font size=4><font color=red><p>X
--#errors
--3: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--116: Unclosed elements.
--117: End of file seen and there were open elements.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <font>
--|         size="4"
--|         <font>
--|           color="red"
--|           <font>
--|             size="4"
--|             <font>
--|               size="4"
--|               <font>
--|                 size="4"
--|                 <font>
--|                   size="4"
--|                   <font>
--|                     size="4"
--|                     <font>
--|                       color="red"
--|     <p>
--|       <font>
--|         color="red"
--|         <font>
--|           size="4"
--|           <font>
--|             size="4"
--|             <font>
--|               size="4"
--|               <font>
--|                 color="red"
--|                 "X"
--
--#data
--<p><font size=4><font size=4><font size=4><font size=4><p>X
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <font>
--|         size="4"
--|         <font>
--|           size="4"
--|           <font>
--|             size="4"
--|             <font>
--|               size="4"
--|     <p>
--|       <font>
--|         size="4"
--|         <font>
--|           size="4"
--|           <font>
--|             size="4"
--|             "X"
--
--#data
--<p><font size=4><font size=4><font size=4><font size="5"><font size=4><p>X
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <font>
--|         size="4"
--|         <font>
--|           size="4"
--|           <font>
--|             size="4"
--|             <font>
--|               size="5"
--|               <font>
--|                 size="4"
--|     <p>
--|       <font>
--|         size="4"
--|         <font>
--|           size="4"
--|           <font>
--|             size="5"
--|             <font>
--|               size="4"
--|               "X"
--
--#data
--<p><font size=4 id=a><font size=4 id=b><font size=4><font size=4><p>X
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <font>
--|         id="a"
--|         size="4"
--|         <font>
--|           id="b"
--|           size="4"
--|           <font>
--|             size="4"
--|             <font>
--|               size="4"
--|     <p>
--|       <font>
--|         id="a"
--|         size="4"
--|         <font>
--|           id="b"
--|           size="4"
--|           <font>
--|             size="4"
--|             <font>
--|               size="4"
--|               "X"
--
--#data
--<p><b id=a><b id=a><b id=a><b><object><b id=a><b id=a>X</object><p>Y
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <b>
--|         id="a"
--|         <b>
--|           id="a"
--|           <b>
--|             id="a"
--|             <b>
--|               <object>
--|                 <b>
--|                   id="a"
--|                   <b>
--|                     id="a"
--|                     "X"
--|     <p>
--|       <b>
--|         id="a"
--|         <b>
--|           id="a"
--|           <b>
--|             id="a"
--|             <b>
--|               "Y"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests24.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests24.dat
-deleted file mode 100644
-index f6dc7eb..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests24.dat
-+++ /dev/null
-@@ -1,79 +0,0 @@
--#data
--<!DOCTYPE html>&NotEqualTilde;
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "≂̸"
--
--#data
--<!DOCTYPE html>&NotEqualTilde;A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "≂̸A"
--
--#data
--<!DOCTYPE html>&ThickSpace;
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "  "
--
--#data
--<!DOCTYPE html>&ThickSpace;A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "  A"
--
--#data
--<!DOCTYPE html>&NotSubset;
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "⊂⃒"
--
--#data
--<!DOCTYPE html>&NotSubset;A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "⊂⃒A"
--
--#data
--<!DOCTYPE html>&Gopf;
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "𝔾"
--
--#data
--<!DOCTYPE html>&Gopf;A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "𝔾A"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests25.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests25.dat
-deleted file mode 100644
-index 00de729..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests25.dat
-+++ /dev/null
-@@ -1,219 +0,0 @@
--#data
--<!DOCTYPE html><body><foo>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       "A"
--
--#data
--<!DOCTYPE html><body><area>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <area>
--|     "A"
--
--#data
--<!DOCTYPE html><body><base>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <base>
--|     "A"
--
--#data
--<!DOCTYPE html><body><basefont>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <basefont>
--|     "A"
--
--#data
--<!DOCTYPE html><body><bgsound>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <bgsound>
--|     "A"
--
--#data
--<!DOCTYPE html><body><br>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <br>
--|     "A"
--
--#data
--<!DOCTYPE html><body><col>A
--#errors
--26: Stray start tag “col”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "A"
--
--#data
--<!DOCTYPE html><body><command>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <command>
--|     "A"
--
--#data
--<!DOCTYPE html><body><embed>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <embed>
--|     "A"
--
--#data
--<!DOCTYPE html><body><frame>A
--#errors
--26: Stray start tag “frame”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "A"
--
--#data
--<!DOCTYPE html><body><hr>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <hr>
--|     "A"
--
--#data
--<!DOCTYPE html><body><img>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <img>
--|     "A"
--
--#data
--<!DOCTYPE html><body><input>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <input>
--|     "A"
--
--#data
--<!DOCTYPE html><body><keygen>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <keygen>
--|     "A"
--
--#data
--<!DOCTYPE html><body><link>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <link>
--|     "A"
--
--#data
--<!DOCTYPE html><body><meta>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <meta>
--|     "A"
--
--#data
--<!DOCTYPE html><body><param>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <param>
--|     "A"
--
--#data
--<!DOCTYPE html><body><source>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <source>
--|     "A"
--
--#data
--<!DOCTYPE html><body><track>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <track>
--|     "A"
--
--#data
--<!DOCTYPE html><body><wbr>A
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <wbr>
--|     "A"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests26.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests26.dat
-deleted file mode 100644
-index fae11ff..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests26.dat
-+++ /dev/null
-@@ -1,313 +0,0 @@
--#data
--<!DOCTYPE html><body><a href='#1'><nobr>1<nobr></a><br><a href='#2'><nobr>2<nobr></a><br><a href='#3'><nobr>3<nobr></a>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       href="#1"
--|       <nobr>
--|         "1"
--|       <nobr>
--|     <nobr>
--|       <br>
--|       <a>
--|         href="#2"
--|     <a>
--|       href="#2"
--|       <nobr>
--|         "2"
--|       <nobr>
--|     <nobr>
--|       <br>
--|       <a>
--|         href="#3"
--|     <a>
--|       href="#3"
--|       <nobr>
--|         "3"
--|       <nobr>
--
--#data
--<!DOCTYPE html><body><b><nobr>1<nobr></b><i><nobr>2<nobr></i>3
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|       <nobr>
--|     <nobr>
--|       <i>
--|     <i>
--|       <nobr>
--|         "2"
--|       <nobr>
--|     <nobr>
--|       "3"
--
--#data
--<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|         <nobr>
--|           <i>
--|         <i>
--|           <nobr>
--|             "2"
--|           <nobr>
--|         <nobr>
--|           "3"
--|         <table>
--
--#data
--<!DOCTYPE html><body><b><nobr>1<table><tr><td><nobr></b><i><nobr>2<nobr></i>3
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|         <table>
--|           <tbody>
--|             <tr>
--|               <td>
--|                 <nobr>
--|                   <i>
--|                 <i>
--|                   <nobr>
--|                     "2"
--|                   <nobr>
--|                 <nobr>
--|                   "3"
--
--#data
--<!DOCTYPE html><body><b><nobr>1<div><nobr></b><i><nobr>2<nobr></i>3
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|     <div>
--|       <b>
--|         <nobr>
--|         <nobr>
--|       <nobr>
--|         <i>
--|       <i>
--|         <nobr>
--|           "2"
--|         <nobr>
--|       <nobr>
--|         "3"
--
--#data
--<!DOCTYPE html><body><b><nobr>1<nobr></b><div><i><nobr>2<nobr></i>3
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|       <nobr>
--|     <div>
--|       <nobr>
--|         <i>
--|       <i>
--|         <nobr>
--|           "2"
--|         <nobr>
--|       <nobr>
--|         "3"
--
--#data
--<!DOCTYPE html><body><b><nobr>1<nobr><ins></b><i><nobr>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|       <nobr>
--|         <ins>
--|     <nobr>
--|       <i>
--|     <i>
--|       <nobr>
--
--#data
--<!DOCTYPE html><body><b><nobr>1<ins><nobr></b><i>2
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       <nobr>
--|         "1"
--|         <ins>
--|       <nobr>
--|     <nobr>
--|       <i>
--|         "2"
--
--#data
--<!DOCTYPE html><body><b>1<nobr></b><i><nobr>2</i>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "1"
--|       <nobr>
--|     <nobr>
--|       <i>
--|     <i>
--|       <nobr>
--|         "2"
--
--#data
--<p><code x</code></p>
--
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <code>
--|         code=""
--|         x<=""
--|     <code>
--|       code=""
--|       x<=""
--|       "
--"
--
--#data
--<!DOCTYPE html><svg><foreignObject><p><i></p>a
--#errors
--45: End tag “p” seen, but there were open elements.
--41: Unclosed element “i”.
--46: End of file seen and there were open elements.
--35: Unclosed element “foreignObject”.
--20: Unclosed element “svg”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg foreignObject>
--|         <p>
--|           <i>
--|         <i>
--|           "a"
--
--#data
--<!DOCTYPE html><table><tr><td><svg><foreignObject><p><i></p>a
--#errors
--56: End tag “p” seen, but there were open elements.
--52: Unclosed element “i”.
--57: End of file seen and there were open elements.
--46: Unclosed element “foreignObject”.
--31: Unclosed element “svg”.
--22: Unclosed element “table”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg foreignObject>
--|                 <p>
--|                   <i>
--|                 <i>
--|                   "a"
--
--#data
--<!DOCTYPE html><math><mtext><p><i></p>a
--#errors
--38: End tag “p” seen, but there were open elements.
--34: Unclosed element “i”.
--39: End of file in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mtext>
--|         <p>
--|           <i>
--|         <i>
--|           "a"
--
--#data
--<!DOCTYPE html><table><tr><td><math><mtext><p><i></p>a
--#errors
--53: End tag “p” seen, but there were open elements.
--49: Unclosed element “i”.
--54: End of file in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <math math>
--|               <math mtext>
--|                 <p>
--|                   <i>
--|                 <i>
--|                   "a"
--
--#data
--<!DOCTYPE html><body><div><!/div>a
--#errors
--29: Bogus comment.
--34: End of file seen and there were open elements.
--26: Unclosed element “div”.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <!-- /div -->
--|       "a"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests3.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests3.dat
-deleted file mode 100644
-index 38dc501..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests3.dat
-+++ /dev/null
-@@ -1,305 +0,0 @@
--#data
--<head></head><style></style>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected start tag (style) that can be in head. Moved.
--#document
--| <html>
--|   <head>
--|     <style>
--|   <body>
--
--#data
--<head></head><script></script>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved.
--#document
--| <html>
--|   <head>
--|     <script>
--|   <body>
--
--#data
--<head></head><!-- --><style></style><!-- --><script></script>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved.
--#document
--| <html>
--|   <head>
--|     <style>
--|     <script>
--|   <!--   -->
--|   <!--   -->
--|   <body>
--
--#data
--<head></head><!-- -->x<style></style><!-- --><script></script>
--#errors
--Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <!--   -->
--|   <body>
--|     "x"
--|     <style>
--|     <!--   -->
--|     <script>
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>
--</pre></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>
--foo</pre></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "foo"
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>
--
--foo</pre></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "
--foo"
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>
--foo
--</pre></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "foo
--"
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>x</pre><span>
--</span></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "x"
--|     <span>
--|       "
--"
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>x
--y</pre></body></html>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "x
--y"
--
--#data
--<!DOCTYPE html><html><head></head><body><pre>x<div>
--y</pre></body></html>
--#errors
--Line: 2 Col: 7 End tag (pre) seen too early. Expected other end tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "x"
--|       <div>
--|         "
--y"
--
--#data
--<!DOCTYPE html><pre>&#x0a;&#x0a;A</pre>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <pre>
--|       "
--A"
--
--#data
--<!DOCTYPE html><HTML><META><HEAD></HEAD></HTML>
--#errors
--Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <meta>
--|   <body>
--
--#data
--<!DOCTYPE html><HTML><HEAD><head></HEAD></HTML>
--#errors
--Line: 1 Col: 33 Unexpected start tag head in existing head. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<textarea>foo<span>bar</span><i>baz
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--Line: 1 Col: 35 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "foo<span>bar</span><i>baz"
--
--#data
--<title>foo<span>bar</em><i>baz
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--Line: 1 Col: 30 Unexpected end of file. Expected end tag (title).
--#document
--| <html>
--|   <head>
--|     <title>
--|       "foo<span>bar</em><i>baz"
--|   <body>
--
--#data
--<!DOCTYPE html><textarea>
--</textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--
--#data
--<!DOCTYPE html><textarea>
--foo</textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "foo"
--
--#data
--<!DOCTYPE html><textarea>
--
--foo</textarea>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       "
--foo"
--
--#data
--<!DOCTYPE html><html><head></head><body><ul><li><div><p><li></ul></body></html>
--#errors
--Line: 1 Col: 60 Missing end tag (div, li).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <ul>
--|       <li>
--|         <div>
--|           <p>
--|       <li>
--
--#data
--<!doctype html><nobr><nobr><nobr>
--#errors
--Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
--Line: 1 Col: 33 Unexpected start tag (nobr) implies end tag (nobr).
--Line: 1 Col: 33 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <nobr>
--|     <nobr>
--|     <nobr>
--
--#data
--<!doctype html><nobr><nobr></nobr><nobr>
--#errors
--Line: 1 Col: 27 Unexpected start tag (nobr) implies end tag (nobr).
--Line: 1 Col: 40 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <nobr>
--|     <nobr>
--|     <nobr>
--
--#data
--<!doctype html><html><body><p><table></table></body></html>
--#errors
--Not known
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <table>
--
--#data
--<p><table></table>
--#errors
--Not known
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <table>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests4.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests4.dat
-deleted file mode 100644
-index 3c50632..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests4.dat
-+++ /dev/null
-@@ -1,59 +0,0 @@
--#data
--direct div content
--#errors
--#document-fragment
--div
--#document
--| "direct div content"
--
--#data
--direct textarea content
--#errors
--#document-fragment
--textarea
--#document
--| "direct textarea content"
--
--#data
--textarea content with <em>pseudo</em> <foo>markup
--#errors
--#document-fragment
--textarea
--#document
--| "textarea content with <em>pseudo</em> <foo>markup"
--
--#data
--this is &#x0043;DATA inside a <style> element
--#errors
--#document-fragment
--style
--#document
--| "this is &#x0043;DATA inside a <style> element"
--
--#data
--</plaintext>
--#errors
--#document-fragment
--plaintext
--#document
--| "</plaintext>"
--
--#data
--setting html's innerHTML
--#errors
--Line: 1 Col: 24 Unexpected EOF in inner html mode.
--#document-fragment
--html
--#document
--| <head>
--| <body>
--|   "setting html's innerHTML"
--
--#data
--<title>setting head's innerHTML</title>
--#errors
--#document-fragment
--head
--#document
--| <title>
--|   "setting head's innerHTML"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests5.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests5.dat
-deleted file mode 100644
-index d7b5128..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests5.dat
-+++ /dev/null
-@@ -1,191 +0,0 @@
--#data
--<style> <!-- </style>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end of file. Expected end tag (style).
--#document
--| <html>
--|   <head>
--|     <style>
--|       " <!-- "
--|   <body>
--|     "x"
--
--#data
--<style> <!-- </style> --> </style>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       " <!-- "
--|     " "
--|   <body>
--|     "--> x"
--
--#data
--<style> <!--> </style>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       " <!--> "
--|   <body>
--|     "x"
--
--#data
--<style> <!---> </style>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       " <!---> "
--|   <body>
--|     "x"
--
--#data
--<iframe> <!---> </iframe>x
--#errors
--Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       " <!---> "
--|     "x"
--
--#data
--<iframe> <!--- </iframe>->x</iframe> --> </iframe>x
--#errors
--Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <iframe>
--|       " <!--- "
--|     "->x --> x"
--
--#data
--<script> <!-- </script> --> </script>x
--#errors
--Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <script>
--|       " <!-- "
--|     " "
--|   <body>
--|     "--> x"
--
--#data
--<title> <!-- </title> --> </title>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|       " <!-- "
--|     " "
--|   <body>
--|     "--> x"
--
--#data
--<textarea> <!--- </textarea>->x</textarea> --> </textarea>x
--#errors
--Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <textarea>
--|       " <!--- "
--|     "->x --> x"
--
--#data
--<style> <!</-- </style>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <style>
--|       " <!</-- "
--|   <body>
--|     "x"
--
--#data
--<p><xmp></xmp>
--#errors
--XXX: Unknown
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|     <xmp>
--
--#data
--<xmp> <!-- > --> </xmp>
--#errors
--Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <xmp>
--|       " <!-- > --> "
--
--#data
--<title>&amp;</title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|       "&"
--|   <body>
--
--#data
--<title><!--&amp;--></title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <title>
--|       "<!--&-->"
--|   <body>
--
--#data
--<title><!--</title>
--#errors
--Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE.
--Line: 1 Col: 19 Unexpected end of file. Expected end tag (title).
--#document
--| <html>
--|   <head>
--|     <title>
--|       "<!--"
--|   <body>
--
--#data
--<noscript><!--</noscript>--></noscript>
--#errors
--Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|     <noscript>
--|       "<!--"
--|   <body>
--|     "-->"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests6.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests6.dat
-deleted file mode 100644
-index f28ece4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests6.dat
-+++ /dev/null
-@@ -1,663 +0,0 @@
--#data
--<!doctype html></head> <head>
--#errors
--Line: 1 Col: 29 Unexpected start tag head. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   " "
--|   <body>
--
--#data
--<!doctype html><form><div></form><div>
--#errors
--33: End tag "form" seen but there were unclosed elements.
--38: End of file seen and there were open elements.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <form>
--|       <div>
--|         <div>
--
--#data
--<!doctype html><title>&amp;</title>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "&"
--|   <body>
--
--#data
--<!doctype html><title><!--&amp;--></title>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "<!--&-->"
--|   <body>
--
--#data
--<!doctype>
--#errors
--Line: 1 Col: 9 No space after literal string 'DOCTYPE'.
--Line: 1 Col: 10 Unexpected > character. Expected DOCTYPE name.
--Line: 1 Col: 10 Erroneous DOCTYPE.
--#document
--| <!DOCTYPE >
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!---x
--#errors
--Line: 1 Col: 6 Unexpected end of file in comment.
--Line: 1 Col: 6 Unexpected End of file. Expected DOCTYPE.
--#document
--| <!-- -x -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<body>
--<div>
--#errors
--Line: 1 Col: 6 Unexpected start tag (body).
--Line: 2 Col: 5 Expected closing tag. Unexpected end of file.
--#document-fragment
--div
--#document
--| "
--"
--| <div>
--
--#data
--<frameset></frameset>
--foo
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 2 Col: 3 Unexpected non-space characters in the after frameset phase. Ignored.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   "
--"
--
--#data
--<frameset></frameset>
--<noframes>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 2 Col: 10 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   "
--"
--|   <noframes>
--
--#data
--<frameset></frameset>
--<div>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 2 Col: 5 Unexpected start tag (div) in the after frameset phase. Ignored.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   "
--"
--
--#data
--<frameset></frameset>
--</html>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   "
--"
--
--#data
--<frameset></frameset>
--</div>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 2 Col: 6 Unexpected end tag (div) in the after frameset phase. Ignored.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   "
--"
--
--#data
--<form><form>
--#errors
--Line: 1 Col: 6 Unexpected start tag (form). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected start tag (form).
--Line: 1 Col: 12 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <form>
--
--#data
--<button><button>
--#errors
--Line: 1 Col: 8 Unexpected start tag (button). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected start tag (button) implies end tag (button).
--Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <button>
--|     <button>
--
--#data
--<table><tr><td></th>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end tag (th). Ignored.
--Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><caption><td>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end tag (td). Ignored.
--Line: 1 Col: 20 Unexpected table cell start tag (td) in the table body phase.
--Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><caption><div>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 21 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <div>
--
--#data
--</caption><div>
--#errors
--Line: 1 Col: 10 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
--#document-fragment
--caption
--#document
--| <div>
--
--#data
--<table><caption><div></caption>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 31 Unexpected end tag (caption). Missing end tag (div).
--Line: 1 Col: 31 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <div>
--
--#data
--<table><caption></table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 24 Unexpected end table tag in caption. Generates implied end caption.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--
--#data
--</table><div>
--#errors
--Line: 1 Col: 8 Unexpected end table tag in caption. Generates implied end caption.
--Line: 1 Col: 8 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 13 Expected closing tag. Unexpected end of file.
--#document-fragment
--caption
--#document
--| <div>
--
--#data
--<table><caption></body></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 23 Unexpected end tag (body). Ignored.
--Line: 1 Col: 29 Unexpected end tag (col). Ignored.
--Line: 1 Col: 40 Unexpected end tag (colgroup). Ignored.
--Line: 1 Col: 47 Unexpected end tag (html). Ignored.
--Line: 1 Col: 55 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 60 Unexpected end tag (td). Ignored.
--Line: 1 Col: 68 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 73 Unexpected end tag (th). Ignored.
--Line: 1 Col: 81 Unexpected end tag (thead). Ignored.
--Line: 1 Col: 86 Unexpected end tag (tr). Ignored.
--Line: 1 Col: 86 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--
--#data
--<table><caption><div></div>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 27 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <div>
--
--#data
--<table><tr><td></body></caption></col></colgroup></html>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end tag (body). Ignored.
--Line: 1 Col: 32 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 38 Unexpected end tag (col). Ignored.
--Line: 1 Col: 49 Unexpected end tag (colgroup). Ignored.
--Line: 1 Col: 56 Unexpected end tag (html). Ignored.
--Line: 1 Col: 56 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--</table></tbody></tfoot></thead></tr><div>
--#errors
--Line: 1 Col: 8 Unexpected end tag (table). Ignored.
--Line: 1 Col: 16 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 24 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 32 Unexpected end tag (thead). Ignored.
--Line: 1 Col: 37 Unexpected end tag (tr). Ignored.
--Line: 1 Col: 42 Expected closing tag. Unexpected end of file.
--#document-fragment
--td
--#document
--| <div>
--
--#data
--<table><colgroup>foo
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 20 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "foo"
--|     <table>
--|       <colgroup>
--
--#data
--foo<col>
--#errors
--Line: 1 Col: 3 Unexpected end tag (colgroup). Ignored.
--#document-fragment
--colgroup
--#document
--| <col>
--
--#data
--<table><colgroup></col>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 23 This element (col) has no end tag.
--Line: 1 Col: 23 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <colgroup>
--
--#data
--<frameset><div>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 1 Col: 15 Unexpected start tag token (div) in the frameset phase. Ignored.
--Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--</frameset><frame>
--#errors
--Line: 1 Col: 11 Unexpected end tag token (frameset) in the frameset phase (innerHTML).
--#document-fragment
--frameset
--#document
--| <frame>
--
--#data
--<frameset></div>
--#errors
--Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected end tag token (div) in the frameset phase. Ignored.
--Line: 1 Col: 16 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--</body><div>
--#errors
--Line: 1 Col: 7 Unexpected end tag (body). Ignored.
--Line: 1 Col: 12 Expected closing tag. Unexpected end of file.
--#document-fragment
--body
--#document
--| <div>
--
--#data
--<table><tr><div>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode.
--Line: 1 Col: 16 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--</tr><td>
--#errors
--Line: 1 Col: 5 Unexpected end tag (tr). Ignored.
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--</tbody></tfoot></thead><td>
--#errors
--Line: 1 Col: 8 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 16 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 24 Unexpected end tag (thead). Ignored.
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<table><tr><div><td>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 16 Unexpected start tag (div) in table context caused voodoo mode.
--Line: 1 Col: 20 Unexpected implied end tag (div) in the table row phase.
--Line: 1 Col: 20 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<caption><col><colgroup><tbody><tfoot><thead><tr>
--#errors
--Line: 1 Col: 9 Unexpected start tag (caption).
--Line: 1 Col: 14 Unexpected start tag (col).
--Line: 1 Col: 24 Unexpected start tag (colgroup).
--Line: 1 Col: 31 Unexpected start tag (tbody).
--Line: 1 Col: 38 Unexpected start tag (tfoot).
--Line: 1 Col: 45 Unexpected start tag (thead).
--Line: 1 Col: 49 Unexpected end of file. Expected table content.
--#document-fragment
--tbody
--#document
--| <tr>
--
--#data
--<table><tbody></thead>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 22 Unexpected end tag (thead) in the table body phase. Ignored.
--Line: 1 Col: 22 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--
--#data
--</table><tr>
--#errors
--Line: 1 Col: 8 Unexpected end tag (table). Ignored.
--Line: 1 Col: 12 Unexpected end of file. Expected table content.
--#document-fragment
--tbody
--#document
--| <tr>
--
--#data
--<table><tbody></body></caption></col></colgroup></html></td></th></tr>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 21 Unexpected end tag (body) in the table body phase. Ignored.
--Line: 1 Col: 31 Unexpected end tag (caption) in the table body phase. Ignored.
--Line: 1 Col: 37 Unexpected end tag (col) in the table body phase. Ignored.
--Line: 1 Col: 48 Unexpected end tag (colgroup) in the table body phase. Ignored.
--Line: 1 Col: 55 Unexpected end tag (html) in the table body phase. Ignored.
--Line: 1 Col: 60 Unexpected end tag (td) in the table body phase. Ignored.
--Line: 1 Col: 65 Unexpected end tag (th) in the table body phase. Ignored.
--Line: 1 Col: 70 Unexpected end tag (tr) in the table body phase. Ignored.
--Line: 1 Col: 70 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--
--#data
--<table><tbody></div>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 20 Unexpected end tag (div) in table context caused voodoo mode.
--Line: 1 Col: 20 End tag (div) seen too early. Expected other end tag.
--Line: 1 Col: 20 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--
--#data
--<table><table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected start tag (table) implies end tag (table).
--Line: 1 Col: 14 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|     <table>
--
--#data
--<table></body></caption></col></colgroup></html></tbody></td></tfoot></th></thead></tr>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 14 Unexpected end tag (body). Ignored.
--Line: 1 Col: 24 Unexpected end tag (caption). Ignored.
--Line: 1 Col: 30 Unexpected end tag (col). Ignored.
--Line: 1 Col: 41 Unexpected end tag (colgroup). Ignored.
--Line: 1 Col: 48 Unexpected end tag (html). Ignored.
--Line: 1 Col: 56 Unexpected end tag (tbody). Ignored.
--Line: 1 Col: 61 Unexpected end tag (td). Ignored.
--Line: 1 Col: 69 Unexpected end tag (tfoot). Ignored.
--Line: 1 Col: 74 Unexpected end tag (th). Ignored.
--Line: 1 Col: 82 Unexpected end tag (thead). Ignored.
--Line: 1 Col: 87 Unexpected end tag (tr). Ignored.
--Line: 1 Col: 87 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--
--#data
--</table><tr>
--#errors
--Line: 1 Col: 8 Unexpected end tag (table). Ignored.
--Line: 1 Col: 12 Unexpected end of file. Expected table content.
--#document-fragment
--table
--#document
--| <tbody>
--|   <tr>
--
--#data
--<body></body></html>
--#errors
--Line: 1 Col: 20 Unexpected html end tag in inner html mode.
--Line: 1 Col: 20 Unexpected EOF in inner html mode.
--#document-fragment
--html
--#document
--| <head>
--| <body>
--
--#data
--<html><frameset></frameset></html> 
--#errors
--Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <frameset>
--|   " "
--
--#data
--<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"><html></html>
--#errors
--Line: 1 Col: 50 Erroneous DOCTYPE.
--Line: 1 Col: 63 Unexpected end tag (html) after the (implied) root element.
--#document
--| <!DOCTYPE html "-//W3C//DTD HTML 4.01//EN" "">
--| <html>
--|   <head>
--|   <body>
--
--#data
--<param><frameset></frameset>
--#errors
--Line: 1 Col: 7 Unexpected start tag (param). Expected DOCTYPE.
--Line: 1 Col: 17 Unexpected start tag (frameset).
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<source><frameset></frameset>
--#errors
--Line: 1 Col: 7 Unexpected start tag (source). Expected DOCTYPE.
--Line: 1 Col: 17 Unexpected start tag (frameset).
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<track><frameset></frameset>
--#errors
--Line: 1 Col: 7 Unexpected start tag (track). Expected DOCTYPE.
--Line: 1 Col: 17 Unexpected start tag (frameset).
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--</html><frameset></frameset>
--#errors
--7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--17: Stray “frameset” start tag.
--17: “frameset” start tag seen.
--#document
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--</body><frameset></frameset>
--#errors
--7: End tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.
--17: Stray “frameset” start tag.
--17: “frameset” start tag seen.
--#document
--| <html>
--|   <head>
--|   <frameset>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests7.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests7.dat
-deleted file mode 100644
-index f5193c6..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests7.dat
-+++ /dev/null
-@@ -1,390 +0,0 @@
--#data
--<!doctype html><body><title>X</title>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <title>
--|       "X"
--
--#data
--<!doctype html><table><title>X</title></table>
--#errors
--Line: 1 Col: 29 Unexpected start tag (title) in table context caused voodoo mode.
--Line: 1 Col: 38 Unexpected end tag (title) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <title>
--|       "X"
--|     <table>
--
--#data
--<!doctype html><head></head><title>X</title>
--#errors
--Line: 1 Col: 35 Unexpected start tag (title) that can be in head. Moved.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "X"
--|   <body>
--
--#data
--<!doctype html></head><title>X</title>
--#errors
--Line: 1 Col: 29 Unexpected start tag (title) that can be in head. Moved.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|     <title>
--|       "X"
--|   <body>
--
--#data
--<!doctype html><table><meta></table>
--#errors
--Line: 1 Col: 28 Unexpected start tag (meta) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <meta>
--|     <table>
--
--#data
--<!doctype html><table>X<tr><td><table> <meta></table></table>
--#errors
--Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 45 Unexpected start tag (meta) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <meta>
--|             <table>
--|               " "
--
--#data
--<!doctype html><html> <head>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!doctype html> <head>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!doctype html><table><style> <tr>x </style> </table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <style>
--|         " <tr>x "
--|       " "
--
--#data
--<!doctype html><table><TBODY><script> <tr>x </script> </table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <script>
--|           " <tr>x "
--|         " "
--
--#data
--<!doctype html><p><applet><p>X</p></applet>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <applet>
--|         <p>
--|           "X"
--
--#data
--<!doctype html><listing>
--X</listing>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <listing>
--|       "X"
--
--#data
--<!doctype html><select><input>X
--#errors
--Line: 1 Col: 30 Unexpected input start tag in the select phase.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <input>
--|     "X"
--
--#data
--<!doctype html><select><select>X
--#errors
--Line: 1 Col: 31 Unexpected select start tag in the select phase treated as select end tag.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     "X"
--
--#data
--<!doctype html><table><input type=hidDEN></table>
--#errors
--Line: 1 Col: 41 Unexpected input with type hidden in table context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <input>
--|         type="hidDEN"
--
--#data
--<!doctype html><table>X<input type=hidDEN></table>
--#errors
--Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     "X"
--|     <table>
--|       <input>
--|         type="hidDEN"
--
--#data
--<!doctype html><table>  <input type=hidDEN></table>
--#errors
--Line: 1 Col: 43 Unexpected input with type hidden in table context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       "  "
--|       <input>
--|         type="hidDEN"
--
--#data
--<!doctype html><table>  <input type='hidDEN'></table>
--#errors
--Line: 1 Col: 45 Unexpected input with type hidden in table context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       "  "
--|       <input>
--|         type="hidDEN"
--
--#data
--<!doctype html><table><input type=" hidden"><input type=hidDEN></table>
--#errors
--Line: 1 Col: 44 Unexpected start tag (input) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <input>
--|       type=" hidden"
--|     <table>
--|       <input>
--|         type="hidDEN"
--
--#data
--<!doctype html><table><select>X<tr>
--#errors
--Line: 1 Col: 30 Unexpected start tag (select) in table context caused voodoo mode.
--Line: 1 Col: 35 Unexpected table element start tag (trs) in the select in table phase.
--Line: 1 Col: 35 Unexpected end of file. Expected table content.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       "X"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!doctype html><select>X</select>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       "X"
--
--#data
--<!DOCTYPE hTmL><html></html>
--#errors
--Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<!DOCTYPE HTML><html></html>
--#errors
--Line: 1 Col: 28 Unexpected end tag (html) after the (implied) root element.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--
--#data
--<body>X</body></body>
--#errors
--Line: 1 Col: 21 Unexpected end tag token (body) in the after body phase.
--Line: 1 Col: 21 Unexpected EOF in inner html mode.
--#document-fragment
--html
--#document
--| <head>
--| <body>
--|   "X"
--
--#data
--<div><p>a</x> b
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 13 Unexpected end tag (x). Ignored.
--Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <p>
--|         "a b"
--
--#data
--<table><tr><td><code></code> </table>
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <code>
--|             " "
--
--#data
--<table><b><tr><td>aaa</td></tr>bbb</table>ccc
--#errors
--XXX: Fix me
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|     <b>
--|       "bbb"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "aaa"
--|     <b>
--|       "ccc"
--
--#data
--A<table><tr> B</tr> B</table>
--#errors
--XXX: Fix me
--#document
--| <html>
--|   <head>
--|   <body>
--|     "A B B"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--A<table><tr> B</tr> </em>C</table>
--#errors
--XXX: Fix me
--#document
--| <html>
--|   <head>
--|   <body>
--|     "A BC"
--|     <table>
--|       <tbody>
--|         <tr>
--|         " "
--
--#data
--<select><keygen>
--#errors
--Not known
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|     <keygen>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests8.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests8.dat
-deleted file mode 100644
-index 90e6c91..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests8.dat
-+++ /dev/null
-@@ -1,148 +0,0 @@
--#data
--<div>
--<div></div>
--</span>x
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 3 Col: 7 Unexpected end tag (span). Ignored.
--Line: 3 Col: 8 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "
--"
--|       <div>
--|       "
--x"
--
--#data
--<div>x<div></div>
--</span>x
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 2 Col: 7 Unexpected end tag (span). Ignored.
--Line: 2 Col: 8 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "x"
--|       <div>
--|       "
--x"
--
--#data
--<div>x<div></div>x</span>x
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 25 Unexpected end tag (span). Ignored.
--Line: 1 Col: 26 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "x"
--|       <div>
--|       "xx"
--
--#data
--<div>x<div></div>y</span>z
--#errors
--Line: 1 Col: 5 Unexpected start tag (div). Expected DOCTYPE.
--Line: 1 Col: 25 Unexpected end tag (span). Ignored.
--Line: 1 Col: 26 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "x"
--|       <div>
--|       "yz"
--
--#data
--<table><div>x<div></div>x</span>x
--#errors
--Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE.
--Line: 1 Col: 12 Unexpected start tag (div) in table context caused voodoo mode.
--Line: 1 Col: 18 Unexpected start tag (div) in table context caused voodoo mode.
--Line: 1 Col: 24 Unexpected end tag (div) in table context caused voodoo mode.
--Line: 1 Col: 32 Unexpected end tag (span) in table context caused voodoo mode.
--Line: 1 Col: 32 Unexpected end tag (span). Ignored.
--Line: 1 Col: 33 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "x"
--|       <div>
--|       "xx"
--|     <table>
--
--#data
--x<table>x
--#errors
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--Line: 1 Col: 9 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 9 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "xx"
--|     <table>
--
--#data
--x<table><table>x
--#errors
--Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE.
--Line: 1 Col: 15 Unexpected start tag (table) implies end tag (table).
--Line: 1 Col: 16 Unexpected non-space characters in table context caused voodoo mode.
--Line: 1 Col: 16 Unexpected end of file. Expected table content.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "x"
--|     <table>
--|     "x"
--|     <table>
--
--#data
--<b>a<div></div><div></b>y
--#errors
--Line: 1 Col: 3 Unexpected start tag (b). Expected DOCTYPE.
--Line: 1 Col: 24 End tag (b) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 25 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|       "a"
--|       <div>
--|     <div>
--|       <b>
--|       "y"
--
--#data
--<a><div><p></a>
--#errors
--Line: 1 Col: 3 Unexpected start tag (a). Expected DOCTYPE.
--Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 15 End tag (a) violates step 1, paragraph 3 of the adoption agency algorithm.
--Line: 1 Col: 15 Expected closing tag. Unexpected end of file.
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <div>
--|       <a>
--|       <p>
--|         <a>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests9.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests9.dat
-deleted file mode 100644
-index 554e27a..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests9.dat
-+++ /dev/null
-@@ -1,457 +0,0 @@
--#data
--<!DOCTYPE html><math></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--
--#data
--<!DOCTYPE html><body><math></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--
--#data
--<!DOCTYPE html><math><mi>
--#errors
--25: End of file in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--
--#data
--<!DOCTYPE html><math><annotation-xml><svg><u>
--#errors
--45: HTML start tag “u” in a foreign namespace context.
--45: End of file seen and there were open elements.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math annotation-xml>
--|         <svg svg>
--|     <u>
--
--#data
--<!DOCTYPE html><body><select><math></math></select>
--#errors
--Line: 1 Col: 35 Unexpected start tag token (math) in the select phase. Ignored.
--Line: 1 Col: 42 Unexpected end tag (math) in the select phase. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--
--#data
--<!DOCTYPE html><body><select><option><math></math></option></select>
--#errors
--Line: 1 Col: 43 Unexpected start tag token (math) in the select phase. Ignored.
--Line: 1 Col: 50 Unexpected end tag (math) in the select phase. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--
--#data
--<!DOCTYPE html><body><table><math></math></table>
--#errors
--Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 41 Unexpected end tag (math) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><math><mi>foo</mi></math></table>
--#errors
--Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 53 Unexpected end tag (math) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><math><mi>foo</mi><mi>bar</mi></math></table>
--#errors
--Line: 1 Col: 34 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 46 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 58 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 65 Unexpected end tag (math) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <table>
--
--#data
--<!DOCTYPE html><body><table><tbody><math><mi>foo</mi><mi>bar</mi></math></tbody></table>
--#errors
--Line: 1 Col: 41 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 53 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 65 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 72 Unexpected end tag (math) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <table>
--|       <tbody>
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><math><mi>foo</mi><mi>bar</mi></math></tr></tbody></table>
--#errors
--Line: 1 Col: 45 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 57 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 69 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 76 Unexpected end tag (math) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math></td></tr></tbody></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <math math>
--|               <math mi>
--|                 "foo"
--|               <math mi>
--|                 "bar"
--
--#data
--<!DOCTYPE html><body><table><tbody><tr><td><math><mi>foo</mi><mi>bar</mi></math><p>baz</td></tr></tbody></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <math math>
--|               <math mi>
--|                 "foo"
--|               <math mi>
--|                 "bar"
--|             <p>
--|               "baz"
--
--#data
--<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi></math><p>baz</caption></table>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <math math>
--|           <math mi>
--|             "foo"
--|           <math mi>
--|             "bar"
--|         <p>
--|           "baz"
--
--#data
--<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
--#errors
--Line: 1 Col: 70 HTML start tag "p" in a foreign namespace context.
--Line: 1 Col: 81 Unexpected end table tag in caption. Generates implied end caption.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <math math>
--|           <math mi>
--|             "foo"
--|           <math mi>
--|             "bar"
--|         <p>
--|           "baz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><caption><math><mi>foo</mi><mi>bar</mi>baz</table><p>quux
--#errors
--Line: 1 Col: 78 Unexpected end table tag in caption. Generates implied end caption.
--Line: 1 Col: 78 Unexpected end tag (caption). Missing end tag (math).
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <caption>
--|         <math math>
--|           <math mi>
--|             "foo"
--|           <math mi>
--|             "bar"
--|           "baz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><colgroup><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
--#errors
--Line: 1 Col: 44 Unexpected start tag (math) in table context caused voodoo mode.
--Line: 1 Col: 56 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 68 Unexpected end tag (mi) in table context caused voodoo mode.
--Line: 1 Col: 71 HTML start tag "p" in a foreign namespace context.
--Line: 1 Col: 71 Unexpected start tag (p) in table context caused voodoo mode.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <p>
--|       "baz"
--|     <table>
--|       <colgroup>
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><tr><td><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
--#errors
--Line: 1 Col: 50 Unexpected start tag token (math) in the select phase. Ignored.
--Line: 1 Col: 54 Unexpected start tag token (mi) in the select phase. Ignored.
--Line: 1 Col: 62 Unexpected end tag (mi) in the select phase. Ignored.
--Line: 1 Col: 66 Unexpected start tag token (mi) in the select phase. Ignored.
--Line: 1 Col: 74 Unexpected end tag (mi) in the select phase. Ignored.
--Line: 1 Col: 77 Unexpected start tag token (p) in the select phase. Ignored.
--Line: 1 Col: 88 Unexpected table element end tag (tables) in the select in table phase.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <select>
--|               "foobarbaz"
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body><table><select><math><mi>foo</mi><mi>bar</mi><p>baz</table><p>quux
--#errors
--Line: 1 Col: 36 Unexpected start tag (select) in table context caused voodoo mode.
--Line: 1 Col: 42 Unexpected start tag token (math) in the select phase. Ignored.
--Line: 1 Col: 46 Unexpected start tag token (mi) in the select phase. Ignored.
--Line: 1 Col: 54 Unexpected end tag (mi) in the select phase. Ignored.
--Line: 1 Col: 58 Unexpected start tag token (mi) in the select phase. Ignored.
--Line: 1 Col: 66 Unexpected end tag (mi) in the select phase. Ignored.
--Line: 1 Col: 69 Unexpected start tag token (p) in the select phase. Ignored.
--Line: 1 Col: 80 Unexpected table element end tag (tables) in the select in table phase.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       "foobarbaz"
--|     <table>
--|     <p>
--|       "quux"
--
--#data
--<!DOCTYPE html><body></body></html><math><mi>foo</mi><mi>bar</mi><p>baz
--#errors
--Line: 1 Col: 41 Unexpected start tag (math).
--Line: 1 Col: 68 HTML start tag "p" in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <p>
--|       "baz"
--
--#data
--<!DOCTYPE html><body></body><math><mi>foo</mi><mi>bar</mi><p>baz
--#errors
--Line: 1 Col: 34 Unexpected start tag token (math) in the after body phase.
--Line: 1 Col: 61 HTML start tag "p" in a foreign namespace context.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mi>
--|         "foo"
--|       <math mi>
--|         "bar"
--|     <p>
--|       "baz"
--
--#data
--<!DOCTYPE html><frameset><math><mi></mi><mi></mi><p><span>
--#errors
--Line: 1 Col: 31 Unexpected start tag token (math) in the frameset phase. Ignored.
--Line: 1 Col: 35 Unexpected start tag token (mi) in the frameset phase. Ignored.
--Line: 1 Col: 40 Unexpected end tag token (mi) in the frameset phase. Ignored.
--Line: 1 Col: 44 Unexpected start tag token (mi) in the frameset phase. Ignored.
--Line: 1 Col: 49 Unexpected end tag token (mi) in the frameset phase. Ignored.
--Line: 1 Col: 52 Unexpected start tag token (p) in the frameset phase. Ignored.
--Line: 1 Col: 58 Unexpected start tag token (span) in the frameset phase. Ignored.
--Line: 1 Col: 58 Expected closing tag. Unexpected end of file.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><frameset></frameset><math><mi></mi><mi></mi><p><span>
--#errors
--Line: 1 Col: 42 Unexpected start tag (math) in the after frameset phase. Ignored.
--Line: 1 Col: 46 Unexpected start tag (mi) in the after frameset phase. Ignored.
--Line: 1 Col: 51 Unexpected end tag (mi) in the after frameset phase. Ignored.
--Line: 1 Col: 55 Unexpected start tag (mi) in the after frameset phase. Ignored.
--Line: 1 Col: 60 Unexpected end tag (mi) in the after frameset phase. Ignored.
--Line: 1 Col: 63 Unexpected start tag (p) in the after frameset phase. Ignored.
--Line: 1 Col: 69 Unexpected start tag (span) in the after frameset phase. Ignored.
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!DOCTYPE html><body xlink:href=foo><math xlink:href=foo></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     <math math>
--|       xlink href="foo"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo></mi></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <math math>
--|       <math mi>
--|         xlink href="foo"
--|         xml lang="en"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo /></math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <math math>
--|       <math mi>
--|         xlink href="foo"
--|         xml lang="en"
--
--#data
--<!DOCTYPE html><body xlink:href=foo xml:lang=en><math><mi xml:lang=en xlink:href=foo />bar</math>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     xlink:href="foo"
--|     xml:lang="en"
--|     <math math>
--|       <math mi>
--|         xlink href="foo"
--|         xml lang="en"
--|       "bar"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat
-deleted file mode 100644
-index 6c78661..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tests_innerHTML_1.dat
-+++ /dev/null
-@@ -1,741 +0,0 @@
--#data
--<body><span>
--#errors
--#document-fragment
--body
--#document
--| <span>
--
--#data
--<span><body>
--#errors
--#document-fragment
--body
--#document
--| <span>
--
--#data
--<span><body>
--#errors
--#document-fragment
--div
--#document
--| <span>
--
--#data
--<body><span>
--#errors
--#document-fragment
--html
--#document
--| <head>
--| <body>
--|   <span>
--
--#data
--<frameset><span>
--#errors
--#document-fragment
--body
--#document
--| <span>
--
--#data
--<span><frameset>
--#errors
--#document-fragment
--body
--#document
--| <span>
--
--#data
--<span><frameset>
--#errors
--#document-fragment
--div
--#document
--| <span>
--
--#data
--<frameset><span>
--#errors
--#document-fragment
--html
--#document
--| <head>
--| <frameset>
--
--#data
--<table><tr>
--#errors
--#document-fragment
--table
--#document
--| <tbody>
--|   <tr>
--
--#data
--</table><tr>
--#errors
--#document-fragment
--table
--#document
--| <tbody>
--|   <tr>
--
--#data
--<a>
--#errors
--#document-fragment
--table
--#document
--| <a>
--
--#data
--<a>
--#errors
--#document-fragment
--table
--#document
--| <a>
--
--#data
--<a><caption>a
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <caption>
--|   "a"
--
--#data
--<a><colgroup><col>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <colgroup>
--|   <col>
--
--#data
--<a><tbody><tr>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <tbody>
--|   <tr>
--
--#data
--<a><tfoot><tr>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <tfoot>
--|   <tr>
--
--#data
--<a><thead><tr>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <thead>
--|   <tr>
--
--#data
--<a><tr>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <tbody>
--|   <tr>
--
--#data
--<a><th>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <tbody>
--|   <tr>
--|     <th>
--
--#data
--<a><td>
--#errors
--#document-fragment
--table
--#document
--| <a>
--| <tbody>
--|   <tr>
--|     <td>
--
--#data
--<table></table><tbody>
--#errors
--#document-fragment
--caption
--#document
--| <table>
--
--#data
--</table><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--
--#data
--<span></table>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--
--#data
--</caption><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--
--#data
--<span></caption><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><caption><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><col><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><colgroup><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><html><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><tbody><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><td><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><tfoot><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><thead><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><th><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span><tr><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--<span></table><span>
--#errors
--#document-fragment
--caption
--#document
--| <span>
--|   <span>
--
--#data
--</colgroup><col>
--#errors
--#document-fragment
--colgroup
--#document
--| <col>
--
--#data
--<a><col>
--#errors
--#document-fragment
--colgroup
--#document
--| <col>
--
--#data
--<caption><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<col><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<colgroup><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<tbody><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<tfoot><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<thead><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--</table><a>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--
--#data
--<a><tr>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--| <tr>
--
--#data
--<a><td>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--| <tr>
--|   <td>
--
--#data
--<a><td>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--| <tr>
--|   <td>
--
--#data
--<a><td>
--#errors
--#document-fragment
--tbody
--#document
--| <a>
--| <tr>
--|   <td>
--
--#data
--<td><table><tbody><a><tr>
--#errors
--#document-fragment
--tbody
--#document
--| <tr>
--|   <td>
--|     <a>
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--</tr><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<td><table><a><tr></tr><tr>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--|   <a>
--|   <table>
--|     <tbody>
--|       <tr>
--|       <tr>
--
--#data
--<caption><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<col><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<colgroup><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<tbody><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<tfoot><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<thead><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<tr><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--</table><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--
--#data
--<td><table></table><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--|   <table>
--| <td>
--
--#data
--<td><table></table><td>
--#errors
--#document-fragment
--tr
--#document
--| <td>
--|   <table>
--| <td>
--
--#data
--<caption><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<col><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<colgroup><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<tbody><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<tfoot><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<th><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<thead><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<tr><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</table><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</tbody><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</td><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</tfoot><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</thead><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</th><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--</tr><a>
--#errors
--#document-fragment
--td
--#document
--| <a>
--
--#data
--<table><td><td>
--#errors
--#document-fragment
--td
--#document
--| <table>
--|   <tbody>
--|     <tr>
--|       <td>
--|       <td>
--
--#data
--</select><option>
--#errors
--#document-fragment
--select
--#document
--| <option>
--
--#data
--<input><option>
--#errors
--#document-fragment
--select
--#document
--| <option>
--
--#data
--<keygen><option>
--#errors
--#document-fragment
--select
--#document
--| <option>
--
--#data
--<textarea><option>
--#errors
--#document-fragment
--select
--#document
--| <option>
--
--#data
--</html><!--abc-->
--#errors
--#document-fragment
--html
--#document
--| <head>
--| <body>
--| <!-- abc -->
--
--#data
--</frameset><frame>
--#errors
--#document-fragment
--frameset
--#document
--| <frame>
--
--#data
--#errors
--#document-fragment
--html
--#document
--| <head>
--| <body>
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tricky01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tricky01.dat
-deleted file mode 100644
-index 0841992..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/tricky01.dat
-+++ /dev/null
-@@ -1,261 +0,0 @@
--#data
--<b><p>Bold </b> Not bold</p>
--Also not bold.
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <b>
--|     <p>
--|       <b>
--|         "Bold "
--|       " Not bold"
--|     "
--Also not bold."
--
--#data
--<html>
--<font color=red><i>Italic and Red<p>Italic and Red </font> Just italic.</p> Italic only.</i> Plain
--<p>I should not be red. <font color=red>Red. <i>Italic and red.</p>
--<p>Italic and red. </i> Red.</font> I should not be red.</p>
--<b>Bold <i>Bold and italic</b> Only Italic </i> Plain
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|       color="red"
--|       <i>
--|         "Italic and Red"
--|     <i>
--|       <p>
--|         <font>
--|           color="red"
--|           "Italic and Red "
--|         " Just italic."
--|       " Italic only."
--|     " Plain
--"
--|     <p>
--|       "I should not be red. "
--|       <font>
--|         color="red"
--|         "Red. "
--|         <i>
--|           "Italic and red."
--|     <font>
--|       color="red"
--|       <i>
--|         "
--"
--|     <p>
--|       <font>
--|         color="red"
--|         <i>
--|           "Italic and red. "
--|         " Red."
--|       " I should not be red."
--|     "
--"
--|     <b>
--|       "Bold "
--|       <i>
--|         "Bold and italic"
--|     <i>
--|       " Only Italic "
--|     " Plain"
--
--#data
--<html><body>
--<p><font size="7">First paragraph.</p>
--<p>Second paragraph.</p></font>
--<b><p><i>Bold and Italic</b> Italic</p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "
--"
--|     <p>
--|       <font>
--|         size="7"
--|         "First paragraph."
--|     <font>
--|       size="7"
--|       "
--"
--|       <p>
--|         "Second paragraph."
--|     "
--"
--|     <b>
--|     <p>
--|       <b>
--|         <i>
--|           "Bold and Italic"
--|       <i>
--|         " Italic"
--
--#data
--<html>
--<dl>
--<dt><b>Boo
--<dd>Goo?
--</dl>
--</html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <dl>
--|       "
--"
--|       <dt>
--|         <b>
--|           "Boo
--"
--|       <dd>
--|         <b>
--|           "Goo?
--"
--|     <b>
--|       "
--"
--
--#data
--<html><body>
--<label><a><div>Hello<div>World</div></a></label>  
--</body></html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "
--"
--|     <label>
--|       <a>
--|       <div>
--|         <a>
--|           "Hello"
--|           <div>
--|             "World"
--|         "  
--"
--
--#data
--<table><center> <font>a</center> <img> <tr><td> </td> </tr> </table>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <center>
--|       " "
--|       <font>
--|         "a"
--|     <font>
--|       <img>
--|       " "
--|     <table>
--|       " "
--|       <tbody>
--|         <tr>
--|           <td>
--|             " "
--|           " "
--|         " "
--
--#data
--<table><tr><p><a><p>You should see this text.
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       <a>
--|     <p>
--|       <a>
--|         "You should see this text."
--|     <table>
--|       <tbody>
--|         <tr>
--
--#data
--<TABLE>
--<TR>
--<CENTER><CENTER><TD></TD></TR><TR>
--<FONT>
--<TABLE><tr></tr></TABLE>
--</P>
--<a></font><font></a>
--This page contains an insanely badly-nested tag sequence.
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <center>
--|       <center>
--|     <font>
--|       "
--"
--|     <table>
--|       "
--"
--|       <tbody>
--|         <tr>
--|           "
--"
--|           <td>
--|         <tr>
--|           "
--"
--|     <table>
--|       <tbody>
--|         <tr>
--|     <font>
--|       "
--"
--|       <p>
--|       "
--"
--|       <a>
--|     <a>
--|       <font>
--|     <font>
--|       "
--This page contains an insanely badly-nested tag sequence."
--
--#data
--<html>
--<body>
--<b><nobr><div>This text is in a div inside a nobr</nobr>More text that should not be in the nobr, i.e., the
--nobr should have closed the div inside it implicitly. </b><pre>A pre tag outside everything else.</pre>
--</body>
--</html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "
--"
--|     <b>
--|       <nobr>
--|     <div>
--|       <b>
--|         <nobr>
--|           "This text is in a div inside a nobr"
--|         "More text that should not be in the nobr, i.e., the
--nobr should have closed the div inside it implicitly. "
--|       <pre>
--|         "A pre tag outside everything else."
--|       "
--
--"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit01.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit01.dat
-deleted file mode 100644
-index 9d425e9..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit01.dat
-+++ /dev/null
-@@ -1,610 +0,0 @@
--#data
--Test
--#errors
--Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE.
--#document
--| <html>
--|   <head>
--|   <body>
--|     "Test"
--
--#data
--<div></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--
--#data
--<div>Test</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "Test"
--
--#data
--<di
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<div>Hello</div>
--<script>
--console.log("PASS");
--</script>
--<div>Bye</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "Hello"
--|     "
--"
--|     <script>
--|       "
--console.log("PASS");
--"
--|     "
--"
--|     <div>
--|       "Bye"
--
--#data
--<div foo="bar">Hello</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       foo="bar"
--|       "Hello"
--
--#data
--<div>Hello</div>
--<script>
--console.log("FOO<span>BAR</span>BAZ");
--</script>
--<div>Bye</div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       "Hello"
--|     "
--"
--|     <script>
--|       "
--console.log("FOO<span>BAR</span>BAZ");
--"
--|     "
--"
--|     <div>
--|       "Bye"
--
--#data
--<foo bar="baz"></foo><potato quack="duck"></potato>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       bar="baz"
--|     <potato>
--|       quack="duck"
--
--#data
--<foo bar="baz"><potato quack="duck"></potato></foo>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       bar="baz"
--|       <potato>
--|         quack="duck"
--
--#data
--<foo></foo bar="baz"><potato></potato quack="duck">
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|     <potato>
--
--#data
--</ tttt>
--#errors
--#document
--| <!--  tttt -->
--| <html>
--|   <head>
--|   <body>
--
--#data
--<div FOO ><img><img></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       foo=""
--|       <img>
--|       <img>
--
--#data
--<p>Test</p<p>Test2</p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       "TestTest2"
--
--#data
--<rdar://problem/6869687>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <rdar:>
--|       6869687=""
--|       problem=""
--
--#data
--<A>test< /A>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|       "test< /A>"
--
--#data
--&lt;
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "<"
--
--#data
--<body foo='bar'><body foo='baz' yo='mama'>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     foo="bar"
--|     yo="mama"
--
--#data
--<body></br foo="bar"></body>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <br>
--
--#data
--<bdy><br foo="bar"></body>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <bdy>
--|       <br>
--|         foo="bar"
--
--#data
--<body></body></br foo="bar">
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <br>
--
--#data
--<bdy></body><br foo="bar">
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <bdy>
--|       <br>
--|         foo="bar"
--
--#data
--<html><body></body></html><!-- Hi there -->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--| <!--  Hi there  -->
--
--#data
--<html><body></body></html>x<!-- Hi there -->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "x"
--|     <!--  Hi there  -->
--
--#data
--<html><body></body></html>x<!-- Hi there --></html><!-- Again -->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "x"
--|     <!--  Hi there  -->
--| <!--  Again  -->
--
--#data
--<html><body></body></html>x<!-- Hi there --></body></html><!-- Again -->
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "x"
--|     <!--  Hi there  -->
--| <!--  Again  -->
--
--#data
--<html><body><ruby><div><rp>xx</rp></div></ruby></body></html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <rp>
--|           "xx"
--
--#data
--<html><body><ruby><div><rt>xx</rt></div></ruby></body></html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ruby>
--|       <div>
--|         <rt>
--|           "xx"
--
--#data
--<html><frameset><!--1--><noframes>A</noframes><!--2--></frameset><!--3--><noframes>B</noframes><!--4--></html><!--5--><noframes>C</noframes><!--6-->
--#errors
--#document
--| <html>
--|   <head>
--|   <frameset>
--|     <!-- 1 -->
--|     <noframes>
--|       "A"
--|     <!-- 2 -->
--|   <!-- 3 -->
--|   <noframes>
--|     "B"
--|   <!-- 4 -->
--|   <noframes>
--|     "C"
--| <!-- 5 -->
--| <!-- 6 -->
--
--#data
--<select><option>A<select><option>B<select><option>C<select><option>D<select><option>E<select><option>F<select><option>G<select>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <select>
--|       <option>
--|         "A"
--|     <option>
--|       "B"
--|       <select>
--|         <option>
--|           "C"
--|     <option>
--|       "D"
--|       <select>
--|         <option>
--|           "E"
--|     <option>
--|       "F"
--|       <select>
--|         <option>
--|           "G"
--
--#data
--<dd><dd><dt><dt><dd><li><li>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <dd>
--|     <dd>
--|     <dt>
--|     <dt>
--|     <dd>
--|       <li>
--|       <li>
--
--#data
--<div><b></div><div><nobr>a<nobr>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <b>
--|     <div>
--|       <b>
--|         <nobr>
--|           "a"
--|         <nobr>
--
--#data
--<head></head>
--<body></body>
--#errors
--#document
--| <html>
--|   <head>
--|   "
--"
--|   <body>
--
--#data
--<head></head> <style></style>ddd
--#errors
--#document
--| <html>
--|   <head>
--|     <style>
--|   " "
--|   <body>
--|     "ddd"
--
--#data
--<kbd><table></kbd><col><select><tr>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <kbd>
--|       <select>
--|       <table>
--|         <colgroup>
--|           <col>
--|         <tbody>
--|           <tr>
--
--#data
--<kbd><table></kbd><col><select><tr></table><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <kbd>
--|       <select>
--|       <table>
--|         <colgroup>
--|           <col>
--|         <tbody>
--|           <tr>
--|       <div>
--
--#data
--<a><li><style></style><title></title></a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <li>
--|       <a>
--|         <style>
--|         <title>
--
--#data
--<font></p><p><meta><title></title></font>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <font>
--|       <p>
--|     <p>
--|       <font>
--|         <meta>
--|         <title>
--
--#data
--<a><center><title></title><a>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <a>
--|     <center>
--|       <a>
--|         <title>
--|       <a>
--
--#data
--<svg><title><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg title>
--|         <div>
--
--#data
--<svg><title><rect><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg title>
--|         <rect>
--|           <div>
--
--#data
--<svg><title><svg><div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg title>
--|         <svg svg>
--|         <div>
--
--#data
--<img <="" FAIL>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <img>
--|       <=""
--|       fail=""
--
--#data
--<ul><li><div id='foo'/>A</li><li>B<div>C</div></li></ul>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <ul>
--|       <li>
--|         <div>
--|           id="foo"
--|           "A"
--|       <li>
--|         "B"
--|         <div>
--|           "C"
--
--#data
--<svg><em><desc></em>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|     <em>
--|       <desc>
--
--#data
--<table><tr><td><svg><desc><td></desc><circle>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             <svg svg>
--|               <svg desc>
--|           <td>
--|             <circle>
--
--#data
--<svg><tfoot></mi><td>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <svg svg>
--|       <svg tfoot>
--|         <svg td>
--
--#data
--<math><mrow><mrow><mn>1</mn></mrow><mi>a</mi></mrow></math>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <math math>
--|       <math mrow>
--|         <math mrow>
--|           <math mn>
--|             "1"
--|         <math mi>
--|           "a"
--
--#data
--<!doctype html><input type="hidden"><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <frameset>
--
--#data
--<!doctype html><input type="button"><frameset>
--#errors
--#document
--| <!DOCTYPE html>
--| <html>
--|   <head>
--|   <body>
--|     <input>
--|       type="button"
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit02.dat b/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit02.dat
-deleted file mode 100644
-index 905783d..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/testdata/webkit/webkit02.dat
-+++ /dev/null
-@@ -1,159 +0,0 @@
--#data
--<foo bar=qux/>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <foo>
--|       bar="qux/"
--
--#data
--<p id="status"><noscript><strong>A</strong></noscript><span>B</span></p>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <p>
--|       id="status"
--|       <noscript>
--|         "<strong>A</strong>"
--|       <span>
--|         "B"
--
--#data
--<div><sarcasm><div></div></sarcasm></div>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <div>
--|       <sarcasm>
--|         <div>
--
--#data
--<html><body><img src="" border="0" alt="><div>A</div></body></html>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--
--#data
--<table><td></tbody>A
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     "A"
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--
--#data
--<table><td></thead>A
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "A"
--
--#data
--<table><td></tfoot>A
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <tbody>
--|         <tr>
--|           <td>
--|             "A"
--
--#data
--<table><thead><td></tbody>A
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <table>
--|       <thead>
--|         <tr>
--|           <td>
--|             "A"
--
--#data
--<legend>test</legend>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <legend>
--|       "test"
--
--#data
--<table><input>
--#errors
--#document
--| <html>
--|   <head>
--|   <body>
--|     <input>
--|     <table>
--
--#data
--<b><em><dcell><postfield><postfield><postfield><postfield><missing_glyph><missing_glyph><missing_glyph><missing_glyph><hkern><aside></b></em>
--#errors
--#document-fragment
--div
--#document
--| <b>
--|   <em>
--|     <dcell>
--|       <postfield>
--|         <postfield>
--|           <postfield>
--|             <postfield>
--|               <missing_glyph>
--|                 <missing_glyph>
--|                   <missing_glyph>
--|                     <missing_glyph>
--|                       <hkern>
--| <aside>
--|   <em>
--|     <b>
--
--#data
--<isindex action="x">
--#errors
--#document-fragment
--table
--#document
--| <form>
--|   action="x"
--| <hr>
--| <label>
--|   "This is a searchable index. Enter search keywords: "
--|   <input>
--|     name="isindex"
--| <hr>
--
--#data
--<option><XH<optgroup></optgroup>
--#errors
--#document-fragment
--select
--#document
--| <option>
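
The webkit01.dat and webkit02.dat files deleted above are html5lib/WebKit tree-construction fixtures: each case lists the raw input after #data, the expected parse errors after #errors, and the expected DOM dump after #document (or #document-fragment plus a context element), one node per line with a "| " prefix, two spaces of indentation per depth level, quoted text nodes, and attributes printed as key="value" under their element. Purely as an illustration of that layout, and not the harness the removed package actually used, a rough Go splitter for such a file could look like the sketch below (the file name is just an example):

// Illustration only: a rough reader for the #data/#errors/#document fixture
// format shown above. This is NOT the removed package's real test harness.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

type treeTest struct {
	data, errors, document []string
}

func main() {
	f, err := os.Open("webkit01.dat") // example path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var tests []treeTest
	var section *[]string
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		switch {
		case line == "#data":
			tests = append(tests, treeTest{})
			section = &tests[len(tests)-1].data
		case line == "#errors" && len(tests) > 0:
			section = &tests[len(tests)-1].errors
		case strings.HasPrefix(line, "#document") && len(tests) > 0:
			// Covers both #document and #document-fragment; for fragments the
			// context element line ends up at the top of the document slice.
			section = &tests[len(tests)-1].document
		case section != nil:
			// Blank separator lines are kept here; trim them if needed.
			*section = append(*section, line)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d tree-construction cases\n", len(tests))
}
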
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/token.go b/Godeps/_workspace/src/golang.org/x/net/html/token.go
-deleted file mode 100644
-index 893e272..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/token.go
-+++ /dev/null
-@@ -1,1219 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bytes"
--	"errors"
--	"io"
--	"strconv"
--	"strings"
--
--	"golang.org/x/net/html/atom"
--)
--
--// A TokenType is the type of a Token.
--type TokenType uint32
--
--const (
--	// ErrorToken means that an error occurred during tokenization.
--	ErrorToken TokenType = iota
--	// TextToken means a text node.
--	TextToken
--	// A StartTagToken looks like <a>.
--	StartTagToken
--	// An EndTagToken looks like </a>.
--	EndTagToken
--	// A SelfClosingTagToken tag looks like <br/>.
--	SelfClosingTagToken
--	// A CommentToken looks like <!--x-->.
--	CommentToken
--	// A DoctypeToken looks like <!DOCTYPE x>
--	DoctypeToken
--)
--
--// ErrBufferExceeded means that the buffering limit was exceeded.
--var ErrBufferExceeded = errors.New("max buffer exceeded")
--
--// String returns a string representation of the TokenType.
--func (t TokenType) String() string {
--	switch t {
--	case ErrorToken:
--		return "Error"
--	case TextToken:
--		return "Text"
--	case StartTagToken:
--		return "StartTag"
--	case EndTagToken:
--		return "EndTag"
--	case SelfClosingTagToken:
--		return "SelfClosingTag"
--	case CommentToken:
--		return "Comment"
--	case DoctypeToken:
--		return "Doctype"
--	}
--	return "Invalid(" + strconv.Itoa(int(t)) + ")"
--}
--
--// An Attribute is an attribute namespace-key-value triple. Namespace is
--// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
--// does not contain escapable characters like '&', '<' or '>'), and Val is
--// unescaped (it looks like "a<b" rather than "a&lt;b").
--//
--// Namespace is only used by the parser, not the tokenizer.
--type Attribute struct {
--	Namespace, Key, Val string
--}
--
--// A Token consists of a TokenType and some Data (tag name for start and end
--// tags, content for text, comments and doctypes). A tag Token may also contain
--// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
--// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
--// zero if Data is not a known tag name.
--type Token struct {
--	Type     TokenType
--	DataAtom atom.Atom
--	Data     string
--	Attr     []Attribute
--}
--
--// tagString returns a string representation of a tag Token's Data and Attr.
--func (t Token) tagString() string {
--	if len(t.Attr) == 0 {
--		return t.Data
--	}
--	buf := bytes.NewBufferString(t.Data)
--	for _, a := range t.Attr {
--		buf.WriteByte(' ')
--		buf.WriteString(a.Key)
--		buf.WriteString(`="`)
--		escape(buf, a.Val)
--		buf.WriteByte('"')
--	}
--	return buf.String()
--}
--
--// String returns a string representation of the Token.
--func (t Token) String() string {
--	switch t.Type {
--	case ErrorToken:
--		return ""
--	case TextToken:
--		return EscapeString(t.Data)
--	case StartTagToken:
--		return "<" + t.tagString() + ">"
--	case EndTagToken:
--		return "</" + t.tagString() + ">"
--	case SelfClosingTagToken:
--		return "<" + t.tagString() + "/>"
--	case CommentToken:
--		return "<!--" + t.Data + "-->"
--	case DoctypeToken:
--		return "<!DOCTYPE " + t.Data + ">"
--	}
--	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
--}
--
--// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
--// the end is exclusive.
--type span struct {
--	start, end int
--}
--
--// A Tokenizer returns a stream of HTML Tokens.
--type Tokenizer struct {
--	// r is the source of the HTML text.
--	r io.Reader
--	// tt is the TokenType of the current token.
--	tt TokenType
--	// err is the first error encountered during tokenization. It is possible
--	// for tt != Error && err != nil to hold: this means that Next returned a
--	// valid token but the subsequent Next call will return an error token.
--	// For example, if the HTML text input was just "plain", then the first
--	// Next call would set z.err to io.EOF but return a TextToken, and all
--	// subsequent Next calls would return an ErrorToken.
--	// err is never reset. Once it becomes non-nil, it stays non-nil.
--	err error
--	// readErr is the error returned by the io.Reader r. It is separate from
--	// err because it is valid for an io.Reader to return (n int, err1 error)
--	// such that n > 0 && err1 != nil, and callers should always process the
--	// n > 0 bytes before considering the error err1.
--	readErr error
--	// buf[raw.start:raw.end] holds the raw bytes of the current token.
--	// buf[raw.end:] is buffered input that will yield future tokens.
--	raw span
--	buf []byte
--	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
--	maxBuf int
--	// buf[data.start:data.end] holds the raw bytes of the current token's data:
--	// a text token's text, a tag token's tag name, etc.
--	data span
--	// pendingAttr is the attribute key and value currently being tokenized.
--	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
--	// incremented on each call to TagAttr.
--	pendingAttr   [2]span
--	attr          [][2]span
--	nAttrReturned int
--	// rawTag is the "script" in "</script>" that closes the next token. If
--	// non-empty, the subsequent call to Next will return a raw or RCDATA text
--	// token: one that treats "<p>" as text instead of an element.
--	// rawTag's contents are lower-cased.
--	rawTag string
--	// textIsRaw is whether the current text token's data is not escaped.
--	textIsRaw bool
--	// convertNUL is whether NUL bytes in the current token's data should
--	// be converted into \ufffd replacement characters.
--	convertNUL bool
--	// allowCDATA is whether CDATA sections are allowed in the current context.
--	allowCDATA bool
--}
--
--// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
--// the text "foo". The default value is false, which means to recognize it as
--// a bogus comment "<!-- [CDATA[foo]] -->" instead.
--//
--// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
--// only if tokenizing foreign content, such as MathML and SVG. However,
--// tracking foreign-contentness is difficult to do purely in the tokenizer,
--// as opposed to the parser, due to HTML integration points: an <svg> element
--// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
--// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
--// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
--// In practice, if using the tokenizer without caring whether MathML or SVG
--// CDATA is text or comments, such as tokenizing HTML to find all the anchor
--// text, it is acceptable to ignore this responsibility.
--func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
--	z.allowCDATA = allowCDATA
--}
--
--// NextIsNotRawText instructs the tokenizer that the next token should not be
--// considered as 'raw text'. Some elements, such as script and title elements,
--// normally require the next token after the opening tag to be 'raw text' that
--// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
--// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
--// an end tag token for "</title>". There are no distinct start tag or end tag
--// tokens for the "<b>" and "</b>".
--//
--// This tokenizer implementation will generally look for raw text at the right
--// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
--// raw text if in foreign content: <title> generally needs raw text, but a
--// <title> inside an <svg> does not. Another example is that a <textarea>
--// generally needs raw text, but a <textarea> is not allowed as an immediate
--// child of a <select>; in normal parsing, a <textarea> implies </select>, but
--// one cannot close the implicit element when parsing a <select>'s InnerHTML.
--// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
--// ness is difficult to do purely in the tokenizer, as opposed to the parser.
--// For strict compliance with the HTML5 tokenization algorithm, it is the
--// responsibility of the user of a tokenizer to call NextIsNotRawText as
--// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
--// responsibility for basic usage.
--//
--// Note that this 'raw text' concept is different from the one offered by the
--// Tokenizer.Raw method.
--func (z *Tokenizer) NextIsNotRawText() {
--	z.rawTag = ""
--}
--
--// Err returns the error associated with the most recent ErrorToken token.
--// This is typically io.EOF, meaning the end of tokenization.
--func (z *Tokenizer) Err() error {
--	if z.tt != ErrorToken {
--		return nil
--	}
--	return z.err
--}
--
--// readByte returns the next byte from the input stream, doing a buffered read
--// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
--// slice that holds all the bytes read so far for the current token.
--// It sets z.err if the underlying reader returns an error.
--// Pre-condition: z.err == nil.
--func (z *Tokenizer) readByte() byte {
--	if z.raw.end >= len(z.buf) {
--		// Our buffer is exhausted and we have to read from z.r. Check if the
--		// previous read resulted in an error.
--		if z.readErr != nil {
--			z.err = z.readErr
--			return 0
--		}
--		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
--		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
--		// allocate a new buffer before the copy.
--		c := cap(z.buf)
--		d := z.raw.end - z.raw.start
--		var buf1 []byte
--		if 2*d > c {
--			buf1 = make([]byte, d, 2*c)
--		} else {
--			buf1 = z.buf[:d]
--		}
--		copy(buf1, z.buf[z.raw.start:z.raw.end])
--		if x := z.raw.start; x != 0 {
--			// Adjust the data/attr spans to refer to the same contents after the copy.
--			z.data.start -= x
--			z.data.end -= x
--			z.pendingAttr[0].start -= x
--			z.pendingAttr[0].end -= x
--			z.pendingAttr[1].start -= x
--			z.pendingAttr[1].end -= x
--			for i := range z.attr {
--				z.attr[i][0].start -= x
--				z.attr[i][0].end -= x
--				z.attr[i][1].start -= x
--				z.attr[i][1].end -= x
--			}
--		}
--		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
--		// Now that we have copied the live bytes to the start of the buffer,
--		// we read from z.r into the remainder.
--		var n int
--		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
--		if n == 0 {
--			z.err = z.readErr
--			return 0
--		}
--		z.buf = buf1[:d+n]
--	}
--	x := z.buf[z.raw.end]
--	z.raw.end++
--	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
--		z.err = ErrBufferExceeded
--		return 0
--	}
--	return x
--}
--
--// Buffered returns a slice containing data buffered but not yet tokenized.
--func (z *Tokenizer) Buffered() []byte {
--	return z.buf[z.raw.end:]
--}
--
--// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
--// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
--// too many times in succession.
--func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
--	for i := 0; i < 100; i++ {
--		n, err := r.Read(b)
--		if n != 0 || err != nil {
--			return n, err
--		}
--	}
--	return 0, io.ErrNoProgress
--}
--
--// skipWhiteSpace skips past any white space.
--func (z *Tokenizer) skipWhiteSpace() {
--	if z.err != nil {
--		return
--	}
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			return
--		}
--		switch c {
--		case ' ', '\n', '\r', '\t', '\f':
--			// No-op.
--		default:
--			z.raw.end--
--			return
--		}
--	}
--}
--
--// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
--// is typically something like "script" or "textarea".
--func (z *Tokenizer) readRawOrRCDATA() {
--	if z.rawTag == "script" {
--		z.readScript()
--		z.textIsRaw = true
--		z.rawTag = ""
--		return
--	}
--loop:
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			break loop
--		}
--		if c != '<' {
--			continue loop
--		}
--		c = z.readByte()
--		if z.err != nil {
--			break loop
--		}
--		if c != '/' {
--			continue loop
--		}
--		if z.readRawEndTag() || z.err != nil {
--			break loop
--		}
--	}
--	z.data.end = z.raw.end
--	// A textarea's or title's RCDATA can contain escaped entities.
--	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
--	z.rawTag = ""
--}
--
--// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
--// If it succeeds, it backs up the input position to reconsume the tag and
--// returns true. Otherwise it returns false. The opening "</" has already been
--// consumed.
--func (z *Tokenizer) readRawEndTag() bool {
--	for i := 0; i < len(z.rawTag); i++ {
--		c := z.readByte()
--		if z.err != nil {
--			return false
--		}
--		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
--			z.raw.end--
--			return false
--		}
--	}
--	c := z.readByte()
--	if z.err != nil {
--		return false
--	}
--	switch c {
--	case ' ', '\n', '\r', '\t', '\f', '/', '>':
--		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
--		z.raw.end -= 3 + len(z.rawTag)
--		return true
--	}
--	z.raw.end--
--	return false
--}
--
--// readScript reads until the next </script> tag, following the byzantine
--// rules for escaping/hiding the closing tag.
--func (z *Tokenizer) readScript() {
--	defer func() {
--		z.data.end = z.raw.end
--	}()
--	var c byte
--
--scriptData:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c == '<' {
--		goto scriptDataLessThanSign
--	}
--	goto scriptData
--
--scriptDataLessThanSign:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '/':
--		goto scriptDataEndTagOpen
--	case '!':
--		goto scriptDataEscapeStart
--	}
--	z.raw.end--
--	goto scriptData
--
--scriptDataEndTagOpen:
--	if z.readRawEndTag() || z.err != nil {
--		return
--	}
--	goto scriptData
--
--scriptDataEscapeStart:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c == '-' {
--		goto scriptDataEscapeStartDash
--	}
--	z.raw.end--
--	goto scriptData
--
--scriptDataEscapeStartDash:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c == '-' {
--		goto scriptDataEscapedDashDash
--	}
--	z.raw.end--
--	goto scriptData
--
--scriptDataEscaped:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataEscapedDash
--	case '<':
--		goto scriptDataEscapedLessThanSign
--	}
--	goto scriptDataEscaped
--
--scriptDataEscapedDash:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataEscapedDashDash
--	case '<':
--		goto scriptDataEscapedLessThanSign
--	}
--	goto scriptDataEscaped
--
--scriptDataEscapedDashDash:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataEscapedDashDash
--	case '<':
--		goto scriptDataEscapedLessThanSign
--	case '>':
--		goto scriptData
--	}
--	goto scriptDataEscaped
--
--scriptDataEscapedLessThanSign:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c == '/' {
--		goto scriptDataEscapedEndTagOpen
--	}
--	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
--		goto scriptDataDoubleEscapeStart
--	}
--	z.raw.end--
--	goto scriptData
--
--scriptDataEscapedEndTagOpen:
--	if z.readRawEndTag() || z.err != nil {
--		return
--	}
--	goto scriptDataEscaped
--
--scriptDataDoubleEscapeStart:
--	z.raw.end--
--	for i := 0; i < len("script"); i++ {
--		c = z.readByte()
--		if z.err != nil {
--			return
--		}
--		if c != "script"[i] && c != "SCRIPT"[i] {
--			z.raw.end--
--			goto scriptDataEscaped
--		}
--	}
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case ' ', '\n', '\r', '\t', '\f', '/', '>':
--		goto scriptDataDoubleEscaped
--	}
--	z.raw.end--
--	goto scriptDataEscaped
--
--scriptDataDoubleEscaped:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataDoubleEscapedDash
--	case '<':
--		goto scriptDataDoubleEscapedLessThanSign
--	}
--	goto scriptDataDoubleEscaped
--
--scriptDataDoubleEscapedDash:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataDoubleEscapedDashDash
--	case '<':
--		goto scriptDataDoubleEscapedLessThanSign
--	}
--	goto scriptDataDoubleEscaped
--
--scriptDataDoubleEscapedDashDash:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch c {
--	case '-':
--		goto scriptDataDoubleEscapedDashDash
--	case '<':
--		goto scriptDataDoubleEscapedLessThanSign
--	case '>':
--		goto scriptData
--	}
--	goto scriptDataDoubleEscaped
--
--scriptDataDoubleEscapedLessThanSign:
--	c = z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c == '/' {
--		goto scriptDataDoubleEscapeEnd
--	}
--	z.raw.end--
--	goto scriptDataDoubleEscaped
--
--scriptDataDoubleEscapeEnd:
--	if z.readRawEndTag() {
--		z.raw.end += len("</script>")
--		goto scriptDataEscaped
--	}
--	if z.err != nil {
--		return
--	}
--	goto scriptDataDoubleEscaped
--}
--
--// readComment reads the next comment token starting with "<!--". The opening
--// "<!--" has already been consumed.
--func (z *Tokenizer) readComment() {
--	z.data.start = z.raw.end
--	defer func() {
--		if z.data.end < z.data.start {
--			// It's a comment with no data, like <!-->.
--			z.data.end = z.data.start
--		}
--	}()
--	for dashCount := 2; ; {
--		c := z.readByte()
--		if z.err != nil {
--			// Ignore up to two dashes at EOF.
--			if dashCount > 2 {
--				dashCount = 2
--			}
--			z.data.end = z.raw.end - dashCount
--			return
--		}
--		switch c {
--		case '-':
--			dashCount++
--			continue
--		case '>':
--			if dashCount >= 2 {
--				z.data.end = z.raw.end - len("-->")
--				return
--			}
--		case '!':
--			if dashCount >= 2 {
--				c = z.readByte()
--				if z.err != nil {
--					z.data.end = z.raw.end
--					return
--				}
--				if c == '>' {
--					z.data.end = z.raw.end - len("--!>")
--					return
--				}
--			}
--		}
--		dashCount = 0
--	}
--}
--
--// readUntilCloseAngle reads until the next ">".
--func (z *Tokenizer) readUntilCloseAngle() {
--	z.data.start = z.raw.end
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return
--		}
--		if c == '>' {
--			z.data.end = z.raw.end - len(">")
--			return
--		}
--	}
--}
--
--// readMarkupDeclaration reads the next token starting with "<!". It might be
--// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
--// "<!a bogus comment". The opening "<!" has already been consumed.
--func (z *Tokenizer) readMarkupDeclaration() TokenType {
--	z.data.start = z.raw.end
--	var c [2]byte
--	for i := 0; i < 2; i++ {
--		c[i] = z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return CommentToken
--		}
--	}
--	if c[0] == '-' && c[1] == '-' {
--		z.readComment()
--		return CommentToken
--	}
--	z.raw.end -= 2
--	if z.readDoctype() {
--		return DoctypeToken
--	}
--	if z.allowCDATA && z.readCDATA() {
--		z.convertNUL = true
--		return TextToken
--	}
--	// It's a bogus comment.
--	z.readUntilCloseAngle()
--	return CommentToken
--}
--
--// readDoctype attempts to read a doctype declaration and returns true if
--// successful. The opening "<!" has already been consumed.
--func (z *Tokenizer) readDoctype() bool {
--	const s = "DOCTYPE"
--	for i := 0; i < len(s); i++ {
--		c := z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return false
--		}
--		if c != s[i] && c != s[i]+('a'-'A') {
--			// Back up to read the fragment of "DOCTYPE" again.
--			z.raw.end = z.data.start
--			return false
--		}
--	}
--	if z.skipWhiteSpace(); z.err != nil {
--		z.data.start = z.raw.end
--		z.data.end = z.raw.end
--		return true
--	}
--	z.readUntilCloseAngle()
--	return true
--}
--
--// readCDATA attempts to read a CDATA section and returns true if
--// successful. The opening "<!" has already been consumed.
--func (z *Tokenizer) readCDATA() bool {
--	const s = "[CDATA["
--	for i := 0; i < len(s); i++ {
--		c := z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return false
--		}
--		if c != s[i] {
--			// Back up to read the fragment of "[CDATA[" again.
--			z.raw.end = z.data.start
--			return false
--		}
--	}
--	z.data.start = z.raw.end
--	brackets := 0
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return true
--		}
--		switch c {
--		case ']':
--			brackets++
--		case '>':
--			if brackets >= 2 {
--				z.data.end = z.raw.end - len("]]>")
--				return true
--			}
--			brackets = 0
--		default:
--			brackets = 0
--		}
--	}
--}
--
--// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
--// case-insensitively matches any element of ss.
--func (z *Tokenizer) startTagIn(ss ...string) bool {
--loop:
--	for _, s := range ss {
--		if z.data.end-z.data.start != len(s) {
--			continue loop
--		}
--		for i := 0; i < len(s); i++ {
--			c := z.buf[z.data.start+i]
--			if 'A' <= c && c <= 'Z' {
--				c += 'a' - 'A'
--			}
--			if c != s[i] {
--				continue loop
--			}
--		}
--		return true
--	}
--	return false
--}
--
--// readStartTag reads the next start tag token. The opening "<a" has already
--// been consumed, where 'a' means anything in [A-Za-z].
--func (z *Tokenizer) readStartTag() TokenType {
--	z.readTag(true)
--	if z.err != nil {
--		return ErrorToken
--	}
--	// Several tags flag the tokenizer's next token as raw.
--	c, raw := z.buf[z.data.start], false
--	if 'A' <= c && c <= 'Z' {
--		c += 'a' - 'A'
--	}
--	switch c {
--	case 'i':
--		raw = z.startTagIn("iframe")
--	case 'n':
--		raw = z.startTagIn("noembed", "noframes", "noscript")
--	case 'p':
--		raw = z.startTagIn("plaintext")
--	case 's':
--		raw = z.startTagIn("script", "style")
--	case 't':
--		raw = z.startTagIn("textarea", "title")
--	case 'x':
--		raw = z.startTagIn("xmp")
--	}
--	if raw {
--		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
--	}
--	// Look for a self-closing token like "<br/>".
--	if z.err == nil && z.buf[z.raw.end-2] == '/' {
--		return SelfClosingTagToken
--	}
--	return StartTagToken
--}
--
--// readTag reads the next tag token and its attributes. If saveAttr, those
--// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
--// The opening "<a" or "</a" has already been consumed, where 'a' means anything
--// in [A-Za-z].
--func (z *Tokenizer) readTag(saveAttr bool) {
--	z.attr = z.attr[:0]
--	z.nAttrReturned = 0
--	// Read the tag name and attribute key/value pairs.
--	z.readTagName()
--	if z.skipWhiteSpace(); z.err != nil {
--		return
--	}
--	for {
--		c := z.readByte()
--		if z.err != nil || c == '>' {
--			break
--		}
--		z.raw.end--
--		z.readTagAttrKey()
--		z.readTagAttrVal()
--		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
--		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
--			z.attr = append(z.attr, z.pendingAttr)
--		}
--		if z.skipWhiteSpace(); z.err != nil {
--			break
--		}
--	}
--}
--
--// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
--// is positioned such that the first byte of the tag name (the "d" in "<div")
--// has already been consumed.
--func (z *Tokenizer) readTagName() {
--	z.data.start = z.raw.end - 1
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			z.data.end = z.raw.end
--			return
--		}
--		switch c {
--		case ' ', '\n', '\r', '\t', '\f':
--			z.data.end = z.raw.end - 1
--			return
--		case '/', '>':
--			z.raw.end--
--			z.data.end = z.raw.end
--			return
--		}
--	}
--}
--
--// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
--// Precondition: z.err == nil.
--func (z *Tokenizer) readTagAttrKey() {
--	z.pendingAttr[0].start = z.raw.end
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			z.pendingAttr[0].end = z.raw.end
--			return
--		}
--		switch c {
--		case ' ', '\n', '\r', '\t', '\f', '/':
--			z.pendingAttr[0].end = z.raw.end - 1
--			return
--		case '=', '>':
--			z.raw.end--
--			z.pendingAttr[0].end = z.raw.end
--			return
--		}
--	}
--}
--
--// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
--func (z *Tokenizer) readTagAttrVal() {
--	z.pendingAttr[1].start = z.raw.end
--	z.pendingAttr[1].end = z.raw.end
--	if z.skipWhiteSpace(); z.err != nil {
--		return
--	}
--	c := z.readByte()
--	if z.err != nil {
--		return
--	}
--	if c != '=' {
--		z.raw.end--
--		return
--	}
--	if z.skipWhiteSpace(); z.err != nil {
--		return
--	}
--	quote := z.readByte()
--	if z.err != nil {
--		return
--	}
--	switch quote {
--	case '>':
--		z.raw.end--
--		return
--
--	case '\'', '"':
--		z.pendingAttr[1].start = z.raw.end
--		for {
--			c := z.readByte()
--			if z.err != nil {
--				z.pendingAttr[1].end = z.raw.end
--				return
--			}
--			if c == quote {
--				z.pendingAttr[1].end = z.raw.end - 1
--				return
--			}
--		}
--
--	default:
--		z.pendingAttr[1].start = z.raw.end - 1
--		for {
--			c := z.readByte()
--			if z.err != nil {
--				z.pendingAttr[1].end = z.raw.end
--				return
--			}
--			switch c {
--			case ' ', '\n', '\r', '\t', '\f':
--				z.pendingAttr[1].end = z.raw.end - 1
--				return
--			case '>':
--				z.raw.end--
--				z.pendingAttr[1].end = z.raw.end
--				return
--			}
--		}
--	}
--}
--
--// Next scans the next token and returns its type.
--func (z *Tokenizer) Next() TokenType {
--	z.raw.start = z.raw.end
--	z.data.start = z.raw.end
--	z.data.end = z.raw.end
--	if z.err != nil {
--		z.tt = ErrorToken
--		return z.tt
--	}
--	if z.rawTag != "" {
--		if z.rawTag == "plaintext" {
--			// Read everything up to EOF.
--			for z.err == nil {
--				z.readByte()
--			}
--			z.data.end = z.raw.end
--			z.textIsRaw = true
--		} else {
--			z.readRawOrRCDATA()
--		}
--		if z.data.end > z.data.start {
--			z.tt = TextToken
--			z.convertNUL = true
--			return z.tt
--		}
--	}
--	z.textIsRaw = false
--	z.convertNUL = false
--
--loop:
--	for {
--		c := z.readByte()
--		if z.err != nil {
--			break loop
--		}
--		if c != '<' {
--			continue loop
--		}
--
--		// Check if the '<' we have just read is part of a tag, comment
--		// or doctype. If not, it's part of the accumulated text token.
--		c = z.readByte()
--		if z.err != nil {
--			break loop
--		}
--		var tokenType TokenType
--		switch {
--		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
--			tokenType = StartTagToken
--		case c == '/':
--			tokenType = EndTagToken
--		case c == '!' || c == '?':
--			// We use CommentToken to mean any of "<!--actual comments-->",
--			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
--			tokenType = CommentToken
--		default:
--			// Reconsume the current character.
--			z.raw.end--
--			continue
--		}
--
--		// We have a non-text token, but we might have accumulated some text
--		// before that. If so, we return the text first, and return the non-
--		// text token on the subsequent call to Next.
--		if x := z.raw.end - len("<a"); z.raw.start < x {
--			z.raw.end = x
--			z.data.end = x
--			z.tt = TextToken
--			return z.tt
--		}
--		switch tokenType {
--		case StartTagToken:
--			z.tt = z.readStartTag()
--			return z.tt
--		case EndTagToken:
--			c = z.readByte()
--			if z.err != nil {
--				break loop
--			}
--			if c == '>' {
--				// "</>" does not generate a token at all. Generate an empty comment
--				// to allow passthrough clients to pick up the data using Raw.
--				// Reset the tokenizer state and start again.
--				z.tt = CommentToken
--				return z.tt
--			}
--			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
--				z.readTag(false)
--				if z.err != nil {
--					z.tt = ErrorToken
--				} else {
--					z.tt = EndTagToken
--				}
--				return z.tt
--			}
--			z.raw.end--
--			z.readUntilCloseAngle()
--			z.tt = CommentToken
--			return z.tt
--		case CommentToken:
--			if c == '!' {
--				z.tt = z.readMarkupDeclaration()
--				return z.tt
--			}
--			z.raw.end--
--			z.readUntilCloseAngle()
--			z.tt = CommentToken
--			return z.tt
--		}
--	}
--	if z.raw.start < z.raw.end {
--		z.data.end = z.raw.end
--		z.tt = TextToken
--		return z.tt
--	}
--	z.tt = ErrorToken
--	return z.tt
--}
--
--// Raw returns the unmodified text of the current token. Calling Next, Token,
--// Text, TagName or TagAttr may change the contents of the returned slice.
--func (z *Tokenizer) Raw() []byte {
--	return z.buf[z.raw.start:z.raw.end]
--}
--
--// convertNewlines converts "\r" and "\r\n" in s to "\n".
--// The conversion happens in place, but the resulting slice may be shorter.
--func convertNewlines(s []byte) []byte {
--	for i, c := range s {
--		if c != '\r' {
--			continue
--		}
--
--		src := i + 1
--		if src >= len(s) || s[src] != '\n' {
--			s[i] = '\n'
--			continue
--		}
--
--		dst := i
--		for src < len(s) {
--			if s[src] == '\r' {
--				if src+1 < len(s) && s[src+1] == '\n' {
--					src++
--				}
--				s[dst] = '\n'
--			} else {
--				s[dst] = s[src]
--			}
--			src++
--			dst++
--		}
--		return s[:dst]
--	}
--	return s
--}
--
--var (
--	nul         = []byte("\x00")
--	replacement = []byte("\ufffd")
--)
--
--// Text returns the unescaped text of a text, comment or doctype token. The
--// contents of the returned slice may change on the next call to Next.
--func (z *Tokenizer) Text() []byte {
--	switch z.tt {
--	case TextToken, CommentToken, DoctypeToken:
--		s := z.buf[z.data.start:z.data.end]
--		z.data.start = z.raw.end
--		z.data.end = z.raw.end
--		s = convertNewlines(s)
--		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
--			s = bytes.Replace(s, nul, replacement, -1)
--		}
--		if !z.textIsRaw {
--			s = unescape(s, false)
--		}
--		return s
--	}
--	return nil
--}
--
--// TagName returns the lower-cased name of a tag token (the `img` out of
--// `<IMG SRC="foo">`) and whether the tag has attributes.
--// The contents of the returned slice may change on the next call to Next.
--func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
--	if z.data.start < z.data.end {
--		switch z.tt {
--		case StartTagToken, EndTagToken, SelfClosingTagToken:
--			s := z.buf[z.data.start:z.data.end]
--			z.data.start = z.raw.end
--			z.data.end = z.raw.end
--			return lower(s), z.nAttrReturned < len(z.attr)
--		}
--	}
--	return nil, false
--}
--
--// TagAttr returns the lower-cased key and unescaped value of the next unparsed
--// attribute for the current tag token and whether there are more attributes.
--// The contents of the returned slices may change on the next call to Next.
--func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
--	if z.nAttrReturned < len(z.attr) {
--		switch z.tt {
--		case StartTagToken, SelfClosingTagToken:
--			x := z.attr[z.nAttrReturned]
--			z.nAttrReturned++
--			key = z.buf[x[0].start:x[0].end]
--			val = z.buf[x[1].start:x[1].end]
--			return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
--		}
--	}
--	return nil, nil, false
--}
--
--// Token returns the next Token. The result's Data and Attr values remain valid
--// after subsequent Next calls.
--func (z *Tokenizer) Token() Token {
--	t := Token{Type: z.tt}
--	switch z.tt {
--	case TextToken, CommentToken, DoctypeToken:
--		t.Data = string(z.Text())
--	case StartTagToken, SelfClosingTagToken, EndTagToken:
--		name, moreAttr := z.TagName()
--		for moreAttr {
--			var key, val []byte
--			key, val, moreAttr = z.TagAttr()
--			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
--		}
--		if a := atom.Lookup(name); a != 0 {
--			t.DataAtom, t.Data = a, a.String()
--		} else {
--			t.DataAtom, t.Data = 0, string(name)
--		}
--	}
--	return t
--}
--
--// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
--// A value of 0 means unlimited.
--func (z *Tokenizer) SetMaxBuf(n int) {
--	z.maxBuf = n
--}
--
--// NewTokenizer returns a new HTML Tokenizer for the given Reader.
--// The input is assumed to be UTF-8 encoded.
--func NewTokenizer(r io.Reader) *Tokenizer {
--	return NewTokenizerFragment(r, "")
--}
--
--// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
--// tokenizing an existing element's InnerHTML fragment. contextTag is that
--// element's tag, such as "div" or "iframe".
--//
--// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
--// for a <p> tag or a <script> tag.
--//
--// The input is assumed to be UTF-8 encoded.
--func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
--	z := &Tokenizer{
--		r:   r,
--		buf: make([]byte, 0, 4096),
--	}
--	if contextTag != "" {
--		switch s := strings.ToLower(contextTag); s {
--		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
--			z.rawTag = s
--		}
--	}
--	return z
--}
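
The token.go removed above implements the streaming tokenizer whose upstream home is golang.org/x/net/html, the import path its own header and vendor directory use. As a minimal sketch, assuming that upstream package and an arbitrary input string, the exported API shown above (NewTokenizer, Next, Token, Err) is typically driven like this:

// Minimal sketch of the tokenizer API defined above, written against the
// upstream golang.org/x/net/html package rather than the deleted vendored
// copy. The input string is an arbitrary example.
package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p class="x">Hello<br/></p>`))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			// io.EOF means the whole input was tokenized; anything else is a real error.
			if err := z.Err(); err != io.EOF {
				fmt.Println("tokenize error:", err)
			}
			return
		}
		t := z.Token() // Data and Attr stay valid after later Next calls.
		fmt.Printf("%-14v %q %v\n", tt, t.Data, t.Attr)
	}
}

Token copies its data into strings, so the result survives later Next calls at the cost of allocation; the lower-level Text, TagName and TagAttr calls documented above avoid that, but return slices that the next Next call may overwrite.
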
-diff --git a/Godeps/_workspace/src/golang.org/x/net/html/token_test.go b/Godeps/_workspace/src/golang.org/x/net/html/token_test.go
-deleted file mode 100644
-index f6988a8..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/html/token_test.go
-+++ /dev/null
-@@ -1,748 +0,0 @@
--// Copyright 2010 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package html
--
--import (
--	"bytes"
--	"io"
--	"io/ioutil"
--	"reflect"
--	"runtime"
--	"strings"
--	"testing"
--)
--
--type tokenTest struct {
--	// A short description of the test case.
--	desc string
--	// The HTML to parse.
--	html string
--	// The string representations of the expected tokens, joined by '$'.
--	golden string
--}
--
--var tokenTests = []tokenTest{
--	{
--		"empty",
--		"",
--		"",
--	},
--	// A single text node. The tokenizer should not break text nodes on whitespace,
--	// nor should it normalize whitespace within a text node.
--	{
--		"text",
--		"foo  bar",
--		"foo  bar",
--	},
--	// An entity.
--	{
--		"entity",
--		"one &lt; two",
--		"one &lt; two",
--	},
--	// A start, self-closing and end tag. The tokenizer does not care if the start
--	// and end tokens don't match; that is the job of the parser.
--	{
--		"tags",
--		"<a>b<c/>d</e>",
--		"<a>$b$<c/>$d$</e>",
--	},
--	// Angle brackets that aren't a tag.
--	{
--		"not a tag #0",
--		"<",
--		"&lt;",
--	},
--	{
--		"not a tag #1",
--		"</",
--		"&lt;/",
--	},
--	{
--		"not a tag #2",
--		"</>",
--		"<!---->",
--	},
--	{
--		"not a tag #3",
--		"a</>b",
--		"a$<!---->$b",
--	},
--	{
--		"not a tag #4",
--		"</ >",
--		"<!-- -->",
--	},
--	{
--		"not a tag #5",
--		"</.",
--		"<!--.-->",
--	},
--	{
--		"not a tag #6",
--		"</.>",
--		"<!--.-->",
--	},
--	{
--		"not a tag #7",
--		"a < b",
--		"a &lt; b",
--	},
--	{
--		"not a tag #8",
--		"<.>",
--		"&lt;.&gt;",
--	},
--	{
--		"not a tag #9",
--		"a<<<b>>>c",
--		"a&lt;&lt;$<b>$&gt;&gt;c",
--	},
--	{
--		"not a tag #10",
--		"if x<0 and y < 0 then x*y>0",
--		"if x&lt;0 and y &lt; 0 then x*y&gt;0",
--	},
--	{
--		"not a tag #11",
--		"<<p>",
--		"&lt;$<p>",
--	},
--	// EOF in a tag name.
--	{
--		"tag name eof #0",
--		"<a",
--		"",
--	},
--	{
--		"tag name eof #1",
--		"<a ",
--		"",
--	},
--	{
--		"tag name eof #2",
--		"a<b",
--		"a",
--	},
--	{
--		"tag name eof #3",
--		"<a><b",
--		"<a>",
--	},
--	{
--		"tag name eof #4",
--		`<a x`,
--		``,
--	},
--	// Some malformed tags that are missing a '>'.
--	{
--		"malformed tag #0",
--		`<p</p>`,
--		`<p< p="">`,
--	},
--	{
--		"malformed tag #1",
--		`<p </p>`,
--		`<p <="" p="">`,
--	},
--	{
--		"malformed tag #2",
--		`<p id`,
--		``,
--	},
--	{
--		"malformed tag #3",
--		`<p id=`,
--		``,
--	},
--	{
--		"malformed tag #4",
--		`<p id=>`,
--		`<p id="">`,
--	},
--	{
--		"malformed tag #5",
--		`<p id=0`,
--		``,
--	},
--	{
--		"malformed tag #6",
--		`<p id=0</p>`,
--		`<p id="0&lt;/p">`,
--	},
--	{
--		"malformed tag #7",
--		`<p id="0</p>`,
--		``,
--	},
--	{
--		"malformed tag #8",
--		`<p id="0"</p>`,
--		`<p id="0" <="" p="">`,
--	},
--	{
--		"malformed tag #9",
--		`<p></p id`,
--		`<p>`,
--	},
--	// Raw text and RCDATA.
--	{
--		"basic raw text",
--		"<script><a></b></script>",
--		"<script>$&lt;a&gt;&lt;/b&gt;$</script>",
--	},
--	{
--		"unfinished script end tag",
--		"<SCRIPT>a</SCR",
--		"<script>$a&lt;/SCR",
--	},
--	{
--		"broken script end tag",
--		"<SCRIPT>a</SCR ipt>",
--		"<script>$a&lt;/SCR ipt&gt;",
--	},
--	{
--		"EOF in script end tag",
--		"<SCRIPT>a</SCRipt",
--		"<script>$a&lt;/SCRipt",
--	},
--	{
--		"scriptx end tag",
--		"<SCRIPT>a</SCRiptx",
--		"<script>$a&lt;/SCRiptx",
--	},
--	{
--		"' ' completes script end tag",
--		"<SCRIPT>a</SCRipt ",
--		"<script>$a",
--	},
--	{
--		"'>' completes script end tag",
--		"<SCRIPT>a</SCRipt>",
--		"<script>$a$</script>",
--	},
--	{
--		"self-closing script end tag",
--		"<SCRIPT>a</SCRipt/>",
--		"<script>$a$</script>",
--	},
--	{
--		"nested script tag",
--		"<SCRIPT>a</SCRipt<script>",
--		"<script>$a&lt;/SCRipt&lt;script&gt;",
--	},
--	{
--		"script end tag after unfinished",
--		"<SCRIPT>a</SCRipt</script>",
--		"<script>$a&lt;/SCRipt$</script>",
--	},
--	{
--		"script/style mismatched tags",
--		"<script>a</style>",
--		"<script>$a&lt;/style&gt;",
--	},
--	{
--		"style element with entity",
--		"<style>&apos;",
--		"<style>$&amp;apos;",
--	},
--	{
--		"textarea with tag",
--		"<textarea><div></textarea>",
--		"<textarea>$&lt;div&gt;$</textarea>",
--	},
--	{
--		"title with tag and entity",
--		"<title><b>K&amp;R C</b></title>",
--		"<title>$&lt;b&gt;K&amp;R C&lt;/b&gt;$</title>",
--	},
--	// DOCTYPE tests.
--	{
--		"Proper DOCTYPE",
--		"<!DOCTYPE html>",
--		"<!DOCTYPE html>",
--	},
--	{
--		"DOCTYPE with no space",
--		"<!doctypehtml>",
--		"<!DOCTYPE html>",
--	},
--	{
--		"DOCTYPE with two spaces",
--		"<!doctype  html>",
--		"<!DOCTYPE html>",
--	},
--	{
--		"looks like DOCTYPE but isn't",
--		"<!DOCUMENT html>",
--		"<!--DOCUMENT html-->",
--	},
--	{
--		"DOCTYPE at EOF",
--		"<!DOCtype",
--		"<!DOCTYPE >",
--	},
--	// XML processing instructions.
--	{
--		"XML processing instruction",
--		"<?xml?>",
--		"<!--?xml?-->",
--	},
--	// Comments.
--	{
--		"comment0",
--		"abc<b><!-- skipme --></b>def",
--		"abc$<b>$<!-- skipme -->$</b>$def",
--	},
--	{
--		"comment1",
--		"a<!-->z",
--		"a$<!---->$z",
--	},
--	{
--		"comment2",
--		"a<!--->z",
--		"a$<!---->$z",
--	},
--	{
--		"comment3",
--		"a<!--x>-->z",
--		"a$<!--x>-->$z",
--	},
--	{
--		"comment4",
--		"a<!--x->-->z",
--		"a$<!--x->-->$z",
--	},
--	{
--		"comment5",
--		"a<!>z",
--		"a$<!---->$z",
--	},
--	{
--		"comment6",
--		"a<!->z",
--		"a$<!----->$z",
--	},
--	{
--		"comment7",
--		"a<!---<>z",
--		"a$<!---<>z-->",
--	},
--	{
--		"comment8",
--		"a<!--z",
--		"a$<!--z-->",
--	},
--	{
--		"comment9",
--		"a<!--z-",
--		"a$<!--z-->",
--	},
--	{
--		"comment10",
--		"a<!--z--",
--		"a$<!--z-->",
--	},
--	{
--		"comment11",
--		"a<!--z---",
--		"a$<!--z--->",
--	},
--	{
--		"comment12",
--		"a<!--z----",
--		"a$<!--z---->",
--	},
--	{
--		"comment13",
--		"a<!--x--!>z",
--		"a$<!--x-->$z",
--	},
--	// An attribute with a backslash.
--	{
--		"backslash",
--		`<p id="a\"b">`,
--		`<p id="a\" b"="">`,
--	},
--	// Entities, tag name and attribute key lower-casing, and whitespace
--	// normalization within a tag.
--	{
--		"tricky",
--		"<p \t\n iD=\"a&quot;B\"  foo=\"bar\"><EM>te&lt;&amp;;xt</em></p>",
--		`<p id="a&#34;B" foo="bar">$<em>$te&lt;&amp;;xt$</em>$</p>`,
--	},
--	// A nonexistent entity. Tokenizing and converting back to a string should
--	// escape the "&" to become "&amp;".
--	{
--		"noSuchEntity",
--		`<a b="c&noSuchEntity;d">&lt;&alsoDoesntExist;&`,
--		`<a b="c&amp;noSuchEntity;d">$&lt;&amp;alsoDoesntExist;&amp;`,
--	},
--	{
--		"entity without semicolon",
--		`&notit;&notin;<a b="q=z&amp=5&notice=hello&not;=world">`,
--		`¬it;∉$<a b="q=z&amp;amp=5&amp;notice=hello¬=world">`,
--	},
--	{
--		"entity with digits",
--		"&frac12;",
--		"½",
--	},
--	// Attribute tests:
--	// http://dev.w3.org/html5/spec/Overview.html#attributes-0
--	{
--		"Empty attribute",
--		`<input disabled FOO>`,
--		`<input disabled="" foo="">`,
--	},
--	{
--		"Empty attribute, whitespace",
--		`<input disabled FOO >`,
--		`<input disabled="" foo="">`,
--	},
--	{
--		"Unquoted attribute value",
--		`<input value=yes FOO=BAR>`,
--		`<input value="yes" foo="BAR">`,
--	},
--	{
--		"Unquoted attribute value, spaces",
--		`<input value = yes FOO = BAR>`,
--		`<input value="yes" foo="BAR">`,
--	},
--	{
--		"Unquoted attribute value, trailing space",
--		`<input value=yes FOO=BAR >`,
--		`<input value="yes" foo="BAR">`,
--	},
--	{
--		"Single-quoted attribute value",
--		`<input value='yes' FOO='BAR'>`,
--		`<input value="yes" foo="BAR">`,
--	},
--	{
--		"Single-quoted attribute value, trailing space",
--		`<input value='yes' FOO='BAR' >`,
--		`<input value="yes" foo="BAR">`,
--	},
--	{
--		"Double-quoted attribute value",
--		`<input value="I'm an attribute" FOO="BAR">`,
--		`<input value="I&#39;m an attribute" foo="BAR">`,
--	},
--	{
--		"Attribute name characters",
--		`<meta http-equiv="content-type">`,
--		`<meta http-equiv="content-type">`,
--	},
--	{
--		"Mixed attributes",
--		`a<P V="0 1" w='2' X=3 y>z`,
--		`a$<p v="0 1" w="2" x="3" y="">$z`,
--	},
--	{
--		"Attributes with a solitary single quote",
--		`<p id=can't><p id=won't>`,
--		`<p id="can&#39;t">$<p id="won&#39;t">`,
--	},
--}
--
--func TestTokenizer(t *testing.T) {
--loop:
--	for _, tt := range tokenTests {
--		z := NewTokenizer(strings.NewReader(tt.html))
--		if tt.golden != "" {
--			for i, s := range strings.Split(tt.golden, "$") {
--				if z.Next() == ErrorToken {
--					t.Errorf("%s token %d: want %q got error %v", tt.desc, i, s, z.Err())
--					continue loop
--				}
--				actual := z.Token().String()
--				if s != actual {
--					t.Errorf("%s token %d: want %q got %q", tt.desc, i, s, actual)
--					continue loop
--				}
--			}
--		}
--		z.Next()
--		if z.Err() != io.EOF {
--			t.Errorf("%s: want EOF got %q", tt.desc, z.Err())
--		}
--	}
--}
--
--func TestMaxBuffer(t *testing.T) {
--	// Exceeding the maximum buffer size generates ErrBufferExceeded.
--	z := NewTokenizer(strings.NewReader("<" + strings.Repeat("t", 10)))
--	z.SetMaxBuf(5)
--	tt := z.Next()
--	if got, want := tt, ErrorToken; got != want {
--		t.Fatalf("token type: got: %v want: %v", got, want)
--	}
--	if got, want := z.Err(), ErrBufferExceeded; got != want {
--		t.Errorf("error type: got: %v want: %v", got, want)
--	}
--	if got, want := string(z.Raw()), "<tttt"; got != want {
--		t.Fatalf("buffered before overflow: got: %q want: %q", got, want)
--	}
--}
--
--func TestMaxBufferReconstruction(t *testing.T) {
--	// Exceeding the maximum buffer size at any point while tokenizing permits
--	// reconstructing the original input.
--tests:
--	for _, test := range tokenTests {
--		for maxBuf := 1; ; maxBuf++ {
--			r := strings.NewReader(test.html)
--			z := NewTokenizer(r)
--			z.SetMaxBuf(maxBuf)
--			var tokenized bytes.Buffer
--			for {
--				tt := z.Next()
--				tokenized.Write(z.Raw())
--				if tt == ErrorToken {
--					if err := z.Err(); err != io.EOF && err != ErrBufferExceeded {
--						t.Errorf("%s: unexpected error: %v", test.desc, err)
--					}
--					break
--				}
--			}
--			// Anything tokenized along with untokenized input or data left in the reader.
--			assembled, err := ioutil.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r))
--			if err != nil {
--				t.Errorf("%s: ReadAll: %v", test.desc, err)
--				continue tests
--			}
--			if got, want := string(assembled), test.html; got != want {
--				t.Errorf("%s: reassembled html:\n got: %q\nwant: %q", test.desc, got, want)
--				continue tests
--			}
--			// EOF indicates that we completed tokenization and hence found the max
--			// maxBuf that generates ErrBufferExceeded, so continue to the next test.
--			if z.Err() == io.EOF {
--				break
--			}
--		} // buffer sizes
--	} // tests
--}
--
--func TestPassthrough(t *testing.T) {
--	// Accumulating the raw output for each parse event should reconstruct the
--	// original input.
--	for _, test := range tokenTests {
--		z := NewTokenizer(strings.NewReader(test.html))
--		var parsed bytes.Buffer
--		for {
--			tt := z.Next()
--			parsed.Write(z.Raw())
--			if tt == ErrorToken {
--				break
--			}
--		}
--		if got, want := parsed.String(), test.html; got != want {
--			t.Errorf("%s: parsed output:\n got: %q\nwant: %q", test.desc, got, want)
--		}
--	}
--}
--
--func TestBufAPI(t *testing.T) {
--	s := "0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9"
--	z := NewTokenizer(bytes.NewBufferString(s))
--	var result bytes.Buffer
--	depth := 0
--loop:
--	for {
--		tt := z.Next()
--		switch tt {
--		case ErrorToken:
--			if z.Err() != io.EOF {
--				t.Error(z.Err())
--			}
--			break loop
--		case TextToken:
--			if depth > 0 {
--				result.Write(z.Text())
--			}
--		case StartTagToken, EndTagToken:
--			tn, _ := z.TagName()
--			if len(tn) == 1 && tn[0] == 'a' {
--				if tt == StartTagToken {
--					depth++
--				} else {
--					depth--
--				}
--			}
--		}
--	}
--	u := "14567"
--	v := string(result.Bytes())
--	if u != v {
--		t.Errorf("TestBufAPI: want %q got %q", u, v)
--	}
--}
--
--func TestConvertNewlines(t *testing.T) {
--	testCases := map[string]string{
--		"Mac\rDOS\r\nUnix\n":    "Mac\nDOS\nUnix\n",
--		"Unix\nMac\rDOS\r\n":    "Unix\nMac\nDOS\n",
--		"DOS\r\nDOS\r\nDOS\r\n": "DOS\nDOS\nDOS\n",
--		"":         "",
--		"\n":       "\n",
--		"\n\r":     "\n\n",
--		"\r":       "\n",
--		"\r\n":     "\n",
--		"\r\n\n":   "\n\n",
--		"\r\n\r":   "\n\n",
--		"\r\n\r\n": "\n\n",
--		"\r\r":     "\n\n",
--		"\r\r\n":   "\n\n",
--		"\r\r\n\n": "\n\n\n",
--		"\r\r\r\n": "\n\n\n",
--		"\r \n":    "\n \n",
--		"xyz":      "xyz",
--	}
--	for in, want := range testCases {
--		if got := string(convertNewlines([]byte(in))); got != want {
--			t.Errorf("input %q: got %q, want %q", in, got, want)
--		}
--	}
--}
--
--func TestReaderEdgeCases(t *testing.T) {
--	const s = "<p>An io.Reader can return (0, nil) or (n, io.EOF).</p>"
--	testCases := []io.Reader{
--		&zeroOneByteReader{s: s},
--		&eofStringsReader{s: s},
--		&stuckReader{},
--	}
--	for i, tc := range testCases {
--		got := []TokenType{}
--		z := NewTokenizer(tc)
--		for {
--			tt := z.Next()
--			if tt == ErrorToken {
--				break
--			}
--			got = append(got, tt)
--		}
--		if err := z.Err(); err != nil && err != io.EOF {
--			if err != io.ErrNoProgress {
--				t.Errorf("i=%d: %v", i, err)
--			}
--			continue
--		}
--		want := []TokenType{
--			StartTagToken,
--			TextToken,
--			EndTagToken,
--		}
--		if !reflect.DeepEqual(got, want) {
--			t.Errorf("i=%d: got %v, want %v", i, got, want)
--			continue
--		}
--	}
--}
--
--// zeroOneByteReader is like a strings.Reader that alternates between
--// returning 0 bytes and 1 byte at a time.
--type zeroOneByteReader struct {
--	s string
--	n int
--}
--
--func (r *zeroOneByteReader) Read(p []byte) (int, error) {
--	if len(p) == 0 {
--		return 0, nil
--	}
--	if len(r.s) == 0 {
--		return 0, io.EOF
--	}
--	r.n++
--	if r.n%2 != 0 {
--		return 0, nil
--	}
--	p[0], r.s = r.s[0], r.s[1:]
--	return 1, nil
--}
--
--// eofStringsReader is like a strings.Reader but can return an (n, err) where
--// n > 0 && err != nil.
--type eofStringsReader struct {
--	s string
--}
--
--func (r *eofStringsReader) Read(p []byte) (int, error) {
--	n := copy(p, r.s)
--	r.s = r.s[n:]
--	if r.s != "" {
--		return n, nil
--	}
--	return n, io.EOF
--}
--
--// stuckReader is an io.Reader that always returns no data and no error.
--type stuckReader struct{}
--
--func (*stuckReader) Read(p []byte) (int, error) {
--	return 0, nil
--}
--
--const (
--	rawLevel = iota
--	lowLevel
--	highLevel
--)
--
--func benchmarkTokenizer(b *testing.B, level int) {
--	buf, err := ioutil.ReadFile("testdata/go1.html")
--	if err != nil {
--		b.Fatalf("could not read testdata/go1.html: %v", err)
--	}
--	b.SetBytes(int64(len(buf)))
--	runtime.GC()
--	b.ReportAllocs()
--	b.ResetTimer()
--	for i := 0; i < b.N; i++ {
--		z := NewTokenizer(bytes.NewBuffer(buf))
--		for {
--			tt := z.Next()
--			if tt == ErrorToken {
--				if err := z.Err(); err != nil && err != io.EOF {
--					b.Fatalf("tokenizer error: %v", err)
--				}
--				break
--			}
--			switch level {
--			case rawLevel:
--				// Calling z.Raw just returns the raw bytes of the token. It does
--				// not unescape &lt; to <, or lower-case tag names and attribute keys.
--				z.Raw()
--			case lowLevel:
--				// Calling z.Text, z.TagName and z.TagAttr returns []byte values
--				// whose contents may change on the next call to z.Next.
--				switch tt {
--				case TextToken, CommentToken, DoctypeToken:
--					z.Text()
--				case StartTagToken, SelfClosingTagToken:
--					_, more := z.TagName()
--					for more {
--						_, _, more = z.TagAttr()
--					}
--				case EndTagToken:
--					z.TagName()
--				}
--			case highLevel:
--				// Calling z.Token converts []byte values to strings whose validity
--				// extends beyond the next call to z.Next.
--				z.Token()
--			}
--		}
--	}
--}
--
--func BenchmarkRawLevelTokenizer(b *testing.B)  { benchmarkTokenizer(b, rawLevel) }
--func BenchmarkLowLevelTokenizer(b *testing.B)  { benchmarkTokenizer(b, lowLevel) }
--func BenchmarkHighLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, highLevel) }
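Aside from the patch itself, the benchmark above exercises the tokenizer at three levels: Raw, the low-level Text/TagName/TagAttr calls, and the high-level Token method. A minimal, purely illustrative sketch of the low-level API (assuming the golang.org/x/net/html import path used for the other vendored x/net packages in this tree) might look like:

	package main

	import (
		"fmt"
		"io"
		"strings"

		"golang.org/x/net/html"
	)

	func main() {
		z := html.NewTokenizer(strings.NewReader(`<p class="x">hi</p>`))
		for {
			tt := z.Next()
			if tt == html.ErrorToken {
				// io.EOF marks the normal end of input; anything else is a real error.
				if err := z.Err(); err != io.EOF {
					fmt.Println("tokenize error:", err)
				}
				return
			}
			switch tt {
			case html.StartTagToken, html.SelfClosingTagToken:
				name, more := z.TagName()
				fmt.Printf("start tag %s\n", name)
				for more {
					var key, val []byte
					key, val, more = z.TagAttr()
					fmt.Printf("  attr %s=%q\n", key, val)
				}
			case html.TextToken:
				fmt.Printf("text %q\n", z.Text())
			case html.EndTagToken:
				name, _ := z.TagName()
				fmt.Printf("end tag %s\n", name)
			}
		}
	}

As the comments in the benchmark note, the returned []byte slices are only valid until the next call to z.Next; Token() copies them into strings when longer-lived values are needed.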
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/client.go b/Godeps/_workspace/src/golang.org/x/net/websocket/client.go
-deleted file mode 100644
-index a861bb9..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/client.go
-+++ /dev/null
-@@ -1,98 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket
--
--import (
--	"bufio"
--	"crypto/tls"
--	"io"
--	"net"
--	"net/http"
--	"net/url"
--)
--
--// DialError is an error that occurs while dialling a websocket server.
--type DialError struct {
--	*Config
--	Err error
--}
--
--func (e *DialError) Error() string {
--	return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
--}
--
--// NewConfig creates a new WebSocket config for client connection.
--func NewConfig(server, origin string) (config *Config, err error) {
--	config = new(Config)
--	config.Version = ProtocolVersionHybi13
--	config.Location, err = url.ParseRequestURI(server)
--	if err != nil {
--		return
--	}
--	config.Origin, err = url.ParseRequestURI(origin)
--	if err != nil {
--		return
--	}
--	config.Header = http.Header(make(map[string][]string))
--	return
--}
--
--// NewClient creates a new WebSocket client connection over rwc.
--func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
--	br := bufio.NewReader(rwc)
--	bw := bufio.NewWriter(rwc)
--	err = hybiClientHandshake(config, br, bw)
--	if err != nil {
--		return
--	}
--	buf := bufio.NewReadWriter(br, bw)
--	ws = newHybiClientConn(config, buf, rwc)
--	return
--}
--
--// Dial opens a new client connection to a WebSocket.
--func Dial(url_, protocol, origin string) (ws *Conn, err error) {
--	config, err := NewConfig(url_, origin)
--	if err != nil {
--		return nil, err
--	}
--	if protocol != "" {
--		config.Protocol = []string{protocol}
--	}
--	return DialConfig(config)
--}
--
--// DialConfig opens a new client connection to a WebSocket with a config.
--func DialConfig(config *Config) (ws *Conn, err error) {
--	var client net.Conn
--	if config.Location == nil {
--		return nil, &DialError{config, ErrBadWebSocketLocation}
--	}
--	if config.Origin == nil {
--		return nil, &DialError{config, ErrBadWebSocketOrigin}
--	}
--	switch config.Location.Scheme {
--	case "ws":
--		client, err = net.Dial("tcp", config.Location.Host)
--
--	case "wss":
--		client, err = tls.Dial("tcp", config.Location.Host, config.TlsConfig)
--
--	default:
--		err = ErrBadScheme
--	}
--	if err != nil {
--		goto Error
--	}
--
--	ws, err = NewClient(config, client)
--	if err != nil {
--		goto Error
--	}
--	return
--
--Error:
--	return nil, &DialError{config, err}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/exampledial_test.go b/Godeps/_workspace/src/golang.org/x/net/websocket/exampledial_test.go
-deleted file mode 100644
-index 72bb9d4..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/exampledial_test.go
-+++ /dev/null
-@@ -1,31 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket_test
--
--import (
--	"fmt"
--	"log"
--
--	"golang.org/x/net/websocket"
--)
--
--// This example demonstrates a trivial client.
--func ExampleDial() {
--	origin := "http://localhost/"
--	url := "ws://localhost:12345/ws"
--	ws, err := websocket.Dial(url, "", origin)
--	if err != nil {
--		log.Fatal(err)
--	}
--	if _, err := ws.Write([]byte("hello, world!\n")); err != nil {
--		log.Fatal(err)
--	}
--	var msg = make([]byte, 512)
--	var n int
--	if n, err = ws.Read(msg); err != nil {
--		log.Fatal(err)
--	}
--	fmt.Printf("Received: %s.\n", msg[:n])
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/examplehandler_test.go b/Godeps/_workspace/src/golang.org/x/net/websocket/examplehandler_test.go
-deleted file mode 100644
-index f22a98f..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/examplehandler_test.go
-+++ /dev/null
-@@ -1,26 +0,0 @@
--// Copyright 2012 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket_test
--
--import (
--	"io"
--	"net/http"
--
--	"golang.org/x/net/websocket"
--)
--
--// Echo the data received on the WebSocket.
--func EchoServer(ws *websocket.Conn) {
--	io.Copy(ws, ws)
--}
--
--// This example demonstrates a trivial echo server.
--func ExampleHandler() {
--	http.Handle("/echo", websocket.Handler(EchoServer))
--	err := http.ListenAndServe(":12345", nil)
--	if err != nil {
--		panic("ListenAndServe: " + err.Error())
--	}
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/hybi.go b/Godeps/_workspace/src/golang.org/x/net/websocket/hybi.go
-deleted file mode 100644
-index f8c0b2e..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/hybi.go
-+++ /dev/null
-@@ -1,564 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket
--
--// This file implements a protocol of hybi draft.
--// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
--
--import (
--	"bufio"
--	"bytes"
--	"crypto/rand"
--	"crypto/sha1"
--	"encoding/base64"
--	"encoding/binary"
--	"fmt"
--	"io"
--	"io/ioutil"
--	"net/http"
--	"net/url"
--	"strings"
--)
--
--const (
--	websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
--
--	closeStatusNormal            = 1000
--	closeStatusGoingAway         = 1001
--	closeStatusProtocolError     = 1002
--	closeStatusUnsupportedData   = 1003
--	closeStatusFrameTooLarge     = 1004
--	closeStatusNoStatusRcvd      = 1005
--	closeStatusAbnormalClosure   = 1006
--	closeStatusBadMessageData    = 1007
--	closeStatusPolicyViolation   = 1008
--	closeStatusTooBigData        = 1009
--	closeStatusExtensionMismatch = 1010
--
--	maxControlFramePayloadLength = 125
--)
--
--var (
--	ErrBadMaskingKey         = &ProtocolError{"bad masking key"}
--	ErrBadPongMessage        = &ProtocolError{"bad pong message"}
--	ErrBadClosingStatus      = &ProtocolError{"bad closing status"}
--	ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
--	ErrNotImplemented        = &ProtocolError{"not implemented"}
--
--	handshakeHeader = map[string]bool{
--		"Host":                   true,
--		"Upgrade":                true,
--		"Connection":             true,
--		"Sec-Websocket-Key":      true,
--		"Sec-Websocket-Origin":   true,
--		"Sec-Websocket-Version":  true,
--		"Sec-Websocket-Protocol": true,
--		"Sec-Websocket-Accept":   true,
--	}
--)
--
--// A hybiFrameHeader is a frame header as defined in hybi draft.
--type hybiFrameHeader struct {
--	Fin        bool
--	Rsv        [3]bool
--	OpCode     byte
--	Length     int64
--	MaskingKey []byte
--
--	data *bytes.Buffer
--}
--
--// A hybiFrameReader is a reader for hybi frame.
--type hybiFrameReader struct {
--	reader io.Reader
--
--	header hybiFrameHeader
--	pos    int64
--	length int
--}
--
--func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
--	n, err = frame.reader.Read(msg)
--	if err != nil {
--		return 0, err
--	}
--	if frame.header.MaskingKey != nil {
--		for i := 0; i < n; i++ {
--			msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
--			frame.pos++
--		}
--	}
--	return n, err
--}
--
--func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
--
--func (frame *hybiFrameReader) HeaderReader() io.Reader {
--	if frame.header.data == nil {
--		return nil
--	}
--	if frame.header.data.Len() == 0 {
--		return nil
--	}
--	return frame.header.data
--}
--
--func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
--
--func (frame *hybiFrameReader) Len() (n int) { return frame.length }
--
--// A hybiFrameReaderFactory creates a new frame reader based on its frame type.
--type hybiFrameReaderFactory struct {
--	*bufio.Reader
--}
--
--// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
--// See Section 5.2 Base Framing protocol for detail.
--// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
--func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
--	hybiFrame := new(hybiFrameReader)
--	frame = hybiFrame
--	var header []byte
--	var b byte
--	// First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
--	b, err = buf.ReadByte()
--	if err != nil {
--		return
--	}
--	header = append(header, b)
--	hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
--	for i := 0; i < 3; i++ {
--		j := uint(6 - i)
--		hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
--	}
--	hybiFrame.header.OpCode = header[0] & 0x0f
--
--	// Second byte. Mask/Payload len(7bits)
--	b, err = buf.ReadByte()
--	if err != nil {
--		return
--	}
--	header = append(header, b)
--	mask := (b & 0x80) != 0
--	b &= 0x7f
--	lengthFields := 0
--	switch {
--	case b <= 125: // Payload length 7bits.
--		hybiFrame.header.Length = int64(b)
--	case b == 126: // Payload length 7+16bits
--		lengthFields = 2
--	case b == 127: // Payload length 7+64bits
--		lengthFields = 8
--	}
--	for i := 0; i < lengthFields; i++ {
--		b, err = buf.ReadByte()
--		if err != nil {
--			return
--		}
--		header = append(header, b)
--		hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
--	}
--	if mask {
--		// Masking key. 4 bytes.
--		for i := 0; i < 4; i++ {
--			b, err = buf.ReadByte()
--			if err != nil {
--				return
--			}
--			header = append(header, b)
--			hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
--		}
--	}
--	hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
--	hybiFrame.header.data = bytes.NewBuffer(header)
--	hybiFrame.length = len(header) + int(hybiFrame.header.Length)
--	return
--}
--
--// A hybiFrameWriter is a writer for a hybi frame.
--type hybiFrameWriter struct {
--	writer *bufio.Writer
--
--	header *hybiFrameHeader
--}
--
--func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
--	var header []byte
--	var b byte
--	if frame.header.Fin {
--		b |= 0x80
--	}
--	for i := 0; i < 3; i++ {
--		if frame.header.Rsv[i] {
--			j := uint(6 - i)
--			b |= 1 << j
--		}
--	}
--	b |= frame.header.OpCode
--	header = append(header, b)
--	if frame.header.MaskingKey != nil {
--		b = 0x80
--	} else {
--		b = 0
--	}
--	lengthFields := 0
--	length := len(msg)
--	switch {
--	case length <= 125:
--		b |= byte(length)
--	case length < 65536:
--		b |= 126
--		lengthFields = 2
--	default:
--		b |= 127
--		lengthFields = 8
--	}
--	header = append(header, b)
--	for i := 0; i < lengthFields; i++ {
--		j := uint((lengthFields - i - 1) * 8)
--		b = byte((length >> j) & 0xff)
--		header = append(header, b)
--	}
--	if frame.header.MaskingKey != nil {
--		if len(frame.header.MaskingKey) != 4 {
--			return 0, ErrBadMaskingKey
--		}
--		header = append(header, frame.header.MaskingKey...)
--		frame.writer.Write(header)
--		data := make([]byte, length)
--		for i := range data {
--			data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
--		}
--		frame.writer.Write(data)
--		err = frame.writer.Flush()
--		return length, err
--	}
--	frame.writer.Write(header)
--	frame.writer.Write(msg)
--	err = frame.writer.Flush()
--	return length, err
--}
--
--func (frame *hybiFrameWriter) Close() error { return nil }
--
--type hybiFrameWriterFactory struct {
--	*bufio.Writer
--	needMaskingKey bool
--}
--
--func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
--	if buf.needMaskingKey {
--		frameHeader.MaskingKey, err = generateMaskingKey()
--		if err != nil {
--			return nil, err
--		}
--	}
--	return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
--}
--
--type hybiFrameHandler struct {
--	conn        *Conn
--	payloadType byte
--}
--
--func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (r frameReader, err error) {
--	if handler.conn.IsServerConn() {
--		// The client MUST mask all frames sent to the server.
--		if frame.(*hybiFrameReader).header.MaskingKey == nil {
--			handler.WriteClose(closeStatusProtocolError)
--			return nil, io.EOF
--		}
--	} else {
--		// The server MUST NOT mask any frames.
--		if frame.(*hybiFrameReader).header.MaskingKey != nil {
--			handler.WriteClose(closeStatusProtocolError)
--			return nil, io.EOF
--		}
--	}
--	if header := frame.HeaderReader(); header != nil {
--		io.Copy(ioutil.Discard, header)
--	}
--	switch frame.PayloadType() {
--	case ContinuationFrame:
--		frame.(*hybiFrameReader).header.OpCode = handler.payloadType
--	case TextFrame, BinaryFrame:
--		handler.payloadType = frame.PayloadType()
--	case CloseFrame:
--		return nil, io.EOF
--	case PingFrame:
--		pingMsg := make([]byte, maxControlFramePayloadLength)
--		n, err := io.ReadFull(frame, pingMsg)
--		if err != nil && err != io.ErrUnexpectedEOF {
--			return nil, err
--		}
--		io.Copy(ioutil.Discard, frame)
--		n, err = handler.WritePong(pingMsg[:n])
--		if err != nil {
--			return nil, err
--		}
--		return nil, nil
--	case PongFrame:
--		return nil, ErrNotImplemented
--	}
--	return frame, nil
--}
--
--func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
--	handler.conn.wio.Lock()
--	defer handler.conn.wio.Unlock()
--	w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
--	if err != nil {
--		return err
--	}
--	msg := make([]byte, 2)
--	binary.BigEndian.PutUint16(msg, uint16(status))
--	_, err = w.Write(msg)
--	w.Close()
--	return err
--}
--
--func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
--	handler.conn.wio.Lock()
--	defer handler.conn.wio.Unlock()
--	w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
--	if err != nil {
--		return 0, err
--	}
--	n, err = w.Write(msg)
--	w.Close()
--	return n, err
--}
--
--// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
--func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
--	if buf == nil {
--		br := bufio.NewReader(rwc)
--		bw := bufio.NewWriter(rwc)
--		buf = bufio.NewReadWriter(br, bw)
--	}
--	ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
--		frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
--		frameWriterFactory: hybiFrameWriterFactory{
--			buf.Writer, request == nil},
--		PayloadType:        TextFrame,
--		defaultCloseStatus: closeStatusNormal}
--	ws.frameHandler = &hybiFrameHandler{conn: ws}
--	return ws
--}
--
--// generateMaskingKey generates a masking key for a frame.
--func generateMaskingKey() (maskingKey []byte, err error) {
--	maskingKey = make([]byte, 4)
--	if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
--		return
--	}
--	return
--}
--
--// generateNonce generates a nonce consisting of a randomly selected 16-byte
--// value that has been base64-encoded.
--func generateNonce() (nonce []byte) {
--	key := make([]byte, 16)
--	if _, err := io.ReadFull(rand.Reader, key); err != nil {
--		panic(err)
--	}
--	nonce = make([]byte, 24)
--	base64.StdEncoding.Encode(nonce, key)
--	return
--}
--
--// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
--// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
--func getNonceAccept(nonce []byte) (expected []byte, err error) {
--	h := sha1.New()
--	if _, err = h.Write(nonce); err != nil {
--		return
--	}
--	if _, err = h.Write([]byte(websocketGUID)); err != nil {
--		return
--	}
--	expected = make([]byte, 28)
--	base64.StdEncoding.Encode(expected, h.Sum(nil))
--	return
--}
--
--// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
--func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
--	bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
--
--	bw.WriteString("Host: " + config.Location.Host + "\r\n")
--	bw.WriteString("Upgrade: websocket\r\n")
--	bw.WriteString("Connection: Upgrade\r\n")
--	nonce := generateNonce()
--	if config.handshakeData != nil {
--		nonce = []byte(config.handshakeData["key"])
--	}
--	bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
--	bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
--
--	if config.Version != ProtocolVersionHybi13 {
--		return ErrBadProtocolVersion
--	}
--
--	bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
--	if len(config.Protocol) > 0 {
--		bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
--	}
--	// TODO(ukai): send Sec-WebSocket-Extensions.
--	err = config.Header.WriteSubset(bw, handshakeHeader)
--	if err != nil {
--		return err
--	}
--
--	bw.WriteString("\r\n")
--	if err = bw.Flush(); err != nil {
--		return err
--	}
--
--	resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
--	if err != nil {
--		return err
--	}
--	if resp.StatusCode != 101 {
--		return ErrBadStatus
--	}
--	if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
--		strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
--		return ErrBadUpgrade
--	}
--	expectedAccept, err := getNonceAccept(nonce)
--	if err != nil {
--		return err
--	}
--	if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
--		return ErrChallengeResponse
--	}
--	if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
--		return ErrUnsupportedExtensions
--	}
--	offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
--	if offeredProtocol != "" {
--		protocolMatched := false
--		for i := 0; i < len(config.Protocol); i++ {
--			if config.Protocol[i] == offeredProtocol {
--				protocolMatched = true
--				break
--			}
--		}
--		if !protocolMatched {
--			return ErrBadWebSocketProtocol
--		}
--		config.Protocol = []string{offeredProtocol}
--	}
--
--	return nil
--}
--
--// newHybiClientConn creates a client WebSocket connection after handshake.
--func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
--	return newHybiConn(config, buf, rwc, nil)
--}
--
--// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
--type hybiServerHandshaker struct {
--	*Config
--	accept []byte
--}
--
--func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
--	c.Version = ProtocolVersionHybi13
--	if req.Method != "GET" {
--		return http.StatusMethodNotAllowed, ErrBadRequestMethod
--	}
--	// HTTP version can be safely ignored.
--
--	if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
--		!strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
--		return http.StatusBadRequest, ErrNotWebSocket
--	}
--
--	key := req.Header.Get("Sec-Websocket-Key")
--	if key == "" {
--		return http.StatusBadRequest, ErrChallengeResponse
--	}
--	version := req.Header.Get("Sec-Websocket-Version")
--	switch version {
--	case "13":
--		c.Version = ProtocolVersionHybi13
--	default:
--		return http.StatusBadRequest, ErrBadWebSocketVersion
--	}
--	var scheme string
--	if req.TLS != nil {
--		scheme = "wss"
--	} else {
--		scheme = "ws"
--	}
--	c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
--	if err != nil {
--		return http.StatusBadRequest, err
--	}
--	protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
--	if protocol != "" {
--		protocols := strings.Split(protocol, ",")
--		for i := 0; i < len(protocols); i++ {
--			c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
--		}
--	}
--	c.accept, err = getNonceAccept([]byte(key))
--	if err != nil {
--		return http.StatusInternalServerError, err
--	}
--	return http.StatusSwitchingProtocols, nil
--}
--
--// Origin parses Origin header in "req".
--// If origin is "null", returns (nil, nil).
--func Origin(config *Config, req *http.Request) (*url.URL, error) {
--	var origin string
--	switch config.Version {
--	case ProtocolVersionHybi13:
--		origin = req.Header.Get("Origin")
--	}
--	if origin == "null" {
--		return nil, nil
--	}
--	return url.ParseRequestURI(origin)
--}
--
--func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
--	if len(c.Protocol) > 0 {
--		if len(c.Protocol) != 1 {
--			// You need to choose a Protocol in the Handshake func in Server.
--			return ErrBadWebSocketProtocol
--		}
--	}
--	buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
--	buf.WriteString("Upgrade: websocket\r\n")
--	buf.WriteString("Connection: Upgrade\r\n")
--	buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
--	if len(c.Protocol) > 0 {
--		buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
--	}
--	// TODO(ukai): send Sec-WebSocket-Extensions.
--	if c.Header != nil {
--		err := c.Header.WriteSubset(buf, handshakeHeader)
--		if err != nil {
--			return err
--		}
--	}
--	buf.WriteString("\r\n")
--	return buf.Flush()
--}
--
--func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
--	return newHybiServerConn(c.Config, buf, rwc, request)
--}
--
--// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
--func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
--	return newHybiConn(config, buf, rwc, request)
--}
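For context only (not part of the deleted file), the masking applied in hybiFrameReader.Read and hybiFrameWriter.Write above is a byte-wise XOR with the 4-byte masking key. A small self-contained sketch using the same key and payload as the masked-frame test in hybi_test.go below reproduces the expected bytes:

	package main

	import "fmt"

	// mask XORs msg with a 4-byte masking key, the same transform the hybi
	// frame reader and writer apply to masked payloads.
	func mask(msg, key []byte) []byte {
		out := make([]byte, len(msg))
		for i := range msg {
			out[i] = msg[i] ^ key[i%4]
		}
		return out
	}

	func main() {
		key := []byte{0xcc, 0x55, 0x80, 0x20}
		masked := mask([]byte("hello"), key)
		fmt.Printf("% x\n", masked) // a4 30 ec 4c a3
		fmt.Printf("%s\n", mask(masked, key)) // hello
	}

Because the transform is its own inverse, the same loop serves both for masking outgoing frames and unmasking incoming ones.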
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/hybi_test.go b/Godeps/_workspace/src/golang.org/x/net/websocket/hybi_test.go
-deleted file mode 100644
-index d6a1910..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/hybi_test.go
-+++ /dev/null
-@@ -1,590 +0,0 @@
--// Copyright 2011 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket
--
--import (
--	"bufio"
--	"bytes"
--	"fmt"
--	"io"
--	"net/http"
--	"net/url"
--	"strings"
--	"testing"
--)
--
--// Test the getNonceAccept function with values in
--// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
--func TestSecWebSocketAccept(t *testing.T) {
--	nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==")
--	expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")
--	accept, err := getNonceAccept(nonce)
--	if err != nil {
--		t.Errorf("getNonceAccept: returned error %v", err)
--		return
--	}
--	if !bytes.Equal(expected, accept) {
--		t.Errorf("getNonceAccept: expected %q got %q", expected, accept)
--	}
--}
--
--func TestHybiClientHandshake(t *testing.T) {
--	b := bytes.NewBuffer([]byte{})
--	bw := bufio.NewWriter(b)
--	br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols
--Upgrade: websocket
--Connection: Upgrade
--Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
--Sec-WebSocket-Protocol: chat
--
--`))
--	var err error
--	config := new(Config)
--	config.Location, err = url.ParseRequestURI("ws://server.example.com/chat")
--	if err != nil {
--		t.Fatal("location url", err)
--	}
--	config.Origin, err = url.ParseRequestURI("http://example.com")
--	if err != nil {
--		t.Fatal("origin url", err)
--	}
--	config.Protocol = append(config.Protocol, "chat")
--	config.Protocol = append(config.Protocol, "superchat")
--	config.Version = ProtocolVersionHybi13
--
--	config.handshakeData = map[string]string{
--		"key": "dGhlIHNhbXBsZSBub25jZQ==",
--	}
--	err = hybiClientHandshake(config, br, bw)
--	if err != nil {
--		t.Errorf("handshake failed: %v", err)
--	}
--	req, err := http.ReadRequest(bufio.NewReader(b))
--	if err != nil {
--		t.Fatalf("read request: %v", err)
--	}
--	if req.Method != "GET" {
--		t.Errorf("request method expected GET, but got %q", req.Method)
--	}
--	if req.URL.Path != "/chat" {
--		t.Errorf("request path expected /chat, but got %q", req.URL.Path)
--	}
--	if req.Proto != "HTTP/1.1" {
--		t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto)
--	}
--	if req.Host != "server.example.com" {
--		t.Errorf("request Host expected server.example.com, but got %v", req.Host)
--	}
--	var expectedHeader = map[string]string{
--		"Connection":             "Upgrade",
--		"Upgrade":                "websocket",
--		"Sec-Websocket-Key":      config.handshakeData["key"],
--		"Origin":                 config.Origin.String(),
--		"Sec-Websocket-Protocol": "chat, superchat",
--		"Sec-Websocket-Version":  fmt.Sprintf("%d", ProtocolVersionHybi13),
--	}
--	for k, v := range expectedHeader {
--		if req.Header.Get(k) != v {
--			t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k)))
--		}
--	}
--}
--
--func TestHybiClientHandshakeWithHeader(t *testing.T) {
--	b := bytes.NewBuffer([]byte{})
--	bw := bufio.NewWriter(b)
--	br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols
--Upgrade: websocket
--Connection: Upgrade
--Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
--Sec-WebSocket-Protocol: chat
--
--`))
--	var err error
--	config := new(Config)
--	config.Location, err = url.ParseRequestURI("ws://server.example.com/chat")
--	if err != nil {
--		t.Fatal("location url", err)
--	}
--	config.Origin, err = url.ParseRequestURI("http://example.com")
--	if err != nil {
--		t.Fatal("origin url", err)
--	}
--	config.Protocol = append(config.Protocol, "chat")
--	config.Protocol = append(config.Protocol, "superchat")
--	config.Version = ProtocolVersionHybi13
--	config.Header = http.Header(make(map[string][]string))
--	config.Header.Add("User-Agent", "test")
--
--	config.handshakeData = map[string]string{
--		"key": "dGhlIHNhbXBsZSBub25jZQ==",
--	}
--	err = hybiClientHandshake(config, br, bw)
--	if err != nil {
--		t.Errorf("handshake failed: %v", err)
--	}
--	req, err := http.ReadRequest(bufio.NewReader(b))
--	if err != nil {
--		t.Fatalf("read request: %v", err)
--	}
--	if req.Method != "GET" {
--		t.Errorf("request method expected GET, but got %q", req.Method)
--	}
--	if req.URL.Path != "/chat" {
--		t.Errorf("request path expected /chat, but got %q", req.URL.Path)
--	}
--	if req.Proto != "HTTP/1.1" {
--		t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto)
--	}
--	if req.Host != "server.example.com" {
--		t.Errorf("request Host expected server.example.com, but got %v", req.Host)
--	}
--	var expectedHeader = map[string]string{
--		"Connection":             "Upgrade",
--		"Upgrade":                "websocket",
--		"Sec-Websocket-Key":      config.handshakeData["key"],
--		"Origin":                 config.Origin.String(),
--		"Sec-Websocket-Protocol": "chat, superchat",
--		"Sec-Websocket-Version":  fmt.Sprintf("%d", ProtocolVersionHybi13),
--		"User-Agent":             "test",
--	}
--	for k, v := range expectedHeader {
--		if req.Header.Get(k) != v {
--			t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k)))
--		}
--	}
--}
--
--func TestHybiServerHandshake(t *testing.T) {
--	config := new(Config)
--	handshaker := &hybiServerHandshaker{Config: config}
--	br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
--Host: server.example.com
--Upgrade: websocket
--Connection: Upgrade
--Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
--Origin: http://example.com
--Sec-WebSocket-Protocol: chat, superchat
--Sec-WebSocket-Version: 13
--
--`))
--	req, err := http.ReadRequest(br)
--	if err != nil {
--		t.Fatal("request", err)
--	}
--	code, err := handshaker.ReadHandshake(br, req)
--	if err != nil {
--		t.Errorf("handshake failed: %v", err)
--	}
--	if code != http.StatusSwitchingProtocols {
--		t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
--	}
--	expectedProtocols := []string{"chat", "superchat"}
--	if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) {
--		t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol)
--	}
--	b := bytes.NewBuffer([]byte{})
--	bw := bufio.NewWriter(b)
--
--	config.Protocol = config.Protocol[:1]
--
--	err = handshaker.AcceptHandshake(bw)
--	if err != nil {
--		t.Errorf("handshake response failed: %v", err)
--	}
--	expectedResponse := strings.Join([]string{
--		"HTTP/1.1 101 Switching Protocols",
--		"Upgrade: websocket",
--		"Connection: Upgrade",
--		"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
--		"Sec-WebSocket-Protocol: chat",
--		"", ""}, "\r\n")
--
--	if b.String() != expectedResponse {
--		t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
--	}
--}
--
--func TestHybiServerHandshakeNoSubProtocol(t *testing.T) {
--	config := new(Config)
--	handshaker := &hybiServerHandshaker{Config: config}
--	br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
--Host: server.example.com
--Upgrade: websocket
--Connection: Upgrade
--Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
--Origin: http://example.com
--Sec-WebSocket-Version: 13
--
--`))
--	req, err := http.ReadRequest(br)
--	if err != nil {
--		t.Fatal("request", err)
--	}
--	code, err := handshaker.ReadHandshake(br, req)
--	if err != nil {
--		t.Errorf("handshake failed: %v", err)
--	}
--	if code != http.StatusSwitchingProtocols {
--		t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
--	}
--	if len(config.Protocol) != 0 {
--		t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol))
--	}
--	b := bytes.NewBuffer([]byte{})
--	bw := bufio.NewWriter(b)
--
--	err = handshaker.AcceptHandshake(bw)
--	if err != nil {
--		t.Errorf("handshake response failed: %v", err)
--	}
--	expectedResponse := strings.Join([]string{
--		"HTTP/1.1 101 Switching Protocols",
--		"Upgrade: websocket",
--		"Connection: Upgrade",
--		"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
--		"", ""}, "\r\n")
--
--	if b.String() != expectedResponse {
--		t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
--	}
--}
--
--func TestHybiServerHandshakeHybiBadVersion(t *testing.T) {
--	config := new(Config)
--	handshaker := &hybiServerHandshaker{Config: config}
--	br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
--Host: server.example.com
--Upgrade: websocket
--Connection: Upgrade
--Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
--Sec-WebSocket-Origin: http://example.com
--Sec-WebSocket-Protocol: chat, superchat
--Sec-WebSocket-Version: 9
--
--`))
--	req, err := http.ReadRequest(br)
--	if err != nil {
--		t.Fatal("request", err)
--	}
--	code, err := handshaker.ReadHandshake(br, req)
--	if err != ErrBadWebSocketVersion {
--		t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err)
--	}
--	if code != http.StatusBadRequest {
--		t.Errorf("status expected %q but got %q", http.StatusBadRequest, code)
--	}
--}
--
--func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) {
--	b := bytes.NewBuffer([]byte{})
--	frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false}
--	w, _ := frameWriterFactory.NewFrameWriter(TextFrame)
--	w.(*hybiFrameWriter).header = frameHeader
--	_, err := w.Write(testPayload)
--	w.Close()
--	if err != nil {
--		t.Errorf("Write error %q", err)
--	}
--	var expectedFrame []byte
--	expectedFrame = append(expectedFrame, testHeader...)
--	expectedFrame = append(expectedFrame, testMaskedPayload...)
--	if !bytes.Equal(expectedFrame, b.Bytes()) {
--		t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes())
--	}
--	frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)}
--	r, err := frameReaderFactory.NewFrameReader()
--	if err != nil {
--		t.Errorf("Read error %q", err)
--	}
--	if header := r.HeaderReader(); header == nil {
--		t.Errorf("no header")
--	} else {
--		actualHeader := make([]byte, r.Len())
--		n, err := header.Read(actualHeader)
--		if err != nil {
--			t.Errorf("Read header error %q", err)
--		} else {
--			if n < len(testHeader) {
--				t.Errorf("header too short %q got %q", testHeader, actualHeader[:n])
--			}
--			if !bytes.Equal(testHeader, actualHeader[:n]) {
--				t.Errorf("header expected %q got %q", testHeader, actualHeader[:n])
--			}
--		}
--	}
--	if trailer := r.TrailerReader(); trailer != nil {
--		t.Errorf("unexpected trailer %q", trailer)
--	}
--	frame := r.(*hybiFrameReader)
--	if frameHeader.Fin != frame.header.Fin ||
--		frameHeader.OpCode != frame.header.OpCode ||
--		len(testPayload) != int(frame.header.Length) {
--		t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame)
--	}
--	payload := make([]byte, len(testPayload))
--	_, err = r.Read(payload)
--	if err != nil {
--		t.Errorf("read %v", err)
--	}
--	if !bytes.Equal(testPayload, payload) {
--		t.Errorf("payload %q vs %q", testPayload, payload)
--	}
--}
--
--func TestHybiShortTextFrame(t *testing.T) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}
--	payload := []byte("hello")
--	testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader)
--
--	payload = make([]byte, 125)
--	testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader)
--}
--
--func TestHybiShortMaskedTextFrame(t *testing.T) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame,
--		MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}}
--	payload := []byte("hello")
--	maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3}
--	header := []byte{0x81, 0x85}
--	header = append(header, frameHeader.MaskingKey...)
--	testHybiFrame(t, header, payload, maskedPayload, frameHeader)
--}
--
--func TestHybiShortBinaryFrame(t *testing.T) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame}
--	payload := []byte("hello")
--	testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader)
--
--	payload = make([]byte, 125)
--	testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader)
--}
--
--func TestHybiControlFrame(t *testing.T) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame}
--	payload := []byte("hello")
--	testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader)
--
--	frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame}
--	testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader)
--
--	frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame}
--	payload = []byte{0x03, 0xe8} // 1000
--	testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader)
--}
--
--func TestHybiLongFrame(t *testing.T) {
--	frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}
--	payload := make([]byte, 126)
--	testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader)
--
--	payload = make([]byte, 65535)
--	testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader)
--
--	payload = make([]byte, 65536)
--	testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader)
--}
--
--func TestHybiClientRead(t *testing.T) {
--	wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',
--		0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping
--		0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}
--	br := bufio.NewReader(bytes.NewBuffer(wireData))
--	bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
--	conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
--
--	msg := make([]byte, 512)
--	n, err := conn.Read(msg)
--	if err != nil {
--		t.Errorf("read 1st frame, error %q", err)
--	}
--	if n != 5 {
--		t.Errorf("read 1st frame, expect 5, got %d", n)
--	}
--	if !bytes.Equal(wireData[2:7], msg[:n]) {
--		t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n])
--	}
--	n, err = conn.Read(msg)
--	if err != nil {
--		t.Errorf("read 2nd frame, error %q", err)
--	}
--	if n != 5 {
--		t.Errorf("read 2nd frame, expect 5, got %d", n)
--	}
--	if !bytes.Equal(wireData[16:21], msg[:n]) {
--		t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n])
--	}
--	n, err = conn.Read(msg)
--	if err == nil {
--		t.Errorf("read not EOF")
--	}
--	if n != 0 {
--		t.Errorf("expect read 0, got %d", n)
--	}
--}
--
--func TestHybiShortRead(t *testing.T) {
--	wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',
--		0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping
--		0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}
--	br := bufio.NewReader(bytes.NewBuffer(wireData))
--	bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
--	conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
--
--	step := 0
--	pos := 0
--	expectedPos := []int{2, 5, 16, 19}
--	expectedLen := []int{3, 2, 3, 2}
--	for {
--		msg := make([]byte, 3)
--		n, err := conn.Read(msg)
--		if step >= len(expectedPos) {
--			if err == nil {
--				t.Errorf("read not EOF")
--			}
--			if n != 0 {
--				t.Errorf("expect read 0, got %d", n)
--			}
--			return
--		}
--		pos = expectedPos[step]
--		endPos := pos + expectedLen[step]
--		if err != nil {
--			t.Errorf("read from %d, got error %q", pos, err)
--			return
--		}
--		if n != endPos-pos {
--			t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n)
--		}
--		if !bytes.Equal(wireData[pos:endPos], msg[:n]) {
--			t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n])
--		}
--		step++
--	}
--}
--
--func TestHybiServerRead(t *testing.T) {
--	wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,
--		0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello
--		0x89, 0x85, 0xcc, 0x55, 0x80, 0x20,
--		0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello
--		0x81, 0x85, 0xed, 0x83, 0xb4, 0x24,
--		0x9a, 0xec, 0xc6, 0x48, 0x89, // world
--	}
--	br := bufio.NewReader(bytes.NewBuffer(wireData))
--	bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
--	conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request))
--
--	expected := [][]byte{[]byte("hello"), []byte("world")}
--
--	msg := make([]byte, 512)
--	n, err := conn.Read(msg)
--	if err != nil {
--		t.Errorf("read 1st frame, error %q", err)
--	}
--	if n != 5 {
--		t.Errorf("read 1st frame, expect 5, got %d", n)
--	}
--	if !bytes.Equal(expected[0], msg[:n]) {
--		t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n])
--	}
--
--	n, err = conn.Read(msg)
--	if err != nil {
--		t.Errorf("read 2nd frame, error %q", err)
--	}
--	if n != 5 {
--		t.Errorf("read 2nd frame, expect 5, got %d", n)
--	}
--	if !bytes.Equal(expected[1], msg[:n]) {
--		t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n])
--	}
--
--	n, err = conn.Read(msg)
--	if err == nil {
--		t.Errorf("read not EOF")
--	}
--	if n != 0 {
--		t.Errorf("expect read 0, got %d", n)
--	}
--}
--
--func TestHybiServerReadWithoutMasking(t *testing.T) {
--	wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'}
--	br := bufio.NewReader(bytes.NewBuffer(wireData))
--	bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
--	conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request))
--	// server MUST close the connection upon receiving a non-masked frame.
--	msg := make([]byte, 512)
--	_, err := conn.Read(msg)
--	if err != io.EOF {
--		t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err)
--	}
--}
--
--func TestHybiClientReadWithMasking(t *testing.T) {
--	wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,
--		0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello
--	}
--	br := bufio.NewReader(bytes.NewBuffer(wireData))
--	bw := bufio.NewWriter(bytes.NewBuffer([]byte{}))
--	conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil)
--
--	// client MUST close the connection upon receiving a masked frame.
--	msg := make([]byte, 512)
--	_, err := conn.Read(msg)
--	if err != io.EOF {
--		t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err)
--	}
--}
--
--// Test that the hybiServerHandshaker supports the Firefox implementation and
--// checks that the Connection request header includes (but is not necessarily
--// equal to) "upgrade".
--func TestHybiServerFirefoxHandshake(t *testing.T) {
--	config := new(Config)
--	handshaker := &hybiServerHandshaker{Config: config}
--	br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1
--Host: server.example.com
--Upgrade: websocket
--Connection: keep-alive, upgrade
--Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
--Origin: http://example.com
--Sec-WebSocket-Protocol: chat, superchat
--Sec-WebSocket-Version: 13
--
--`))
--	req, err := http.ReadRequest(br)
--	if err != nil {
--		t.Fatal("request", err)
--	}
--	code, err := handshaker.ReadHandshake(br, req)
--	if err != nil {
--		t.Errorf("handshake failed: %v", err)
--	}
--	if code != http.StatusSwitchingProtocols {
--		t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code)
--	}
--	b := bytes.NewBuffer([]byte{})
--	bw := bufio.NewWriter(b)
--
--	config.Protocol = []string{"chat"}
--
--	err = handshaker.AcceptHandshake(bw)
--	if err != nil {
--		t.Errorf("handshake response failed: %v", err)
--	}
--	expectedResponse := strings.Join([]string{
--		"HTTP/1.1 101 Switching Protocols",
--		"Upgrade: websocket",
--		"Connection: Upgrade",
--		"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=",
--		"Sec-WebSocket-Protocol: chat",
--		"", ""}, "\r\n")
--
--	if b.String() != expectedResponse {
--		t.Errorf("handshake expected %q but got %q", expectedResponse, b.String())
--	}
--}
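One more illustrative aside, separate from the patch: the accept value asserted throughout these handshake tests is simply the base64-encoded SHA-1 of the client's Sec-WebSocket-Key concatenated with the fixed websocket GUID, mirroring getNonceAccept in hybi.go above. A minimal sketch:

	package main

	import (
		"crypto/sha1"
		"encoding/base64"
		"fmt"
	)

	func main() {
		const guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
		nonce := "dGhlIHNhbXBsZSBub25jZQ=="
		h := sha1.New()
		h.Write([]byte(nonce + guid))
		// Prints s3pPLMBiTxaQ9kYGzzhZRbK+xOo=, the value the tests above expect.
		fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	}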
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/server.go b/Godeps/_workspace/src/golang.org/x/net/websocket/server.go
-deleted file mode 100644
-index 7032213..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/server.go
-+++ /dev/null
-@@ -1,114 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket
--
--import (
--	"bufio"
--	"fmt"
--	"io"
--	"net/http"
--)
--
--func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
--	var hs serverHandshaker = &hybiServerHandshaker{Config: config}
--	code, err := hs.ReadHandshake(buf.Reader, req)
--	if err == ErrBadWebSocketVersion {
--		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
--		fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
--		buf.WriteString("\r\n")
--		buf.WriteString(err.Error())
--		buf.Flush()
--		return
--	}
--	if err != nil {
--		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
--		buf.WriteString("\r\n")
--		buf.WriteString(err.Error())
--		buf.Flush()
--		return
--	}
--	if handshake != nil {
--		err = handshake(config, req)
--		if err != nil {
--			code = http.StatusForbidden
--			fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
--			buf.WriteString("\r\n")
--			buf.Flush()
--			return
--		}
--	}
--	err = hs.AcceptHandshake(buf.Writer)
--	if err != nil {
--		code = http.StatusBadRequest
--		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
--		buf.WriteString("\r\n")
--		buf.Flush()
--		return
--	}
--	conn = hs.NewServerConn(buf, rwc, req)
--	return
--}
--
--// Server represents a server of a WebSocket.
--type Server struct {
--	// Config is a WebSocket configuration for new WebSocket connection.
--	Config
--
--	// Handshake is an optional function in WebSocket handshake.
--	// For example, you can check, or not check, the Origin header.
--	// Another example, you can select config.Protocol.
--	Handshake func(*Config, *http.Request) error
--
--	// Handler handles a WebSocket connection.
--	Handler
--}
--
--// ServeHTTP implements the http.Handler interface for a WebSocket
--func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
--	s.serveWebSocket(w, req)
--}
--
--func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
--	rwc, buf, err := w.(http.Hijacker).Hijack()
--	if err != nil {
--		panic("Hijack failed: " + err.Error())
--		return
--	}
--	// The server should abort the WebSocket connection if it finds
--	// the client did not send a handshake that matches with protocol
--	// specification.
--	defer rwc.Close()
--	conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
--	if err != nil {
--		return
--	}
--	if conn == nil {
--		panic("unexpected nil conn")
--	}
--	s.Handler(conn)
--}
--
--// Handler is a simple interface to a WebSocket browser client.
--// It checks whether the Origin header is a valid URL by default.
--// You might want to verify websocket.Conn.Config().Origin in the func.
--// If you use Server instead of Handler, you could call websocket.Origin and
--// check the origin in your Handshake func. So, if you want to accept
--// non-browser clients, which don't send an Origin header, you could use a Server
--// that doesn't check the origin in its Handshake.
--type Handler func(*Conn)
--
--func checkOrigin(config *Config, req *http.Request) (err error) {
--	config.Origin, err = Origin(config, req)
--	if err == nil && config.Origin == nil {
--		return fmt.Errorf("null origin")
--	}
--	return err
--}
--
--// ServeHTTP implements the http.Handler interface for a WebSocket
--func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
--	s := Server{Handler: h, Handshake: checkOrigin}
--	s.serveWebSocket(w, req)
--}
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/websocket.go b/Godeps/_workspace/src/golang.org/x/net/websocket/websocket.go
-deleted file mode 100644
-index b8d2e6d..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/websocket.go
-+++ /dev/null
-@@ -1,411 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--// Package websocket implements a client and server for the WebSocket protocol
--// as specified in RFC 6455.
--package websocket // import "golang.org/x/net/websocket"
--
--import (
--	"bufio"
--	"crypto/tls"
--	"encoding/json"
--	"errors"
--	"io"
--	"io/ioutil"
--	"net"
--	"net/http"
--	"net/url"
--	"sync"
--	"time"
--)
--
--const (
--	ProtocolVersionHybi13    = 13
--	ProtocolVersionHybi      = ProtocolVersionHybi13
--	SupportedProtocolVersion = "13"
--
--	ContinuationFrame = 0
--	TextFrame         = 1
--	BinaryFrame       = 2
--	CloseFrame        = 8
--	PingFrame         = 9
--	PongFrame         = 10
--	UnknownFrame      = 255
--)
--
--// ProtocolError represents WebSocket protocol errors.
--type ProtocolError struct {
--	ErrorString string
--}
--
--func (err *ProtocolError) Error() string { return err.ErrorString }
--
--var (
--	ErrBadProtocolVersion   = &ProtocolError{"bad protocol version"}
--	ErrBadScheme            = &ProtocolError{"bad scheme"}
--	ErrBadStatus            = &ProtocolError{"bad status"}
--	ErrBadUpgrade           = &ProtocolError{"missing or bad upgrade"}
--	ErrBadWebSocketOrigin   = &ProtocolError{"missing or bad WebSocket-Origin"}
--	ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
--	ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
--	ErrBadWebSocketVersion  = &ProtocolError{"missing or bad WebSocket Version"}
--	ErrChallengeResponse    = &ProtocolError{"mismatch challenge/response"}
--	ErrBadFrame             = &ProtocolError{"bad frame"}
--	ErrBadFrameBoundary     = &ProtocolError{"not on frame boundary"}
--	ErrNotWebSocket         = &ProtocolError{"not websocket protocol"}
--	ErrBadRequestMethod     = &ProtocolError{"bad method"}
--	ErrNotSupported         = &ProtocolError{"not supported"}
--)
--
--// Addr is an implementation of net.Addr for WebSocket.
--type Addr struct {
--	*url.URL
--}
--
--// Network returns the network type for a WebSocket, "websocket".
--func (addr *Addr) Network() string { return "websocket" }
--
--// Config is a WebSocket configuration
--type Config struct {
--	// A WebSocket server address.
--	Location *url.URL
--
--	// A Websocket client origin.
--	Origin *url.URL
--
--	// WebSocket subprotocols.
--	Protocol []string
--
--	// WebSocket protocol version.
--	Version int
--
--	// TLS config for secure WebSocket (wss).
--	TlsConfig *tls.Config
--
--	// Additional header fields to be sent in WebSocket opening handshake.
--	Header http.Header
--
--	handshakeData map[string]string
--}
--
--// serverHandshaker is an interface to handle WebSocket server side handshake.
--type serverHandshaker interface {
--	// ReadHandshake reads handshake request message from client.
--	// Returns http response code and error if any.
--	ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
--
--	// AcceptHandshake accepts the client handshake request and sends
--	// handshake response back to client.
--	AcceptHandshake(buf *bufio.Writer) (err error)
--
--	// NewServerConn creates a new WebSocket connection.
--	NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
--}
--
--// frameReader is an interface to read a WebSocket frame.
--type frameReader interface {
--	// Reader is to read payload of the frame.
--	io.Reader
--
--	// PayloadType returns payload type.
--	PayloadType() byte
--
--	// HeaderReader returns a reader to read header of the frame.
--	HeaderReader() io.Reader
--
--	// TrailerReader returns a reader to read trailer of the frame.
--	// If it returns nil, there is no trailer in the frame.
--	TrailerReader() io.Reader
--
--	// Len returns total length of the frame, including header and trailer.
--	Len() int
--}
--
--// frameReaderFactory is an interface to create a new frame reader.
--type frameReaderFactory interface {
--	NewFrameReader() (r frameReader, err error)
--}
--
--// frameWriter is an interface to write a WebSocket frame.
--type frameWriter interface {
--	// Writer is to write payload of the frame.
--	io.WriteCloser
--}
--
--// frameWriterFactory is an interface to create new frame writer.
--type frameWriterFactory interface {
--	NewFrameWriter(payloadType byte) (w frameWriter, err error)
--}
--
--type frameHandler interface {
--	HandleFrame(frame frameReader) (r frameReader, err error)
--	WriteClose(status int) (err error)
--}
--
--// Conn represents a WebSocket connection.
--type Conn struct {
--	config  *Config
--	request *http.Request
--
--	buf *bufio.ReadWriter
--	rwc io.ReadWriteCloser
--
--	rio sync.Mutex
--	frameReaderFactory
--	frameReader
--
--	wio sync.Mutex
--	frameWriterFactory
--
--	frameHandler
--	PayloadType        byte
--	defaultCloseStatus int
--}
--
--// Read implements the io.Reader interface:
--// it reads data of a frame from the WebSocket connection.
--// If msg is not large enough for the frame data, it fills msg and the next Read
--// will read the rest of the frame data.
--// It reads Text frames and Binary frames.
--func (ws *Conn) Read(msg []byte) (n int, err error) {
--	ws.rio.Lock()
--	defer ws.rio.Unlock()
--again:
--	if ws.frameReader == nil {
--		frame, err := ws.frameReaderFactory.NewFrameReader()
--		if err != nil {
--			return 0, err
--		}
--		ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
--		if err != nil {
--			return 0, err
--		}
--		if ws.frameReader == nil {
--			goto again
--		}
--	}
--	n, err = ws.frameReader.Read(msg)
--	if err == io.EOF {
--		if trailer := ws.frameReader.TrailerReader(); trailer != nil {
--			io.Copy(ioutil.Discard, trailer)
--		}
--		ws.frameReader = nil
--		goto again
--	}
--	return n, err
--}
--
--// Write implements the io.Writer interface:
--// it writes data as a frame to the WebSocket connection.
--func (ws *Conn) Write(msg []byte) (n int, err error) {
--	ws.wio.Lock()
--	defer ws.wio.Unlock()
--	w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
--	if err != nil {
--		return 0, err
--	}
--	n, err = w.Write(msg)
--	w.Close()
--	if err != nil {
--		return n, err
--	}
--	return n, err
--}
--
--// Close implements the io.Closer interface.
--func (ws *Conn) Close() error {
--	err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
--	if err != nil {
--		return err
--	}
--	return ws.rwc.Close()
--}
--
--func (ws *Conn) IsClientConn() bool { return ws.request == nil }
--func (ws *Conn) IsServerConn() bool { return ws.request != nil }
--
--// LocalAddr returns the WebSocket Origin for the connection for client, or
--// the WebSocket location for server.
--func (ws *Conn) LocalAddr() net.Addr {
--	if ws.IsClientConn() {
--		return &Addr{ws.config.Origin}
--	}
--	return &Addr{ws.config.Location}
--}
--
--// RemoteAddr returns the WebSocket location for the connection for client, or
--// the Websocket Origin for server.
--func (ws *Conn) RemoteAddr() net.Addr {
--	if ws.IsClientConn() {
--		return &Addr{ws.config.Location}
--	}
--	return &Addr{ws.config.Origin}
--}
--
--var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
--
--// SetDeadline sets the connection's network read & write deadlines.
--func (ws *Conn) SetDeadline(t time.Time) error {
--	if conn, ok := ws.rwc.(net.Conn); ok {
--		return conn.SetDeadline(t)
--	}
--	return errSetDeadline
--}
--
--// SetReadDeadline sets the connection's network read deadline.
--func (ws *Conn) SetReadDeadline(t time.Time) error {
--	if conn, ok := ws.rwc.(net.Conn); ok {
--		return conn.SetReadDeadline(t)
--	}
--	return errSetDeadline
--}
--
--// SetWriteDeadline sets the connection's network write deadline.
--func (ws *Conn) SetWriteDeadline(t time.Time) error {
--	if conn, ok := ws.rwc.(net.Conn); ok {
--		return conn.SetWriteDeadline(t)
--	}
--	return errSetDeadline
--}
--
--// Config returns the WebSocket config.
--func (ws *Conn) Config() *Config { return ws.config }
--
--// Request returns the http request upgraded to the WebSocket.
--// It is nil for client side.
--func (ws *Conn) Request() *http.Request { return ws.request }
--
--// Codec represents a symmetric pair of functions that implement a codec.
--type Codec struct {
--	Marshal   func(v interface{}) (data []byte, payloadType byte, err error)
--	Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
--}
--
--// Send sends v marshaled by cd.Marshal as single frame to ws.
--func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
--	data, payloadType, err := cd.Marshal(v)
--	if err != nil {
--		return err
--	}
--	ws.wio.Lock()
--	defer ws.wio.Unlock()
--	w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
--	if err != nil {
--		return err
--	}
--	_, err = w.Write(data)
--	w.Close()
--	return err
--}
--
--// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores in v.
--func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
--	ws.rio.Lock()
--	defer ws.rio.Unlock()
--	if ws.frameReader != nil {
--		_, err = io.Copy(ioutil.Discard, ws.frameReader)
--		if err != nil {
--			return err
--		}
--		ws.frameReader = nil
--	}
--again:
--	frame, err := ws.frameReaderFactory.NewFrameReader()
--	if err != nil {
--		return err
--	}
--	frame, err = ws.frameHandler.HandleFrame(frame)
--	if err != nil {
--		return err
--	}
--	if frame == nil {
--		goto again
--	}
--	payloadType := frame.PayloadType()
--	data, err := ioutil.ReadAll(frame)
--	if err != nil {
--		return err
--	}
--	return cd.Unmarshal(data, payloadType, v)
--}
--
--func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
--	switch data := v.(type) {
--	case string:
--		return []byte(data), TextFrame, nil
--	case []byte:
--		return data, BinaryFrame, nil
--	}
--	return nil, UnknownFrame, ErrNotSupported
--}
--
--func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
--	switch data := v.(type) {
--	case *string:
--		*data = string(msg)
--		return nil
--	case *[]byte:
--		*data = msg
--		return nil
--	}
--	return ErrNotSupported
--}
--
--/*
--Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
--To send/receive text frame, use string type.
--To send/receive binary frame, use []byte type.
--
--Trivial usage:
--
--	import "websocket"
--
--	// receive text frame
--	var message string
--	websocket.Message.Receive(ws, &message)
--
--	// send text frame
--	message = "hello"
--	websocket.Message.Send(ws, message)
--
--	// receive binary frame
--	var data []byte
--	websocket.Message.Receive(ws, &data)
--
--	// send binary frame
--	data = []byte{0, 1, 2}
--	websocket.Message.Send(ws, data)
--
--*/
--var Message = Codec{marshal, unmarshal}
--
--func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
--	msg, err = json.Marshal(v)
--	return msg, TextFrame, err
--}
--
--func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
--	return json.Unmarshal(msg, v)
--}
--
--/*
--JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
--
--Trivial usage:
--
--	import "websocket"
--
--	type T struct {
--		Msg string
--		Count int
--	}
--
--	// receive JSON type T
--	var data T
--	websocket.JSON.Receive(ws, &data)
--
--	// send JSON type T
--	websocket.JSON.Send(ws, data)
--*/
--var JSON = Codec{jsonMarshal, jsonUnmarshal}
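
The Codec type removed above is just a Marshal/Unmarshal pair, and Message and JSON are the two pairs the package ships. As a minimal sketch (illustration only, not part of this patch; the wscodec package name is made up), and assuming only the exported golang.org/x/net/websocket API shown here, the same pattern gives a gob-based codec:

package wscodec

import (
	"bytes"
	"encoding/gob"

	"golang.org/x/net/websocket"
)

// gobMarshal encodes v with encoding/gob and marks the frame as binary.
func gobMarshal(v interface{}) (data []byte, payloadType byte, err error) {
	var buf bytes.Buffer
	err = gob.NewEncoder(&buf).Encode(v)
	return buf.Bytes(), websocket.BinaryFrame, err
}

// gobUnmarshal decodes a frame produced by gobMarshal back into v.
func gobUnmarshal(data []byte, payloadType byte, v interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}

// Gob mirrors websocket.Message and websocket.JSON: one gob-encoded value per frame.
var Gob = websocket.Codec{Marshal: gobMarshal, Unmarshal: gobUnmarshal}

Gob.Send(ws, v) and Gob.Receive(ws, &v) then behave like the Message and JSON helpers, since Send and Receive only call the two functions stored in the Codec.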
-diff --git a/Godeps/_workspace/src/golang.org/x/net/websocket/websocket_test.go b/Godeps/_workspace/src/golang.org/x/net/websocket/websocket_test.go
-deleted file mode 100644
-index 48f14b6..0000000
---- a/Godeps/_workspace/src/golang.org/x/net/websocket/websocket_test.go
-+++ /dev/null
-@@ -1,341 +0,0 @@
--// Copyright 2009 The Go Authors. All rights reserved.
--// Use of this source code is governed by a BSD-style
--// license that can be found in the LICENSE file.
--
--package websocket
--
--import (
--	"bytes"
--	"fmt"
--	"io"
--	"log"
--	"net"
--	"net/http"
--	"net/http/httptest"
--	"net/url"
--	"strings"
--	"sync"
--	"testing"
--)
--
--var serverAddr string
--var once sync.Once
--
--func echoServer(ws *Conn) { io.Copy(ws, ws) }
--
--type Count struct {
--	S string
--	N int
--}
--
--func countServer(ws *Conn) {
--	for {
--		var count Count
--		err := JSON.Receive(ws, &count)
--		if err != nil {
--			return
--		}
--		count.N++
--		count.S = strings.Repeat(count.S, count.N)
--		err = JSON.Send(ws, count)
--		if err != nil {
--			return
--		}
--	}
--}
--
--func subProtocolHandshake(config *Config, req *http.Request) error {
--	for _, proto := range config.Protocol {
--		if proto == "chat" {
--			config.Protocol = []string{proto}
--			return nil
--		}
--	}
--	return ErrBadWebSocketProtocol
--}
--
--func subProtoServer(ws *Conn) {
--	for _, proto := range ws.Config().Protocol {
--		io.WriteString(ws, proto)
--	}
--}
--
--func startServer() {
--	http.Handle("/echo", Handler(echoServer))
--	http.Handle("/count", Handler(countServer))
--	subproto := Server{
--		Handshake: subProtocolHandshake,
--		Handler:   Handler(subProtoServer),
--	}
--	http.Handle("/subproto", subproto)
--	server := httptest.NewServer(nil)
--	serverAddr = server.Listener.Addr().String()
--	log.Print("Test WebSocket server listening on ", serverAddr)
--}
--
--func newConfig(t *testing.T, path string) *Config {
--	config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost")
--	return config
--}
--
--func TestEcho(t *testing.T) {
--	once.Do(startServer)
--
--	// websocket.Dial()
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--	conn, err := NewClient(newConfig(t, "/echo"), client)
--	if err != nil {
--		t.Errorf("WebSocket handshake error: %v", err)
--		return
--	}
--
--	msg := []byte("hello, world\n")
--	if _, err := conn.Write(msg); err != nil {
--		t.Errorf("Write: %v", err)
--	}
--	var actual_msg = make([]byte, 512)
--	n, err := conn.Read(actual_msg)
--	if err != nil {
--		t.Errorf("Read: %v", err)
--	}
--	actual_msg = actual_msg[0:n]
--	if !bytes.Equal(msg, actual_msg) {
--		t.Errorf("Echo: expected %q got %q", msg, actual_msg)
--	}
--	conn.Close()
--}
--
--func TestAddr(t *testing.T) {
--	once.Do(startServer)
--
--	// websocket.Dial()
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--	conn, err := NewClient(newConfig(t, "/echo"), client)
--	if err != nil {
--		t.Errorf("WebSocket handshake error: %v", err)
--		return
--	}
--
--	ra := conn.RemoteAddr().String()
--	if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") {
--		t.Errorf("Bad remote addr: %v", ra)
--	}
--	la := conn.LocalAddr().String()
--	if !strings.HasPrefix(la, "http://") {
--		t.Errorf("Bad local addr: %v", la)
--	}
--	conn.Close()
--}
--
--func TestCount(t *testing.T) {
--	once.Do(startServer)
--
--	// websocket.Dial()
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--	conn, err := NewClient(newConfig(t, "/count"), client)
--	if err != nil {
--		t.Errorf("WebSocket handshake error: %v", err)
--		return
--	}
--
--	var count Count
--	count.S = "hello"
--	if err := JSON.Send(conn, count); err != nil {
--		t.Errorf("Write: %v", err)
--	}
--	if err := JSON.Receive(conn, &count); err != nil {
--		t.Errorf("Read: %v", err)
--	}
--	if count.N != 1 {
--		t.Errorf("count: expected %d got %d", 1, count.N)
--	}
--	if count.S != "hello" {
--		t.Errorf("count: expected %q got %q", "hello", count.S)
--	}
--	if err := JSON.Send(conn, count); err != nil {
--		t.Errorf("Write: %v", err)
--	}
--	if err := JSON.Receive(conn, &count); err != nil {
--		t.Errorf("Read: %v", err)
--	}
--	if count.N != 2 {
--		t.Errorf("count: expected %d got %d", 2, count.N)
--	}
--	if count.S != "hellohello" {
--		t.Errorf("count: expected %q got %q", "hellohello", count.S)
--	}
--	conn.Close()
--}
--
--func TestWithQuery(t *testing.T) {
--	once.Do(startServer)
--
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--
--	config := newConfig(t, "/echo")
--	config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr))
--	if err != nil {
--		t.Fatal("location url", err)
--	}
--
--	ws, err := NewClient(config, client)
--	if err != nil {
--		t.Errorf("WebSocket handshake: %v", err)
--		return
--	}
--	ws.Close()
--}
--
--func testWithProtocol(t *testing.T, subproto []string) (string, error) {
--	once.Do(startServer)
--
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--
--	config := newConfig(t, "/subproto")
--	config.Protocol = subproto
--
--	ws, err := NewClient(config, client)
--	if err != nil {
--		return "", err
--	}
--	msg := make([]byte, 16)
--	n, err := ws.Read(msg)
--	if err != nil {
--		return "", err
--	}
--	ws.Close()
--	return string(msg[:n]), nil
--}
--
--func TestWithProtocol(t *testing.T) {
--	proto, err := testWithProtocol(t, []string{"chat"})
--	if err != nil {
--		t.Errorf("SubProto: unexpected error: %v", err)
--	}
--	if proto != "chat" {
--		t.Errorf("SubProto: expected %q, got %q", "chat", proto)
--	}
--}
--
--func TestWithTwoProtocol(t *testing.T) {
--	proto, err := testWithProtocol(t, []string{"test", "chat"})
--	if err != nil {
--		t.Errorf("SubProto: unexpected error: %v", err)
--	}
--	if proto != "chat" {
--		t.Errorf("SubProto: expected %q, got %q", "chat", proto)
--	}
--}
--
--func TestWithBadProtocol(t *testing.T) {
--	_, err := testWithProtocol(t, []string{"test"})
--	if err != ErrBadStatus {
--		t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err)
--	}
--}
--
--func TestHTTP(t *testing.T) {
--	once.Do(startServer)
--
--	// If the client did not send a handshake that matches the protocol
--	// specification, the server MUST return an HTTP response with an
--	// appropriate error code (such as 400 Bad Request)
--	resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr))
--	if err != nil {
--		t.Errorf("Get: error %#v", err)
--		return
--	}
--	if resp == nil {
--		t.Error("Get: resp is null")
--		return
--	}
--	if resp.StatusCode != http.StatusBadRequest {
--		t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode)
--	}
--}
--
--func TestTrailingSpaces(t *testing.T) {
--	// http://code.google.com/p/go/issues/detail?id=955
--	// The last runs of this create keys with trailing spaces that should not be
--	// generated by the client.
--	once.Do(startServer)
--	config := newConfig(t, "/echo")
--	for i := 0; i < 30; i++ {
--		// body
--		ws, err := DialConfig(config)
--		if err != nil {
--			t.Errorf("Dial #%d failed: %v", i, err)
--			break
--		}
--		ws.Close()
--	}
--}
--
--func TestDialConfigBadVersion(t *testing.T) {
--	once.Do(startServer)
--	config := newConfig(t, "/echo")
--	config.Version = 1234
--
--	_, err := DialConfig(config)
--
--	if dialerr, ok := err.(*DialError); ok {
--		if dialerr.Err != ErrBadProtocolVersion {
--			t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err)
--		}
--	}
--}
--
--func TestSmallBuffer(t *testing.T) {
--	// http://code.google.com/p/go/issues/detail?id=1145
--	// Read should be able to handle reading a fragment of a frame.
--	once.Do(startServer)
--
--	// websocket.Dial()
--	client, err := net.Dial("tcp", serverAddr)
--	if err != nil {
--		t.Fatal("dialing", err)
--	}
--	conn, err := NewClient(newConfig(t, "/echo"), client)
--	if err != nil {
--		t.Errorf("WebSocket handshake error: %v", err)
--		return
--	}
--
--	msg := []byte("hello, world\n")
--	if _, err := conn.Write(msg); err != nil {
--		t.Errorf("Write: %v", err)
--	}
--	var small_msg = make([]byte, 8)
--	n, err := conn.Read(small_msg)
--	if err != nil {
--		t.Errorf("Read: %v", err)
--	}
--	if !bytes.Equal(msg[:len(small_msg)], small_msg) {
--		t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg)
--	}
--	var second_msg = make([]byte, len(msg))
--	n, err = conn.Read(second_msg)
--	if err != nil {
--		t.Errorf("Read: %v", err)
--	}
--	second_msg = second_msg[0:n]
--	if !bytes.Equal(msg[len(small_msg):], second_msg) {
--		t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg)
--	}
--	conn.Close()
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE b/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE
-deleted file mode 100644
-index a68e67f..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE
-+++ /dev/null
-@@ -1,188 +0,0 @@
--
--Copyright (c) 2011-2014 - Canonical Inc.
--
--This software is licensed under the LGPLv3, included below.
--
--As a special exception to the GNU Lesser General Public License version 3
--("LGPL3"), the copyright holders of this Library give you permission to
--convey to a third party a Combined Work that links statically or dynamically
--to this Library without providing any Minimal Corresponding Source or
--Minimal Application Code as set out in 4d or providing the installation
--information set out in section 4e, provided that you comply with the other
--provisions of LGPL3 and provided that you meet, for the Application the
--terms and conditions of the license(s) which apply to the Application.
--
--Except as stated in this special exception, the provisions of LGPL3 will
--continue to comply in full to this Library. If you modify this Library, you
--may apply this exception to your version of this Library, but you are not
--obliged to do so. If you do not wish to do so, delete this exception
--statement from your version. This exception does not (and cannot) modify any
--license terms which apply to the Application, with which you must still
--comply.
--
--
--                   GNU LESSER GENERAL PUBLIC LICENSE
--                       Version 3, 29 June 2007
--
-- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
-- Everyone is permitted to copy and distribute verbatim copies
-- of this license document, but changing it is not allowed.
--
--
--  This version of the GNU Lesser General Public License incorporates
--the terms and conditions of version 3 of the GNU General Public
--License, supplemented by the additional permissions listed below.
--
--  0. Additional Definitions.
--
--  As used herein, "this License" refers to version 3 of the GNU Lesser
--General Public License, and the "GNU GPL" refers to version 3 of the GNU
--General Public License.
--
--  "The Library" refers to a covered work governed by this License,
--other than an Application or a Combined Work as defined below.
--
--  An "Application" is any work that makes use of an interface provided
--by the Library, but which is not otherwise based on the Library.
--Defining a subclass of a class defined by the Library is deemed a mode
--of using an interface provided by the Library.
--
--  A "Combined Work" is a work produced by combining or linking an
--Application with the Library.  The particular version of the Library
--with which the Combined Work was made is also called the "Linked
--Version".
--
--  The "Minimal Corresponding Source" for a Combined Work means the
--Corresponding Source for the Combined Work, excluding any source code
--for portions of the Combined Work that, considered in isolation, are
--based on the Application, and not on the Linked Version.
--
--  The "Corresponding Application Code" for a Combined Work means the
--object code and/or source code for the Application, including any data
--and utility programs needed for reproducing the Combined Work from the
--Application, but excluding the System Libraries of the Combined Work.
--
--  1. Exception to Section 3 of the GNU GPL.
--
--  You may convey a covered work under sections 3 and 4 of this License
--without being bound by section 3 of the GNU GPL.
--
--  2. Conveying Modified Versions.
--
--  If you modify a copy of the Library, and, in your modifications, a
--facility refers to a function or data to be supplied by an Application
--that uses the facility (other than as an argument passed when the
--facility is invoked), then you may convey a copy of the modified
--version:
--
--   a) under this License, provided that you make a good faith effort to
--   ensure that, in the event an Application does not supply the
--   function or data, the facility still operates, and performs
--   whatever part of its purpose remains meaningful, or
--
--   b) under the GNU GPL, with none of the additional permissions of
--   this License applicable to that copy.
--
--  3. Object Code Incorporating Material from Library Header Files.
--
--  The object code form of an Application may incorporate material from
--a header file that is part of the Library.  You may convey such object
--code under terms of your choice, provided that, if the incorporated
--material is not limited to numerical parameters, data structure
--layouts and accessors, or small macros, inline functions and templates
--(ten or fewer lines in length), you do both of the following:
--
--   a) Give prominent notice with each copy of the object code that the
--   Library is used in it and that the Library and its use are
--   covered by this License.
--
--   b) Accompany the object code with a copy of the GNU GPL and this license
--   document.
--
--  4. Combined Works.
--
--  You may convey a Combined Work under terms of your choice that,
--taken together, effectively do not restrict modification of the
--portions of the Library contained in the Combined Work and reverse
--engineering for debugging such modifications, if you also do each of
--the following:
--
--   a) Give prominent notice with each copy of the Combined Work that
--   the Library is used in it and that the Library and its use are
--   covered by this License.
--
--   b) Accompany the Combined Work with a copy of the GNU GPL and this license
--   document.
--
--   c) For a Combined Work that displays copyright notices during
--   execution, include the copyright notice for the Library among
--   these notices, as well as a reference directing the user to the
--   copies of the GNU GPL and this license document.
--
--   d) Do one of the following:
--
--       0) Convey the Minimal Corresponding Source under the terms of this
--       License, and the Corresponding Application Code in a form
--       suitable for, and under terms that permit, the user to
--       recombine or relink the Application with a modified version of
--       the Linked Version to produce a modified Combined Work, in the
--       manner specified by section 6 of the GNU GPL for conveying
--       Corresponding Source.
--
--       1) Use a suitable shared library mechanism for linking with the
--       Library.  A suitable mechanism is one that (a) uses at run time
--       a copy of the Library already present on the user's computer
--       system, and (b) will operate properly with a modified version
--       of the Library that is interface-compatible with the Linked
--       Version.
--
--   e) Provide Installation Information, but only if you would otherwise
--   be required to provide such information under section 6 of the
--   GNU GPL, and only to the extent that such information is
--   necessary to install and execute a modified version of the
--   Combined Work produced by recombining or relinking the
--   Application with a modified version of the Linked Version. (If
--   you use option 4d0, the Installation Information must accompany
--   the Minimal Corresponding Source and Corresponding Application
--   Code. If you use option 4d1, you must provide the Installation
--   Information in the manner specified by section 6 of the GNU GPL
--   for conveying Corresponding Source.)
--
--  5. Combined Libraries.
--
--  You may place library facilities that are a work based on the
--Library side by side in a single library together with other library
--facilities that are not Applications and are not covered by this
--License, and convey such a combined library under terms of your
--choice, if you do both of the following:
--
--   a) Accompany the combined library with a copy of the same work based
--   on the Library, uncombined with any other library facilities,
--   conveyed under the terms of this License.
--
--   b) Give prominent notice with the combined library that part of it
--   is a work based on the Library, and explaining where to find the
--   accompanying uncombined form of the same work.
--
--  6. Revised Versions of the GNU Lesser General Public License.
--
--  The Free Software Foundation may publish revised and/or new versions
--of the GNU Lesser General Public License from time to time. Such new
--versions will be similar in spirit to the present version, but may
--differ in detail to address new problems or concerns.
--
--  Each version is given a distinguishing version number. If the
--Library as you received it specifies that a certain numbered version
--of the GNU Lesser General Public License "or any later version"
--applies to it, you have the option of following the terms and
--conditions either of that published version or of any later version
--published by the Free Software Foundation. If the Library as you
--received it does not specify a version number of the GNU Lesser
--General Public License, you may choose any version of the GNU Lesser
--General Public License ever published by the Free Software Foundation.
--
--  If the Library as you received it specifies that a proxy can decide
--whether future versions of the GNU Lesser General Public License shall
--apply, that proxy's public statement of acceptance of any version is
--permanent authorization for you to choose that version for the
--Library.
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE.libyaml
-deleted file mode 100644
-index 8da58fb..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/LICENSE.libyaml
-+++ /dev/null
-@@ -1,31 +0,0 @@
--The following files were ported to Go from C files of libyaml, and thus
--are still covered by their original copyright and license:
--
--    apic.go
--    emitterc.go
--    parserc.go
--    readerc.go
--    scannerc.go
--    writerc.go
--    yamlh.go
--    yamlprivateh.go
--
--Copyright (c) 2006 Kirill Simonov
--
--Permission is hereby granted, free of charge, to any person obtaining a copy of
--this software and associated documentation files (the "Software"), to deal in
--the Software without restriction, including without limitation the rights to
--use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
--of the Software, and to permit persons to whom the Software is furnished to do
--so, subject to the following conditions:
--
--The above copyright notice and this permission notice shall be included in all
--copies or substantial portions of the Software.
--
--THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
--SOFTWARE.
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/README.md b/Godeps/_workspace/src/gopkg.in/v2/yaml/README.md
-deleted file mode 100644
-index d6c919e..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/README.md
-+++ /dev/null
-@@ -1,128 +0,0 @@
--# YAML support for the Go language
--
--Introduction
--------------
--
--The yaml package enables Go programs to comfortably encode and decode YAML
--values. It was developed within [Canonical](https://www.canonical.com) as
--part of the [juju](https://juju.ubuntu.com) project, and is based on a
--pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
--C library to parse and generate YAML data quickly and reliably.
--
--Compatibility
---------------
--
--The yaml package supports most of YAML 1.1 and 1.2, including support for
--anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
--implemented, and base-60 floats from YAML 1.1 are purposefully not
--supported since they're a poor design and are gone in YAML 1.2.
--
--Installation and usage
------------------------
--
--The import path for the package is *gopkg.in/yaml.v2*.
--
--To install it, run:
--
--    go get gopkg.in/yaml.v2
--
--API documentation
-------------------
--
--If opened in a browser, the import path itself leads to the API documentation:
--
--  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
--
--API stability
---------------
--
--The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
--
--
--License
---------
--
--The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
--
--
--Example
---------
--
--```Go
--package main
--
--import (
--        "fmt"
--        "log"
--
--        "gopkg.in/yaml.v2"
--)
--
--var data = `
--a: Easy!
--b:
--  c: 2
--  d: [3, 4]
--`
--
--type T struct {
--        A string
--        B struct{C int; D []int ",flow"}
--}
--
--func main() {
--        t := T{}
--    
--        err := yaml.Unmarshal([]byte(data), &t)
--        if err != nil {
--                log.Fatalf("error: %v", err)
--        }
--        fmt.Printf("--- t:\n%v\n\n", t)
--    
--        d, err := yaml.Marshal(&t)
--        if err != nil {
--                log.Fatalf("error: %v", err)
--        }
--        fmt.Printf("--- t dump:\n%s\n\n", string(d))
--    
--        m := make(map[interface{}]interface{})
--    
--        err = yaml.Unmarshal([]byte(data), &m)
--        if err != nil {
--                log.Fatalf("error: %v", err)
--        }
--        fmt.Printf("--- m:\n%v\n\n", m)
--    
--        d, err = yaml.Marshal(&m)
--        if err != nil {
--                log.Fatalf("error: %v", err)
--        }
--        fmt.Printf("--- m dump:\n%s\n\n", string(d))
--}
--```
--
--This example will generate the following output:
--
--```
----- t:
--{Easy! {2 [3 4]}}
--
----- t dump:
--a: Easy!
--b:
--  c: 2
--  d: [3, 4]
--
--
----- m:
--map[a:Easy! b:map[c:2 d:[3 4]]]
--
----- m dump:
--a: Easy!
--b:
--  c: 2
--  d:
--  - 3
--  - 4
--```
--
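
The README example above controls output style with the short inline form of a field annotation (",flow"). As a minimal sketch (illustration only, not part of this patch), assuming the gopkg.in/yaml.v2 API the README documents, the same control is usually written with explicit yaml struct tags:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Config uses explicit tags: rename a field, drop it when empty,
// and emit the slice in flow style ([3, 4]) rather than block style.
type Config struct {
	Name  string `yaml:"name"`
	Notes string `yaml:"notes,omitempty"`
	D     []int  `yaml:"d,flow"`
}

func main() {
	c := Config{Name: "example", D: []int{3, 4}}
	out, err := yaml.Marshal(&c)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("%s", out)
}

With Notes left empty, the omitempty flag keeps it out of the marshalled document entirely.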
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/apic.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/apic.go
-deleted file mode 100644
-index 95ec014..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/apic.go
-+++ /dev/null
-@@ -1,742 +0,0 @@
--package yaml
--
--import (
--	"io"
--	"os"
--)
--
--func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
--	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
--
--	// Check if we can move the queue at the beginning of the buffer.
--	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
--		if parser.tokens_head != len(parser.tokens) {
--			copy(parser.tokens, parser.tokens[parser.tokens_head:])
--		}
--		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
--		parser.tokens_head = 0
--	}
--	parser.tokens = append(parser.tokens, *token)
--	if pos < 0 {
--		return
--	}
--	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
--	parser.tokens[parser.tokens_head+pos] = *token
--}
--
--// Create a new parser object.
--func yaml_parser_initialize(parser *yaml_parser_t) bool {
--	*parser = yaml_parser_t{
--		raw_buffer: make([]byte, 0, input_raw_buffer_size),
--		buffer:     make([]byte, 0, input_buffer_size),
--	}
--	return true
--}
--
--// Destroy a parser object.
--func yaml_parser_delete(parser *yaml_parser_t) {
--	*parser = yaml_parser_t{}
--}
--
--// String read handler.
--func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
--	if parser.input_pos == len(parser.input) {
--		return 0, io.EOF
--	}
--	n = copy(buffer, parser.input[parser.input_pos:])
--	parser.input_pos += n
--	return n, nil
--}
--
--// File read handler.
--func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
--	return parser.input_file.Read(buffer)
--}
--
--// Set a string input.
--func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
--	if parser.read_handler != nil {
--		panic("must set the input source only once")
--	}
--	parser.read_handler = yaml_string_read_handler
--	parser.input = input
--	parser.input_pos = 0
--}
--
--// Set a file input.
--func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
--	if parser.read_handler != nil {
--		panic("must set the input source only once")
--	}
--	parser.read_handler = yaml_file_read_handler
--	parser.input_file = file
--}
--
--// Set the source encoding.
--func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
--	if parser.encoding != yaml_ANY_ENCODING {
--		panic("must set the encoding only once")
--	}
--	parser.encoding = encoding
--}
--
--// Create a new emitter object.
--func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
--	*emitter = yaml_emitter_t{
--		buffer:     make([]byte, output_buffer_size),
--		raw_buffer: make([]byte, 0, output_raw_buffer_size),
--		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
--		events:     make([]yaml_event_t, 0, initial_queue_size),
--	}
--	return true
--}
--
--// Destroy an emitter object.
--func yaml_emitter_delete(emitter *yaml_emitter_t) {
--	*emitter = yaml_emitter_t{}
--}
--
--// String write handler.
--func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
--	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
--	return nil
--}
--
--// File write handler.
--func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
--	_, err := emitter.output_file.Write(buffer)
--	return err
--}
--
--// Set a string output.
--func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
--	if emitter.write_handler != nil {
--		panic("must set the output target only once")
--	}
--	emitter.write_handler = yaml_string_write_handler
--	emitter.output_buffer = output_buffer
--}
--
--// Set a file output.
--func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
--	if emitter.write_handler != nil {
--		panic("must set the output target only once")
--	}
--	emitter.write_handler = yaml_file_write_handler
--	emitter.output_file = file
--}
--
--// Set the output encoding.
--func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
--	if emitter.encoding != yaml_ANY_ENCODING {
--		panic("must set the output encoding only once")
--	}
--	emitter.encoding = encoding
--}
--
--// Set the canonical output style.
--func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
--	emitter.canonical = canonical
--}
--
--//// Set the indentation increment.
--func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
--	if indent < 2 || indent > 9 {
--		indent = 2
--	}
--	emitter.best_indent = indent
--}
--
--// Set the preferred line width.
--func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
--	if width < 0 {
--		width = -1
--	}
--	emitter.best_width = width
--}
--
--// Set if unescaped non-ASCII characters are allowed.
--func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
--	emitter.unicode = unicode
--}
--
--// Set the preferred line break character.
--func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
--	emitter.line_break = line_break
--}
--
--///*
--// * Destroy a token object.
--// */
--//
--//YAML_DECLARE(void)
--//yaml_token_delete(yaml_token_t *token)
--//{
--//    assert(token);  // Non-NULL token object expected.
--//
--//    switch (token.type)
--//    {
--//        case YAML_TAG_DIRECTIVE_TOKEN:
--//            yaml_free(token.data.tag_directive.handle);
--//            yaml_free(token.data.tag_directive.prefix);
--//            break;
--//
--//        case YAML_ALIAS_TOKEN:
--//            yaml_free(token.data.alias.value);
--//            break;
--//
--//        case YAML_ANCHOR_TOKEN:
--//            yaml_free(token.data.anchor.value);
--//            break;
--//
--//        case YAML_TAG_TOKEN:
--//            yaml_free(token.data.tag.handle);
--//            yaml_free(token.data.tag.suffix);
--//            break;
--//
--//        case YAML_SCALAR_TOKEN:
--//            yaml_free(token.data.scalar.value);
--//            break;
--//
--//        default:
--//            break;
--//    }
--//
--//    memset(token, 0, sizeof(yaml_token_t));
--//}
--//
--///*
--// * Check if a string is a valid UTF-8 sequence.
--// *
--// * Check 'reader.c' for more details on UTF-8 encoding.
--// */
--//
--//static int
--//yaml_check_utf8(yaml_char_t *start, size_t length)
--//{
--//    yaml_char_t *end = start+length;
--//    yaml_char_t *pointer = start;
--//
--//    while (pointer < end) {
--//        unsigned char octet;
--//        unsigned int width;
--//        unsigned int value;
--//        size_t k;
--//
--//        octet = pointer[0];
--//        width = (octet & 0x80) == 0x00 ? 1 :
--//                (octet & 0xE0) == 0xC0 ? 2 :
--//                (octet & 0xF0) == 0xE0 ? 3 :
--//                (octet & 0xF8) == 0xF0 ? 4 : 0;
--//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
--//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
--//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
--//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
--//        if (!width) return 0;
--//        if (pointer+width > end) return 0;
--//        for (k = 1; k < width; k ++) {
--//            octet = pointer[k];
--//            if ((octet & 0xC0) != 0x80) return 0;
--//            value = (value << 6) + (octet & 0x3F);
--//        }
--//        if (!((width == 1) ||
--//            (width == 2 && value >= 0x80) ||
--//            (width == 3 && value >= 0x800) ||
--//            (width == 4 && value >= 0x10000))) return 0;
--//
--//        pointer += width;
--//    }
--//
--//    return 1;
--//}
--//
--
--// Create STREAM-START.
--func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
--	*event = yaml_event_t{
--		typ:      yaml_STREAM_START_EVENT,
--		encoding: encoding,
--	}
--	return true
--}
--
--// Create STREAM-END.
--func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
--	*event = yaml_event_t{
--		typ: yaml_STREAM_END_EVENT,
--	}
--	return true
--}
--
--// Create DOCUMENT-START.
--func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
--	tag_directives []yaml_tag_directive_t, implicit bool) bool {
--	*event = yaml_event_t{
--		typ:               yaml_DOCUMENT_START_EVENT,
--		version_directive: version_directive,
--		tag_directives:    tag_directives,
--		implicit:          implicit,
--	}
--	return true
--}
--
--// Create DOCUMENT-END.
--func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
--	*event = yaml_event_t{
--		typ:      yaml_DOCUMENT_END_EVENT,
--		implicit: implicit,
--	}
--	return true
--}
--
--///*
--// * Create ALIAS.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
--//{
--//    mark yaml_mark_t = { 0, 0, 0 }
--//    anchor_copy *yaml_char_t = NULL
--//
--//    assert(event) // Non-NULL event object is expected.
--//    assert(anchor) // Non-NULL anchor is expected.
--//
--//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
--//
--//    anchor_copy = yaml_strdup(anchor)
--//    if (!anchor_copy)
--//        return 0
--//
--//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
--//
--//    return 1
--//}
--
--// Create SCALAR.
--func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
--	*event = yaml_event_t{
--		typ:             yaml_SCALAR_EVENT,
--		anchor:          anchor,
--		tag:             tag,
--		value:           value,
--		implicit:        plain_implicit,
--		quoted_implicit: quoted_implicit,
--		style:           yaml_style_t(style),
--	}
--	return true
--}
--
--// Create SEQUENCE-START.
--func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
--	*event = yaml_event_t{
--		typ:      yaml_SEQUENCE_START_EVENT,
--		anchor:   anchor,
--		tag:      tag,
--		implicit: implicit,
--		style:    yaml_style_t(style),
--	}
--	return true
--}
--
--// Create SEQUENCE-END.
--func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
--	*event = yaml_event_t{
--		typ: yaml_SEQUENCE_END_EVENT,
--	}
--	return true
--}
--
--// Create MAPPING-START.
--func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
--	*event = yaml_event_t{
--		typ:      yaml_MAPPING_START_EVENT,
--		anchor:   anchor,
--		tag:      tag,
--		implicit: implicit,
--		style:    yaml_style_t(style),
--	}
--	return true
--}
--
--// Create MAPPING-END.
--func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
--	*event = yaml_event_t{
--		typ: yaml_MAPPING_END_EVENT,
--	}
--	return true
--}
--
--// Destroy an event object.
--func yaml_event_delete(event *yaml_event_t) {
--	*event = yaml_event_t{}
--}
--
--///*
--// * Create a document object.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_initialize(document *yaml_document_t,
--//        version_directive *yaml_version_directive_t,
--//        tag_directives_start *yaml_tag_directive_t,
--//        tag_directives_end *yaml_tag_directive_t,
--//        start_implicit int, end_implicit int)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//    struct {
--//        start *yaml_node_t
--//        end *yaml_node_t
--//        top *yaml_node_t
--//    } nodes = { NULL, NULL, NULL }
--//    version_directive_copy *yaml_version_directive_t = NULL
--//    struct {
--//        start *yaml_tag_directive_t
--//        end *yaml_tag_directive_t
--//        top *yaml_tag_directive_t
--//    } tag_directives_copy = { NULL, NULL, NULL }
--//    value yaml_tag_directive_t = { NULL, NULL }
--//    mark yaml_mark_t = { 0, 0, 0 }
--//
--//    assert(document) // Non-NULL document object is expected.
--//    assert((tag_directives_start && tag_directives_end) ||
--//            (tag_directives_start == tag_directives_end))
--//                            // Valid tag directives are expected.
--//
--//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
--//
--//    if (version_directive) {
--//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
--//        if (!version_directive_copy) goto error
--//        version_directive_copy.major = version_directive.major
--//        version_directive_copy.minor = version_directive.minor
--//    }
--//
--//    if (tag_directives_start != tag_directives_end) {
--//        tag_directive *yaml_tag_directive_t
--//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
--//            goto error
--//        for (tag_directive = tag_directives_start
--//                tag_directive != tag_directives_end; tag_directive ++) {
--//            assert(tag_directive.handle)
--//            assert(tag_directive.prefix)
--//            if (!yaml_check_utf8(tag_directive.handle,
--//                        strlen((char *)tag_directive.handle)))
--//                goto error
--//            if (!yaml_check_utf8(tag_directive.prefix,
--//                        strlen((char *)tag_directive.prefix)))
--//                goto error
--//            value.handle = yaml_strdup(tag_directive.handle)
--//            value.prefix = yaml_strdup(tag_directive.prefix)
--//            if (!value.handle || !value.prefix) goto error
--//            if (!PUSH(&context, tag_directives_copy, value))
--//                goto error
--//            value.handle = NULL
--//            value.prefix = NULL
--//        }
--//    }
--//
--//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
--//            tag_directives_copy.start, tag_directives_copy.top,
--//            start_implicit, end_implicit, mark, mark)
--//
--//    return 1
--//
--//error:
--//    STACK_DEL(&context, nodes)
--//    yaml_free(version_directive_copy)
--//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
--//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
--//        yaml_free(value.handle)
--//        yaml_free(value.prefix)
--//    }
--//    STACK_DEL(&context, tag_directives_copy)
--//    yaml_free(value.handle)
--//    yaml_free(value.prefix)
--//
--//    return 0
--//}
--//
--///*
--// * Destroy a document object.
--// */
--//
--//YAML_DECLARE(void)
--//yaml_document_delete(document *yaml_document_t)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//    tag_directive *yaml_tag_directive_t
--//
--//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
--//
--//    assert(document) // Non-NULL document object is expected.
--//
--//    while (!STACK_EMPTY(&context, document.nodes)) {
--//        node yaml_node_t = POP(&context, document.nodes)
--//        yaml_free(node.tag)
--//        switch (node.type) {
--//            case YAML_SCALAR_NODE:
--//                yaml_free(node.data.scalar.value)
--//                break
--//            case YAML_SEQUENCE_NODE:
--//                STACK_DEL(&context, node.data.sequence.items)
--//                break
--//            case YAML_MAPPING_NODE:
--//                STACK_DEL(&context, node.data.mapping.pairs)
--//                break
--//            default:
--//                assert(0) // Should not happen.
--//        }
--//    }
--//    STACK_DEL(&context, document.nodes)
--//
--//    yaml_free(document.version_directive)
--//    for (tag_directive = document.tag_directives.start
--//            tag_directive != document.tag_directives.end
--//            tag_directive++) {
--//        yaml_free(tag_directive.handle)
--//        yaml_free(tag_directive.prefix)
--//    }
--//    yaml_free(document.tag_directives.start)
--//
--//    memset(document, 0, sizeof(yaml_document_t))
--//}
--//
--///**
--// * Get a document node.
--// */
--//
--//YAML_DECLARE(yaml_node_t *)
--//yaml_document_get_node(document *yaml_document_t, index int)
--//{
--//    assert(document) // Non-NULL document object is expected.
--//
--//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
--//        return document.nodes.start + index - 1
--//    }
--//    return NULL
--//}
--//
--///**
--// * Get the root object.
--// */
--//
--//YAML_DECLARE(yaml_node_t *)
--//yaml_document_get_root_node(document *yaml_document_t)
--//{
--//    assert(document) // Non-NULL document object is expected.
--//
--//    if (document.nodes.top != document.nodes.start) {
--//        return document.nodes.start
--//    }
--//    return NULL
--//}
--//
--///*
--// * Add a scalar node to a document.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_add_scalar(document *yaml_document_t,
--//        tag *yaml_char_t, value *yaml_char_t, length int,
--//        style yaml_scalar_style_t)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//    mark yaml_mark_t = { 0, 0, 0 }
--//    tag_copy *yaml_char_t = NULL
--//    value_copy *yaml_char_t = NULL
--//    node yaml_node_t
--//
--//    assert(document) // Non-NULL document object is expected.
--//    assert(value) // Non-NULL value is expected.
--//
--//    if (!tag) {
--//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
--//    }
--//
--//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
--//    tag_copy = yaml_strdup(tag)
--//    if (!tag_copy) goto error
--//
--//    if (length < 0) {
--//        length = strlen((char *)value)
--//    }
--//
--//    if (!yaml_check_utf8(value, length)) goto error
--//    value_copy = yaml_malloc(length+1)
--//    if (!value_copy) goto error
--//    memcpy(value_copy, value, length)
--//    value_copy[length] = '\0'
--//
--//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
--//    if (!PUSH(&context, document.nodes, node)) goto error
--//
--//    return document.nodes.top - document.nodes.start
--//
--//error:
--//    yaml_free(tag_copy)
--//    yaml_free(value_copy)
--//
--//    return 0
--//}
--//
--///*
--// * Add a sequence node to a document.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_add_sequence(document *yaml_document_t,
--//        tag *yaml_char_t, style yaml_sequence_style_t)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//    mark yaml_mark_t = { 0, 0, 0 }
--//    tag_copy *yaml_char_t = NULL
--//    struct {
--//        start *yaml_node_item_t
--//        end *yaml_node_item_t
--//        top *yaml_node_item_t
--//    } items = { NULL, NULL, NULL }
--//    node yaml_node_t
--//
--//    assert(document) // Non-NULL document object is expected.
--//
--//    if (!tag) {
--//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
--//    }
--//
--//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
--//    tag_copy = yaml_strdup(tag)
--//    if (!tag_copy) goto error
--//
--//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
--//
--//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
--//            style, mark, mark)
--//    if (!PUSH(&context, document.nodes, node)) goto error
--//
--//    return document.nodes.top - document.nodes.start
--//
--//error:
--//    STACK_DEL(&context, items)
--//    yaml_free(tag_copy)
--//
--//    return 0
--//}
--//
--///*
--// * Add a mapping node to a document.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_add_mapping(document *yaml_document_t,
--//        tag *yaml_char_t, style yaml_mapping_style_t)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//    mark yaml_mark_t = { 0, 0, 0 }
--//    tag_copy *yaml_char_t = NULL
--//    struct {
--//        start *yaml_node_pair_t
--//        end *yaml_node_pair_t
--//        top *yaml_node_pair_t
--//    } pairs = { NULL, NULL, NULL }
--//    node yaml_node_t
--//
--//    assert(document) // Non-NULL document object is expected.
--//
--//    if (!tag) {
--//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
--//    }
--//
--//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
--//    tag_copy = yaml_strdup(tag)
--//    if (!tag_copy) goto error
--//
--//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
--//
--//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
--//            style, mark, mark)
--//    if (!PUSH(&context, document.nodes, node)) goto error
--//
--//    return document.nodes.top - document.nodes.start
--//
--//error:
--//    STACK_DEL(&context, pairs)
--//    yaml_free(tag_copy)
--//
--//    return 0
--//}
--//
--///*
--// * Append an item to a sequence node.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_append_sequence_item(document *yaml_document_t,
--//        sequence int, item int)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//
--//    assert(document) // Non-NULL document is required.
--//    assert(sequence > 0
--//            && document.nodes.start + sequence <= document.nodes.top)
--//                            // Valid sequence id is required.
--//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
--//                            // A sequence node is required.
--//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
--//                            // Valid item id is required.
--//
--//    if (!PUSH(&context,
--//                document.nodes.start[sequence-1].data.sequence.items, item))
--//        return 0
--//
--//    return 1
--//}
--//
--///*
--// * Append a pair of a key and a value to a mapping node.
--// */
--//
--//YAML_DECLARE(int)
--//yaml_document_append_mapping_pair(document *yaml_document_t,
--//        mapping int, key int, value int)
--//{
--//    struct {
--//        error yaml_error_type_t
--//    } context
--//
--//    pair yaml_node_pair_t
--//
--//    assert(document) // Non-NULL document is required.
--//    assert(mapping > 0
--//            && document.nodes.start + mapping <= document.nodes.top)
--//                            // Valid mapping id is required.
--//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
--//                            // A mapping node is required.
--//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
--//                            // Valid key id is required.
--//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
--//                            // Valid value id is required.
--//
--//    pair.key = key
--//    pair.value = value
--//
--//    if (!PUSH(&context,
--//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
--//        return 0
--//
--//    return 1
--//}
--//
--//
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/decode.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/decode.go
-deleted file mode 100644
-index c50c629..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/decode.go
-+++ /dev/null
-@@ -1,665 +0,0 @@
--package yaml
--
--import (
--	"encoding"
--	"encoding/base64"
--	"fmt"
--	"math"
--	"reflect"
--	"strconv"
--	"time"
--)
--
--const (
--	documentNode = 1 << iota
--	mappingNode
--	sequenceNode
--	scalarNode
--	aliasNode
--)
--
--type node struct {
--	kind         int
--	line, column int
--	tag          string
--	value        string
--	implicit     bool
--	children     []*node
--	anchors      map[string]*node
--}
--
--// ----------------------------------------------------------------------------
--// Parser, produces a node tree out of a libyaml event stream.
--
--type parser struct {
--	parser  yaml_parser_t
--	event   yaml_event_t
--	doc     *node
--}
--
--func newParser(b []byte) *parser {
--	p := parser{}
--	if !yaml_parser_initialize(&p.parser) {
--		panic("failed to initialize YAML emitter")
--	}
--
--	if len(b) == 0 {
--		b = []byte{'\n'}
--	}
--
--	yaml_parser_set_input_string(&p.parser, b)
--
--	p.skip()
--	if p.event.typ != yaml_STREAM_START_EVENT {
--		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
--	}
--	p.skip()
--	return &p
--}
--
--func (p *parser) destroy() {
--	if p.event.typ != yaml_NO_EVENT {
--		yaml_event_delete(&p.event)
--	}
--	yaml_parser_delete(&p.parser)
--}
--
--func (p *parser) skip() {
--	if p.event.typ != yaml_NO_EVENT {
--		if p.event.typ == yaml_STREAM_END_EVENT {
--			failf("attempted to go past the end of stream; corrupted value?")
--		}
--		yaml_event_delete(&p.event)
--	}
--	if !yaml_parser_parse(&p.parser, &p.event) {
--		p.fail()
--	}
--}
--
--func (p *parser) fail() {
--	var where string
--	var line int
--	if p.parser.problem_mark.line != 0 {
--		line = p.parser.problem_mark.line
--	} else if p.parser.context_mark.line != 0 {
--		line = p.parser.context_mark.line
--	}
--	if line != 0 {
--		where = "line " + strconv.Itoa(line) + ": "
--	}
--	var msg string
--	if len(p.parser.problem) > 0 {
--		msg = p.parser.problem
--	} else {
--		msg = "unknown problem parsing YAML content"
--	}
--	failf("%s%s", where, msg)
--}
--
--func (p *parser) anchor(n *node, anchor []byte) {
--	if anchor != nil {
--		p.doc.anchors[string(anchor)] = n
--	}
--}
--
--func (p *parser) parse() *node {
--	switch p.event.typ {
--	case yaml_SCALAR_EVENT:
--		return p.scalar()
--	case yaml_ALIAS_EVENT:
--		return p.alias()
--	case yaml_MAPPING_START_EVENT:
--		return p.mapping()
--	case yaml_SEQUENCE_START_EVENT:
--		return p.sequence()
--	case yaml_DOCUMENT_START_EVENT:
--		return p.document()
--	case yaml_STREAM_END_EVENT:
--		// Happens when attempting to decode an empty buffer.
--		return nil
--	default:
--		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
--	}
--	panic("unreachable")
--}
--
--func (p *parser) node(kind int) *node {
--	return &node{
--		kind:   kind,
--		line:   p.event.start_mark.line,
--		column: p.event.start_mark.column,
--	}
--}
--
--func (p *parser) document() *node {
--	n := p.node(documentNode)
--	n.anchors = make(map[string]*node)
--	p.doc = n
--	p.skip()
--	n.children = append(n.children, p.parse())
--	if p.event.typ != yaml_DOCUMENT_END_EVENT {
--		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
--	}
--	p.skip()
--	return n
--}
--
--func (p *parser) alias() *node {
--	n := p.node(aliasNode)
--	n.value = string(p.event.anchor)
--	p.skip()
--	return n
--}
--
--func (p *parser) scalar() *node {
--	n := p.node(scalarNode)
--	n.value = string(p.event.value)
--	n.tag = string(p.event.tag)
--	n.implicit = p.event.implicit
--	p.anchor(n, p.event.anchor)
--	p.skip()
--	return n
--}
--
--func (p *parser) sequence() *node {
--	n := p.node(sequenceNode)
--	p.anchor(n, p.event.anchor)
--	p.skip()
--	for p.event.typ != yaml_SEQUENCE_END_EVENT {
--		n.children = append(n.children, p.parse())
--	}
--	p.skip()
--	return n
--}
--
--func (p *parser) mapping() *node {
--	n := p.node(mappingNode)
--	p.anchor(n, p.event.anchor)
--	p.skip()
--	for p.event.typ != yaml_MAPPING_END_EVENT {
--		n.children = append(n.children, p.parse(), p.parse())
--	}
--	p.skip()
--	return n
--}
--
--// ----------------------------------------------------------------------------
--// Decoder, unmarshals a node into a provided value.
--
--type decoder struct {
--	doc     *node
--	aliases map[string]bool
--	mapType reflect.Type
--	terrors []string
--}
--
--var (
--	mapItemType    = reflect.TypeOf(MapItem{})
--	durationType   = reflect.TypeOf(time.Duration(0))
--	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
--	ifaceType      = defaultMapType.Elem()
--)
--
--func newDecoder() *decoder {
--	d := &decoder{mapType: defaultMapType}
--	d.aliases = make(map[string]bool)
--	return d
--}
--
--func (d *decoder) terror(n *node, tag string, out reflect.Value) {
--	if n.tag != "" {
--		tag = n.tag
--	}
--	value := n.value
--	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
--		if len(value) > 10 {
--			value = " `" + value[:7] + "...`"
--		} else {
--			value = " `" + value + "`"
--		}
--	}
--	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
--}
--
--func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
--	terrlen := len(d.terrors)
--	err := u.UnmarshalYAML(func(v interface{}) (err error) {
--		defer handleErr(&err)
--		d.unmarshal(n, reflect.ValueOf(v))
--		if len(d.terrors) > terrlen {
--			issues := d.terrors[terrlen:]
--			d.terrors = d.terrors[:terrlen]
--			return &TypeError{issues}
--		}
--		return nil
--	})
--	if e, ok := err.(*TypeError); ok {
--		d.terrors = append(d.terrors, e.Errors...)
--		return false
--	}
--	if err != nil {
--		fail(err)
--	}
--	return true
--}
--
--// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
--// if a value is found to implement it.
--// It returns the initialized and dereferenced out value, whether
--// unmarshalling was already done by UnmarshalYAML, and if so whether
--// its types unmarshalled appropriately.
--//
--// If n holds a null value, prepare returns before doing anything.
--func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
--	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
--		return out, false, false
--	}
--	again := true
--	for again {
--		again = false
--		if out.Kind() == reflect.Ptr {
--			if out.IsNil() {
--				out.Set(reflect.New(out.Type().Elem()))
--			}
--			out = out.Elem()
--			again = true
--		}
--		if out.CanAddr() {
--			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
--				good = d.callUnmarshaler(n, u)
--				return out, true, good
--			}
--		}
--	}
--	return out, false, false
--}
--
--func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
--	switch n.kind {
--	case documentNode:
--		return d.document(n, out)
--	case aliasNode:
--		return d.alias(n, out)
--	}
--	out, unmarshaled, good := d.prepare(n, out)
--	if unmarshaled {
--		return good
--	}
--	switch n.kind {
--	case scalarNode:
--		good = d.scalar(n, out)
--	case mappingNode:
--		good = d.mapping(n, out)
--	case sequenceNode:
--		good = d.sequence(n, out)
--	default:
--		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
--	}
--	return good
--}
--
--func (d *decoder) document(n *node, out reflect.Value) (good bool) {
--	if len(n.children) == 1 {
--		d.doc = n
--		d.unmarshal(n.children[0], out)
--		return true
--	}
--	return false
--}
--
--func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
--	an, ok := d.doc.anchors[n.value]
--	if !ok {
--		failf("unknown anchor '%s' referenced", n.value)
--	}
--	if d.aliases[n.value] {
--		failf("anchor '%s' value contains itself", n.value)
--	}
--	d.aliases[n.value] = true
--	good = d.unmarshal(an, out)
--	delete(d.aliases, n.value)
--	return good
--}
--
--var zeroValue reflect.Value
--
--func resetMap(out reflect.Value) {
--	for _, k := range out.MapKeys() {
--		out.SetMapIndex(k, zeroValue)
--	}
--}
--
--func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
--	var tag string
--	var resolved interface{}
--	if n.tag == "" && !n.implicit {
--		tag = yaml_STR_TAG
--		resolved = n.value
--	} else {
--		tag, resolved = resolve(n.tag, n.value)
--		if tag == yaml_BINARY_TAG {
--			data, err := base64.StdEncoding.DecodeString(resolved.(string))
--			if err != nil {
--				failf("!!binary value contains invalid base64 data")
--			}
--			resolved = string(data)
--		}
--	}
--	if resolved == nil {
--		if out.Kind() == reflect.Map && !out.CanAddr() {
--			resetMap(out)
--		} else {
--			out.Set(reflect.Zero(out.Type()))
--		}
--		return true
--	}
--	if s, ok := resolved.(string); ok && out.CanAddr() {
--		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
--			err := u.UnmarshalText([]byte(s))
--			if err != nil {
--				fail(err)
--			}
--			return true
--		}
--	}
--	switch out.Kind() {
--	case reflect.String:
--		if tag == yaml_BINARY_TAG {
--			out.SetString(resolved.(string))
--			good = true
--		} else if resolved != nil {
--			out.SetString(n.value)
--			good = true
--		}
--	case reflect.Interface:
--		if resolved == nil {
--			out.Set(reflect.Zero(out.Type()))
--		} else {
--			out.Set(reflect.ValueOf(resolved))
--		}
--		good = true
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		switch resolved := resolved.(type) {
--		case int:
--			if !out.OverflowInt(int64(resolved)) {
--				out.SetInt(int64(resolved))
--				good = true
--			}
--		case int64:
--			if !out.OverflowInt(resolved) {
--				out.SetInt(resolved)
--				good = true
--			}
--		case uint64:
--			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
--				out.SetInt(int64(resolved))
--				good = true
--			}
--		case float64:
--			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
--				out.SetInt(int64(resolved))
--				good = true
--			}
--		case string:
--			if out.Type() == durationType {
--				d, err := time.ParseDuration(resolved)
--				if err == nil {
--					out.SetInt(int64(d))
--					good = true
--				}
--			}
--		}
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		switch resolved := resolved.(type) {
--		case int:
--			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
--				out.SetUint(uint64(resolved))
--				good = true
--			}
--		case int64:
--			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
--				out.SetUint(uint64(resolved))
--				good = true
--			}
--		case uint64:
--			if !out.OverflowUint(uint64(resolved)) {
--				out.SetUint(uint64(resolved))
--				good = true
--			}
--		case float64:
--			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
--				out.SetUint(uint64(resolved))
--				good = true
--			}
--		}
--	case reflect.Bool:
--		switch resolved := resolved.(type) {
--		case bool:
--			out.SetBool(resolved)
--			good = true
--		}
--	case reflect.Float32, reflect.Float64:
--		switch resolved := resolved.(type) {
--		case int:
--			out.SetFloat(float64(resolved))
--			good = true
--		case int64:
--			out.SetFloat(float64(resolved))
--			good = true
--		case uint64:
--			out.SetFloat(float64(resolved))
--			good = true
--		case float64:
--			out.SetFloat(resolved)
--			good = true
--		}
--	case reflect.Ptr:
--		if out.Type().Elem() == reflect.TypeOf(resolved) {
--			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
--			elem := reflect.New(out.Type().Elem())
--			elem.Elem().Set(reflect.ValueOf(resolved))
--			out.Set(elem)
--			good = true
--		}
--	}
--	if !good {
--		d.terror(n, tag, out)
--	}
--	return good
--}
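
For orientation, a minimal sketch of how the scalar paths above (duration strings, encoding.TextUnmarshaler, and !!binary values) surface through the public API of the vendored gopkg.in/yaml.v2 package; the struct and field names are illustrative only:

    package main

    import (
        "fmt"
        "net"
        "time"

        "gopkg.in/yaml.v2"
    )

    // conf is a hypothetical target type; each field exercises one branch of
    // the scalar decoder above.
    type conf struct {
        Timeout time.Duration `yaml:"timeout"` // string scalar parsed via time.ParseDuration
        Addr    net.IP        `yaml:"addr"`    // *net.IP implements encoding.TextUnmarshaler
        Blob    string        `yaml:"blob"`    // !!binary scalars are base64-decoded into the string
    }

    func main() {
        var c conf
        err := yaml.Unmarshal([]byte("timeout: 3s\naddr: 10.0.0.1\nblob: !!binary gIGC\n"), &c)
        fmt.Println(c.Timeout, c.Addr, len(c.Blob), err) // 3s 10.0.0.1 3 <nil>
    }
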
--
--func settableValueOf(i interface{}) reflect.Value {
--	v := reflect.ValueOf(i)
--	sv := reflect.New(v.Type()).Elem()
--	sv.Set(v)
--	return sv
--}
--
--func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
--	var iface reflect.Value
--	switch out.Kind() {
--	case reflect.Slice:
--		// okay
--	case reflect.Interface:
--		// No type hints. Will have to use a generic sequence.
--		iface = out
--		out = settableValueOf(make([]interface{}, 0))
--	default:
--		d.terror(n, yaml_SEQ_TAG, out)
--		return false
--	}
--	et := out.Type().Elem()
--
--	l := len(n.children)
--	for i := 0; i < l; i++ {
--		e := reflect.New(et).Elem()
--		if ok := d.unmarshal(n.children[i], e); ok {
--			out.Set(reflect.Append(out, e))
--		}
--	}
--	if iface.IsValid() {
--		iface.Set(out)
--	}
--	return true
--}
--
--
--func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
--	switch out.Kind() {
--	case reflect.Struct:
--		return d.mappingStruct(n, out)
--	case reflect.Slice:
--		return d.mappingSlice(n, out)
--	case reflect.Map:
--		// okay
--	case reflect.Interface:
--		if d.mapType.Kind() == reflect.Map {
--			iface := out
--			out = reflect.MakeMap(d.mapType)
--			iface.Set(out)
--		} else {
--			slicev := reflect.New(d.mapType).Elem()
--			if !d.mappingSlice(n, slicev) {
--				return false
--			}
--			out.Set(slicev)
--			return true
--		}
--	default:
--		d.terror(n, yaml_MAP_TAG, out)
--		return false
--	}
--	outt := out.Type()
--	kt := outt.Key()
--	et := outt.Elem()
--
--	mapType := d.mapType
--	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
--		d.mapType = outt
--	}
--
--	if out.IsNil() {
--		out.Set(reflect.MakeMap(outt))
--	}
--	l := len(n.children)
--	for i := 0; i < l; i += 2 {
--		if isMerge(n.children[i]) {
--			d.merge(n.children[i+1], out)
--			continue
--		}
--		k := reflect.New(kt).Elem()
--		if d.unmarshal(n.children[i], k) {
--			kkind := k.Kind()
--			if kkind == reflect.Interface {
--				kkind = k.Elem().Kind()
--			}
--			if kkind == reflect.Map || kkind == reflect.Slice {
--				failf("invalid map key: %#v", k.Interface())
--			}
--			e := reflect.New(et).Elem()
--			if d.unmarshal(n.children[i+1], e) {
--				out.SetMapIndex(k, e)
--			}
--		}
--	}
--	d.mapType = mapType
--	return true
--}
--
--func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
--	outt := out.Type()
--	if outt.Elem() != mapItemType {
--		d.terror(n, yaml_MAP_TAG, out)
--		return false
--	}
--
--	mapType := d.mapType
--	d.mapType = outt
--
--	var slice []MapItem
--	var l = len(n.children)
--	for i := 0; i < l; i += 2 {
--		if isMerge(n.children[i]) {
--			d.merge(n.children[i+1], out)
--			continue
--		}
--		item := MapItem{}
--		k := reflect.ValueOf(&item.Key).Elem()
--		if d.unmarshal(n.children[i], k) {
--			v := reflect.ValueOf(&item.Value).Elem()
--			if d.unmarshal(n.children[i+1], v) {
--				slice = append(slice, item)
--			}
--		}
--	}
--	out.Set(reflect.ValueOf(slice))
--	d.mapType = mapType
--	return true
--}
--
--func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
--	sinfo, err := getStructInfo(out.Type())
--	if err != nil {
--		panic(err)
--	}
--	name := settableValueOf("")
--	l := len(n.children)
--	for i := 0; i < l; i += 2 {
--		ni := n.children[i]
--		if isMerge(ni) {
--			d.merge(n.children[i+1], out)
--			continue
--		}
--		if !d.unmarshal(ni, name) {
--			continue
--		}
--		if info, ok := sinfo.FieldsMap[name.String()]; ok {
--			var field reflect.Value
--			if info.Inline == nil {
--				field = out.Field(info.Num)
--			} else {
--				field = out.FieldByIndex(info.Inline)
--			}
--			d.unmarshal(n.children[i+1], field)
--		}
--	}
--	return true
--}
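
The struct handling above also honors the ",inline" field option used throughout the tests later in this patch; a small, assumed illustration against the vendored gopkg.in/yaml.v2 API:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    // Meta and Spec are hypothetical types; the ",inline" tag folds Meta's
    // fields into the same mapping as Spec's own fields.
    type Meta struct {
        Name string `yaml:"name"`
    }

    type Spec struct {
        Meta     `yaml:",inline"`
        Replicas int `yaml:"replicas"`
    }

    func main() {
        var s Spec
        if err := yaml.Unmarshal([]byte("name: web\nreplicas: 3\n"), &s); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", s) // {Meta:{Name:web} Replicas:3}
    }
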
--
--func failWantMap() {
--	failf("map merge requires map or sequence of maps as the value")
--}
--
--func (d *decoder) merge(n *node, out reflect.Value) {
--	switch n.kind {
--	case mappingNode:
--		d.unmarshal(n, out)
--	case aliasNode:
--		an, ok := d.doc.anchors[n.value]
--		if ok && an.kind != mappingNode {
--			failWantMap()
--		}
--		d.unmarshal(n, out)
--	case sequenceNode:
--		// Step backwards as earlier nodes take precedence.
--		for i := len(n.children) - 1; i >= 0; i-- {
--			ni := n.children[i]
--			if ni.kind == aliasNode {
--				an, ok := d.doc.anchors[ni.value]
--				if ok && an.kind != mappingNode {
--					failWantMap()
--				}
--			} else if ni.kind != mappingNode {
--				failWantMap()
--			}
--			d.unmarshal(ni, out)
--		}
--	default:
--		failWantMap()
--	}
--}
--
--func isMerge(n *node) bool {
--	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
--}
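
As a quick reference for the merge-key handling implemented at the end of this file, here is a hedged usage sketch against the vendored gopkg.in/yaml.v2 package (document and names invented for illustration):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        data := []byte("base: &base {x: 1, y: 2}\nmerged:\n  <<: *base\n  y: 3\n")
        var out map[string]map[string]int
        if err := yaml.Unmarshal(data, &out); err != nil {
            panic(err)
        }
        // The "<<" key pulls in the anchored map; the explicit y then overrides it.
        fmt.Println(out["merged"]["x"], out["merged"]["y"]) // 1 3
    }
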
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/decode_test.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/decode_test.go
-deleted file mode 100644
-index 90ffcc0..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/decode_test.go
-+++ /dev/null
-@@ -1,902 +0,0 @@
--package yaml_test
--
--import (
--	"errors"
--	. "gopkg.in/check.v1"
--	"gopkg.in/yaml.v2"
--	"math"
--	"net"
--	"reflect"
--	"strings"
--	"time"
--)
--
--var unmarshalIntTest = 123
--
--var unmarshalTests = []struct {
--	data  string
--	value interface{}
--}{
--	{
--		"",
--		&struct{}{},
--	}, {
--		"{}", &struct{}{},
--	}, {
--		"v: hi",
--		map[string]string{"v": "hi"},
--	}, {
--		"v: hi", map[string]interface{}{"v": "hi"},
--	}, {
--		"v: true",
--		map[string]string{"v": "true"},
--	}, {
--		"v: true",
--		map[string]interface{}{"v": true},
--	}, {
--		"v: 10",
--		map[string]interface{}{"v": 10},
--	}, {
--		"v: 0b10",
--		map[string]interface{}{"v": 2},
--	}, {
--		"v: 0xA",
--		map[string]interface{}{"v": 10},
--	}, {
--		"v: 4294967296",
--		map[string]int64{"v": 4294967296},
--	}, {
--		"v: 0.1",
--		map[string]interface{}{"v": 0.1},
--	}, {
--		"v: .1",
--		map[string]interface{}{"v": 0.1},
--	}, {
--		"v: .Inf",
--		map[string]interface{}{"v": math.Inf(+1)},
--	}, {
--		"v: -.Inf",
--		map[string]interface{}{"v": math.Inf(-1)},
--	}, {
--		"v: -10",
--		map[string]interface{}{"v": -10},
--	}, {
--		"v: -.1",
--		map[string]interface{}{"v": -0.1},
--	},
--
--	// Simple values.
--	{
--		"123",
--		&unmarshalIntTest,
--	},
--
--	// Floats from spec
--	{
--		"canonical: 6.8523e+5",
--		map[string]interface{}{"canonical": 6.8523e+5},
--	}, {
--		"expo: 685.230_15e+03",
--		map[string]interface{}{"expo": 685.23015e+03},
--	}, {
--		"fixed: 685_230.15",
--		map[string]interface{}{"fixed": 685230.15},
--	}, {
--		"neginf: -.inf",
--		map[string]interface{}{"neginf": math.Inf(-1)},
--	}, {
--		"fixed: 685_230.15",
--		map[string]float64{"fixed": 685230.15},
--	},
--	//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
--	//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
--
--	// Bools from spec
--	{
--		"canonical: y",
--		map[string]interface{}{"canonical": true},
--	}, {
--		"answer: NO",
--		map[string]interface{}{"answer": false},
--	}, {
--		"logical: True",
--		map[string]interface{}{"logical": true},
--	}, {
--		"option: on",
--		map[string]interface{}{"option": true},
--	}, {
--		"option: on",
--		map[string]bool{"option": true},
--	},
--	// Ints from spec
--	{
--		"canonical: 685230",
--		map[string]interface{}{"canonical": 685230},
--	}, {
--		"decimal: +685_230",
--		map[string]interface{}{"decimal": 685230},
--	}, {
--		"octal: 02472256",
--		map[string]interface{}{"octal": 685230},
--	}, {
--		"hexa: 0x_0A_74_AE",
--		map[string]interface{}{"hexa": 685230},
--	}, {
--		"bin: 0b1010_0111_0100_1010_1110",
--		map[string]interface{}{"bin": 685230},
--	}, {
--		"bin: -0b101010",
--		map[string]interface{}{"bin": -42},
--	}, {
--		"decimal: +685_230",
--		map[string]int{"decimal": 685230},
--	},
--
--	//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
--
--	// Nulls from spec
--	{
--		"empty:",
--		map[string]interface{}{"empty": nil},
--	}, {
--		"canonical: ~",
--		map[string]interface{}{"canonical": nil},
--	}, {
--		"english: null",
--		map[string]interface{}{"english": nil},
--	}, {
--		"~: null key",
--		map[interface{}]string{nil: "null key"},
--	}, {
--		"empty:",
--		map[string]*bool{"empty": nil},
--	},
--
--	// Flow sequence
--	{
--		"seq: [A,B]",
--		map[string]interface{}{"seq": []interface{}{"A", "B"}},
--	}, {
--		"seq: [A,B,C,]",
--		map[string][]string{"seq": []string{"A", "B", "C"}},
--	}, {
--		"seq: [A,1,C]",
--		map[string][]string{"seq": []string{"A", "1", "C"}},
--	}, {
--		"seq: [A,1,C]",
--		map[string][]int{"seq": []int{1}},
--	}, {
--		"seq: [A,1,C]",
--		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
--	},
--	// Block sequence
--	{
--		"seq:\n - A\n - B",
--		map[string]interface{}{"seq": []interface{}{"A", "B"}},
--	}, {
--		"seq:\n - A\n - B\n - C",
--		map[string][]string{"seq": []string{"A", "B", "C"}},
--	}, {
--		"seq:\n - A\n - 1\n - C",
--		map[string][]string{"seq": []string{"A", "1", "C"}},
--	}, {
--		"seq:\n - A\n - 1\n - C",
--		map[string][]int{"seq": []int{1}},
--	}, {
--		"seq:\n - A\n - 1\n - C",
--		map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
--	},
--
--	// Literal block scalar
--	{
--		"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
--		map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
--	},
--
--	// Folded block scalar
--	{
--		"scalar: > # Comment\n\n folded\n line\n \n next\n line\n  * one\n  * two\n\n last\n line\n\n",
--		map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
--	},
--
--	// Map inside interface with no type hints.
--	{
--		"a: {b: c}",
--		map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
--	},
--
--	// Structs and type conversions.
--	{
--		"hello: world",
--		&struct{ Hello string }{"world"},
--	}, {
--		"a: {b: c}",
--		&struct{ A struct{ B string } }{struct{ B string }{"c"}},
--	}, {
--		"a: {b: c}",
--		&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
--	}, {
--		"a: {b: c}",
--		&struct{ A map[string]string }{map[string]string{"b": "c"}},
--	}, {
--		"a: {b: c}",
--		&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
--	}, {
--		"a:",
--		&struct{ A map[string]string }{},
--	}, {
--		"a: 1",
--		&struct{ A int }{1},
--	}, {
--		"a: 1",
--		&struct{ A float64 }{1},
--	}, {
--		"a: 1.0",
--		&struct{ A int }{1},
--	}, {
--		"a: 1.0",
--		&struct{ A uint }{1},
--	}, {
--		"a: [1, 2]",
--		&struct{ A []int }{[]int{1, 2}},
--	}, {
--		"a: 1",
--		&struct{ B int }{0},
--	}, {
--		"a: 1",
--		&struct {
--			B int "a"
--		}{1},
--	}, {
--		"a: y",
--		&struct{ A bool }{true},
--	},
--
--	// Some cross type conversions
--	{
--		"v: 42",
--		map[string]uint{"v": 42},
--	}, {
--		"v: -42",
--		map[string]uint{},
--	}, {
--		"v: 4294967296",
--		map[string]uint64{"v": 4294967296},
--	}, {
--		"v: -4294967296",
--		map[string]uint64{},
--	},
--
--	// int
--	{
--		"int_max: 2147483647",
--		map[string]int{"int_max": math.MaxInt32},
--	},
--	{
--		"int_min: -2147483648",
--		map[string]int{"int_min": math.MinInt32},
--	},
--	{
--		"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
--		map[string]int{},
--	},
--
--	// int64
--	{
--		"int64_max: 9223372036854775807",
--		map[string]int64{"int64_max": math.MaxInt64},
--	},
--	{
--		"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
--		map[string]int64{"int64_max_base2": math.MaxInt64},
--	},
--	{
--		"int64_min: -9223372036854775808",
--		map[string]int64{"int64_min": math.MinInt64},
--	},
--	{
--		"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
--		map[string]int64{"int64_neg_base2": -math.MaxInt64},
--	},
--	{
--		"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
--		map[string]int64{},
--	},
--
--	// uint
--	{
--		"uint_min: 0",
--		map[string]uint{"uint_min": 0},
--	},
--	{
--		"uint_max: 4294967295",
--		map[string]uint{"uint_max": math.MaxUint32},
--	},
--	{
--		"uint_underflow: -1",
--		map[string]uint{},
--	},
--
--	// uint64
--	{
--		"uint64_min: 0",
--		map[string]uint{"uint64_min": 0},
--	},
--	{
--		"uint64_max: 18446744073709551615",
--		map[string]uint64{"uint64_max": math.MaxUint64},
--	},
--	{
--		"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
--		map[string]uint64{"uint64_max_base2": math.MaxUint64},
--	},
--	{
--		"uint64_maxint64: 9223372036854775807",
--		map[string]uint64{"uint64_maxint64": math.MaxInt64},
--	},
--	{
--		"uint64_underflow: -1",
--		map[string]uint64{},
--	},
--
--	// float32
--	{
--		"float32_max: 3.40282346638528859811704183484516925440e+38",
--		map[string]float32{"float32_max": math.MaxFloat32},
--	},
--	{
--		"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
--		map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
--	},
--	{
--		"float32_maxuint64: 18446744073709551615",
--		map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
--	},
--	{
--		"float32_maxuint64+1: 18446744073709551616",
--		map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
--	},
--
--	// float64
--	{
--		"float64_max: 1.797693134862315708145274237317043567981e+308",
--		map[string]float64{"float64_max": math.MaxFloat64},
--	},
--	{
--		"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
--		map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
--	},
--	{
--		"float64_maxuint64: 18446744073709551615",
--		map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
--	},
--	{
--		"float64_maxuint64+1: 18446744073709551616",
--		map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
--	},
--
--	// Overflow cases.
--	{
--		"v: 4294967297",
--		map[string]int32{},
--	}, {
--		"v: 128",
--		map[string]int8{},
--	},
--
--	// Quoted values.
--	{
--		"'1': '\"2\"'",
--		map[interface{}]interface{}{"1": "\"2\""},
--	}, {
--		"v:\n- A\n- 'B\n\n  C'\n",
--		map[string][]string{"v": []string{"A", "B\nC"}},
--	},
--
--	// Explicit tags.
--	{
--		"v: !!float '1.1'",
--		map[string]interface{}{"v": 1.1},
--	}, {
--		"v: !!null ''",
--		map[string]interface{}{"v": nil},
--	}, {
--		"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
--		map[string]interface{}{"v": 1},
--	},
--
--	// Anchors and aliases.
--	{
--		"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
--		&struct{ A, B, C, D int }{1, 2, 1, 2},
--	}, {
--		"a: &a {c: 1}\nb: *a",
--		&struct {
--			A, B struct {
--				C int
--			}
--		}{struct{ C int }{1}, struct{ C int }{1}},
--	}, {
--		"a: &a [1, 2]\nb: *a",
--		&struct{ B []int }{[]int{1, 2}},
--	},
--
--	// Bug #1133337
--	{
--		"foo: ''",
--		map[string]*string{"foo": new(string)},
--	}, {
--		"foo: null",
--		map[string]string{"foo": ""},
--	}, {
--		"foo: null",
--		map[string]interface{}{"foo": nil},
--	},
--
--	// Ignored field
--	{
--		"a: 1\nb: 2\n",
--		&struct {
--			A int
--			B int "-"
--		}{1, 0},
--	},
--
--	// Bug #1191981
--	{
--		"" +
--			"%YAML 1.1\n" +
--			"--- !!str\n" +
--			`"Generic line break (no glyph)\n\` + "\n" +
--			` Generic line break (glyphed)\n\` + "\n" +
--			` Line separator\u2028\` + "\n" +
--			` Paragraph separator\u2029"` + "\n",
--		"" +
--			"Generic line break (no glyph)\n" +
--			"Generic line break (glyphed)\n" +
--			"Line separator\u2028Paragraph separator\u2029",
--	},
--
--	// Struct inlining
--	{
--		"a: 1\nb: 2\nc: 3\n",
--		&struct {
--			A int
--			C inlineB `yaml:",inline"`
--		}{1, inlineB{2, inlineC{3}}},
--	},
--
--	// bug 1243827
--	{
--		"a: -b_c",
--		map[string]interface{}{"a": "-b_c"},
--	},
--	{
--		"a: +b_c",
--		map[string]interface{}{"a": "+b_c"},
--	},
--	{
--		"a: 50cent_of_dollar",
--		map[string]interface{}{"a": "50cent_of_dollar"},
--	},
--
--	// Duration
--	{
--		"a: 3s",
--		map[string]time.Duration{"a": 3 * time.Second},
--	},
--
--	// Issue #24.
--	{
--		"a: <foo>",
--		map[string]string{"a": "<foo>"},
--	},
--
--	// Base 60 floats are obsolete and unsupported.
--	{
--		"a: 1:1\n",
--		map[string]string{"a": "1:1"},
--	},
--
--	// Binary data.
--	{
--		"a: !!binary gIGC\n",
--		map[string]string{"a": "\x80\x81\x82"},
--	}, {
--		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
--		map[string]string{"a": strings.Repeat("\x90", 54)},
--	}, {
--		"a: !!binary |\n  " + strings.Repeat("A", 70) + "\n  ==\n",
--		map[string]string{"a": strings.Repeat("\x00", 52)},
--	},
--
--	// Ordered maps.
--	{
--		"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
--		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
--	},
--
--	// Issue #39.
--	{
--		"a:\n b:\n  c: d\n",
--		map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
--	},
--
--	// Custom map type.
--	{
--		"a: {b: c}",
--		M{"a": M{"b": "c"}},
--	},
--
--	// Support encoding.TextUnmarshaler.
--	{
--		"a: 1.2.3.4\n",
--		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
--	},
--}
--
--type M map[interface{}]interface{}
--
--type inlineB struct {
--	B       int
--	inlineC `yaml:",inline"`
--}
--
--type inlineC struct {
--	C int
--}
--
--func (s *S) TestUnmarshal(c *C) {
--	for _, item := range unmarshalTests {
--		t := reflect.ValueOf(item.value).Type()
--		var value interface{}
--		switch t.Kind() {
--		case reflect.Map:
--			value = reflect.MakeMap(t).Interface()
--		case reflect.String:
--			value = reflect.New(t).Interface()
--		case reflect.Ptr:
--			value = reflect.New(t.Elem()).Interface()
--		default:
--			c.Fatalf("missing case for %s", t)
--		}
--		err := yaml.Unmarshal([]byte(item.data), value)
--		if _, ok := err.(*yaml.TypeError); !ok {
--			c.Assert(err, IsNil)
--		}
--		if t.Kind() == reflect.String {
--			c.Assert(*value.(*string), Equals, item.value)
--		} else {
--			c.Assert(value, DeepEquals, item.value)
--		}
--	}
--}
--
--func (s *S) TestUnmarshalNaN(c *C) {
--	value := map[string]interface{}{}
--	err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
--	c.Assert(err, IsNil)
--	c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
--}
--
--var unmarshalErrorTests = []struct {
--	data, error string
--}{
--	{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
--	{"v: [A,", "yaml: line 1: did not find expected node content"},
--	{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
--	{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
--	{"a: &a\n  b: *a\n", "yaml: anchor 'a' value contains itself"},
--	{"value: -", "yaml: block sequence entries are not allowed in this context"},
--	{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
--	{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
--	{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
--}
--
--func (s *S) TestUnmarshalErrors(c *C) {
--	for _, item := range unmarshalErrorTests {
--		var value interface{}
--		err := yaml.Unmarshal([]byte(item.data), &value)
--		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
--	}
--}
--
--var unmarshalerTests = []struct {
--	data, tag string
--	value     interface{}
--}{
--	{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
--	{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
--	{"_: 10", "!!int", 10},
--	{"_: null", "!!null", nil},
--	{`_: BAR!`, "!!str", "BAR!"},
--	{`_: "BAR!"`, "!!str", "BAR!"},
--	{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
--}
--
--var unmarshalerResult = map[int]error{}
--
--type unmarshalerType struct {
--	value interface{}
--}
--
--func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
--	if err := unmarshal(&o.value); err != nil {
--		return err
--	}
--	if i, ok := o.value.(int); ok {
--		if result, ok := unmarshalerResult[i]; ok {
--			return result
--		}
--	}
--	return nil
--}
--
--type unmarshalerPointer struct {
--	Field *unmarshalerType "_"
--}
--
--type unmarshalerValue struct {
--	Field unmarshalerType "_"
--}
--
--func (s *S) TestUnmarshalerPointerField(c *C) {
--	for _, item := range unmarshalerTests {
--		obj := &unmarshalerPointer{}
--		err := yaml.Unmarshal([]byte(item.data), obj)
--		c.Assert(err, IsNil)
--		if item.value == nil {
--			c.Assert(obj.Field, IsNil)
--		} else {
--			c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
--			c.Assert(obj.Field.value, DeepEquals, item.value)
--		}
--	}
--}
--
--func (s *S) TestUnmarshalerValueField(c *C) {
--	for _, item := range unmarshalerTests {
--		obj := &unmarshalerValue{}
--		err := yaml.Unmarshal([]byte(item.data), obj)
--		c.Assert(err, IsNil)
--		c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
--		c.Assert(obj.Field.value, DeepEquals, item.value)
--	}
--}
--
--func (s *S) TestUnmarshalerWholeDocument(c *C) {
--	obj := &unmarshalerType{}
--	err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
--	c.Assert(err, IsNil)
--	value, ok := obj.value.(map[interface{}]interface{})
--	c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
--	c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
--}
--
--func (s *S) TestUnmarshalerTypeError(c *C) {
--	unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
--	unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
--	defer func() {
--		delete(unmarshalerResult, 2)
--		delete(unmarshalerResult, 4)
--	}()
--
--	type T struct {
--		Before int
--		After  int
--		M      map[string]*unmarshalerType
--	}
--	var v T
--	data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
--	err := yaml.Unmarshal([]byte(data), &v)
--	c.Assert(err, ErrorMatches, ""+
--		"yaml: unmarshal errors:\n"+
--		"  line 1: cannot unmarshal !!str `A` into int\n"+
--		"  foo\n"+
--		"  bar\n"+
--		"  line 1: cannot unmarshal !!str `B` into int")
--	c.Assert(v.M["abc"], NotNil)
--	c.Assert(v.M["def"], IsNil)
--	c.Assert(v.M["ghi"], NotNil)
--	c.Assert(v.M["jkl"], IsNil)
--
--	c.Assert(v.M["abc"].value, Equals, 1)
--	c.Assert(v.M["ghi"].value, Equals, 3)
--}
--
--type proxyTypeError struct{}
--
--func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
--	var s string
--	var a int32
--	var b int64
--	if err := unmarshal(&s); err != nil {
--		panic(err)
--	}
--	if s == "a" {
--		if err := unmarshal(&b); err == nil {
--			panic("should have failed")
--		}
--		return unmarshal(&a)
--	}
--	if err := unmarshal(&a); err == nil {
--		panic("should have failed")
--	}
--	return unmarshal(&b)
--}
--
--func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
--	type T struct {
--		Before int
--		After  int
--		M      map[string]*proxyTypeError
--	}
--	var v T
--	data := `{before: A, m: {abc: a, def: b}, after: B}`
--	err := yaml.Unmarshal([]byte(data), &v)
--	c.Assert(err, ErrorMatches, ""+
--		"yaml: unmarshal errors:\n"+
--		"  line 1: cannot unmarshal !!str `A` into int\n"+
--		"  line 1: cannot unmarshal !!str `a` into int32\n"+
--		"  line 1: cannot unmarshal !!str `b` into int64\n"+
--		"  line 1: cannot unmarshal !!str `B` into int")
--}
--
--type failingUnmarshaler struct{}
--
--var failingErr = errors.New("failingErr")
--
--func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
--	return failingErr
--}
--
--func (s *S) TestUnmarshalerError(c *C) {
--	err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
--	c.Assert(err, Equals, failingErr)
--}
--
--// From http://yaml.org/type/merge.html
--var mergeTests = `
--anchors:
--  list:
--    - &CENTER { "x": 1, "y": 2 }
--    - &LEFT   { "x": 0, "y": 2 }
--    - &BIG    { "r": 10 }
--    - &SMALL  { "r": 1 }
--
--# All the following maps are equal:
--
--plain:
--  # Explicit keys
--  "x": 1
--  "y": 2
--  "r": 10
--  label: center/big
--
--mergeOne:
--  # Merge one map
--  << : *CENTER
--  "r": 10
--  label: center/big
--
--mergeMultiple:
--  # Merge multiple maps
--  << : [ *CENTER, *BIG ]
--  label: center/big
--
--override:
--  # Override
--  << : [ *BIG, *LEFT, *SMALL ]
--  "x": 1
--  label: center/big
--
--shortTag:
--  # Explicit short merge tag
--  !!merge "<<" : [ *CENTER, *BIG ]
--  label: center/big
--
--longTag:
--  # Explicit merge long tag
--  !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
--  label: center/big
--
--inlineMap:
--  # Inlined map 
--  << : {"x": 1, "y": 2, "r": 10}
--  label: center/big
--
--inlineSequenceMap:
--  # Inlined map in sequence
--  << : [ *CENTER, {"r": 10} ]
--  label: center/big
--`
--
--func (s *S) TestMerge(c *C) {
--	var want = map[interface{}]interface{}{
--		"x":     1,
--		"y":     2,
--		"r":     10,
--		"label": "center/big",
--	}
--
--	var m map[interface{}]interface{}
--	err := yaml.Unmarshal([]byte(mergeTests), &m)
--	c.Assert(err, IsNil)
--	for name, test := range m {
--		if name == "anchors" {
--			continue
--		}
--		c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
--	}
--}
--
--func (s *S) TestMergeStruct(c *C) {
--	type Data struct {
--		X, Y, R int
--		Label   string
--	}
--	want := Data{1, 2, 10, "center/big"}
--
--	var m map[string]Data
--	err := yaml.Unmarshal([]byte(mergeTests), &m)
--	c.Assert(err, IsNil)
--	for name, test := range m {
--		if name == "anchors" {
--			continue
--		}
--		c.Assert(test, Equals, want, Commentf("test %q failed", name))
--	}
--}
--
--var unmarshalNullTests = []func() interface{}{
--	func() interface{} { var v interface{}; v = "v"; return &v },
--	func() interface{} { var s = "s"; return &s },
--	func() interface{} { var s = "s"; sptr := &s; return &sptr },
--	func() interface{} { var i = 1; return &i },
--	func() interface{} { var i = 1; iptr := &i; return &iptr },
--	func() interface{} { m := map[string]int{"s": 1}; return &m },
--	func() interface{} { m := map[string]int{"s": 1}; return m },
--}
--
--func (s *S) TestUnmarshalNull(c *C) {
--	for _, test := range unmarshalNullTests {
--		item := test()
--		zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
--		err := yaml.Unmarshal([]byte("null"), item)
--		c.Assert(err, IsNil)
--		if reflect.TypeOf(item).Kind() == reflect.Map {
--			c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
--		} else {
--			c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
--		}
--	}
--}
--
--//var data []byte
--//func init() {
--//	var err error
--//	data, err = ioutil.ReadFile("/tmp/file.yaml")
--//	if err != nil {
--//		panic(err)
--//	}
--//}
--//
--//func (s *S) BenchmarkUnmarshal(c *C) {
--//	var err error
--//	for i := 0; i < c.N; i++ {
--//		var v map[string]interface{}
--//		err = yaml.Unmarshal(data, &v)
--//	}
--//	if err != nil {
--//		panic(err)
--//	}
--//}
--//
--//func (s *S) BenchmarkMarshal(c *C) {
--//	var v map[string]interface{}
--//	yaml.Unmarshal(data, &v)
--//	c.ResetTimer()
--//	for i := 0; i < c.N; i++ {
--//		yaml.Marshal(&v)
--//	}
--//}
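
The UnmarshalYAML hook exercised by the tests above can also be used directly; a minimal, assumed sketch (the csvList type is invented for illustration):

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/yaml.v2"
    )

    // csvList splits a comma-separated scalar into a slice while decoding.
    type csvList []string

    func (l *csvList) UnmarshalYAML(unmarshal func(interface{}) error) error {
        var raw string
        if err := unmarshal(&raw); err != nil {
            return err
        }
        for _, part := range strings.Split(raw, ",") {
            *l = append(*l, strings.TrimSpace(part))
        }
        return nil
    }

    func main() {
        var v struct {
            Tags csvList `yaml:"tags"`
        }
        if err := yaml.Unmarshal([]byte("tags: a, b, c\n"), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.Tags) // [a b c]
    }
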
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/emitterc.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/emitterc.go
-deleted file mode 100644
-index 9b3dc4a..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/emitterc.go
-+++ /dev/null
-@@ -1,1685 +0,0 @@
--package yaml
--
--import (
--	"bytes"
--)
--
--// Flush the buffer if needed.
--func flush(emitter *yaml_emitter_t) bool {
--	if emitter.buffer_pos+5 >= len(emitter.buffer) {
--		return yaml_emitter_flush(emitter)
--	}
--	return true
--}
--
--// Put a character to the output buffer.
--func put(emitter *yaml_emitter_t, value byte) bool {
--	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
--		return false
--	}
--	emitter.buffer[emitter.buffer_pos] = value
--	emitter.buffer_pos++
--	emitter.column++
--	return true
--}
--
--// Put a line break to the output buffer.
--func put_break(emitter *yaml_emitter_t) bool {
--	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
--		return false
--	}
--	switch emitter.line_break {
--	case yaml_CR_BREAK:
--		emitter.buffer[emitter.buffer_pos] = '\r'
--		emitter.buffer_pos += 1
--	case yaml_LN_BREAK:
--		emitter.buffer[emitter.buffer_pos] = '\n'
--		emitter.buffer_pos += 1
--	case yaml_CRLN_BREAK:
--		emitter.buffer[emitter.buffer_pos+0] = '\r'
--		emitter.buffer[emitter.buffer_pos+1] = '\n'
--		emitter.buffer_pos += 2
--	default:
--		panic("unknown line break setting")
--	}
--	emitter.column = 0
--	emitter.line++
--	return true
--}
--
--// Copy a character from a string into the buffer.
--func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
--	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
--		return false
--	}
--	p := emitter.buffer_pos
--	w := width(s[*i])
--	switch w {
--	case 4:
--		emitter.buffer[p+3] = s[*i+3]
--		fallthrough
--	case 3:
--		emitter.buffer[p+2] = s[*i+2]
--		fallthrough
--	case 2:
--		emitter.buffer[p+1] = s[*i+1]
--		fallthrough
--	case 1:
--		emitter.buffer[p+0] = s[*i+0]
--	default:
--		panic("unknown character width")
--	}
--	emitter.column++
--	emitter.buffer_pos += w
--	*i += w
--	return true
--}
--
--// Write a whole string into the buffer.
--func write_all(emitter *yaml_emitter_t, s []byte) bool {
--	for i := 0; i < len(s); {
--		if !write(emitter, s, &i) {
--			return false
--		}
--	}
--	return true
--}
--
--// Copy a line break character from a string into the buffer.
--func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
--	if s[*i] == '\n' {
--		if !put_break(emitter) {
--			return false
--		}
--		*i++
--	} else {
--		if !write(emitter, s, i) {
--			return false
--		}
--		emitter.column = 0
--		emitter.line++
--	}
--	return true
--}
--
--// Set an emitter error and return false.
--func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
--	emitter.error = yaml_EMITTER_ERROR
--	emitter.problem = problem
--	return false
--}
--
--// Emit an event.
--func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	emitter.events = append(emitter.events, *event)
--	for !yaml_emitter_need_more_events(emitter) {
--		event := &emitter.events[emitter.events_head]
--		if !yaml_emitter_analyze_event(emitter, event) {
--			return false
--		}
--		if !yaml_emitter_state_machine(emitter, event) {
--			return false
--		}
--		yaml_event_delete(event)
--		emitter.events_head++
--	}
--	return true
--}
--
--// Check if we need to accumulate more events before emitting.
--//
--// We accumulate extra
--//  - 1 event for DOCUMENT-START
--//  - 2 events for SEQUENCE-START
--//  - 3 events for MAPPING-START
--//
--func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
--	if emitter.events_head == len(emitter.events) {
--		return true
--	}
--	var accumulate int
--	switch emitter.events[emitter.events_head].typ {
--	case yaml_DOCUMENT_START_EVENT:
--		accumulate = 1
--		break
--	case yaml_SEQUENCE_START_EVENT:
--		accumulate = 2
--		break
--	case yaml_MAPPING_START_EVENT:
--		accumulate = 3
--		break
--	default:
--		return false
--	}
--	if len(emitter.events)-emitter.events_head > accumulate {
--		return false
--	}
--	var level int
--	for i := emitter.events_head; i < len(emitter.events); i++ {
--		switch emitter.events[i].typ {
--		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
--			level++
--		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
--			level--
--		}
--		if level == 0 {
--			return false
--		}
--	}
--	return true
--}
--
--// Append a directive to the directives stack.
--func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
--	for i := 0; i < len(emitter.tag_directives); i++ {
--		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
--			if allow_duplicates {
--				return true
--			}
--			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
--		}
--	}
--
--	// [Go] Do we actually need to copy this given garbage collection
--	// and the lack of deallocating destructors?
--	tag_copy := yaml_tag_directive_t{
--		handle: make([]byte, len(value.handle)),
--		prefix: make([]byte, len(value.prefix)),
--	}
--	copy(tag_copy.handle, value.handle)
--	copy(tag_copy.prefix, value.prefix)
--	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
--	return true
--}
--
--// Increase the indentation level.
--func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
--	emitter.indents = append(emitter.indents, emitter.indent)
--	if emitter.indent < 0 {
--		if flow {
--			emitter.indent = emitter.best_indent
--		} else {
--			emitter.indent = 0
--		}
--	} else if !indentless {
--		emitter.indent += emitter.best_indent
--	}
--	return true
--}
--
--// State dispatcher.
--func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	switch emitter.state {
--	default:
--	case yaml_EMIT_STREAM_START_STATE:
--		return yaml_emitter_emit_stream_start(emitter, event)
--
--	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
--		return yaml_emitter_emit_document_start(emitter, event, true)
--
--	case yaml_EMIT_DOCUMENT_START_STATE:
--		return yaml_emitter_emit_document_start(emitter, event, false)
--
--	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
--		return yaml_emitter_emit_document_content(emitter, event)
--
--	case yaml_EMIT_DOCUMENT_END_STATE:
--		return yaml_emitter_emit_document_end(emitter, event)
--
--	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
--		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
--
--	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
--		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
--
--	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
--		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
--
--	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
--		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
--
--	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
--		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
--
--	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
--		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
--
--	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
--		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
--
--	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
--		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
--
--	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
--		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
--
--	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
--		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
--
--	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
--		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
--
--	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
--		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
--
--	case yaml_EMIT_END_STATE:
--		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
--	}
--	panic("invalid emitter state")
--}
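
This state machine is normally driven indirectly through the package's Marshal entry point; a small, assumed usage sketch of the vendored gopkg.in/yaml.v2 API:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        out, err := yaml.Marshal(map[string]interface{}{
            "name":  "etcd",
            "ports": []int{2379, 2380},
        })
        if err != nil {
            panic(err)
        }
        // With this emitter's defaults, keys are sorted and block style is used:
        //   name: etcd
        //   ports:
        //   - 2379
        //   - 2380
        fmt.Print(string(out))
    }
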
--
--// Expect STREAM-START.
--func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if event.typ != yaml_STREAM_START_EVENT {
--		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
--	}
--	if emitter.encoding == yaml_ANY_ENCODING {
--		emitter.encoding = event.encoding
--		if emitter.encoding == yaml_ANY_ENCODING {
--			emitter.encoding = yaml_UTF8_ENCODING
--		}
--	}
--	if emitter.best_indent < 2 || emitter.best_indent > 9 {
--		emitter.best_indent = 2
--	}
--	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
--		emitter.best_width = 80
--	}
--	if emitter.best_width < 0 {
--		emitter.best_width = 1<<31 - 1
--	}
--	if emitter.line_break == yaml_ANY_BREAK {
--		emitter.line_break = yaml_LN_BREAK
--	}
--
--	emitter.indent = -1
--	emitter.line = 0
--	emitter.column = 0
--	emitter.whitespace = true
--	emitter.indention = true
--
--	if emitter.encoding != yaml_UTF8_ENCODING {
--		if !yaml_emitter_write_bom(emitter) {
--			return false
--		}
--	}
--	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
--	return true
--}
--
--// Expect DOCUMENT-START or STREAM-END.
--func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
--
--	if event.typ == yaml_DOCUMENT_START_EVENT {
--
--		if event.version_directive != nil {
--			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
--				return false
--			}
--		}
--
--		for i := 0; i < len(event.tag_directives); i++ {
--			tag_directive := &event.tag_directives[i]
--			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
--				return false
--			}
--			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
--				return false
--			}
--		}
--
--		for i := 0; i < len(default_tag_directives); i++ {
--			tag_directive := &default_tag_directives[i]
--			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
--				return false
--			}
--		}
--
--		implicit := event.implicit
--		if !first || emitter.canonical {
--			implicit = false
--		}
--
--		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
--			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--
--		if event.version_directive != nil {
--			implicit = false
--			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--
--		if len(event.tag_directives) > 0 {
--			implicit = false
--			for i := 0; i < len(event.tag_directives); i++ {
--				tag_directive := &event.tag_directives[i]
--				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
--					return false
--				}
--				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
--					return false
--				}
--				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
--					return false
--				}
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--			}
--		}
--
--		if yaml_emitter_check_empty_document(emitter) {
--			implicit = false
--		}
--		if !implicit {
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
--				return false
--			}
--			if emitter.canonical {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--			}
--		}
--
--		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
--		return true
--	}
--
--	if event.typ == yaml_STREAM_END_EVENT {
--		if emitter.open_ended {
--			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--		if !yaml_emitter_flush(emitter) {
--			return false
--		}
--		emitter.state = yaml_EMIT_END_STATE
--		return true
--	}
--
--	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
--}
--
--// Expect the root node.
--func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
--	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
--}
--
--// Expect DOCUMENT-END.
--func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if event.typ != yaml_DOCUMENT_END_EVENT {
--		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
--	}
--	if !yaml_emitter_write_indent(emitter) {
--		return false
--	}
--	if !event.implicit {
--		// [Go] Allocate the slice elsewhere.
--		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
--			return false
--		}
--		if !yaml_emitter_write_indent(emitter) {
--			return false
--		}
--	}
--	if !yaml_emitter_flush(emitter) {
--		return false
--	}
--	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
--	emitter.tag_directives = emitter.tag_directives[:0]
--	return true
--}
--
--// Expect a flow item node.
--func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
--	if first {
--		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
--			return false
--		}
--		if !yaml_emitter_increase_indent(emitter, true, false) {
--			return false
--		}
--		emitter.flow_level++
--	}
--
--	if event.typ == yaml_SEQUENCE_END_EVENT {
--		emitter.flow_level--
--		emitter.indent = emitter.indents[len(emitter.indents)-1]
--		emitter.indents = emitter.indents[:len(emitter.indents)-1]
--		if emitter.canonical && !first {
--			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
--			return false
--		}
--		emitter.state = emitter.states[len(emitter.states)-1]
--		emitter.states = emitter.states[:len(emitter.states)-1]
--
--		return true
--	}
--
--	if !first {
--		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
--			return false
--		}
--	}
--
--	if emitter.canonical || emitter.column > emitter.best_width {
--		if !yaml_emitter_write_indent(emitter) {
--			return false
--		}
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
--}
--
--// Expect a flow key node.
--func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
--	if first {
--		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
--			return false
--		}
--		if !yaml_emitter_increase_indent(emitter, true, false) {
--			return false
--		}
--		emitter.flow_level++
--	}
--
--	if event.typ == yaml_MAPPING_END_EVENT {
--		emitter.flow_level--
--		emitter.indent = emitter.indents[len(emitter.indents)-1]
--		emitter.indents = emitter.indents[:len(emitter.indents)-1]
--		if emitter.canonical && !first {
--			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
--				return false
--			}
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
--			return false
--		}
--		emitter.state = emitter.states[len(emitter.states)-1]
--		emitter.states = emitter.states[:len(emitter.states)-1]
--		return true
--	}
--
--	if !first {
--		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
--			return false
--		}
--	}
--	if emitter.canonical || emitter.column > emitter.best_width {
--		if !yaml_emitter_write_indent(emitter) {
--			return false
--		}
--	}
--
--	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
--		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
--		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
--	}
--	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
--		return false
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
--}
--
--// Expect a flow value node.
--func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
--	if simple {
--		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
--			return false
--		}
--	} else {
--		if emitter.canonical || emitter.column > emitter.best_width {
--			if !yaml_emitter_write_indent(emitter) {
--				return false
--			}
--		}
--		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
--			return false
--		}
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
--}
--
--// Expect a block item node.
--func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
--	if first {
--		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
--			return false
--		}
--	}
--	if event.typ == yaml_SEQUENCE_END_EVENT {
--		emitter.indent = emitter.indents[len(emitter.indents)-1]
--		emitter.indents = emitter.indents[:len(emitter.indents)-1]
--		emitter.state = emitter.states[len(emitter.states)-1]
--		emitter.states = emitter.states[:len(emitter.states)-1]
--		return true
--	}
--	if !yaml_emitter_write_indent(emitter) {
--		return false
--	}
--	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
--		return false
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
--}
--
--// Expect a block key node.
--func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
--	if first {
--		if !yaml_emitter_increase_indent(emitter, false, false) {
--			return false
--		}
--	}
--	if event.typ == yaml_MAPPING_END_EVENT {
--		emitter.indent = emitter.indents[len(emitter.indents)-1]
--		emitter.indents = emitter.indents[:len(emitter.indents)-1]
--		emitter.state = emitter.states[len(emitter.states)-1]
--		emitter.states = emitter.states[:len(emitter.states)-1]
--		return true
--	}
--	if !yaml_emitter_write_indent(emitter) {
--		return false
--	}
--	if yaml_emitter_check_simple_key(emitter) {
--		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
--		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
--	}
--	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
--		return false
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
--}
--
--// Expect a block value node.
--func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
--	if simple {
--		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
--			return false
--		}
--	} else {
--		if !yaml_emitter_write_indent(emitter) {
--			return false
--		}
--		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
--			return false
--		}
--	}
--	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
--	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
--}
--
--// Expect a node.
--func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
--	root bool, sequence bool, mapping bool, simple_key bool) bool {
--
--	emitter.root_context = root
--	emitter.sequence_context = sequence
--	emitter.mapping_context = mapping
--	emitter.simple_key_context = simple_key
--
--	switch event.typ {
--	case yaml_ALIAS_EVENT:
--		return yaml_emitter_emit_alias(emitter, event)
--	case yaml_SCALAR_EVENT:
--		return yaml_emitter_emit_scalar(emitter, event)
--	case yaml_SEQUENCE_START_EVENT:
--		return yaml_emitter_emit_sequence_start(emitter, event)
--	case yaml_MAPPING_START_EVENT:
--		return yaml_emitter_emit_mapping_start(emitter, event)
--	default:
--		return yaml_emitter_set_emitter_error(emitter,
--			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
--	}
--	return false
--}
--
--// Expect ALIAS.
--func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if !yaml_emitter_process_anchor(emitter) {
--		return false
--	}
--	emitter.state = emitter.states[len(emitter.states)-1]
--	emitter.states = emitter.states[:len(emitter.states)-1]
--	return true
--}
--
--// Expect SCALAR.
--func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if !yaml_emitter_select_scalar_style(emitter, event) {
--		return false
--	}
--	if !yaml_emitter_process_anchor(emitter) {
--		return false
--	}
--	if !yaml_emitter_process_tag(emitter) {
--		return false
--	}
--	if !yaml_emitter_increase_indent(emitter, true, false) {
--		return false
--	}
--	if !yaml_emitter_process_scalar(emitter) {
--		return false
--	}
--	emitter.indent = emitter.indents[len(emitter.indents)-1]
--	emitter.indents = emitter.indents[:len(emitter.indents)-1]
--	emitter.state = emitter.states[len(emitter.states)-1]
--	emitter.states = emitter.states[:len(emitter.states)-1]
--	return true
--}
--
--// Expect SEQUENCE-START.
--func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if !yaml_emitter_process_anchor(emitter) {
--		return false
--	}
--	if !yaml_emitter_process_tag(emitter) {
--		return false
--	}
--	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
--		yaml_emitter_check_empty_sequence(emitter) {
--		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
--	} else {
--		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
--	}
--	return true
--}
--
--// Expect MAPPING-START.
--func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--	if !yaml_emitter_process_anchor(emitter) {
--		return false
--	}
--	if !yaml_emitter_process_tag(emitter) {
--		return false
--	}
--	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
--		yaml_emitter_check_empty_mapping(emitter) {
--		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
--	} else {
--		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
--	}
--	return true
--}
--
--// Check if the document content is an empty scalar.
--func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
--	return false // [Go] Huh?
--}
--
--// Check if the next events represent an empty sequence.
--func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
--	if len(emitter.events)-emitter.events_head < 2 {
--		return false
--	}
--	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
--		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
--}
--
--// Check if the next events represent an empty mapping.
--func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
--	if len(emitter.events)-emitter.events_head < 2 {
--		return false
--	}
--	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
--		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
--}
--
--// Check if the next node can be expressed as a simple key.
--func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
--	length := 0
--	switch emitter.events[emitter.events_head].typ {
--	case yaml_ALIAS_EVENT:
--		length += len(emitter.anchor_data.anchor)
--	case yaml_SCALAR_EVENT:
--		if emitter.scalar_data.multiline {
--			return false
--		}
--		length += len(emitter.anchor_data.anchor) +
--			len(emitter.tag_data.handle) +
--			len(emitter.tag_data.suffix) +
--			len(emitter.scalar_data.value)
--	case yaml_SEQUENCE_START_EVENT:
--		if !yaml_emitter_check_empty_sequence(emitter) {
--			return false
--		}
--		length += len(emitter.anchor_data.anchor) +
--			len(emitter.tag_data.handle) +
--			len(emitter.tag_data.suffix)
--	case yaml_MAPPING_START_EVENT:
--		if !yaml_emitter_check_empty_mapping(emitter) {
--			return false
--		}
--		length += len(emitter.anchor_data.anchor) +
--			len(emitter.tag_data.handle) +
--			len(emitter.tag_data.suffix)
--	default:
--		return false
--	}
--	return length <= 128
--}
--
--// Determine an acceptable scalar style.
--func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--
--	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
--	if no_tag && !event.implicit && !event.quoted_implicit {
--		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
--	}
--
--	style := event.scalar_style()
--	if style == yaml_ANY_SCALAR_STYLE {
--		style = yaml_PLAIN_SCALAR_STYLE
--	}
--	if emitter.canonical {
--		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--	}
--	if emitter.simple_key_context && emitter.scalar_data.multiline {
--		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--	}
--
--	if style == yaml_PLAIN_SCALAR_STYLE {
--		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
--			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
--			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
--		}
--		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
--			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
--		}
--		if no_tag && !event.implicit {
--			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
--		}
--	}
--	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
--		if !emitter.scalar_data.single_quoted_allowed {
--			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--		}
--	}
--	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
--		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
--			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--		}
--	}
--
--	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
--		emitter.tag_data.handle = []byte{'!'}
--	}
--	emitter.scalar_data.style = style
--	return true
--}
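
The style selection above is content-driven; a hedged illustration of what that means at the Marshal level (exact quoting may vary with the emitter version):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        out, _ := yaml.Marshal(map[string]string{
            "plain":  "hello",        // plain style is allowed
            "quoted": "yes",          // would re-resolve as a bool, so it gets quoted
            "multi":  "line1\nline2", // line breaks rule out the plain style
        })
        fmt.Print(string(out))
    }
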
--
--// Write an anchor.
--func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
--	if emitter.anchor_data.anchor == nil {
--		return true
--	}
--	c := []byte{'&'}
--	if emitter.anchor_data.alias {
--		c[0] = '*'
--	}
--	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
--		return false
--	}
--	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
--}
--
--// Write a tag.
--func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
--	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
--		return true
--	}
--	if len(emitter.tag_data.handle) > 0 {
--		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
--			return false
--		}
--		if len(emitter.tag_data.suffix) > 0 {
--			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
--				return false
--			}
--		}
--	} else {
--		// [Go] Allocate these slices elsewhere.
--		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
--			return false
--		}
--		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
--			return false
--		}
--		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
--			return false
--		}
--	}
--	return true
--}
--
--// Write a scalar.
--func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
--	switch emitter.scalar_data.style {
--	case yaml_PLAIN_SCALAR_STYLE:
--		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
--
--	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
--		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
--
--	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
--		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
--
--	case yaml_LITERAL_SCALAR_STYLE:
--		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
--
--	case yaml_FOLDED_SCALAR_STYLE:
--		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
--	}
--	panic("unknown scalar style")
--}
--
--// Check if a %YAML directive is valid.
--func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
--	if version_directive.major != 1 || version_directive.minor != 1 {
--		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
--	}
--	return true
--}
--
--// Check if a %TAG directive is valid.
--func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
--	handle := tag_directive.handle
--	prefix := tag_directive.prefix
--	if len(handle) == 0 {
--		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
--	}
--	if handle[0] != '!' {
--		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
--	}
--	if handle[len(handle)-1] != '!' {
--		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
--	}
--	for i := 1; i < len(handle)-1; i += width(handle[i]) {
--		if !is_alpha(handle, i) {
--			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
--		}
--	}
--	if len(prefix) == 0 {
--		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
--	}
--	return true
--}
--
--// Check if an anchor is valid.
--func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
--	if len(anchor) == 0 {
--		problem := "anchor value must not be empty"
--		if alias {
--			problem = "alias value must not be empty"
--		}
--		return yaml_emitter_set_emitter_error(emitter, problem)
--	}
--	for i := 0; i < len(anchor); i += width(anchor[i]) {
--		if !is_alpha(anchor, i) {
--			problem := "anchor value must contain alphanumerical characters only"
--			if alias {
--				problem = "alias value must contain alphanumerical characters only"
--			}
--			return yaml_emitter_set_emitter_error(emitter, problem)
--		}
--	}
--	emitter.anchor_data.anchor = anchor
--	emitter.anchor_data.alias = alias
--	return true
--}
--
--// Check if a tag is valid.
--func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
--	if len(tag) == 0 {
--		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
--	}
--	for i := 0; i < len(emitter.tag_directives); i++ {
--		tag_directive := &emitter.tag_directives[i]
--		if bytes.HasPrefix(tag, tag_directive.prefix) {
--			emitter.tag_data.handle = tag_directive.handle
--			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
--			return true
--		}
--	}
--	emitter.tag_data.suffix = tag
--	return true
--}
--
--// Check if a scalar is valid.
--func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
--	var (
--		block_indicators   = false
--		flow_indicators    = false
--		line_breaks        = false
--		special_characters = false
--
--		leading_space  = false
--		leading_break  = false
--		trailing_space = false
--		trailing_break = false
--		break_space    = false
--		space_break    = false
--
--		preceeded_by_whitespace = false
--		followed_by_whitespace  = false
--		previous_space          = false
--		previous_break          = false
--	)
--
--	emitter.scalar_data.value = value
--
--	if len(value) == 0 {
--		emitter.scalar_data.multiline = false
--		emitter.scalar_data.flow_plain_allowed = false
--		emitter.scalar_data.block_plain_allowed = true
--		emitter.scalar_data.single_quoted_allowed = true
--		emitter.scalar_data.block_allowed = false
--		return true
--	}
--
--	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
--		block_indicators = true
--		flow_indicators = true
--	}
--
--	preceeded_by_whitespace = true
--	for i, w := 0, 0; i < len(value); i += w {
--		w = width(value[i])
--		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
--
--		if i == 0 {
--			switch value[i] {
--			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
--				flow_indicators = true
--				block_indicators = true
--			case '?', ':':
--				flow_indicators = true
--				if followed_by_whitespace {
--					block_indicators = true
--				}
--			case '-':
--				if followed_by_whitespace {
--					flow_indicators = true
--					block_indicators = true
--				}
--			}
--		} else {
--			switch value[i] {
--			case ',', '?', '[', ']', '{', '}':
--				flow_indicators = true
--			case ':':
--				flow_indicators = true
--				if followed_by_whitespace {
--					block_indicators = true
--				}
--			case '#':
--				if preceeded_by_whitespace {
--					flow_indicators = true
--					block_indicators = true
--				}
--			}
--		}
--
--		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
--			special_characters = true
--		}
--		if is_space(value, i) {
--			if i == 0 {
--				leading_space = true
--			}
--			if i+width(value[i]) == len(value) {
--				trailing_space = true
--			}
--			if previous_break {
--				break_space = true
--			}
--			previous_space = true
--			previous_break = false
--		} else if is_break(value, i) {
--			line_breaks = true
--			if i == 0 {
--				leading_break = true
--			}
--			if i+width(value[i]) == len(value) {
--				trailing_break = true
--			}
--			if previous_space {
--				space_break = true
--			}
--			previous_space = false
--			previous_break = true
--		} else {
--			previous_space = false
--			previous_break = false
--		}
--
--		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
--		preceeded_by_whitespace = is_blankz(value, i)
--	}
--
--	emitter.scalar_data.multiline = line_breaks
--	emitter.scalar_data.flow_plain_allowed = true
--	emitter.scalar_data.block_plain_allowed = true
--	emitter.scalar_data.single_quoted_allowed = true
--	emitter.scalar_data.block_allowed = true
--
--	if leading_space || leading_break || trailing_space || trailing_break {
--		emitter.scalar_data.flow_plain_allowed = false
--		emitter.scalar_data.block_plain_allowed = false
--	}
--	if trailing_space {
--		emitter.scalar_data.block_allowed = false
--	}
--	if break_space {
--		emitter.scalar_data.flow_plain_allowed = false
--		emitter.scalar_data.block_plain_allowed = false
--		emitter.scalar_data.single_quoted_allowed = false
--	}
--	if space_break || special_characters {
--		emitter.scalar_data.flow_plain_allowed = false
--		emitter.scalar_data.block_plain_allowed = false
--		emitter.scalar_data.single_quoted_allowed = false
--		emitter.scalar_data.block_allowed = false
--	}
--	if line_breaks {
--		emitter.scalar_data.flow_plain_allowed = false
--		emitter.scalar_data.block_plain_allowed = false
--	}
--	if flow_indicators {
--		emitter.scalar_data.flow_plain_allowed = false
--	}
--	if block_indicators {
--		emitter.scalar_data.block_plain_allowed = false
--	}
--	return true
--}
--
--// Check if the event data is valid.
--func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
--
--	emitter.anchor_data.anchor = nil
--	emitter.tag_data.handle = nil
--	emitter.tag_data.suffix = nil
--	emitter.scalar_data.value = nil
--
--	switch event.typ {
--	case yaml_ALIAS_EVENT:
--		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
--			return false
--		}
--
--	case yaml_SCALAR_EVENT:
--		if len(event.anchor) > 0 {
--			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
--				return false
--			}
--		}
--		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
--			if !yaml_emitter_analyze_tag(emitter, event.tag) {
--				return false
--			}
--		}
--		if !yaml_emitter_analyze_scalar(emitter, event.value) {
--			return false
--		}
--
--	case yaml_SEQUENCE_START_EVENT:
--		if len(event.anchor) > 0 {
--			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
--				return false
--			}
--		}
--		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
--			if !yaml_emitter_analyze_tag(emitter, event.tag) {
--				return false
--			}
--		}
--
--	case yaml_MAPPING_START_EVENT:
--		if len(event.anchor) > 0 {
--			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
--				return false
--			}
--		}
--		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
--			if !yaml_emitter_analyze_tag(emitter, event.tag) {
--				return false
--			}
--		}
--	}
--	return true
--}
--
--// Write the BOM character.
--func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
--	if !flush(emitter) {
--		return false
--	}
--	pos := emitter.buffer_pos
--	emitter.buffer[pos+0] = '\xEF'
--	emitter.buffer[pos+1] = '\xBB'
--	emitter.buffer[pos+2] = '\xBF'
--	emitter.buffer_pos += 3
--	return true
--}
--
--func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
--	indent := emitter.indent
--	if indent < 0 {
--		indent = 0
--	}
--	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
--		if !put_break(emitter) {
--			return false
--		}
--	}
--	for emitter.column < indent {
--		if !put(emitter, ' ') {
--			return false
--		}
--	}
--	emitter.whitespace = true
--	emitter.indention = true
--	return true
--}
--
--func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
--	if need_whitespace && !emitter.whitespace {
--		if !put(emitter, ' ') {
--			return false
--		}
--	}
--	if !write_all(emitter, indicator) {
--		return false
--	}
--	emitter.whitespace = is_whitespace
--	emitter.indention = (emitter.indention && is_indention)
--	emitter.open_ended = false
--	return true
--}
--
--func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
--	if !write_all(emitter, value) {
--		return false
--	}
--	emitter.whitespace = false
--	emitter.indention = false
--	return true
--}
--
--func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
--	if !emitter.whitespace {
--		if !put(emitter, ' ') {
--			return false
--		}
--	}
--	if !write_all(emitter, value) {
--		return false
--	}
--	emitter.whitespace = false
--	emitter.indention = false
--	return true
--}
--
--func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
--	if need_whitespace && !emitter.whitespace {
--		if !put(emitter, ' ') {
--			return false
--		}
--	}
--	for i := 0; i < len(value); {
--		var must_write bool
--		switch value[i] {
--		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
--			must_write = true
--		default:
--			must_write = is_alpha(value, i)
--		}
--		if must_write {
--			if !write(emitter, value, &i) {
--				return false
--			}
--		} else {
--			w := width(value[i])
--			for k := 0; k < w; k++ {
--				octet := value[i]
--				i++
--				if !put(emitter, '%') {
--					return false
--				}
--
--				c := octet >> 4
--				if c < 10 {
--					c += '0'
--				} else {
--					c += 'A' - 10
--				}
--				if !put(emitter, c) {
--					return false
--				}
--
--				c = octet & 0x0f
--				if c < 10 {
--					c += '0'
--				} else {
--					c += 'A' - 10
--				}
--				if !put(emitter, c) {
--					return false
--				}
--			}
--		}
--	}
--	emitter.whitespace = false
--	emitter.indention = false
--	return true
--}
--
--func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
--	if !emitter.whitespace {
--		if !put(emitter, ' ') {
--			return false
--		}
--	}
--
--	spaces := false
--	breaks := false
--	for i := 0; i < len(value); {
--		if is_space(value, i) {
--			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--				i += width(value[i])
--			} else {
--				if !write(emitter, value, &i) {
--					return false
--				}
--			}
--			spaces = true
--		} else if is_break(value, i) {
--			if !breaks && value[i] == '\n' {
--				if !put_break(emitter) {
--					return false
--				}
--			}
--			if !write_break(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = true
--			breaks = true
--		} else {
--			if breaks {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--			}
--			if !write(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = false
--			spaces = false
--			breaks = false
--		}
--	}
--
--	emitter.whitespace = false
--	emitter.indention = false
--	if emitter.root_context {
--		emitter.open_ended = true
--	}
--
--	return true
--}
--
--func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
--
--	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
--		return false
--	}
--
--	spaces := false
--	breaks := false
--	for i := 0; i < len(value); {
--		if is_space(value, i) {
--			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--				i += width(value[i])
--			} else {
--				if !write(emitter, value, &i) {
--					return false
--				}
--			}
--			spaces = true
--		} else if is_break(value, i) {
--			if !breaks && value[i] == '\n' {
--				if !put_break(emitter) {
--					return false
--				}
--			}
--			if !write_break(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = true
--			breaks = true
--		} else {
--			if breaks {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--			}
--			if value[i] == '\'' {
--				if !put(emitter, '\'') {
--					return false
--				}
--			}
--			if !write(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = false
--			spaces = false
--			breaks = false
--		}
--	}
--	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
--		return false
--	}
--	emitter.whitespace = false
--	emitter.indention = false
--	return true
--}
--
--func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
--	spaces := false
--	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
--		return false
--	}
--
--	for i := 0; i < len(value); {
--		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
--			is_bom(value, i) || is_break(value, i) ||
--			value[i] == '"' || value[i] == '\\' {
--
--			octet := value[i]
--
--			var w int
--			var v rune
--			switch {
--			case octet&0x80 == 0x00:
--				w, v = 1, rune(octet&0x7F)
--			case octet&0xE0 == 0xC0:
--				w, v = 2, rune(octet&0x1F)
--			case octet&0xF0 == 0xE0:
--				w, v = 3, rune(octet&0x0F)
--			case octet&0xF8 == 0xF0:
--				w, v = 4, rune(octet&0x07)
--			}
--			for k := 1; k < w; k++ {
--				octet = value[i+k]
--				v = (v << 6) + (rune(octet) & 0x3F)
--			}
--			i += w
--
--			if !put(emitter, '\\') {
--				return false
--			}
--
--			var ok bool
--			switch v {
--			case 0x00:
--				ok = put(emitter, '0')
--			case 0x07:
--				ok = put(emitter, 'a')
--			case 0x08:
--				ok = put(emitter, 'b')
--			case 0x09:
--				ok = put(emitter, 't')
--			case 0x0A:
--				ok = put(emitter, 'n')
--			case 0x0b:
--				ok = put(emitter, 'v')
--			case 0x0c:
--				ok = put(emitter, 'f')
--			case 0x0d:
--				ok = put(emitter, 'r')
--			case 0x1b:
--				ok = put(emitter, 'e')
--			case 0x22:
--				ok = put(emitter, '"')
--			case 0x5c:
--				ok = put(emitter, '\\')
--			case 0x85:
--				ok = put(emitter, 'N')
--			case 0xA0:
--				ok = put(emitter, '_')
--			case 0x2028:
--				ok = put(emitter, 'L')
--			case 0x2029:
--				ok = put(emitter, 'P')
--			default:
--				if v <= 0xFF {
--					ok = put(emitter, 'x')
--					w = 2
--				} else if v <= 0xFFFF {
--					ok = put(emitter, 'u')
--					w = 4
--				} else {
--					ok = put(emitter, 'U')
--					w = 8
--				}
--				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
--					digit := byte((v >> uint(k)) & 0x0F)
--					if digit < 10 {
--						ok = put(emitter, digit+'0')
--					} else {
--						ok = put(emitter, digit+'A'-10)
--					}
--				}
--			}
--			if !ok {
--				return false
--			}
--			spaces = false
--		} else if is_space(value, i) {
--			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--				if is_space(value, i+1) {
--					if !put(emitter, '\\') {
--						return false
--					}
--				}
--				i += width(value[i])
--			} else if !write(emitter, value, &i) {
--				return false
--			}
--			spaces = true
--		} else {
--			if !write(emitter, value, &i) {
--				return false
--			}
--			spaces = false
--		}
--	}
--	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
--		return false
--	}
--	emitter.whitespace = false
--	emitter.indention = false
--	return true
--}
--
--func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
--	if is_space(value, 0) || is_break(value, 0) {
--		indent_hint := []byte{'0' + byte(emitter.best_indent)}
--		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
--			return false
--		}
--	}
--
--	emitter.open_ended = false
--
--	var chomp_hint [1]byte
--	if len(value) == 0 {
--		chomp_hint[0] = '-'
--	} else {
--		i := len(value) - 1
--		for value[i]&0xC0 == 0x80 {
--			i--
--		}
--		if !is_break(value, i) {
--			chomp_hint[0] = '-'
--		} else if i == 0 {
--			chomp_hint[0] = '+'
--			emitter.open_ended = true
--		} else {
--			i--
--			for value[i]&0xC0 == 0x80 {
--				i--
--			}
--			if is_break(value, i) {
--				chomp_hint[0] = '+'
--				emitter.open_ended = true
--			}
--		}
--	}
--	if chomp_hint[0] != 0 {
--		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
--			return false
--		}
--	}
--	return true
--}
--
--func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
--	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
--		return false
--	}
--	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
--		return false
--	}
--	if !put_break(emitter) {
--		return false
--	}
--	emitter.indention = true
--	emitter.whitespace = true
--	breaks := true
--	for i := 0; i < len(value); {
--		if is_break(value, i) {
--			if !write_break(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = true
--			breaks = true
--		} else {
--			if breaks {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--			}
--			if !write(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = false
--			breaks = false
--		}
--	}
--
--	return true
--}
--
--func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
--	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
--		return false
--	}
--	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
--		return false
--	}
--
--	if !put_break(emitter) {
--		return false
--	}
--	emitter.indention = true
--	emitter.whitespace = true
--
--	breaks := true
--	leading_spaces := true
--	for i := 0; i < len(value); {
--		if is_break(value, i) {
--			if !breaks && !leading_spaces && value[i] == '\n' {
--				k := 0
--				for is_break(value, k) {
--					k += width(value[k])
--				}
--				if !is_blankz(value, k) {
--					if !put_break(emitter) {
--						return false
--					}
--				}
--			}
--			if !write_break(emitter, value, &i) {
--				return false
--			}
--			emitter.indention = true
--			breaks = true
--		} else {
--			if breaks {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--				leading_spaces = is_blank(value, i)
--			}
--			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
--				if !yaml_emitter_write_indent(emitter) {
--					return false
--				}
--				i += width(value[i])
--			} else {
--				if !write(emitter, value, &i) {
--					return false
--				}
--			}
--			emitter.indention = false
--			breaks = false
--		}
--	}
--	return true
--}
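
The emitter logic removed above (yaml_emitter_select_scalar_style and the write_* helpers) is what
decides whether a scalar comes out plain, quoted, or as a block scalar. A minimal sketch of how that
shows up through the public gopkg.in/yaml.v2 API (not part of the patch; the expected outputs are
taken from the encode tests further below):

    // Sketch only: assumes gopkg.in/yaml.v2 is importable in a normal GOPATH/module setup.
    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        // Plain strings stay unquoted; strings that would resolve to another
        // type (a bool, a YAML 1.1 base-60 "float") are double-quoted; strings
        // containing newlines fall back to the literal block style.
        for _, v := range []string{"hello", "true", "1:1", "line one\nline two"} {
            out, err := yaml.Marshal(map[string]string{"v": v})
            if err != nil {
                panic(err)
            }
            // "v: hello\n", "v: \"true\"\n", "v: \"1:1\"\n",
            // "v: |-\n  line one\n  line two\n"
            fmt.Printf("%q\n", out)
        }
    }
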
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/encode.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/encode.go
-deleted file mode 100644
-index 972bc03..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/encode.go
-+++ /dev/null
-@@ -1,290 +0,0 @@
--package yaml
--
--import (
--	"encoding"
--	"reflect"
--	"regexp"
--	"sort"
--	"strconv"
--	"strings"
--	"time"
--)
--
--type encoder struct {
--	emitter yaml_emitter_t
--	event   yaml_event_t
--	out     []byte
--	flow    bool
--}
--
--func newEncoder() (e *encoder) {
--	e = &encoder{}
--	e.must(yaml_emitter_initialize(&e.emitter))
--	yaml_emitter_set_output_string(&e.emitter, &e.out)
--	yaml_emitter_set_unicode(&e.emitter, true)
--	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
--	e.emit()
--	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
--	e.emit()
--	return e
--}
--
--func (e *encoder) finish() {
--	e.must(yaml_document_end_event_initialize(&e.event, true))
--	e.emit()
--	e.emitter.open_ended = false
--	e.must(yaml_stream_end_event_initialize(&e.event))
--	e.emit()
--}
--
--func (e *encoder) destroy() {
--	yaml_emitter_delete(&e.emitter)
--}
--
--func (e *encoder) emit() {
--	// This will internally delete the e.event value.
--	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
--		e.must(false)
--	}
--}
--
--func (e *encoder) must(ok bool) {
--	if !ok {
--		msg := e.emitter.problem
--		if msg == "" {
--			msg = "unknown problem generating YAML content"
--		}
--		failf("%s", msg)
--	}
--}
--
--func (e *encoder) marshal(tag string, in reflect.Value) {
--	if !in.IsValid() {
--		e.nilv()
--		return
--	}
--	iface := in.Interface()
--	if m, ok := iface.(Marshaler); ok {
--		v, err := m.MarshalYAML()
--		if err != nil {
--			fail(err)
--		}
--		if v == nil {
--			e.nilv()
--			return
--		}
--		in = reflect.ValueOf(v)
--	}
--	if m, ok := iface.(encoding.TextMarshaler); ok {
--		text, err := m.MarshalText()
--		if err != nil {
--			fail(err)
--		}
--		in = reflect.ValueOf(string(text))
--	}
--	switch in.Kind() {
--	case reflect.Interface:
--		if in.IsNil() {
--			e.nilv()
--		} else {
--			e.marshal(tag, in.Elem())
--		}
--	case reflect.Map:
--		e.mapv(tag, in)
--	case reflect.Ptr:
--		if in.IsNil() {
--			e.nilv()
--		} else {
--			e.marshal(tag, in.Elem())
--		}
--	case reflect.Struct:
--		e.structv(tag, in)
--	case reflect.Slice:
--		if in.Type().Elem() == mapItemType {
--			e.itemsv(tag, in)
--		} else {
--			e.slicev(tag, in)
--		}
--	case reflect.String:
--		e.stringv(tag, in)
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		if in.Type() == durationType {
--			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
--		} else {
--			e.intv(tag, in)
--		}
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		e.uintv(tag, in)
--	case reflect.Float32, reflect.Float64:
--		e.floatv(tag, in)
--	case reflect.Bool:
--		e.boolv(tag, in)
--	default:
--		panic("cannot marshal type: " + in.Type().String())
--	}
--}
--
--func (e *encoder) mapv(tag string, in reflect.Value) {
--	e.mappingv(tag, func() {
--		keys := keyList(in.MapKeys())
--		sort.Sort(keys)
--		for _, k := range keys {
--			e.marshal("", k)
--			e.marshal("", in.MapIndex(k))
--		}
--	})
--}
--
--func (e *encoder) itemsv(tag string, in reflect.Value) {
--	e.mappingv(tag, func() {
--		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
--		for _, item := range slice {
--			e.marshal("", reflect.ValueOf(item.Key))
--			e.marshal("", reflect.ValueOf(item.Value))
--		}
--	})
--}
--
--func (e *encoder) structv(tag string, in reflect.Value) {
--	sinfo, err := getStructInfo(in.Type())
--	if err != nil {
--		panic(err)
--	}
--	e.mappingv(tag, func() {
--		for _, info := range sinfo.FieldsList {
--			var value reflect.Value
--			if info.Inline == nil {
--				value = in.Field(info.Num)
--			} else {
--				value = in.FieldByIndex(info.Inline)
--			}
--			if info.OmitEmpty && isZero(value) {
--				continue
--			}
--			e.marshal("", reflect.ValueOf(info.Key))
--			e.flow = info.Flow
--			e.marshal("", value)
--		}
--	})
--}
--
--func (e *encoder) mappingv(tag string, f func()) {
--	implicit := tag == ""
--	style := yaml_BLOCK_MAPPING_STYLE
--	if e.flow {
--		e.flow = false
--		style = yaml_FLOW_MAPPING_STYLE
--	}
--	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
--	e.emit()
--	f()
--	e.must(yaml_mapping_end_event_initialize(&e.event))
--	e.emit()
--}
--
--func (e *encoder) slicev(tag string, in reflect.Value) {
--	implicit := tag == ""
--	style := yaml_BLOCK_SEQUENCE_STYLE
--	if e.flow {
--		e.flow = false
--		style = yaml_FLOW_SEQUENCE_STYLE
--	}
--	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
--	e.emit()
--	n := in.Len()
--	for i := 0; i < n; i++ {
--		e.marshal("", in.Index(i))
--	}
--	e.must(yaml_sequence_end_event_initialize(&e.event))
--	e.emit()
--}
--
--// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
--//
--// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
--// in YAML 1.2 and by this package, but these should be marshalled quoted for
--// the time being for compatibility with other parsers.
--func isBase60Float(s string) (result bool) {
--	// Fast path.
--	if s == "" {
--		return false
--	}
--	c := s[0]
--	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
--		return false
--	}
--	// Do the full match.
--	return base60float.MatchString(s)
--}
--
--// From http://yaml.org/type/float.html, except the regular expression there
--// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
--var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
--
--func (e *encoder) stringv(tag string, in reflect.Value) {
--	var style yaml_scalar_style_t
--	s := in.String()
--	rtag, rs := resolve("", s)
--	if rtag == yaml_BINARY_TAG {
--		if tag == "" || tag == yaml_STR_TAG {
--			tag = rtag
--			s = rs.(string)
--		} else if tag == yaml_BINARY_TAG {
--			failf("explicitly tagged !!binary data must be base64-encoded")
--		} else {
--			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
--		}
--	}
--	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
--		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--	} else if strings.Contains(s, "\n") {
--		style = yaml_LITERAL_SCALAR_STYLE
--	} else {
--		style = yaml_PLAIN_SCALAR_STYLE
--	}
--	e.emitScalar(s, "", tag, style)
--}
--
--func (e *encoder) boolv(tag string, in reflect.Value) {
--	var s string
--	if in.Bool() {
--		s = "true"
--	} else {
--		s = "false"
--	}
--	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
--}
--
--func (e *encoder) intv(tag string, in reflect.Value) {
--	s := strconv.FormatInt(in.Int(), 10)
--	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
--}
--
--func (e *encoder) uintv(tag string, in reflect.Value) {
--	s := strconv.FormatUint(in.Uint(), 10)
--	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
--}
--
--func (e *encoder) floatv(tag string, in reflect.Value) {
--	// FIXME: Handle 64 bits here.
--	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
--	switch s {
--	case "+Inf":
--		s = ".inf"
--	case "-Inf":
--		s = "-.inf"
--	case "NaN":
--		s = ".nan"
--	}
--	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
--}
--
--func (e *encoder) nilv() {
--	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
--}
--
--func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
--	implicit := tag == ""
--	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
--	e.emit()
--}
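
The encode.go removed above resolves the struct tags handled in structv/mappingv: ",omitempty",
",flow", ",inline" and "-". A minimal sketch of that behavior (not part of the patch; the field
names and types are illustrative only, and the expected output follows the tests below):

    // Sketch only: assumes gopkg.in/yaml.v2 is importable.
    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    type inner struct {
        B int
        C int
    }

    type outer struct {
        A    int   `yaml:"a,omitempty"` // dropped when zero
        Skip int   `yaml:"-"`           // never emitted
        Seq  []int `yaml:"seq,flow"`    // flow style: seq: [1, 2]
        In   inner `yaml:",inline"`     // fields promoted into the parent mapping
    }

    func main() {
        out, err := yaml.Marshal(&outer{A: 1, Seq: []int{1, 2}, In: inner{B: 2, C: 3}})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // a: 1
        // seq: [1, 2]
        // b: 2
        // c: 3
    }
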
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/encode_test.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/encode_test.go
-deleted file mode 100644
-index cdbf64a..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/encode_test.go
-+++ /dev/null
-@@ -1,434 +0,0 @@
--package yaml_test
--
--import (
--	"fmt"
--	"math"
--	"strconv"
--	"strings"
--	"time"
--
--	. "gopkg.in/check.v1"
--	"gopkg.in/yaml.v2"
--	"net"
--)
--
--var marshalIntTest = 123
--
--var marshalTests = []struct {
--	value interface{}
--	data  string
--}{
--	{
--		nil,
--		"null\n",
--	}, {
--		&struct{}{},
--		"{}\n",
--	}, {
--		map[string]string{"v": "hi"},
--		"v: hi\n",
--	}, {
--		map[string]interface{}{"v": "hi"},
--		"v: hi\n",
--	}, {
--		map[string]string{"v": "true"},
--		"v: \"true\"\n",
--	}, {
--		map[string]string{"v": "false"},
--		"v: \"false\"\n",
--	}, {
--		map[string]interface{}{"v": true},
--		"v: true\n",
--	}, {
--		map[string]interface{}{"v": false},
--		"v: false\n",
--	}, {
--		map[string]interface{}{"v": 10},
--		"v: 10\n",
--	}, {
--		map[string]interface{}{"v": -10},
--		"v: -10\n",
--	}, {
--		map[string]uint{"v": 42},
--		"v: 42\n",
--	}, {
--		map[string]interface{}{"v": int64(4294967296)},
--		"v: 4294967296\n",
--	}, {
--		map[string]int64{"v": int64(4294967296)},
--		"v: 4294967296\n",
--	}, {
--		map[string]uint64{"v": 4294967296},
--		"v: 4294967296\n",
--	}, {
--		map[string]interface{}{"v": "10"},
--		"v: \"10\"\n",
--	}, {
--		map[string]interface{}{"v": 0.1},
--		"v: 0.1\n",
--	}, {
--		map[string]interface{}{"v": float64(0.1)},
--		"v: 0.1\n",
--	}, {
--		map[string]interface{}{"v": -0.1},
--		"v: -0.1\n",
--	}, {
--		map[string]interface{}{"v": math.Inf(+1)},
--		"v: .inf\n",
--	}, {
--		map[string]interface{}{"v": math.Inf(-1)},
--		"v: -.inf\n",
--	}, {
--		map[string]interface{}{"v": math.NaN()},
--		"v: .nan\n",
--	}, {
--		map[string]interface{}{"v": nil},
--		"v: null\n",
--	}, {
--		map[string]interface{}{"v": ""},
--		"v: \"\"\n",
--	}, {
--		map[string][]string{"v": []string{"A", "B"}},
--		"v:\n- A\n- B\n",
--	}, {
--		map[string][]string{"v": []string{"A", "B\nC"}},
--		"v:\n- A\n- |-\n  B\n  C\n",
--	}, {
--		map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
--		"v:\n- A\n- 1\n- B:\n  - 2\n  - 3\n",
--	}, {
--		map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
--		"a:\n  b: c\n",
--	}, {
--		map[string]interface{}{"a": "-"},
--		"a: '-'\n",
--	},
--
--	// Simple values.
--	{
--		&marshalIntTest,
--		"123\n",
--	},
--
--	// Structures
--	{
--		&struct{ Hello string }{"world"},
--		"hello: world\n",
--	}, {
--		&struct {
--			A struct {
--				B string
--			}
--		}{struct{ B string }{"c"}},
--		"a:\n  b: c\n",
--	}, {
--		&struct {
--			A *struct {
--				B string
--			}
--		}{&struct{ B string }{"c"}},
--		"a:\n  b: c\n",
--	}, {
--		&struct {
--			A *struct {
--				B string
--			}
--		}{},
--		"a: null\n",
--	}, {
--		&struct{ A int }{1},
--		"a: 1\n",
--	}, {
--		&struct{ A []int }{[]int{1, 2}},
--		"a:\n- 1\n- 2\n",
--	}, {
--		&struct {
--			B int "a"
--		}{1},
--		"a: 1\n",
--	}, {
--		&struct{ A bool }{true},
--		"a: true\n",
--	},
--
--	// Conditional flag
--	{
--		&struct {
--			A int "a,omitempty"
--			B int "b,omitempty"
--		}{1, 0},
--		"a: 1\n",
--	}, {
--		&struct {
--			A int "a,omitempty"
--			B int "b,omitempty"
--		}{0, 0},
--		"{}\n",
--	}, {
--		&struct {
--			A *struct{ X int } "a,omitempty"
--			B int              "b,omitempty"
--		}{nil, 0},
--		"{}\n",
--	},
--
--	// Flow flag
--	{
--		&struct {
--			A []int "a,flow"
--		}{[]int{1, 2}},
--		"a: [1, 2]\n",
--	}, {
--		&struct {
--			A map[string]string "a,flow"
--		}{map[string]string{"b": "c", "d": "e"}},
--		"a: {b: c, d: e}\n",
--	}, {
--		&struct {
--			A struct {
--				B, D string
--			} "a,flow"
--		}{struct{ B, D string }{"c", "e"}},
--		"a: {b: c, d: e}\n",
--	},
--
--	// Unexported field
--	{
--		&struct {
--			u int
--			A int
--		}{0, 1},
--		"a: 1\n",
--	},
--
--	// Ignored field
--	{
--		&struct {
--			A int
--			B int "-"
--		}{1, 2},
--		"a: 1\n",
--	},
--
--	// Struct inlining
--	{
--		&struct {
--			A int
--			C inlineB `yaml:",inline"`
--		}{1, inlineB{2, inlineC{3}}},
--		"a: 1\nb: 2\nc: 3\n",
--	},
--
--	// Duration
--	{
--		map[string]time.Duration{"a": 3 * time.Second},
--		"a: 3s\n",
--	},
--
--	// Issue #24: bug in map merging logic.
--	{
--		map[string]string{"a": "<foo>"},
--		"a: <foo>\n",
--	},
--
--	// Issue #34: marshal unsupported base 60 floats quoted for compatibility
--	// with old YAML 1.1 parsers.
--	{
--		map[string]string{"a": "1:1"},
--		"a: \"1:1\"\n",
--	},
--
--	// Binary data.
--	{
--		map[string]string{"a": "\x00"},
--		"a: \"\\0\"\n",
--	}, {
--		map[string]string{"a": "\x80\x81\x82"},
--		"a: !!binary gIGC\n",
--	}, {
--		map[string]string{"a": strings.Repeat("\x90", 54)},
--		"a: !!binary |\n  " + strings.Repeat("kJCQ", 17) + "kJ\n  CQ\n",
--	},
--
--	// Ordered maps.
--	{
--		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
--		"b: 2\na: 1\nd: 4\nc: 3\nsub:\n  e: 5\n",
--	},
--
--	// Encode unicode as utf-8 rather than in escaped form.
--	{
--		map[string]string{"a": "你好"},
--		"a: 你好\n",
--	},
--
--	// Support encoding.TextMarshaler.
--	{
--		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
--		"a: 1.2.3.4\n",
--	},
--}
--
--func (s *S) TestMarshal(c *C) {
--	for _, item := range marshalTests {
--		data, err := yaml.Marshal(item.value)
--		c.Assert(err, IsNil)
--		c.Assert(string(data), Equals, item.data)
--	}
--}
--
--var marshalErrorTests = []struct {
--	value interface{}
--	error string
--	panic string
--}{{
--	value: &struct {
--		B       int
--		inlineB ",inline"
--	}{1, inlineB{2, inlineC{3}}},
--	panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
--}}
--
--func (s *S) TestMarshalErrors(c *C) {
--	for _, item := range marshalErrorTests {
--		if item.panic != "" {
--			c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
--		} else {
--			_, err := yaml.Marshal(item.value)
--			c.Assert(err, ErrorMatches, item.error)
--		}
--	}
--}
--
--func (s *S) TestMarshalTypeCache(c *C) {
--	var data []byte
--	var err error
--	func() {
--		type T struct{ A int }
--		data, err = yaml.Marshal(&T{})
--		c.Assert(err, IsNil)
--	}()
--	func() {
--		type T struct{ B int }
--		data, err = yaml.Marshal(&T{})
--		c.Assert(err, IsNil)
--	}()
--	c.Assert(string(data), Equals, "b: 0\n")
--}
--
--var marshalerTests = []struct {
--	data  string
--	value interface{}
--}{
--	{"_:\n  hi: there\n", map[interface{}]interface{}{"hi": "there"}},
--	{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
--	{"_: 10\n", 10},
--	{"_: null\n", nil},
--	{"_: BAR!\n", "BAR!"},
--}
--
--type marshalerType struct {
--	value interface{}
--}
--
--func (o marshalerType) MarshalYAML() (interface{}, error) {
--	return o.value, nil
--}
--
--type marshalerValue struct {
--	Field marshalerType "_"
--}
--
--func (s *S) TestMarshaler(c *C) {
--	for _, item := range marshalerTests {
--		obj := &marshalerValue{}
--		obj.Field.value = item.value
--		data, err := yaml.Marshal(obj)
--		c.Assert(err, IsNil)
--		c.Assert(string(data), Equals, string(item.data))
--	}
--}
--
--func (s *S) TestMarshalerWholeDocument(c *C) {
--	obj := &marshalerType{}
--	obj.value = map[string]string{"hello": "world!"}
--	data, err := yaml.Marshal(obj)
--	c.Assert(err, IsNil)
--	c.Assert(string(data), Equals, "hello: world!\n")
--}
--
--type failingMarshaler struct{}
--
--func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
--	return nil, failingErr
--}
--
--func (s *S) TestMarshalerError(c *C) {
--	_, err := yaml.Marshal(&failingMarshaler{})
--	c.Assert(err, Equals, failingErr)
--}
--
--func (s *S) TestSortedOutput(c *C) {
--	order := []interface{}{
--		false,
--		true,
--		1,
--		uint(1),
--		1.0,
--		1.1,
--		1.2,
--		2,
--		uint(2),
--		2.0,
--		2.1,
--		"",
--		".1",
--		".2",
--		".a",
--		"1",
--		"2",
--		"a!10",
--		"a/2",
--		"a/10",
--		"a~10",
--		"ab/1",
--		"b/1",
--		"b/01",
--		"b/2",
--		"b/02",
--		"b/3",
--		"b/03",
--		"b1",
--		"b01",
--		"b3",
--		"c2.10",
--		"c10.2",
--		"d1",
--		"d12",
--		"d12a",
--	}
--	m := make(map[interface{}]int)
--	for _, k := range order {
--		m[k] = 1
--	}
--	data, err := yaml.Marshal(m)
--	c.Assert(err, IsNil)
--	out := "\n" + string(data)
--	last := 0
--	for i, k := range order {
--		repr := fmt.Sprint(k)
--		if s, ok := k.(string); ok {
--			if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
--				repr = `"` + repr + `"`
--			}
--		}
--		index := strings.Index(out, "\n"+repr+":")
--		if index == -1 {
--			c.Fatalf("%#v is not in the output: %#v", k, out)
--		}
--		if index < last {
--			c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
--		}
--		last = index
--	}
--}
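
The deleted encode_test.go (like the other *_test.go files in this vendored copy) is a gocheck
suite: the test methods hang off a suite type S via gopkg.in/check.v1, and S itself is defined in
the package's suite_test.go rather than in this hunk. A minimal sketch of that harness, assuming
the conventional gocheck wiring:

    // Sketch only: the suite type name S is taken from the tests above; the
    // rest is the standard gopkg.in/check.v1 bridge into "go test".
    package yaml_test

    import (
        "testing"

        . "gopkg.in/check.v1"
    )

    // Hook gocheck into the standard test runner so every (*S) TestXxx(c *C)
    // method runs under "go test".
    func Test(t *testing.T) { TestingT(t) }

    type S struct{}

    var _ = Suite(&S{})
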
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/parserc.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/parserc.go
-deleted file mode 100644
-index 0a7037a..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/parserc.go
-+++ /dev/null
-@@ -1,1096 +0,0 @@
--package yaml
--
--import (
--	"bytes"
--)
--
--// The parser implements the following grammar:
--//
--// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
--// implicit_document    ::= block_node DOCUMENT-END*
--// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
--// block_node_or_indentless_sequence    ::=
--//                          ALIAS
--//                          | properties (block_content | indentless_block_sequence)?
--//                          | block_content
--//                          | indentless_block_sequence
--// block_node           ::= ALIAS
--//                          | properties block_content?
--//                          | block_content
--// flow_node            ::= ALIAS
--//                          | properties flow_content?
--//                          | flow_content
--// properties           ::= TAG ANCHOR? | ANCHOR TAG?
--// block_content        ::= block_collection | flow_collection | SCALAR
--// flow_content         ::= flow_collection | SCALAR
--// block_collection     ::= block_sequence | block_mapping
--// flow_collection      ::= flow_sequence | flow_mapping
--// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
--// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
--// block_mapping        ::= BLOCK-MAPPING_START
--//                          ((KEY block_node_or_indentless_sequence?)?
--//                          (VALUE block_node_or_indentless_sequence?)?)*
--//                          BLOCK-END
--// flow_sequence        ::= FLOW-SEQUENCE-START
--//                          (flow_sequence_entry FLOW-ENTRY)*
--//                          flow_sequence_entry?
--//                          FLOW-SEQUENCE-END
--// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--// flow_mapping         ::= FLOW-MAPPING-START
--//                          (flow_mapping_entry FLOW-ENTRY)*
--//                          flow_mapping_entry?
--//                          FLOW-MAPPING-END
--// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--
--// Peek the next token in the token queue.
--func peek_token(parser *yaml_parser_t) *yaml_token_t {
--	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
--		return &parser.tokens[parser.tokens_head]
--	}
--	return nil
--}
--
--// Remove the next token from the queue (must be called after peek_token).
--func skip_token(parser *yaml_parser_t) {
--	parser.token_available = false
--	parser.tokens_parsed++
--	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
--	parser.tokens_head++
--}
--
--// Get the next event.
--func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
--	// Erase the event object.
--	*event = yaml_event_t{}
--
--	// No events after the end of the stream or error.
--	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
--		return true
--	}
--
--	// Generate the next event.
--	return yaml_parser_state_machine(parser, event)
--}
--
--// Set parser error.
--func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
--	parser.error = yaml_PARSER_ERROR
--	parser.problem = problem
--	parser.problem_mark = problem_mark
--	return false
--}
--
--func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
--	parser.error = yaml_PARSER_ERROR
--	parser.context = context
--	parser.context_mark = context_mark
--	parser.problem = problem
--	parser.problem_mark = problem_mark
--	return false
--}
--
--// State dispatcher.
--func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
--	//trace("yaml_parser_state_machine", "state:", parser.state.String())
--
--	switch parser.state {
--	case yaml_PARSE_STREAM_START_STATE:
--		return yaml_parser_parse_stream_start(parser, event)
--
--	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
--		return yaml_parser_parse_document_start(parser, event, true)
--
--	case yaml_PARSE_DOCUMENT_START_STATE:
--		return yaml_parser_parse_document_start(parser, event, false)
--
--	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
--		return yaml_parser_parse_document_content(parser, event)
--
--	case yaml_PARSE_DOCUMENT_END_STATE:
--		return yaml_parser_parse_document_end(parser, event)
--
--	case yaml_PARSE_BLOCK_NODE_STATE:
--		return yaml_parser_parse_node(parser, event, true, false)
--
--	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
--		return yaml_parser_parse_node(parser, event, true, true)
--
--	case yaml_PARSE_FLOW_NODE_STATE:
--		return yaml_parser_parse_node(parser, event, false, false)
--
--	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
--		return yaml_parser_parse_block_sequence_entry(parser, event, true)
--
--	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
--		return yaml_parser_parse_block_sequence_entry(parser, event, false)
--
--	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
--		return yaml_parser_parse_indentless_sequence_entry(parser, event)
--
--	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
--		return yaml_parser_parse_block_mapping_key(parser, event, true)
--
--	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
--		return yaml_parser_parse_block_mapping_key(parser, event, false)
--
--	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
--		return yaml_parser_parse_block_mapping_value(parser, event)
--
--	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
--		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
--
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
--		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
--
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
--		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
--
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
--		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
--
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
--		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
--
--	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
--		return yaml_parser_parse_flow_mapping_key(parser, event, true)
--
--	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
--		return yaml_parser_parse_flow_mapping_key(parser, event, false)
--
--	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
--		return yaml_parser_parse_flow_mapping_value(parser, event, false)
--
--	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
--		return yaml_parser_parse_flow_mapping_value(parser, event, true)
--
--	default:
--		panic("invalid parser state")
--	}
--	return false
--}
--
--// Parse the production:
--// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
--//              ************
--func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ != yaml_STREAM_START_TOKEN {
--		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
--	}
--	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
--	*event = yaml_event_t{
--		typ:        yaml_STREAM_START_EVENT,
--		start_mark: token.start_mark,
--		end_mark:   token.end_mark,
--		encoding:   token.encoding,
--	}
--	skip_token(parser)
--	return true
--}
--
--// Parse the productions:
--// implicit_document    ::= block_node DOCUMENT-END*
--//                          *
--// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
--//                          *************************
--func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	// Parse extra document end indicators.
--	if !implicit {
--		for token.typ == yaml_DOCUMENT_END_TOKEN {
--			skip_token(parser)
--			token = peek_token(parser)
--			if token == nil {
--				return false
--			}
--		}
--	}
--
--	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
--		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
--		token.typ != yaml_DOCUMENT_START_TOKEN &&
--		token.typ != yaml_STREAM_END_TOKEN {
--		// Parse an implicit document.
--		if !yaml_parser_process_directives(parser, nil, nil) {
--			return false
--		}
--		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
--		parser.state = yaml_PARSE_BLOCK_NODE_STATE
--
--		*event = yaml_event_t{
--			typ:        yaml_DOCUMENT_START_EVENT,
--			start_mark: token.start_mark,
--			end_mark:   token.end_mark,
--		}
--
--	} else if token.typ != yaml_STREAM_END_TOKEN {
--		// Parse an explicit document.
--		var version_directive *yaml_version_directive_t
--		var tag_directives []yaml_tag_directive_t
--		start_mark := token.start_mark
--		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
--			return false
--		}
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_DOCUMENT_START_TOKEN {
--			yaml_parser_set_parser_error(parser,
--				"did not find expected <document start>", token.start_mark)
--			return false
--		}
--		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
--		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
--		end_mark := token.end_mark
--
--		*event = yaml_event_t{
--			typ:               yaml_DOCUMENT_START_EVENT,
--			start_mark:        start_mark,
--			end_mark:          end_mark,
--			version_directive: version_directive,
--			tag_directives:    tag_directives,
--			implicit:          false,
--		}
--		skip_token(parser)
--
--	} else {
--		// Parse the stream end.
--		parser.state = yaml_PARSE_END_STATE
--		*event = yaml_event_t{
--			typ:        yaml_STREAM_END_EVENT,
--			start_mark: token.start_mark,
--			end_mark:   token.end_mark,
--		}
--		skip_token(parser)
--	}
--
--	return true
--}
--
--// Parse the productions:
--// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
--//                                                    ***********
--//
--func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
--		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
--		token.typ == yaml_DOCUMENT_START_TOKEN ||
--		token.typ == yaml_DOCUMENT_END_TOKEN ||
--		token.typ == yaml_STREAM_END_TOKEN {
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--		return yaml_parser_process_empty_scalar(parser, event,
--			token.start_mark)
--	}
--	return yaml_parser_parse_node(parser, event, true, false)
--}
--
--// Parse the productions:
--// implicit_document    ::= block_node DOCUMENT-END*
--//                                     *************
--// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
--//
--func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	start_mark := token.start_mark
--	end_mark := token.start_mark
--
--	implicit := true
--	if token.typ == yaml_DOCUMENT_END_TOKEN {
--		end_mark = token.end_mark
--		skip_token(parser)
--		implicit = false
--	}
--
--	parser.tag_directives = parser.tag_directives[:0]
--
--	parser.state = yaml_PARSE_DOCUMENT_START_STATE
--	*event = yaml_event_t{
--		typ:        yaml_DOCUMENT_END_EVENT,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		implicit:   implicit,
--	}
--	return true
--}
--
--// Parse the productions:
--// block_node_or_indentless_sequence    ::=
--//                          ALIAS
--//                          *****
--//                          | properties (block_content | indentless_block_sequence)?
--//                            **********  *
--//                          | block_content | indentless_block_sequence
--//                            *
--// block_node           ::= ALIAS
--//                          *****
--//                          | properties block_content?
--//                            ********** *
--//                          | block_content
--//                            *
--// flow_node            ::= ALIAS
--//                          *****
--//                          | properties flow_content?
--//                            ********** *
--//                          | flow_content
--//                            *
--// properties           ::= TAG ANCHOR? | ANCHOR TAG?
--//                          *************************
--// block_content        ::= block_collection | flow_collection | SCALAR
--//                                                               ******
--// flow_content         ::= flow_collection | SCALAR
--//                                            ******
--func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
--	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	if token.typ == yaml_ALIAS_TOKEN {
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--		*event = yaml_event_t{
--			typ:        yaml_ALIAS_EVENT,
--			start_mark: token.start_mark,
--			end_mark:   token.end_mark,
--			anchor:     token.value,
--		}
--		skip_token(parser)
--		return true
--	}
--
--	start_mark := token.start_mark
--	end_mark := token.start_mark
--
--	var tag_token bool
--	var tag_handle, tag_suffix, anchor []byte
--	var tag_mark yaml_mark_t
--	if token.typ == yaml_ANCHOR_TOKEN {
--		anchor = token.value
--		start_mark = token.start_mark
--		end_mark = token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ == yaml_TAG_TOKEN {
--			tag_token = true
--			tag_handle = token.value
--			tag_suffix = token.suffix
--			tag_mark = token.start_mark
--			end_mark = token.end_mark
--			skip_token(parser)
--			token = peek_token(parser)
--			if token == nil {
--				return false
--			}
--		}
--	} else if token.typ == yaml_TAG_TOKEN {
--		tag_token = true
--		tag_handle = token.value
--		tag_suffix = token.suffix
--		start_mark = token.start_mark
--		tag_mark = token.start_mark
--		end_mark = token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ == yaml_ANCHOR_TOKEN {
--			anchor = token.value
--			end_mark = token.end_mark
--			skip_token(parser)
--			token = peek_token(parser)
--			if token == nil {
--				return false
--			}
--		}
--	}
--
--	var tag []byte
--	if tag_token {
--		if len(tag_handle) == 0 {
--			tag = tag_suffix
--			tag_suffix = nil
--		} else {
--			for i := range parser.tag_directives {
--				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
--					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
--					tag = append(tag, tag_suffix...)
--					break
--				}
--			}
--			if len(tag) == 0 {
--				yaml_parser_set_parser_error_context(parser,
--					"while parsing a node", start_mark,
--					"found undefined tag handle", tag_mark)
--				return false
--			}
--		}
--	}
--
--	implicit := len(tag) == 0
--	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
--		end_mark = token.end_mark
--		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
--		*event = yaml_event_t{
--			typ:        yaml_SEQUENCE_START_EVENT,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			anchor:     anchor,
--			tag:        tag,
--			implicit:   implicit,
--			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
--		}
--		return true
--	}
--	if token.typ == yaml_SCALAR_TOKEN {
--		var plain_implicit, quoted_implicit bool
--		end_mark = token.end_mark
--		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
--			plain_implicit = true
--		} else if len(tag) == 0 {
--			quoted_implicit = true
--		}
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--
--		*event = yaml_event_t{
--			typ:             yaml_SCALAR_EVENT,
--			start_mark:      start_mark,
--			end_mark:        end_mark,
--			anchor:          anchor,
--			tag:             tag,
--			value:           token.value,
--			implicit:        plain_implicit,
--			quoted_implicit: quoted_implicit,
--			style:           yaml_style_t(token.style),
--		}
--		skip_token(parser)
--		return true
--	}
--	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
--		// [Go] Some of the events below can be merged as they differ only on style.
--		end_mark = token.end_mark
--		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
--		*event = yaml_event_t{
--			typ:        yaml_SEQUENCE_START_EVENT,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			anchor:     anchor,
--			tag:        tag,
--			implicit:   implicit,
--			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
--		}
--		return true
--	}
--	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
--		end_mark = token.end_mark
--		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
--		*event = yaml_event_t{
--			typ:        yaml_MAPPING_START_EVENT,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			anchor:     anchor,
--			tag:        tag,
--			implicit:   implicit,
--			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
--		}
--		return true
--	}
--	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
--		end_mark = token.end_mark
--		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
--		*event = yaml_event_t{
--			typ:        yaml_SEQUENCE_START_EVENT,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			anchor:     anchor,
--			tag:        tag,
--			implicit:   implicit,
--			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
--		}
--		return true
--	}
--	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
--		end_mark = token.end_mark
--		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
--		*event = yaml_event_t{
--			typ:        yaml_MAPPING_START_EVENT,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			anchor:     anchor,
--			tag:        tag,
--			implicit:   implicit,
--			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
--		}
--		return true
--	}
--	if len(anchor) > 0 || len(tag) > 0 {
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--
--		*event = yaml_event_t{
--			typ:             yaml_SCALAR_EVENT,
--			start_mark:      start_mark,
--			end_mark:        end_mark,
--			anchor:          anchor,
--			tag:             tag,
--			implicit:        implicit,
--			quoted_implicit: false,
--			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
--		}
--		return true
--	}
--
--	context := "while parsing a flow node"
--	if block {
--		context = "while parsing a block node"
--	}
--	yaml_parser_set_parser_error_context(parser, context, start_mark,
--		"did not find expected node content", token.start_mark)
--	return false
--}
--
--// Parse the productions:
--// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
--//                    ********************  *********** *             *********
--//
--func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
--	if first {
--		token := peek_token(parser)
--		parser.marks = append(parser.marks, token.start_mark)
--		skip_token(parser)
--	}
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
--		mark := token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
--			return yaml_parser_parse_node(parser, event, true, false)
--		} else {
--			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
--			return yaml_parser_process_empty_scalar(parser, event, mark)
--		}
--	}
--	if token.typ == yaml_BLOCK_END_TOKEN {
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--		parser.marks = parser.marks[:len(parser.marks)-1]
--
--		*event = yaml_event_t{
--			typ:        yaml_SEQUENCE_END_EVENT,
--			start_mark: token.start_mark,
--			end_mark:   token.end_mark,
--		}
--
--		skip_token(parser)
--		return true
--	}
--
--	context_mark := parser.marks[len(parser.marks)-1]
--	parser.marks = parser.marks[:len(parser.marks)-1]
--	return yaml_parser_set_parser_error_context(parser,
--		"while parsing a block collection", context_mark,
--		"did not find expected '-' indicator", token.start_mark)
--}
--
--// Parse the productions:
--// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
--//                           *********** *
--func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
--		mark := token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
--			token.typ != yaml_KEY_TOKEN &&
--			token.typ != yaml_VALUE_TOKEN &&
--			token.typ != yaml_BLOCK_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
--			return yaml_parser_parse_node(parser, event, true, false)
--		}
--		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
--		return yaml_parser_process_empty_scalar(parser, event, mark)
--	}
--	parser.state = parser.states[len(parser.states)-1]
--	parser.states = parser.states[:len(parser.states)-1]
--
--	*event = yaml_event_t{
--		typ:        yaml_SEQUENCE_END_EVENT,
--		start_mark: token.start_mark,
--		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
--	}
--	return true
--}
--
--// Parse the productions:
--// block_mapping        ::= BLOCK-MAPPING_START
--//                          *******************
--//                          ((KEY block_node_or_indentless_sequence?)?
--//                            *** *
--//                          (VALUE block_node_or_indentless_sequence?)?)*
--//
--//                          BLOCK-END
--//                          *********
--//
--func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
--	if first {
--		token := peek_token(parser)
--		parser.marks = append(parser.marks, token.start_mark)
--		skip_token(parser)
--	}
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	if token.typ == yaml_KEY_TOKEN {
--		mark := token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_KEY_TOKEN &&
--			token.typ != yaml_VALUE_TOKEN &&
--			token.typ != yaml_BLOCK_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
--			return yaml_parser_parse_node(parser, event, true, true)
--		} else {
--			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
--			return yaml_parser_process_empty_scalar(parser, event, mark)
--		}
--	} else if token.typ == yaml_BLOCK_END_TOKEN {
--		parser.state = parser.states[len(parser.states)-1]
--		parser.states = parser.states[:len(parser.states)-1]
--		parser.marks = parser.marks[:len(parser.marks)-1]
--		*event = yaml_event_t{
--			typ:        yaml_MAPPING_END_EVENT,
--			start_mark: token.start_mark,
--			end_mark:   token.end_mark,
--		}
--		skip_token(parser)
--		return true
--	}
--
--	context_mark := parser.marks[len(parser.marks)-1]
--	parser.marks = parser.marks[:len(parser.marks)-1]
--	return yaml_parser_set_parser_error_context(parser,
--		"while parsing a block mapping", context_mark,
--		"did not find expected key", token.start_mark)
--}
--
--// Parse the productions:
--// block_mapping        ::= BLOCK-MAPPING_START
--//
--//                          ((KEY block_node_or_indentless_sequence?)?
--//
--//                          (VALUE block_node_or_indentless_sequence?)?)*
--//                           ***** *
--//                          BLOCK-END
--//
--//
--func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ == yaml_VALUE_TOKEN {
--		mark := token.end_mark
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_KEY_TOKEN &&
--			token.typ != yaml_VALUE_TOKEN &&
--			token.typ != yaml_BLOCK_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
--			return yaml_parser_parse_node(parser, event, true, true)
--		}
--		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
--		return yaml_parser_process_empty_scalar(parser, event, mark)
--	}
--	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
--	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
--}
--
--// Parse the productions:
--// flow_sequence        ::= FLOW-SEQUENCE-START
--//                          *******************
--//                          (flow_sequence_entry FLOW-ENTRY)*
--//                           *                   **********
--//                          flow_sequence_entry?
--//                          *
--//                          FLOW-SEQUENCE-END
--//                          *****************
--// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                          *
--//
--func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
--	if first {
--		token := peek_token(parser)
--		parser.marks = append(parser.marks, token.start_mark)
--		skip_token(parser)
--	}
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
--		if !first {
--			if token.typ == yaml_FLOW_ENTRY_TOKEN {
--				skip_token(parser)
--				token = peek_token(parser)
--				if token == nil {
--					return false
--				}
--			} else {
--				context_mark := parser.marks[len(parser.marks)-1]
--				parser.marks = parser.marks[:len(parser.marks)-1]
--				return yaml_parser_set_parser_error_context(parser,
--					"while parsing a flow sequence", context_mark,
--					"did not find expected ',' or ']'", token.start_mark)
--			}
--		}
--
--		if token.typ == yaml_KEY_TOKEN {
--			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
--			*event = yaml_event_t{
--				typ:        yaml_MAPPING_START_EVENT,
--				start_mark: token.start_mark,
--				end_mark:   token.end_mark,
--				implicit:   true,
--				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
--			}
--			skip_token(parser)
--			return true
--		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
--			return yaml_parser_parse_node(parser, event, false, false)
--		}
--	}
--
--	parser.state = parser.states[len(parser.states)-1]
--	parser.states = parser.states[:len(parser.states)-1]
--	parser.marks = parser.marks[:len(parser.marks)-1]
--
--	*event = yaml_event_t{
--		typ:        yaml_SEQUENCE_END_EVENT,
--		start_mark: token.start_mark,
--		end_mark:   token.end_mark,
--	}
--
--	skip_token(parser)
--	return true
--}
--
--//
--// Parse the productions:
--// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                                      *** *
--//
--func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ != yaml_VALUE_TOKEN &&
--		token.typ != yaml_FLOW_ENTRY_TOKEN &&
--		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
--		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
--		return yaml_parser_parse_node(parser, event, false, false)
--	}
--	mark := token.end_mark
--	skip_token(parser)
--	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
--	return yaml_parser_process_empty_scalar(parser, event, mark)
--}
--
--// Parse the productions:
--// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                                                      ***** *
--//
--func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if token.typ == yaml_VALUE_TOKEN {
--		skip_token(parser)
--		token := peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
--			return yaml_parser_parse_node(parser, event, false, false)
--		}
--	}
--	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
--	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
--}
--
--// Parse the productions:
--// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                                                                      *
--//
--func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
--	*event = yaml_event_t{
--		typ:        yaml_MAPPING_END_EVENT,
--		start_mark: token.start_mark,
--		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
--	}
--	return true
--}
--
--// Parse the productions:
--// flow_mapping         ::= FLOW-MAPPING-START
--//                          ******************
--//                          (flow_mapping_entry FLOW-ENTRY)*
--//                           *                  **********
--//                          flow_mapping_entry?
--//                          ******************
--//                          FLOW-MAPPING-END
--//                          ****************
--// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                          *           *** *
--//
--func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
--	if first {
--		token := peek_token(parser)
--		parser.marks = append(parser.marks, token.start_mark)
--		skip_token(parser)
--	}
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
--		if !first {
--			if token.typ == yaml_FLOW_ENTRY_TOKEN {
--				skip_token(parser)
--				token = peek_token(parser)
--				if token == nil {
--					return false
--				}
--			} else {
--				context_mark := parser.marks[len(parser.marks)-1]
--				parser.marks = parser.marks[:len(parser.marks)-1]
--				return yaml_parser_set_parser_error_context(parser,
--					"while parsing a flow mapping", context_mark,
--					"did not find expected ',' or '}'", token.start_mark)
--			}
--		}
--
--		if token.typ == yaml_KEY_TOKEN {
--			skip_token(parser)
--			token = peek_token(parser)
--			if token == nil {
--				return false
--			}
--			if token.typ != yaml_VALUE_TOKEN &&
--				token.typ != yaml_FLOW_ENTRY_TOKEN &&
--				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
--				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
--				return yaml_parser_parse_node(parser, event, false, false)
--			} else {
--				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
--				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
--			}
--		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
--			return yaml_parser_parse_node(parser, event, false, false)
--		}
--	}
--
--	parser.state = parser.states[len(parser.states)-1]
--	parser.states = parser.states[:len(parser.states)-1]
--	parser.marks = parser.marks[:len(parser.marks)-1]
--	*event = yaml_event_t{
--		typ:        yaml_MAPPING_END_EVENT,
--		start_mark: token.start_mark,
--		end_mark:   token.end_mark,
--	}
--	skip_token(parser)
--	return true
--}
--
--// Parse the productions:
--// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
--//                                   *                  ***** *
--//
--func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--	if empty {
--		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
--		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
--	}
--	if token.typ == yaml_VALUE_TOKEN {
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
--			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
--			return yaml_parser_parse_node(parser, event, false, false)
--		}
--	}
--	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
--	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
--}
--
--// Generate an empty scalar event.
--func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
--	*event = yaml_event_t{
--		typ:        yaml_SCALAR_EVENT,
--		start_mark: mark,
--		end_mark:   mark,
--		value:      nil, // Empty
--		implicit:   true,
--		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
--	}
--	return true
--}
--
--var default_tag_directives = []yaml_tag_directive_t{
--	{[]byte("!"), []byte("!")},
--	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
--}
--
--// Parse directives.
--func yaml_parser_process_directives(parser *yaml_parser_t,
--	version_directive_ref **yaml_version_directive_t,
--	tag_directives_ref *[]yaml_tag_directive_t) bool {
--
--	var version_directive *yaml_version_directive_t
--	var tag_directives []yaml_tag_directive_t
--
--	token := peek_token(parser)
--	if token == nil {
--		return false
--	}
--
--	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
--		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
--			if version_directive != nil {
--				yaml_parser_set_parser_error(parser,
--					"found duplicate %YAML directive", token.start_mark)
--				return false
--			}
--			if token.major != 1 || token.minor != 1 {
--				yaml_parser_set_parser_error(parser,
--					"found incompatible YAML document", token.start_mark)
--				return false
--			}
--			version_directive = &yaml_version_directive_t{
--				major: token.major,
--				minor: token.minor,
--			}
--		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
--			value := yaml_tag_directive_t{
--				handle: token.value,
--				prefix: token.prefix,
--			}
--			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
--				return false
--			}
--			tag_directives = append(tag_directives, value)
--		}
--
--		skip_token(parser)
--		token = peek_token(parser)
--		if token == nil {
--			return false
--		}
--	}
--
--	for i := range default_tag_directives {
--		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
--			return false
--		}
--	}
--
--	if version_directive_ref != nil {
--		*version_directive_ref = version_directive
--	}
--	if tag_directives_ref != nil {
--		*tag_directives_ref = tag_directives
--	}
--	return true
--}
--
--// Append a tag directive to the directives stack.
--func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
--	for i := range parser.tag_directives {
--		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
--			if allow_duplicates {
--				return true
--			}
--			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
--		}
--	}
--
--	// [Go] I suspect the copy is unnecessary. This was likely done
--	// because there was no way to track ownership of the data.
--	value_copy := yaml_tag_directive_t{
--		handle: make([]byte, len(value.handle)),
--		prefix: make([]byte, len(value.prefix)),
--	}
--	copy(value_copy.handle, value.handle)
--	copy(value_copy.prefix, value.prefix)
--	parser.tag_directives = append(parser.tag_directives, value_copy)
--	return true
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/readerc.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/readerc.go
-deleted file mode 100644
-index d5fb097..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/readerc.go
-+++ /dev/null
-@@ -1,391 +0,0 @@
--package yaml
--
--import (
--	"io"
--)
--
--// Set the reader error and return 0.
--func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
--	parser.error = yaml_READER_ERROR
--	parser.problem = problem
--	parser.problem_offset = offset
--	parser.problem_value = value
--	return false
--}
--
--// Byte order marks.
--const (
--	bom_UTF8    = "\xef\xbb\xbf"
--	bom_UTF16LE = "\xff\xfe"
--	bom_UTF16BE = "\xfe\xff"
--)
--
--// Determine the input stream encoding by checking the BOM symbol. If no BOM is
--// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
--func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
--	// Ensure that we had enough bytes in the raw buffer.
--	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
--		if !yaml_parser_update_raw_buffer(parser) {
--			return false
--		}
--	}
--
--	// Determine the encoding.
--	buf := parser.raw_buffer
--	pos := parser.raw_buffer_pos
--	avail := len(buf) - pos
--	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
--		parser.encoding = yaml_UTF16LE_ENCODING
--		parser.raw_buffer_pos += 2
--		parser.offset += 2
--	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
--		parser.encoding = yaml_UTF16BE_ENCODING
--		parser.raw_buffer_pos += 2
--		parser.offset += 2
--	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
--		parser.encoding = yaml_UTF8_ENCODING
--		parser.raw_buffer_pos += 3
--		parser.offset += 3
--	} else {
--		parser.encoding = yaml_UTF8_ENCODING
--	}
--	return true
--}
--
--// Update the raw buffer.
--func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
--	size_read := 0
--
--	// Return if the raw buffer is full.
--	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
--		return true
--	}
--
--	// Return on EOF.
--	if parser.eof {
--		return true
--	}
--
--	// Move the remaining bytes in the raw buffer to the beginning.
--	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
--		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
--	}
--	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
--	parser.raw_buffer_pos = 0
--
--	// Call the read handler to fill the buffer.
--	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
--	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
--	if err == io.EOF {
--		parser.eof = true
--	} else if err != nil {
--		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
--	}
--	return true
--}
--
--// Ensure that the buffer contains at least `length` characters.
--// Return true on success, false on failure.
--//
--// The length is supposed to be significantly less than the buffer size.
--func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
--	if parser.read_handler == nil {
--		panic("read handler must be set")
--	}
--
--	// If the EOF flag is set and the raw buffer is empty, do nothing.
--	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
--		return true
--	}
--
--	// Return if the buffer contains enough characters.
--	if parser.unread >= length {
--		return true
--	}
--
--	// Determine the input encoding if it is not known yet.
--	if parser.encoding == yaml_ANY_ENCODING {
--		if !yaml_parser_determine_encoding(parser) {
--			return false
--		}
--	}
--
--	// Move the unread characters to the beginning of the buffer.
--	buffer_len := len(parser.buffer)
--	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
--		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
--		buffer_len -= parser.buffer_pos
--		parser.buffer_pos = 0
--	} else if parser.buffer_pos == buffer_len {
--		buffer_len = 0
--		parser.buffer_pos = 0
--	}
--
--	// Open the whole buffer for writing, and cut it before returning.
--	parser.buffer = parser.buffer[:cap(parser.buffer)]
--
--	// Fill the buffer until it has enough characters.
--	first := true
--	for parser.unread < length {
--
--		// Fill the raw buffer if necessary.
--		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
--			if !yaml_parser_update_raw_buffer(parser) {
--				parser.buffer = parser.buffer[:buffer_len]
--				return false
--			}
--		}
--		first = false
--
--		// Decode the raw buffer.
--	inner:
--		for parser.raw_buffer_pos != len(parser.raw_buffer) {
--			var value rune
--			var width int
--
--			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
--
--			// Decode the next character.
--			switch parser.encoding {
--			case yaml_UTF8_ENCODING:
--				// Decode a UTF-8 character.  Check RFC 3629
--				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
--				//
--				// The following table (taken from the RFC) is used for
--				// decoding.
--				//
--				//    Char. number range |        UTF-8 octet sequence
--				//      (hexadecimal)    |              (binary)
--				//   --------------------+------------------------------------
--				//   0000 0000-0000 007F | 0xxxxxxx
--				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
--				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
--				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
--				//
--				// Additionally, the characters in the range 0xD800-0xDFFF
--				// are prohibited as they are reserved for use with UTF-16
--				// surrogate pairs.
--
--				// Determine the length of the UTF-8 sequence.
--				octet := parser.raw_buffer[parser.raw_buffer_pos]
--				switch {
--				case octet&0x80 == 0x00:
--					width = 1
--				case octet&0xE0 == 0xC0:
--					width = 2
--				case octet&0xF0 == 0xE0:
--					width = 3
--				case octet&0xF8 == 0xF0:
--					width = 4
--				default:
--					// The leading octet is invalid.
--					return yaml_parser_set_reader_error(parser,
--						"invalid leading UTF-8 octet",
--						parser.offset, int(octet))
--				}
--
--				// Check if the raw buffer contains an incomplete character.
--				if width > raw_unread {
--					if parser.eof {
--						return yaml_parser_set_reader_error(parser,
--							"incomplete UTF-8 octet sequence",
--							parser.offset, -1)
--					}
--					break inner
--				}
--
--				// Decode the leading octet.
--				switch {
--				case octet&0x80 == 0x00:
--					value = rune(octet & 0x7F)
--				case octet&0xE0 == 0xC0:
--					value = rune(octet & 0x1F)
--				case octet&0xF0 == 0xE0:
--					value = rune(octet & 0x0F)
--				case octet&0xF8 == 0xF0:
--					value = rune(octet & 0x07)
--				default:
--					value = 0
--				}
--
--				// Check and decode the trailing octets.
--				for k := 1; k < width; k++ {
--					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
--
--					// Check if the octet is valid.
--					if (octet & 0xC0) != 0x80 {
--						return yaml_parser_set_reader_error(parser,
--							"invalid trailing UTF-8 octet",
--							parser.offset+k, int(octet))
--					}
--
--					// Decode the octet.
--					value = (value << 6) + rune(octet&0x3F)
--				}
--
--				// Check the length of the sequence against the value.
--				switch {
--				case width == 1:
--				case width == 2 && value >= 0x80:
--				case width == 3 && value >= 0x800:
--				case width == 4 && value >= 0x10000:
--				default:
--					return yaml_parser_set_reader_error(parser,
--						"invalid length of a UTF-8 sequence",
--						parser.offset, -1)
--				}
--
--				// Check the range of the value.
--				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
--					return yaml_parser_set_reader_error(parser,
--						"invalid Unicode character",
--						parser.offset, int(value))
--				}
--
--			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
--				var low, high int
--				if parser.encoding == yaml_UTF16LE_ENCODING {
--					low, high = 0, 1
--				} else {
--					high, low = 1, 0
--				}
--
--				// The UTF-16 encoding is not as simple as one might
--				// naively think.  Check RFC 2781
--				// (http://www.ietf.org/rfc/rfc2781.txt).
--				//
--				// Normally, two subsequent bytes describe a Unicode
--				// character.  However a special technique (called a
--				// surrogate pair) is used for specifying character
--				// values larger than 0xFFFF.
--				//
--				// A surrogate pair consists of two pseudo-characters:
--				//      high surrogate area (0xD800-0xDBFF)
--				//      low surrogate area (0xDC00-0xDFFF)
--				//
--				// The following formulas are used for decoding
--				// and encoding characters using surrogate pairs:
--				//
--				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
--				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
--				//  W1 = 110110yyyyyyyyyy
--				//  W2 = 110111xxxxxxxxxx
--				//
--				// where U is the character value, W1 is the high surrogate
--				// area, W2 is the low surrogate area.
--
--				// Check for incomplete UTF-16 character.
--				if raw_unread < 2 {
--					if parser.eof {
--						return yaml_parser_set_reader_error(parser,
--							"incomplete UTF-16 character",
--							parser.offset, -1)
--					}
--					break inner
--				}
--
--				// Get the character.
--				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
--					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
--
--				// Check for unexpected low surrogate area.
--				if value&0xFC00 == 0xDC00 {
--					return yaml_parser_set_reader_error(parser,
--						"unexpected low surrogate area",
--						parser.offset, int(value))
--				}
--
--				// Check for a high surrogate area.
--				if value&0xFC00 == 0xD800 {
--					width = 4
--
--					// Check for incomplete surrogate pair.
--					if raw_unread < 4 {
--						if parser.eof {
--							return yaml_parser_set_reader_error(parser,
--								"incomplete UTF-16 surrogate pair",
--								parser.offset, -1)
--						}
--						break inner
--					}
--
--					// Get the next character.
--					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
--						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
--
--					// Check for a low surrogate area.
--					if value2&0xFC00 != 0xDC00 {
--						return yaml_parser_set_reader_error(parser,
--							"expected low surrogate area",
--							parser.offset+2, int(value2))
--					}
--
--					// Generate the value of the surrogate pair.
--					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
--				} else {
--					width = 2
--				}
--
--			default:
--				panic("impossible")
--			}
--
--			// Check if the character is in the allowed range:
--			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
--			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
--			//      | [#x10000-#x10FFFF]                        (32 bit)
--			switch {
--			case value == 0x09:
--			case value == 0x0A:
--			case value == 0x0D:
--			case value >= 0x20 && value <= 0x7E:
--			case value == 0x85:
--			case value >= 0xA0 && value <= 0xD7FF:
--			case value >= 0xE000 && value <= 0xFFFD:
--			case value >= 0x10000 && value <= 0x10FFFF:
--			default:
--				return yaml_parser_set_reader_error(parser,
--					"control characters are not allowed",
--					parser.offset, int(value))
--			}
--
--			// Move the raw pointers.
--			parser.raw_buffer_pos += width
--			parser.offset += width
--
--			// Finally put the character into the buffer.
--			if value <= 0x7F {
--				// 0000 0000-0000 007F . 0xxxxxxx
--				parser.buffer[buffer_len+0] = byte(value)
--			} else if value <= 0x7FF {
--				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
--				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
--				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
--			} else if value <= 0xFFFF {
--				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
--				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
--				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
--				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
--			} else {
--				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
--				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
--				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
--				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
--				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
--			}
--			buffer_len += width
--
--			parser.unread++
--		}
--
--		// On EOF, put NUL into the buffer and return.
--		if parser.eof {
--			parser.buffer[buffer_len] = 0
--			buffer_len++
--			parser.unread++
--			break
--		}
--	}
--	parser.buffer = parser.buffer[:buffer_len]
--	return true
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/resolve.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/resolve.go
-deleted file mode 100644
-index 93a8632..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/resolve.go
-+++ /dev/null
-@@ -1,203 +0,0 @@
--package yaml
--
--import (
--	"encoding/base64"
--	"math"
--	"strconv"
--	"strings"
--	"unicode/utf8"
--)
--
--type resolveMapItem struct {
--	value interface{}
--	tag   string
--}
--
--var resolveTable = make([]byte, 256)
--var resolveMap = make(map[string]resolveMapItem)
--
--func init() {
--	t := resolveTable
--	t[int('+')] = 'S' // Sign
--	t[int('-')] = 'S'
--	for _, c := range "0123456789" {
--		t[int(c)] = 'D' // Digit
--	}
--	for _, c := range "yYnNtTfFoO~" {
--		t[int(c)] = 'M' // In map
--	}
--	t[int('.')] = '.' // Float (potentially in map)
--
--	var resolveMapList = []struct {
--		v   interface{}
--		tag string
--		l   []string
--	}{
--		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
--		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
--		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
--		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
--		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
--		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
--		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
--		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
--		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
--		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
--		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
--		{"<<", yaml_MERGE_TAG, []string{"<<"}},
--	}
--
--	m := resolveMap
--	for _, item := range resolveMapList {
--		for _, s := range item.l {
--			m[s] = resolveMapItem{item.v, item.tag}
--		}
--	}
--}
--
--const longTagPrefix = "tag:yaml.org,2002:"
--
--func shortTag(tag string) string {
--	// TODO This can easily be made faster and produce less garbage.
--	if strings.HasPrefix(tag, longTagPrefix) {
--		return "!!" + tag[len(longTagPrefix):]
--	}
--	return tag
--}
--
--func longTag(tag string) string {
--	if strings.HasPrefix(tag, "!!") {
--		return longTagPrefix + tag[2:]
--	}
--	return tag
--}
--
--func resolvableTag(tag string) bool {
--	switch tag {
--	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
--		return true
--	}
--	return false
--}
--
--func resolve(tag string, in string) (rtag string, out interface{}) {
--	if !resolvableTag(tag) {
--		return tag, in
--	}
--
--	defer func() {
--		switch tag {
--		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
--			return
--		}
--		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
--	}()
--
--	// Any data is accepted as a !!str or !!binary.
--	// Otherwise, the prefix is enough of a hint about what it might be.
--	hint := byte('N')
--	if in != "" {
--		hint = resolveTable[in[0]]
--	}
--	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
--		// Handle things we can lookup in a map.
--		if item, ok := resolveMap[in]; ok {
--			return item.tag, item.value
--		}
--
--		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
--		// are purposefully unsupported here. They're still quoted on
--		// the way out for compatibility with other parser, though.
--
--		switch hint {
--		case 'M':
--			// We've already checked the map above.
--
--		case '.':
--			// Not in the map, so maybe a normal float.
--			floatv, err := strconv.ParseFloat(in, 64)
--			if err == nil {
--				return yaml_FLOAT_TAG, floatv
--			}
--
--		case 'D', 'S':
--			// Int, float, or timestamp.
--			plain := strings.Replace(in, "_", "", -1)
--			intv, err := strconv.ParseInt(plain, 0, 64)
--			if err == nil {
--				if intv == int64(int(intv)) {
--					return yaml_INT_TAG, int(intv)
--				} else {
--					return yaml_INT_TAG, intv
--				}
--			}
--			uintv, err := strconv.ParseUint(plain, 0, 64)
--			if err == nil {
--				return yaml_INT_TAG, uintv
--			}
--			floatv, err := strconv.ParseFloat(plain, 64)
--			if err == nil {
--				return yaml_FLOAT_TAG, floatv
--			}
--			if strings.HasPrefix(plain, "0b") {
--				intv, err := strconv.ParseInt(plain[2:], 2, 64)
--				if err == nil {
--					if intv == int64(int(intv)) {
--						return yaml_INT_TAG, int(intv)
--					} else {
--						return yaml_INT_TAG, intv
--					}
--				}
--				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
--				if err == nil {
--					return yaml_INT_TAG, uintv
--				}
--			} else if strings.HasPrefix(plain, "-0b") {
--				intv, err := strconv.ParseInt(plain[3:], 2, 64)
--				if err == nil {
--					if intv == int64(int(intv)) {
--						return yaml_INT_TAG, -int(intv)
--					} else {
--						return yaml_INT_TAG, -intv
--					}
--				}
--			}
--			// XXX Handle timestamps here.
--
--		default:
--			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
--		}
--	}
--	if tag == yaml_BINARY_TAG {
--		return yaml_BINARY_TAG, in
--	}
--	if utf8.ValidString(in) {
--		return yaml_STR_TAG, in
--	}
--	return yaml_BINARY_TAG, encodeBase64(in)
--}
--
--// encodeBase64 encodes s as base64 that is broken up into multiple lines
--// as appropriate for the resulting length.
--func encodeBase64(s string) string {
--	const lineLen = 70
--	encLen := base64.StdEncoding.EncodedLen(len(s))
--	lines := encLen/lineLen + 1
--	buf := make([]byte, encLen*2+lines)
--	in := buf[0:encLen]
--	out := buf[encLen:]
--	base64.StdEncoding.Encode(in, []byte(s))
--	k := 0
--	for i := 0; i < len(in); i += lineLen {
--		j := i + lineLen
--		if j > len(in) {
--			j = len(in)
--		}
--		k += copy(out[k:], in[i:j])
--		if lines > 1 {
--			out[k] = '\n'
--			k++
--		}
--	}
--	return string(out[:k])
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/scannerc.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/scannerc.go
-deleted file mode 100644
-index fe93b19..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/scannerc.go
-+++ /dev/null
-@@ -1,2710 +0,0 @@
--package yaml
--
--import (
--	"bytes"
--	"fmt"
--)
--
--// Introduction
--// ************
--//
--// The following notes assume that you are familiar with the YAML specification
--// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
--// some cases we are less restrictive than it requires.
--//
--// The process of transforming a YAML stream into a sequence of events is
--// divided into two steps: Scanning and Parsing.
--//
--// The Scanner transforms the input stream into a sequence of tokens, while the
--// Parser transforms the sequence of tokens produced by the Scanner into a
--// sequence of parsing events.
--//
--// The Scanner is rather clever and complicated. The Parser, on the contrary,
--// is a straightforward implementation of a recursive-descent parser (or,
--// LL(1) parser, as it is usually called).
--//
--// Actually there are two issues of Scanning that might be called "clever"; the
--// rest is quite straightforward.  The issues are "block collection start" and
--// "simple keys".  Both issues are explained below in detail.
--//
--// Here the Scanning step is explained and implemented.  We start with the list
--// of all the tokens produced by the Scanner together with short descriptions.
--//
--// Now, tokens:
--//
--//      STREAM-START(encoding)          # The stream start.
--//      STREAM-END                      # The stream end.
--//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
--//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
--//      DOCUMENT-START                  # '---'
--//      DOCUMENT-END                    # '...'
--//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
--//      BLOCK-MAPPING-START             # sequence or a block mapping.
--//      BLOCK-END                       # Indentation decrease.
--//      FLOW-SEQUENCE-START             # '['
--//      FLOW-SEQUENCE-END               # ']'
--//      FLOW-MAPPING-START              # '{'
--//      FLOW-MAPPING-END                # '}'
--//      BLOCK-ENTRY                     # '-'
--//      FLOW-ENTRY                      # ','
--//      KEY                             # '?' or nothing (simple keys).
--//      VALUE                           # ':'
--//      ALIAS(anchor)                   # '*anchor'
--//      ANCHOR(anchor)                  # '&anchor'
--//      TAG(handle,suffix)              # '!handle!suffix'
--//      SCALAR(value,style)             # A scalar.
--//
--// The following two tokens are "virtual" tokens denoting the beginning and the
--// end of the stream:
--//
--//      STREAM-START(encoding)
--//      STREAM-END
--//
--// We pass the information about the input stream encoding with the
--// STREAM-START token.
--//
--// The next two tokens are responsible for tags:
--//
--//      VERSION-DIRECTIVE(major,minor)
--//      TAG-DIRECTIVE(handle,prefix)
--//
--// Example:
--//
--//      %YAML   1.1
--//      %TAG    !   !foo
--//      %TAG    !yaml!  tag:yaml.org,2002:
--//      ---
--//
--// The corresponding sequence of tokens:
--//
--//      STREAM-START(utf-8)
--//      VERSION-DIRECTIVE(1,1)
--//      TAG-DIRECTIVE("!","!foo")
--//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
--//      DOCUMENT-START
--//      STREAM-END
--//
--// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
--// line.
--//
--// The document start and end indicators are represented by:
--//
--//      DOCUMENT-START
--//      DOCUMENT-END
--//
--// Note that if a YAML stream contains an implicit document (without '---'
--// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
--// produced.
--//
--// In the following examples, we present whole documents together with the
--// produced tokens.
--//
--//      1. An implicit document:
--//
--//          'a scalar'
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          SCALAR("a scalar",single-quoted)
--//          STREAM-END
--//
--//      2. An explicit document:
--//
--//          ---
--//          'a scalar'
--//          ...
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          DOCUMENT-START
--//          SCALAR("a scalar",single-quoted)
--//          DOCUMENT-END
--//          STREAM-END
--//
--//      3. Several documents in a stream:
--//
--//          'a scalar'
--//          ---
--//          'another scalar'
--//          ---
--//          'yet another scalar'
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          SCALAR("a scalar",single-quoted)
--//          DOCUMENT-START
--//          SCALAR("another scalar",single-quoted)
--//          DOCUMENT-START
--//          SCALAR("yet another scalar",single-quoted)
--//          STREAM-END
--//
--// We have already introduced the SCALAR token above.  The following tokens are
--// used to describe aliases, anchors, tag, and scalars:
--//
--//      ALIAS(anchor)
--//      ANCHOR(anchor)
--//      TAG(handle,suffix)
--//      SCALAR(value,style)
--//
--// The following series of examples illustrate the usage of these tokens:
--//
--//      1. A recursive sequence:
--//
--//          &A [ *A ]
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          ANCHOR("A")
--//          FLOW-SEQUENCE-START
--//          ALIAS("A")
--//          FLOW-SEQUENCE-END
--//          STREAM-END
--//
--//      2. A tagged scalar:
--//
--//          !!float "3.14"  # A good approximation.
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          TAG("!!","float")
--//          SCALAR("3.14",double-quoted)
--//          STREAM-END
--//
--//      3. Various scalar styles:
--//
--//          --- # Implicit empty plain scalars do not produce tokens.
--//          --- a plain scalar
--//          --- 'a single-quoted scalar'
--//          --- "a double-quoted scalar"
--//          --- |-
--//            a literal scalar
--//          --- >-
--//            a folded
--//            scalar
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          DOCUMENT-START
--//          DOCUMENT-START
--//          SCALAR("a plain scalar",plain)
--//          DOCUMENT-START
--//          SCALAR("a single-quoted scalar",single-quoted)
--//          DOCUMENT-START
--//          SCALAR("a double-quoted scalar",double-quoted)
--//          DOCUMENT-START
--//          SCALAR("a literal scalar",literal)
--//          DOCUMENT-START
--//          SCALAR("a folded scalar",folded)
--//          STREAM-END
--//
--// Now it's time to review collection-related tokens. We will start with
--// flow collections:
--//
--//      FLOW-SEQUENCE-START
--//      FLOW-SEQUENCE-END
--//      FLOW-MAPPING-START
--//      FLOW-MAPPING-END
--//      FLOW-ENTRY
--//      KEY
--//      VALUE
--//
--// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
--// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
--// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally, the
--// indicators '?' and ':', which are used for denoting mapping keys and values,
--// are represented by the KEY and VALUE tokens.
--//
--// The following examples show flow collections:
--//
--//      1. A flow sequence:
--//
--//          [item 1, item 2, item 3]
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          FLOW-SEQUENCE-START
--//          SCALAR("item 1",plain)
--//          FLOW-ENTRY
--//          SCALAR("item 2",plain)
--//          FLOW-ENTRY
--//          SCALAR("item 3",plain)
--//          FLOW-SEQUENCE-END
--//          STREAM-END
--//
--//      2. A flow mapping:
--//
--//          {
--//              a simple key: a value,  # Note that the KEY token is produced.
--//              ? a complex key: another value,
--//          }
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          FLOW-MAPPING-START
--//          KEY
--//          SCALAR("a simple key",plain)
--//          VALUE
--//          SCALAR("a value",plain)
--//          FLOW-ENTRY
--//          KEY
--//          SCALAR("a complex key",plain)
--//          VALUE
--//          SCALAR("another value",plain)
--//          FLOW-ENTRY
--//          FLOW-MAPPING-END
--//          STREAM-END
--//
--// A simple key is a key which is not denoted by the '?' indicator.  Note that
--// the Scanner still produces the KEY token whenever it encounters a simple key.
--//
--// For scanning block collections, the following tokens are used (note that we
--// repeat KEY and VALUE here):
--//
--//      BLOCK-SEQUENCE-START
--//      BLOCK-MAPPING-START
--//      BLOCK-END
--//      BLOCK-ENTRY
--//      KEY
--//      VALUE
--//
--// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
--// increase that precedes a block collection (cf. the INDENT token in Python).
--// The token BLOCK-END denotes the indentation decrease that ends a block collection
--// (cf. the DEDENT token in Python).  However, YAML has some syntax peculiarities
--// that make detection of these tokens more complex.
--//
--// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
--// '-', '?', and ':' correspondingly.
--//
--// The following examples show how the tokens BLOCK-SEQUENCE-START,
--// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
--//
--//      1. Block sequences:
--//
--//          - item 1
--//          - item 2
--//          -
--//            - item 3.1
--//            - item 3.2
--//          -
--//            key 1: value 1
--//            key 2: value 2
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          SCALAR("item 1",plain)
--//          BLOCK-ENTRY
--//          SCALAR("item 2",plain)
--//          BLOCK-ENTRY
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          SCALAR("item 3.1",plain)
--//          BLOCK-ENTRY
--//          SCALAR("item 3.2",plain)
--//          BLOCK-END
--//          BLOCK-ENTRY
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("key 1",plain)
--//          VALUE
--//          SCALAR("value 1",plain)
--//          KEY
--//          SCALAR("key 2",plain)
--//          VALUE
--//          SCALAR("value 2",plain)
--//          BLOCK-END
--//          BLOCK-END
--//          STREAM-END
--//
--//      2. Block mappings:
--//
--//          a simple key: a value   # The KEY token is produced here.
--//          ? a complex key
--//          : another value
--//          a mapping:
--//            key 1: value 1
--//            key 2: value 2
--//          a sequence:
--//            - item 1
--//            - item 2
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("a simple key",plain)
--//          VALUE
--//          SCALAR("a value",plain)
--//          KEY
--//          SCALAR("a complex key",plain)
--//          VALUE
--//          SCALAR("another value",plain)
--//          KEY
--//          SCALAR("a mapping",plain)
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("key 1",plain)
--//          VALUE
--//          SCALAR("value 1",plain)
--//          KEY
--//          SCALAR("key 2",plain)
--//          VALUE
--//          SCALAR("value 2",plain)
--//          BLOCK-END
--//          KEY
--//          SCALAR("a sequence",plain)
--//          VALUE
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          SCALAR("item 1",plain)
--//          BLOCK-ENTRY
--//          SCALAR("item 2",plain)
--//          BLOCK-END
--//          BLOCK-END
--//          STREAM-END
--//
--// YAML does not always require starting a new block collection on a new
--// line.  If the current line contains only '-', '?', and ':' indicators, a new
--// block collection may start at the current line.  The following examples
--// illustrate this case:
--//
--//      1. Collections in a sequence:
--//
--//          - - item 1
--//            - item 2
--//          - key 1: value 1
--//            key 2: value 2
--//          - ? complex key
--//            : complex value
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          SCALAR("item 1",plain)
--//          BLOCK-ENTRY
--//          SCALAR("item 2",plain)
--//          BLOCK-END
--//          BLOCK-ENTRY
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("key 1",plain)
--//          VALUE
--//          SCALAR("value 1",plain)
--//          KEY
--//          SCALAR("key 2",plain)
--//          VALUE
--//          SCALAR("value 2",plain)
--//          BLOCK-END
--//          BLOCK-ENTRY
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("complex key")
--//          VALUE
--//          SCALAR("complex value")
--//          BLOCK-END
--//          BLOCK-END
--//          STREAM-END
--//
--//      2. Collections in a mapping:
--//
--//          ? a sequence
--//          : - item 1
--//            - item 2
--//          ? a mapping
--//          : key 1: value 1
--//            key 2: value 2
--//
--//      Tokens:
--//
--//          STREAM-START(utf-8)
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("a sequence",plain)
--//          VALUE
--//          BLOCK-SEQUENCE-START
--//          BLOCK-ENTRY
--//          SCALAR("item 1",plain)
--//          BLOCK-ENTRY
--//          SCALAR("item 2",plain)
--//          BLOCK-END
--//          KEY
--//          SCALAR("a mapping",plain)
--//          VALUE
--//          BLOCK-MAPPING-START
--//          KEY
--//          SCALAR("key 1",plain)
--//          VALUE
--//          SCALAR("value 1",plain)
--//          KEY
--//          SCALAR("key 2",plain)
--//          VALUE
--//          SCALAR("value 2",plain)
--//          BLOCK-END
--//          BLOCK-END
--//          STREAM-END
--//
--// YAML also permits non-indented sequences if they are included into a block
--// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
--//
--//      key:
--//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
--//      - item 2
--//
--// Tokens:
--//
--//      STREAM-START(utf-8)
--//      BLOCK-MAPPING-START
--//      KEY
--//      SCALAR("key",plain)
--//      VALUE
--//      BLOCK-ENTRY
--//      SCALAR("item 1",plain)
--//      BLOCK-ENTRY
--//      SCALAR("item 2",plain)
--//      BLOCK-END
--//
--
--// Ensure that the buffer contains the required number of characters.
--// Return true on success, false on failure (reader error or memory error).
--func cache(parser *yaml_parser_t, length int) bool {
--	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
--	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
--}
--
--// Advance the buffer pointer.
--func skip(parser *yaml_parser_t) {
--	parser.mark.index++
--	parser.mark.column++
--	parser.unread--
--	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
--}
--
--func skip_line(parser *yaml_parser_t) {
--	if is_crlf(parser.buffer, parser.buffer_pos) {
--		parser.mark.index += 2
--		parser.mark.column = 0
--		parser.mark.line++
--		parser.unread -= 2
--		parser.buffer_pos += 2
--	} else if is_break(parser.buffer, parser.buffer_pos) {
--		parser.mark.index++
--		parser.mark.column = 0
--		parser.mark.line++
--		parser.unread--
--		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
--	}
--}
--
--// Copy a character to a string buffer and advance pointers.
--func read(parser *yaml_parser_t, s []byte) []byte {
--	w := width(parser.buffer[parser.buffer_pos])
--	if w == 0 {
--		panic("invalid character sequence")
--	}
--	if len(s) == 0 {
--		s = make([]byte, 0, 32)
--	}
--	if w == 1 && len(s)+w <= cap(s) {
--		s = s[:len(s)+1]
--		s[len(s)-1] = parser.buffer[parser.buffer_pos]
--		parser.buffer_pos++
--	} else {
--		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
--		parser.buffer_pos += w
--	}
--	parser.mark.index++
--	parser.mark.column++
--	parser.unread--
--	return s
--}
--
--// Copy a line break character to a string buffer and advance pointers.
--func read_line(parser *yaml_parser_t, s []byte) []byte {
--	buf := parser.buffer
--	pos := parser.buffer_pos
--	switch {
--	case buf[pos] == '\r' && buf[pos+1] == '\n':
--		// CR LF . LF
--		s = append(s, '\n')
--		parser.buffer_pos += 2
--		parser.mark.index++
--		parser.unread--
--	case buf[pos] == '\r' || buf[pos] == '\n':
--		// CR|LF . LF
--		s = append(s, '\n')
--		parser.buffer_pos += 1
--	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
--		// NEL . LF
--		s = append(s, '\n')
--		parser.buffer_pos += 2
--	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
--		// LS|PS . LS|PS
--		s = append(s, buf[parser.buffer_pos:pos+3]...)
--		parser.buffer_pos += 3
--	default:
--		return s
--	}
--	parser.mark.index++
--	parser.mark.column = 0
--	parser.mark.line++
--	parser.unread--
--	return s
--}
--
--// Get the next token.
--func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
--	// Erase the token object.
--	*token = yaml_token_t{} // [Go] Is this necessary?
--
--	// No tokens after STREAM-END or error.
--	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
--		return true
--	}
--
--	// Ensure that the tokens queue contains enough tokens.
--	if !parser.token_available {
--		if !yaml_parser_fetch_more_tokens(parser) {
--			return false
--		}
--	}
--
--	// Fetch the next token from the queue.
--	*token = parser.tokens[parser.tokens_head]
--	parser.tokens_head++
--	parser.tokens_parsed++
--	parser.token_available = false
--
--	if token.typ == yaml_STREAM_END_TOKEN {
--		parser.stream_end_produced = true
--	}
--	return true
--}
--
--// Set the scanner error and return false.
--func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
--	parser.error = yaml_SCANNER_ERROR
--	parser.context = context
--	parser.context_mark = context_mark
--	parser.problem = problem
--	parser.problem_mark = parser.mark
--	return false
--}
--
--func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
--	context := "while parsing a tag"
--	if directive {
--		context = "while parsing a %TAG directive"
--	}
--	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
--}
--
--func trace(args ...interface{}) func() {
--	pargs := append([]interface{}{"+++"}, args...)
--	fmt.Println(pargs...)
--	pargs = append([]interface{}{"---"}, args...)
--	return func() { fmt.Println(pargs...) }
--}
--
--// Ensure that the tokens queue contains at least one token which can be
--// returned to the Parser.
--func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
--	// While we need more tokens to fetch, do it.
--	for {
--		// Check if we really need to fetch more tokens.
--		need_more_tokens := false
--
--		if parser.tokens_head == len(parser.tokens) {
--			// Queue is empty.
--			need_more_tokens = true
--		} else {
--			// Check if any potential simple key may occupy the head position.
--			if !yaml_parser_stale_simple_keys(parser) {
--				return false
--			}
--
--			for i := range parser.simple_keys {
--				simple_key := &parser.simple_keys[i]
--				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
--					need_more_tokens = true
--					break
--				}
--			}
--		}
--
--		// We are finished.
--		if !need_more_tokens {
--			break
--		}
--		// Fetch the next token.
--		if !yaml_parser_fetch_next_token(parser) {
--			return false
--		}
--	}
--
--	parser.token_available = true
--	return true
--}
--
--// The dispatcher for token fetchers.
--func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
--	// Ensure that the buffer is initialized.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	// Check if we just started scanning.  Fetch STREAM-START then.
--	if !parser.stream_start_produced {
--		return yaml_parser_fetch_stream_start(parser)
--	}
--
--	// Eat whitespaces and comments until we reach the next token.
--	if !yaml_parser_scan_to_next_token(parser) {
--		return false
--	}
--
--	// Remove obsolete potential simple keys.
--	if !yaml_parser_stale_simple_keys(parser) {
--		return false
--	}
--
--	// Check the indentation level against the current column.
--	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
--		return false
--	}
--
--	// Ensure that the buffer contains at least 4 characters.  4 is the length
--	// of the longest indicators ('--- ' and '... ').
--	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
--		return false
--	}
--
--	// Is it the end of the stream?
--	if is_z(parser.buffer, parser.buffer_pos) {
--		return yaml_parser_fetch_stream_end(parser)
--	}
--
--	// Is it a directive?
--	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
--		return yaml_parser_fetch_directive(parser)
--	}
--
--	buf := parser.buffer
--	pos := parser.buffer_pos
--
--	// Is it the document start indicator?
--	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
--		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
--	}
--
--	// Is it the document end indicator?
--	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
--		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
--	}
--
--	// Is it the flow sequence start indicator?
--	if buf[pos] == '[' {
--		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
--	}
--
--	// Is it the flow mapping start indicator?
--	if parser.buffer[parser.buffer_pos] == '{' {
--		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
--	}
--
--	// Is it the flow sequence end indicator?
--	if parser.buffer[parser.buffer_pos] == ']' {
--		return yaml_parser_fetch_flow_collection_end(parser,
--			yaml_FLOW_SEQUENCE_END_TOKEN)
--	}
--
--	// Is it the flow mapping end indicator?
--	if parser.buffer[parser.buffer_pos] == '}' {
--		return yaml_parser_fetch_flow_collection_end(parser,
--			yaml_FLOW_MAPPING_END_TOKEN)
--	}
--
--	// Is it the flow entry indicator?
--	if parser.buffer[parser.buffer_pos] == ',' {
--		return yaml_parser_fetch_flow_entry(parser)
--	}
--
--	// Is it the block entry indicator?
--	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
--		return yaml_parser_fetch_block_entry(parser)
--	}
--
--	// Is it the key indicator?
--	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
--		return yaml_parser_fetch_key(parser)
--	}
--
--	// Is it the value indicator?
--	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
--		return yaml_parser_fetch_value(parser)
--	}
--
--	// Is it an alias?
--	if parser.buffer[parser.buffer_pos] == '*' {
--		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
--	}
--
--	// Is it an anchor?
--	if parser.buffer[parser.buffer_pos] == '&' {
--		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
--	}
--
--	// Is it a tag?
--	if parser.buffer[parser.buffer_pos] == '!' {
--		return yaml_parser_fetch_tag(parser)
--	}
--
--	// Is it a literal scalar?
--	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
--		return yaml_parser_fetch_block_scalar(parser, true)
--	}
--
--	// Is it a folded scalar?
--	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
--		return yaml_parser_fetch_block_scalar(parser, false)
--	}
--
--	// Is it a single-quoted scalar?
--	if parser.buffer[parser.buffer_pos] == '\'' {
--		return yaml_parser_fetch_flow_scalar(parser, true)
--	}
--
--	// Is it a double-quoted scalar?
--	if parser.buffer[parser.buffer_pos] == '"' {
--		return yaml_parser_fetch_flow_scalar(parser, false)
--	}
--
--	// Is it a plain scalar?
--	//
--	// A plain scalar may start with any non-blank character except
--	//
--	//      '-', '?', ':', ',', '[', ']', '{', '}',
--	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
--	//      '%', '@', '`'.
--	//
--	// In the block context (and, for the '-' indicator, in the flow context
--	// too), it may also start with the characters
--	//
--	//      '-', '?', ':'
--	//
--	// if it is followed by a non-space character.
--	//
--	// The last rule is more restrictive than the specification requires.
--	// [Go] Make this logic more reasonable.
--	//switch parser.buffer[parser.buffer_pos] {
--	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
--	//}
--	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
--		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
--		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
--		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
--		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
--		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
--		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
--		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
--		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
--		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
--		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
--		(parser.flow_level == 0 &&
--			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
--			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
--		return yaml_parser_fetch_plain_scalar(parser)
--	}
--
--	// If we don't determine the token type so far, it is an error.
--	return yaml_parser_set_scanner_error(parser,
--		"while scanning for the next token", parser.mark,
--		"found character that cannot start any token")
--}
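
A minimal standalone sketch of the plain-scalar start rule spelled out in the comment inside yaml_parser_fetch_next_token above (hypothetical helper, byte-level view, blank/EOF handling reduced to a plain space check; not part of the vendored code):

package main

import (
	"fmt"
	"strings"
)

// indicators lists the characters that cannot start a plain scalar in the
// general case, per the comment in yaml_parser_fetch_next_token.
const indicators = "-?:,[]{}#&*!|>'\"%@`"

// canStartPlainScalar restates the large boolean expression above: any
// non-blank, non-indicator character may start a plain scalar; '-' may also
// start one when followed by a non-blank character, and '?' or ':' may do so
// in the block context when followed by a non-blank character.
func canStartPlainScalar(cur, next byte, flowLevel int) bool {
	if cur == ' ' || cur == 0 {
		return false
	}
	if !strings.ContainsRune(indicators, rune(cur)) {
		return true
	}
	if cur == '-' && next != ' ' {
		return true
	}
	return flowLevel == 0 && (cur == '?' || cur == ':') && next != ' ' && next != 0
}

func main() {
	fmt.Println(canStartPlainScalar('f', 'o', 0)) // true: ordinary letter
	fmt.Println(canStartPlainScalar('-', '1', 0)) // true: '-' followed by non-blank
	fmt.Println(canStartPlainScalar('{', 'a', 0)) // false: flow mapping indicator
}
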
--
--// Check the list of potential simple keys and remove the positions that
--// cannot contain simple keys anymore.
--func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
--	// Check for a potential simple key for each flow level.
--	for i := range parser.simple_keys {
--		simple_key := &parser.simple_keys[i]
--
--		// The specification requires that a simple key
--		//
--		//  - is limited to a single line,
--		//  - is shorter than 1024 characters.
--		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
--
--			// Check if the potential simple key to be removed is required.
--			if simple_key.required {
--				return yaml_parser_set_scanner_error(parser,
--					"while scanning a simple key", simple_key.mark,
--					"could not find expected ':'")
--			}
--			simple_key.possible = false
--		}
--	}
--	return true
--}
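
As a rough illustration, the staleness rule enforced above (a simple key must stay on one line and within 1024 characters of where it started) can be restated as a tiny predicate; the types and names here are hypothetical simplifications, not part of the vendored code:

package main

import "fmt"

// mark mirrors the line/index fields of yaml_mark_t that matter here.
type mark struct {
	index, line int
}

// simpleKeyIsStale restates the condition used by yaml_parser_stale_simple_keys:
// a potential simple key expires once the scanner has moved to a later line or
// more than 1024 characters past the position where the key started.
func simpleKeyIsStale(keyMark, current mark) bool {
	return keyMark.line < current.line || keyMark.index+1024 < current.index
}

func main() {
	key := mark{index: 10, line: 3}
	fmt.Println(simpleKeyIsStale(key, mark{index: 500, line: 3}))  // false: same line, within 1024 characters
	fmt.Println(simpleKeyIsStale(key, mark{index: 500, line: 4}))  // true: a later line
	fmt.Println(simpleKeyIsStale(key, mark{index: 2000, line: 3})) // true: more than 1024 characters later
}
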
--
--// Check if a simple key may start at the current position and add it if
--// needed.
--func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
--	// A simple key is required at the current position if the scanner is in
--	// the block context and the current column coincides with the indentation
--	// level.
--
--	required := parser.flow_level == 0 && parser.indent == parser.mark.column
--
--	// A simple key is required only when it is the first token in the current
--	// line.  Therefore it is always allowed.  But we add a check anyway.
--	if required && !parser.simple_key_allowed {
--		panic("should not happen")
--	}
--
--	//
--	// If the current position may start a simple key, save it.
--	//
--	if parser.simple_key_allowed {
--		simple_key := yaml_simple_key_t{
--			possible:     true,
--			required:     required,
--			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
--		}
--		simple_key.mark = parser.mark
--
--		if !yaml_parser_remove_simple_key(parser) {
--			return false
--		}
--		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
--	}
--	return true
--}
--
--// Remove a potential simple key at the current flow level.
--func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
--	i := len(parser.simple_keys) - 1
--	if parser.simple_keys[i].possible {
--		// If the key is required, it is an error.
--		if parser.simple_keys[i].required {
--			return yaml_parser_set_scanner_error(parser,
--				"while scanning a simple key", parser.simple_keys[i].mark,
--				"could not find expected ':'")
--		}
--	}
--	// Remove the key from the stack.
--	parser.simple_keys[i].possible = false
--	return true
--}
--
--// Increase the flow level and resize the simple key list if needed.
--func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
--	// Reset the simple key on the next level.
--	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
--
--	// Increase the flow level.
--	parser.flow_level++
--	return true
--}
--
--// Decrease the flow level.
--func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
--	if parser.flow_level > 0 {
--		parser.flow_level--
--		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
--	}
--	return true
--}
--
--// Push the current indentation level to the stack and set the new
--// indentation level if the current column is greater than the indentation
--// level.  In this case, append or insert the specified token into the token
--// queue.
--func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
--	// In the flow context, do nothing.
--	if parser.flow_level > 0 {
--		return true
--	}
--
--	if parser.indent < column {
--		// Push the current indentation level to the stack and set the new
--		// indentation level.
--		parser.indents = append(parser.indents, parser.indent)
--		parser.indent = column
--
--		// Create a token and insert it into the queue.
--		token := yaml_token_t{
--			typ:        typ,
--			start_mark: mark,
--			end_mark:   mark,
--		}
--		if number > -1 {
--			number -= parser.tokens_parsed
--		}
--		yaml_insert_token(parser, number, &token)
--	}
--	return true
--}
--
--// Pop indentation levels from the indents stack until the current level
--// becomes less than or equal to the column.  For each indentation level,
--// append the BLOCK-END token.
--func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
--	// In the flow context, do nothing.
--	if parser.flow_level > 0 {
--		return true
--	}
--
--	// Loop through the indentation levels in the stack.
--	for parser.indent > column {
--		// Create a token and append it to the queue.
--		token := yaml_token_t{
--			typ:        yaml_BLOCK_END_TOKEN,
--			start_mark: parser.mark,
--			end_mark:   parser.mark,
--		}
--		yaml_insert_token(parser, -1, &token)
--
--		// Pop the indentation level.
--		parser.indent = parser.indents[len(parser.indents)-1]
--		parser.indents = parser.indents[:len(parser.indents)-1]
--	}
--	return true
--}
--
--// Initialize the scanner and produce the STREAM-START token.
--func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
--
--	// Set the initial indentation.
--	parser.indent = -1
--
--	// Initialize the simple key stack.
--	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
--
--	// A simple key is allowed at the beginning of the stream.
--	parser.simple_key_allowed = true
--
--	// We have started.
--	parser.stream_start_produced = true
--
--	// Create the STREAM-START token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_STREAM_START_TOKEN,
--		start_mark: parser.mark,
--		end_mark:   parser.mark,
--		encoding:   parser.encoding,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the STREAM-END token and shut down the scanner.
--func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
--
--	// Force new line.
--	if parser.mark.column != 0 {
--		parser.mark.column = 0
--		parser.mark.line++
--	}
--
--	// Reset the indentation level.
--	if !yaml_parser_unroll_indent(parser, -1) {
--		return false
--	}
--
--	// Reset simple keys.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	parser.simple_key_allowed = false
--
--	// Create the STREAM-END token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_STREAM_END_TOKEN,
--		start_mark: parser.mark,
--		end_mark:   parser.mark,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
--func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
--	// Reset the indentation level.
--	if !yaml_parser_unroll_indent(parser, -1) {
--		return false
--	}
--
--	// Reset simple keys.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	parser.simple_key_allowed = false
--
--	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
--	token := yaml_token_t{}
--	if !yaml_parser_scan_directive(parser, &token) {
--		return false
--	}
--	// Append the token to the queue.
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the DOCUMENT-START or DOCUMENT-END token.
--func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
--	// Reset the indentation level.
--	if !yaml_parser_unroll_indent(parser, -1) {
--		return false
--	}
--
--	// Reset simple keys.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	parser.simple_key_allowed = false
--
--	// Consume the token.
--	start_mark := parser.mark
--
--	skip(parser)
--	skip(parser)
--	skip(parser)
--
--	end_mark := parser.mark
--
--	// Create the DOCUMENT-START or DOCUMENT-END token.
--	token := yaml_token_t{
--		typ:        typ,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	// Append the token to the queue.
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
--func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
--	// The indicators '[' and '{' may start a simple key.
--	if !yaml_parser_save_simple_key(parser) {
--		return false
--	}
--
--	// Increase the flow level.
--	if !yaml_parser_increase_flow_level(parser) {
--		return false
--	}
--
--	// A simple key may follow the indicators '[' and '{'.
--	parser.simple_key_allowed = true
--
--	// Consume the token.
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
--	token := yaml_token_t{
--		typ:        typ,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	// Append the token to the queue.
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
--func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
--	// Reset any potential simple key on the current flow level.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	// Decrease the flow level.
--	if !yaml_parser_decrease_flow_level(parser) {
--		return false
--	}
--
--	// No simple keys after the indicators ']' and '}'.
--	parser.simple_key_allowed = false
--
--	// Consume the token.
--
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
--	token := yaml_token_t{
--		typ:        typ,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	// Append the token to the queue.
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the FLOW-ENTRY token.
--func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
--	// Reset any potential simple keys on the current flow level.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	// Simple keys are allowed after ','.
--	parser.simple_key_allowed = true
--
--	// Consume the token.
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the FLOW-ENTRY token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_FLOW_ENTRY_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the BLOCK-ENTRY token.
--func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
--	// Check if the scanner is in the block context.
--	if parser.flow_level == 0 {
--		// Check if we are allowed to start a new entry.
--		if !parser.simple_key_allowed {
--			return yaml_parser_set_scanner_error(parser, "", parser.mark,
--				"block sequence entries are not allowed in this context")
--		}
--		// Add the BLOCK-SEQUENCE-START token if needed.
--		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
--			return false
--		}
--	} else {
--		// It is an error for the '-' indicator to occur in the flow context,
--		// but we let the Parser detect and report about it because the Parser
--		// is able to point to the context.
--	}
--
--	// Reset any potential simple keys on the current flow level.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	// Simple keys are allowed after '-'.
--	parser.simple_key_allowed = true
--
--	// Consume the token.
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the BLOCK-ENTRY token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_BLOCK_ENTRY_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the KEY token.
--func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
--
--	// In the block context, additional checks are required.
--	if parser.flow_level == 0 {
--		// Check if we are allowed to start a new key (not necessarily simple).
--		if !parser.simple_key_allowed {
--			return yaml_parser_set_scanner_error(parser, "", parser.mark,
--				"mapping keys are not allowed in this context")
--		}
--		// Add the BLOCK-MAPPING-START token if needed.
--		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
--			return false
--		}
--	}
--
--	// Reset any potential simple keys on the current flow level.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	// Simple keys are allowed after '?' in the block context.
--	parser.simple_key_allowed = parser.flow_level == 0
--
--	// Consume the token.
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the KEY token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_KEY_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the VALUE token.
--func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
--
--	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
--
--	// Have we found a simple key?
--	if simple_key.possible {
--		// Create the KEY token and insert it into the queue.
--		token := yaml_token_t{
--			typ:        yaml_KEY_TOKEN,
--			start_mark: simple_key.mark,
--			end_mark:   simple_key.mark,
--		}
--		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
--
--		// In the block context, we may need to add the BLOCK-MAPPING-START token.
--		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
--			simple_key.token_number,
--			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
--			return false
--		}
--
--		// Remove the simple key.
--		simple_key.possible = false
--
--		// A simple key cannot follow another simple key.
--		parser.simple_key_allowed = false
--
--	} else {
--		// The ':' indicator follows a complex key.
--
--		// In the block context, extra checks are required.
--		if parser.flow_level == 0 {
--
--			// Check if we are allowed to start a complex value.
--			if !parser.simple_key_allowed {
--				return yaml_parser_set_scanner_error(parser, "", parser.mark,
--					"mapping values are not allowed in this context")
--			}
--
--			// Add the BLOCK-MAPPING-START token if needed.
--			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
--				return false
--			}
--		}
--
--		// Simple keys after ':' are allowed in the block context.
--		parser.simple_key_allowed = parser.flow_level == 0
--	}
--
--	// Consume the token.
--	start_mark := parser.mark
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create the VALUE token and append it to the queue.
--	token := yaml_token_t{
--		typ:        yaml_VALUE_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the ALIAS or ANCHOR token.
--func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
--	// An anchor or an alias could be a simple key.
--	if !yaml_parser_save_simple_key(parser) {
--		return false
--	}
--
--	// A simple key cannot follow an anchor or an alias.
--	parser.simple_key_allowed = false
--
--	// Create the ALIAS or ANCHOR token and append it to the queue.
--	var token yaml_token_t
--	if !yaml_parser_scan_anchor(parser, &token, typ) {
--		return false
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the TAG token.
--func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
--	// A tag could be a simple key.
--	if !yaml_parser_save_simple_key(parser) {
--		return false
--	}
--
--	// A simple key cannot follow a tag.
--	parser.simple_key_allowed = false
--
--	// Create the TAG token and append it to the queue.
--	var token yaml_token_t
--	if !yaml_parser_scan_tag(parser, &token) {
--		return false
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
--func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
--	// Remove any potential simple keys.
--	if !yaml_parser_remove_simple_key(parser) {
--		return false
--	}
--
--	// A simple key may follow a block scalar.
--	parser.simple_key_allowed = true
--
--	// Create the SCALAR token and append it to the queue.
--	var token yaml_token_t
--	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
--		return false
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
--func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
--	// A quoted scalar could be a simple key.
--	if !yaml_parser_save_simple_key(parser) {
--		return false
--	}
--
--	// A simple key cannot follow a flow scalar.
--	parser.simple_key_allowed = false
--
--	// Create the SCALAR token and append it to the queue.
--	var token yaml_token_t
--	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
--		return false
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Produce the SCALAR(...,plain) token.
--func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
--	// A plain scalar could be a simple key.
--	if !yaml_parser_save_simple_key(parser) {
--		return false
--	}
--
--	// A simple key cannot follow a plain scalar.
--	parser.simple_key_allowed = false
--
--	// Create the SCALAR token and append it to the queue.
--	var token yaml_token_t
--	if !yaml_parser_scan_plain_scalar(parser, &token) {
--		return false
--	}
--	yaml_insert_token(parser, -1, &token)
--	return true
--}
--
--// Eat whitespaces and comments until the next token is found.
--func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
--
--	// Loop until the next token is found.
--	for {
--		// Allow the BOM mark to start a line.
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
--			skip(parser)
--		}
--
--		// Eat whitespaces.
--		// Tabs are allowed:
--		//  - in the flow context
--		//  - in the block context, but not at the beginning of the line or
--		//  after '-', '?', or ':' (complex value).
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--
--		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
--			skip(parser)
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--
--		// Eat a comment until a line break.
--		if parser.buffer[parser.buffer_pos] == '#' {
--			for !is_breakz(parser.buffer, parser.buffer_pos) {
--				skip(parser)
--				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--					return false
--				}
--			}
--		}
--
--		// If it is a line break, eat it.
--		if is_break(parser.buffer, parser.buffer_pos) {
--			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--				return false
--			}
--			skip_line(parser)
--
--			// In the block context, a new line may start a simple key.
--			if parser.flow_level == 0 {
--				parser.simple_key_allowed = true
--			}
--		} else {
--			break // We have found a token.
--		}
--	}
--
--	return true
--}
--
--// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
--//
--// Scope:
--//      %YAML    1.1    # a comment \n
--//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--//      %TAG    !yaml!  tag:yaml.org,2002:  \n
--//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--//
--func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
--	// Eat '%'.
--	start_mark := parser.mark
--	skip(parser)
--
--	// Scan the directive name.
--	var name []byte
--	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
--		return false
--	}
--
--	// Is it a YAML directive?
--	if bytes.Equal(name, []byte("YAML")) {
--		// Scan the VERSION directive value.
--		var major, minor int8
--		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
--			return false
--		}
--		end_mark := parser.mark
--
--		// Create a VERSION-DIRECTIVE token.
--		*token = yaml_token_t{
--			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			major:      major,
--			minor:      minor,
--		}
--
--		// Is it a TAG directive?
--	} else if bytes.Equal(name, []byte("TAG")) {
--		// Scan the TAG directive value.
--		var handle, prefix []byte
--		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
--			return false
--		}
--		end_mark := parser.mark
--
--		// Create a TAG-DIRECTIVE token.
--		*token = yaml_token_t{
--			typ:        yaml_TAG_DIRECTIVE_TOKEN,
--			start_mark: start_mark,
--			end_mark:   end_mark,
--			value:      handle,
--			prefix:     prefix,
--		}
--
--		// Unknown directive.
--	} else {
--		yaml_parser_set_scanner_error(parser, "while scanning a directive",
--			start_mark, "found unknown directive name")
--		return false
--	}
--
--	// Eat the rest of the line including any comments.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	for is_blank(parser.buffer, parser.buffer_pos) {
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	if parser.buffer[parser.buffer_pos] == '#' {
--		for !is_breakz(parser.buffer, parser.buffer_pos) {
--			skip(parser)
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--	}
--
--	// Check if we are at the end of the line.
--	if !is_breakz(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a directive",
--			start_mark, "did not find expected comment or line break")
--		return false
--	}
--
--	// Eat a line break.
--	if is_break(parser.buffer, parser.buffer_pos) {
--		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--			return false
--		}
--		skip_line(parser)
--	}
--
--	return true
--}
--
--// Scan the directive name.
--//
--// Scope:
--//      %YAML   1.1     # a comment \n
--//       ^^^^
--//      %TAG    !yaml!  tag:yaml.org,2002:  \n
--//       ^^^
--//
--func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
--	// Consume the directive name.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	var s []byte
--	for is_alpha(parser.buffer, parser.buffer_pos) {
--		s = read(parser, s)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Check if the name is empty.
--	if len(s) == 0 {
--		yaml_parser_set_scanner_error(parser, "while scanning a directive",
--			start_mark, "could not find expected directive name")
--		return false
--	}
--
--	// Check for a blank character after the name.
--	if !is_blankz(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a directive",
--			start_mark, "found unexpected non-alphabetical character")
--		return false
--	}
--	*name = s
--	return true
--}
--
--// Scan the value of VERSION-DIRECTIVE.
--//
--// Scope:
--//      %YAML   1.1     # a comment \n
--//           ^^^^^^
--func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
--	// Eat whitespaces.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	for is_blank(parser.buffer, parser.buffer_pos) {
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Consume the major version number.
--	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
--		return false
--	}
--
--	// Eat '.'.
--	if parser.buffer[parser.buffer_pos] != '.' {
--		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
--			start_mark, "did not find expected digit or '.' character")
--	}
--
--	skip(parser)
--
--	// Consume the minor version number.
--	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
--		return false
--	}
--	return true
--}
--
--const max_number_length = 2
--
--// Scan the version number of VERSION-DIRECTIVE.
--//
--// Scope:
--//      %YAML   1.1     # a comment \n
--//              ^
--//      %YAML   1.1     # a comment \n
--//                ^
--func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
--
--	// Repeat while the next character is digit.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	var value, length int8
--	for is_digit(parser.buffer, parser.buffer_pos) {
--		// Check if the number is too long.
--		length++
--		if length > max_number_length {
--			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
--				start_mark, "found extremely long version number")
--		}
--		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Check if the number was present.
--	if length == 0 {
--		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
--			start_mark, "did not find expected version number")
--	}
--	*number = value
--	return true
--}
--
--// Scan the value of a TAG-DIRECTIVE token.
--//
--// Scope:
--//      %TAG    !yaml!  tag:yaml.org,2002:  \n
--//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--//
--func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
--	var handle_value, prefix_value []byte
--
--	// Eat whitespaces.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	for is_blank(parser.buffer, parser.buffer_pos) {
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Scan a handle.
--	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
--		return false
--	}
--
--	// Expect a whitespace.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	if !is_blank(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
--			start_mark, "did not find expected whitespace")
--		return false
--	}
--
--	// Eat whitespaces.
--	for is_blank(parser.buffer, parser.buffer_pos) {
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Scan a prefix.
--	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
--		return false
--	}
--
--	// Expect a whitespace or line break.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	if !is_blankz(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
--			start_mark, "did not find expected whitespace or line break")
--		return false
--	}
--
--	*handle = handle_value
--	*prefix = prefix_value
--	return true
--}
--
--func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
--	var s []byte
--
--	// Eat the indicator character.
--	start_mark := parser.mark
--	skip(parser)
--
--	// Consume the value.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	for is_alpha(parser.buffer, parser.buffer_pos) {
--		s = read(parser, s)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	end_mark := parser.mark
--
--	/*
--	 * Check if the length of the anchor is greater than 0 and it is followed by
--	 * a whitespace character or one of the indicators:
--	 *
--	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
--	 */
--
--	if len(s) == 0 ||
--		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
--			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
--			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
--			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
--			parser.buffer[parser.buffer_pos] == '`') {
--		context := "while scanning an alias"
--		if typ == yaml_ANCHOR_TOKEN {
--			context = "while scanning an anchor"
--		}
--		yaml_parser_set_scanner_error(parser, context, start_mark,
--			"did not find expected alphabetic or numeric character")
--		return false
--	}
--
--	// Create a token.
--	*token = yaml_token_t{
--		typ:        typ,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		value:      s,
--	}
--
--	return true
--}
--
--/*
-- * Scan a TAG token.
-- */
--
--func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
--	var handle, suffix []byte
--
--	start_mark := parser.mark
--
--	// Check if the tag is in the canonical form.
--	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--		return false
--	}
--
--	if parser.buffer[parser.buffer_pos+1] == '<' {
--		// Keep the handle as ''
--
--		// Eat '!<'
--		skip(parser)
--		skip(parser)
--
--		// Consume the tag value.
--		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
--			return false
--		}
--
--		// Check for '>' and eat it.
--		if parser.buffer[parser.buffer_pos] != '>' {
--			yaml_parser_set_scanner_error(parser, "while scanning a tag",
--				start_mark, "did not find the expected '>'")
--			return false
--		}
--
--		skip(parser)
--	} else {
--		// The tag has either the '!suffix' or the '!handle!suffix' form.
--
--		// First, try to scan a handle.
--		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
--			return false
--		}
--
--		// Check if it is, indeed, a handle.
--		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
--			// Scan the suffix now.
--			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
--				return false
--			}
--		} else {
--			// It wasn't a handle after all.  Scan the rest of the tag.
--			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
--				return false
--			}
--
--			// Set the handle to '!'.
--			handle = []byte{'!'}
--
--			// A special case: the '!' tag.  Set the handle to '' and the
--			// suffix to '!'.
--			if len(suffix) == 0 {
--				handle, suffix = suffix, handle
--			}
--		}
--	}
--
--	// Check the character which ends the tag.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	if !is_blankz(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a tag",
--			start_mark, "did not find expected whitespace or line break")
--		return false
--	}
--
--	end_mark := parser.mark
--
--	// Create a token.
--	*token = yaml_token_t{
--		typ:        yaml_TAG_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		value:      handle,
--		suffix:     suffix,
--	}
--	return true
--}
--
--// Scan a tag handle.
--func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
--	// Check the initial '!' character.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	if parser.buffer[parser.buffer_pos] != '!' {
--		yaml_parser_set_scanner_tag_error(parser, directive,
--			start_mark, "did not find expected '!'")
--		return false
--	}
--
--	var s []byte
--
--	// Copy the '!' character.
--	s = read(parser, s)
--
--	// Copy all subsequent alphabetical and numerical characters.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	for is_alpha(parser.buffer, parser.buffer_pos) {
--		s = read(parser, s)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Check if the trailing character is '!' and copy it.
--	if parser.buffer[parser.buffer_pos] == '!' {
--		s = read(parser, s)
--	} else {
--		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
--			// directive, it's an error.  If it's a tag token, it must be a part of the URI.
--		if directive && !(s[0] == '!' && s[1] == 0) {
--			yaml_parser_set_scanner_tag_error(parser, directive,
--				start_mark, "did not find expected '!'")
--			return false
--		}
--	}
--
--	*handle = s
--	return true
--}
--
--// Scan a tag.
--func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
--	//size_t length = head ? strlen((char *)head) : 0
--	var s []byte
--
--	// Copy the head if needed.
--	//
--	// Note that we don't copy the leading '!' character.
--	if len(head) > 1 {
--		s = append(s, head[1:]...)
--	}
--
--	// Scan the tag.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	// The set of characters that may appear in URI is as follows:
--	//
--	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
--	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
--	//      '%'.
--	// [Go] Convert this into more reasonable logic.
--	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
--		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
--		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
--		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
--		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
--		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
--		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
--		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
--		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
--		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
--		parser.buffer[parser.buffer_pos] == '%' {
--		// Check if it is a URI-escape sequence.
--		if parser.buffer[parser.buffer_pos] == '%' {
--			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
--				return false
--			}
--		} else {
--			s = read(parser, s)
--		}
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--
--	// Check if the tag is non-empty.
--	if len(s) == 0 {
--		yaml_parser_set_scanner_tag_error(parser, directive,
--			start_mark, "did not find expected tag URI")
--		return false
--	}
--	*uri = s
--	return true
--}
--
--// Decode a URI-escape sequence corresponding to a single UTF-8 character.
--func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
--
--	// Decode the required number of characters.
--	w := 1024
--	for w > 0 {
--		// Check for a URI-escaped octet.
--		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
--			return false
--		}
--
--		if !(parser.buffer[parser.buffer_pos] == '%' &&
--			is_hex(parser.buffer, parser.buffer_pos+1) &&
--			is_hex(parser.buffer, parser.buffer_pos+2)) {
--			return yaml_parser_set_scanner_tag_error(parser, directive,
--				start_mark, "did not find URI escaped octet")
--		}
--
--		// Get the octet.
--		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
--
--		// If it is the leading octet, determine the length of the UTF-8 sequence.
--		if w == 1024 {
--			w = width(octet)
--			if w == 0 {
--				return yaml_parser_set_scanner_tag_error(parser, directive,
--					start_mark, "found an incorrect leading UTF-8 octet")
--			}
--		} else {
--			// Check if the trailing octet is correct.
--			if octet&0xC0 != 0x80 {
--				return yaml_parser_set_scanner_tag_error(parser, directive,
--					start_mark, "found an incorrect trailing UTF-8 octet")
--			}
--		}
--
--		// Copy the octet and move the pointers.
--		*s = append(*s, octet)
--		skip(parser)
--		skip(parser)
--		skip(parser)
--		w--
--	}
--	return true
--}
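
For reference, a simplified standalone take on what yaml_parser_scan_uri_escapes does for one escaped character, leaning on standard-library helpers instead of the parser's buffer bookkeeping (hypothetical function, error handling reduced to a bool; not part of the vendored code):

package main

import (
	"encoding/hex"
	"fmt"
	"unicode/utf8"
)

// decodeURIEscapes takes a run of "%XX" triples, converts each hex pair to an
// octet, and checks that the octets form exactly one valid UTF-8 sequence,
// mirroring the leading/trailing octet checks in the scanner above.
func decodeURIEscapes(escaped string) (r rune, ok bool) {
	var octets []byte
	for len(escaped) >= 3 && escaped[0] == '%' {
		b, err := hex.DecodeString(escaped[1:3])
		if err != nil {
			return 0, false
		}
		octets = append(octets, b[0])
		escaped = escaped[3:]
	}
	r, size := utf8.DecodeRune(octets)
	return r, r != utf8.RuneError && size == len(octets)
}

func main() {
	fmt.Println(decodeURIEscapes("%C3%A9")) // U+00E9 'é' from a two-octet sequence
	fmt.Println(decodeURIEscapes("%7E"))    // U+007E '~' from a single octet
}
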
--
--// Scan a block scalar.
--func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
--	// Eat the indicator '|' or '>'.
--	start_mark := parser.mark
--	skip(parser)
--
--	// Scan the additional block scalar indicators.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--
--	// Check for a chomping indicator.
--	var chomping, increment int
--	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
--		// Set the chomping method and eat the indicator.
--		if parser.buffer[parser.buffer_pos] == '+' {
--			chomping = +1
--		} else {
--			chomping = -1
--		}
--		skip(parser)
--
--		// Check for an indentation indicator.
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--		if is_digit(parser.buffer, parser.buffer_pos) {
--			// Check that the indentation is greater than 0.
--			if parser.buffer[parser.buffer_pos] == '0' {
--				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
--					start_mark, "found an indentation indicator equal to 0")
--				return false
--			}
--
--			// Get the indentation level and eat the indicator.
--			increment = as_digit(parser.buffer, parser.buffer_pos)
--			skip(parser)
--		}
--
--	} else if is_digit(parser.buffer, parser.buffer_pos) {
--		// Do the same as above, but in the opposite order.
--
--		if parser.buffer[parser.buffer_pos] == '0' {
--			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
--				start_mark, "found an indentation indicator equal to 0")
--			return false
--		}
--		increment = as_digit(parser.buffer, parser.buffer_pos)
--		skip(parser)
--
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
--			if parser.buffer[parser.buffer_pos] == '+' {
--				chomping = +1
--			} else {
--				chomping = -1
--			}
--			skip(parser)
--		}
--	}
--
--	// Eat whitespaces and comments to the end of the line.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	for is_blank(parser.buffer, parser.buffer_pos) {
--		skip(parser)
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--	}
--	if parser.buffer[parser.buffer_pos] == '#' {
--		for !is_breakz(parser.buffer, parser.buffer_pos) {
--			skip(parser)
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--	}
--
--	// Check if we are at the end of the line.
--	if !is_breakz(parser.buffer, parser.buffer_pos) {
--		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
--			start_mark, "did not find expected comment or line break")
--		return false
--	}
--
--	// Eat a line break.
--	if is_break(parser.buffer, parser.buffer_pos) {
--		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--			return false
--		}
--		skip_line(parser)
--	}
--
--	end_mark := parser.mark
--
--	// Set the indentation level if it was specified.
--	var indent int
--	if increment > 0 {
--		if parser.indent >= 0 {
--			indent = parser.indent + increment
--		} else {
--			indent = increment
--		}
--	}
--
--	// Scan the leading line breaks and determine the indentation level if needed.
--	var s, leading_break, trailing_breaks []byte
--	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
--		return false
--	}
--
--	// Scan the block scalar content.
--	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--		return false
--	}
--	var leading_blank, trailing_blank bool
--	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
--		// We are at the beginning of a non-empty line.
--
--		// Is it a trailing whitespace?
--		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
--
--		// Check if we need to fold the leading line break.
--		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
--			// Do we need to join the lines by space?
--			if len(trailing_breaks) == 0 {
--				s = append(s, ' ')
--			}
--		} else {
--			s = append(s, leading_break...)
--		}
--		leading_break = leading_break[:0]
--
--		// Append the remaining line breaks.
--		s = append(s, trailing_breaks...)
--		trailing_breaks = trailing_breaks[:0]
--
--		// Is it a leading whitespace?
--		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
--
--		// Consume the current line.
--		for !is_breakz(parser.buffer, parser.buffer_pos) {
--			s = read(parser, s)
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--
--		// Consume the line break.
--		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--			return false
--		}
--
--		leading_break = read_line(parser, leading_break)
--
--		// Eat the following indentation spaces and line breaks.
--		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
--			return false
--		}
--	}
--
--	// Chomp the tail.
--	if chomping != -1 {
--		s = append(s, leading_break...)
--	}
--	if chomping == 1 {
--		s = append(s, trailing_breaks...)
--	}
--
--	// Create a token.
--	*token = yaml_token_t{
--		typ:        yaml_SCALAR_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		value:      s,
--		style:      yaml_LITERAL_SCALAR_STYLE,
--	}
--	if !literal {
--		token.style = yaml_FOLDED_SCALAR_STYLE
--	}
--	return true
--}
--
--// Scan indentation spaces and line breaks for a block scalar.  Determine the
--// indentation level if needed.
--func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
--	*end_mark = parser.mark
--
--	// Eat the indentation spaces and line breaks.
--	max_indent := 0
--	for {
--		// Eat the indentation spaces.
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
--			skip(parser)
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--		if parser.mark.column > max_indent {
--			max_indent = parser.mark.column
--		}
--
--		// Check for a tab character messing up the indentation.
--		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
--			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
--				start_mark, "found a tab character where an indentation space is expected")
--		}
--
--		// Have we found a non-empty line?
--		if !is_break(parser.buffer, parser.buffer_pos) {
--			break
--		}
--
--		// Consume the line break.
--		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--			return false
--		}
--		// [Go] Should really be returning breaks instead.
--		*breaks = read_line(parser, *breaks)
--		*end_mark = parser.mark
--	}
--
--	// Determine the indentation level if needed.
--	if *indent == 0 {
--		*indent = max_indent
--		if *indent < parser.indent+1 {
--			*indent = parser.indent + 1
--		}
--		if *indent < 1 {
--			*indent = 1
--		}
--	}
--	return true
--}
--
--// Scan a quoted scalar.
--func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
--	// Eat the left quote.
--	start_mark := parser.mark
--	skip(parser)
--
--	// Consume the content of the quoted scalar.
--	var s, leading_break, trailing_breaks, whitespaces []byte
--	for {
--		// Check that there are no document indicators at the beginning of the line.
--		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
--			return false
--		}
--
--		if parser.mark.column == 0 &&
--			((parser.buffer[parser.buffer_pos+0] == '-' &&
--				parser.buffer[parser.buffer_pos+1] == '-' &&
--				parser.buffer[parser.buffer_pos+2] == '-') ||
--				(parser.buffer[parser.buffer_pos+0] == '.' &&
--					parser.buffer[parser.buffer_pos+1] == '.' &&
--					parser.buffer[parser.buffer_pos+2] == '.')) &&
--			is_blankz(parser.buffer, parser.buffer_pos+3) {
--			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
--				start_mark, "found unexpected document indicator")
--			return false
--		}
--
--		// Check for EOF.
--		if is_z(parser.buffer, parser.buffer_pos) {
--			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
--				start_mark, "found unexpected end of stream")
--			return false
--		}
--
--		// Consume non-blank characters.
--		leading_blanks := false
--		for !is_blankz(parser.buffer, parser.buffer_pos) {
--			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
--				// It is an escaped single quote.
--				s = append(s, '\'')
--				skip(parser)
--				skip(parser)
--
--			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
--				// It is a right single quote.
--				break
--			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
--				// It is a right double quote.
--				break
--
--			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
--				// It is an escaped line break.
--				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
--					return false
--				}
--				skip(parser)
--				skip_line(parser)
--				leading_blanks = true
--				break
--
--			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
--				// It is an escape sequence.
--				code_length := 0
--
--				// Check the escape character.
--				switch parser.buffer[parser.buffer_pos+1] {
--				case '0':
--					s = append(s, 0)
--				case 'a':
--					s = append(s, '\x07')
--				case 'b':
--					s = append(s, '\x08')
--				case 't', '\t':
--					s = append(s, '\x09')
--				case 'n':
--					s = append(s, '\x0A')
--				case 'v':
--					s = append(s, '\x0B')
--				case 'f':
--					s = append(s, '\x0C')
--				case 'r':
--					s = append(s, '\x0D')
--				case 'e':
--					s = append(s, '\x1B')
--				case ' ':
--					s = append(s, '\x20')
--				case '"':
--					s = append(s, '"')
--				case '\'':
--					s = append(s, '\'')
--				case '\\':
--					s = append(s, '\\')
--				case 'N': // NEL (#x85)
--					s = append(s, '\xC2')
--					s = append(s, '\x85')
--				case '_': // #xA0
--					s = append(s, '\xC2')
--					s = append(s, '\xA0')
--				case 'L': // LS (#x2028)
--					s = append(s, '\xE2')
--					s = append(s, '\x80')
--					s = append(s, '\xA8')
--				case 'P': // PS (#x2029)
--					s = append(s, '\xE2')
--					s = append(s, '\x80')
--					s = append(s, '\xA9')
--				case 'x':
--					code_length = 2
--				case 'u':
--					code_length = 4
--				case 'U':
--					code_length = 8
--				default:
--					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
--						start_mark, "found unknown escape character")
--					return false
--				}
--
--				skip(parser)
--				skip(parser)
--
--				// Consume an arbitrary escape code.
--				if code_length > 0 {
--					var value int
--
--					// Scan the character value.
--					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
--						return false
--					}
--					for k := 0; k < code_length; k++ {
--						if !is_hex(parser.buffer, parser.buffer_pos+k) {
--							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
--								start_mark, "did not find expected hexadecimal number")
--							return false
--						}
--						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
--					}
--
--					// Check the value and write the character.
--					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
--						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
--							start_mark, "found invalid Unicode character escape code")
--						return false
--					}
--					if value <= 0x7F {
--						s = append(s, byte(value))
--					} else if value <= 0x7FF {
--						s = append(s, byte(0xC0+(value>>6)))
--						s = append(s, byte(0x80+(value&0x3F)))
--					} else if value <= 0xFFFF {
--						s = append(s, byte(0xE0+(value>>12)))
--						s = append(s, byte(0x80+((value>>6)&0x3F)))
--						s = append(s, byte(0x80+(value&0x3F)))
--					} else {
--						s = append(s, byte(0xF0+(value>>18)))
--						s = append(s, byte(0x80+((value>>12)&0x3F)))
--						s = append(s, byte(0x80+((value>>6)&0x3F)))
--						s = append(s, byte(0x80+(value&0x3F)))
--					}
--
--					// Advance the pointer.
--					for k := 0; k < code_length; k++ {
--						skip(parser)
--					}
--				}
--			} else {
--				// It is a non-escaped non-blank character.
--				s = read(parser, s)
--			}
--			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--				return false
--			}
--		}
--
--		// Check if we are at the end of the scalar.
--		if single {
--			if parser.buffer[parser.buffer_pos] == '\'' {
--				break
--			}
--		} else {
--			if parser.buffer[parser.buffer_pos] == '"' {
--				break
--			}
--		}
--
--		// Consume blank characters.
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--
--		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
--			if is_blank(parser.buffer, parser.buffer_pos) {
--				// Consume a space or a tab character.
--				if !leading_blanks {
--					whitespaces = read(parser, whitespaces)
--				} else {
--					skip(parser)
--				}
--			} else {
--				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--					return false
--				}
--
--				// Check if it is a first line break.
--				if !leading_blanks {
--					whitespaces = whitespaces[:0]
--					leading_break = read_line(parser, leading_break)
--					leading_blanks = true
--				} else {
--					trailing_breaks = read_line(parser, trailing_breaks)
--				}
--			}
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--
--		// Join the whitespaces or fold line breaks.
--		if leading_blanks {
--			// Do we need to fold line breaks?
--			if len(leading_break) > 0 && leading_break[0] == '\n' {
--				if len(trailing_breaks) == 0 {
--					s = append(s, ' ')
--				} else {
--					s = append(s, trailing_breaks...)
--				}
--			} else {
--				s = append(s, leading_break...)
--				s = append(s, trailing_breaks...)
--			}
--			trailing_breaks = trailing_breaks[:0]
--			leading_break = leading_break[:0]
--		} else {
--			s = append(s, whitespaces...)
--			whitespaces = whitespaces[:0]
--		}
--	}
--
--	// Eat the right quote.
--	skip(parser)
--	end_mark := parser.mark
--
--	// Create a token.
--	*token = yaml_token_t{
--		typ:        yaml_SCALAR_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		value:      s,
--		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
--	}
--	if !single {
--		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
--	}
--	return true
--}
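
For reference, a minimal standalone sketch of the same UTF-8 byte math used in the escape handling above; for valid code points it should agree with the standard library's unicode/utf8 (encodeManually is an illustrative name, not part of the package):

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeManually mirrors the scanner's byte math for an escaped code point value.
func encodeManually(value int) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)),
			byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	for _, v := range []int{0x41, 0xE9, 0x20AC, 0x1F600} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, rune(v))
		fmt.Printf("U+%04X manual=% X stdlib=% X\n", v, encodeManually(v), buf[:n])
	}
}
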
--
--// Scan a plain scalar.
--func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
--
--	var s, leading_break, trailing_breaks, whitespaces []byte
--	var leading_blanks bool
--	var indent = parser.indent + 1
--
--	start_mark := parser.mark
--	end_mark := parser.mark
--
--	// Consume the content of the plain scalar.
--	for {
--		// Check for a document indicator.
--		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
--			return false
--		}
--		if parser.mark.column == 0 &&
--			((parser.buffer[parser.buffer_pos+0] == '-' &&
--				parser.buffer[parser.buffer_pos+1] == '-' &&
--				parser.buffer[parser.buffer_pos+2] == '-') ||
--				(parser.buffer[parser.buffer_pos+0] == '.' &&
--					parser.buffer[parser.buffer_pos+1] == '.' &&
--					parser.buffer[parser.buffer_pos+2] == '.')) &&
--			is_blankz(parser.buffer, parser.buffer_pos+3) {
--			break
--		}
--
--		// Check for a comment.
--		if parser.buffer[parser.buffer_pos] == '#' {
--			break
--		}
--
--		// Consume non-blank characters.
--		for !is_blankz(parser.buffer, parser.buffer_pos) {
--
--			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
--			if parser.flow_level > 0 &&
--				parser.buffer[parser.buffer_pos] == ':' &&
--				!is_blankz(parser.buffer, parser.buffer_pos+1) {
--				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
--					start_mark, "found unexpected ':'")
--				return false
--			}
--
--			// Check for indicators that may end a plain scalar.
--			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
--				(parser.flow_level > 0 &&
--					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
--						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
--						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
--						parser.buffer[parser.buffer_pos] == '}')) {
--				break
--			}
--
--			// Check if we need to join whitespaces and breaks.
--			if leading_blanks || len(whitespaces) > 0 {
--				if leading_blanks {
--					// Do we need to fold line breaks?
--					if leading_break[0] == '\n' {
--						if len(trailing_breaks) == 0 {
--							s = append(s, ' ')
--						} else {
--							s = append(s, trailing_breaks...)
--						}
--					} else {
--						s = append(s, leading_break...)
--						s = append(s, trailing_breaks...)
--					}
--					trailing_breaks = trailing_breaks[:0]
--					leading_break = leading_break[:0]
--					leading_blanks = false
--				} else {
--					s = append(s, whitespaces...)
--					whitespaces = whitespaces[:0]
--				}
--			}
--
--			// Copy the character.
--			s = read(parser, s)
--
--			end_mark = parser.mark
--			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--				return false
--			}
--		}
--
--		// Is it the end?
--		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
--			break
--		}
--
--		// Consume blank characters.
--		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--			return false
--		}
--
--		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
--			if is_blank(parser.buffer, parser.buffer_pos) {
--
--				// Check for a tab character that abuses indentation.
--				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
--					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
--						start_mark, "found a tab character that violates indentation")
--					return false
--				}
--
--				// Consume a space or a tab character.
--				if !leading_blanks {
--					whitespaces = read(parser, whitespaces)
--				} else {
--					skip(parser)
--				}
--			} else {
--				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
--					return false
--				}
--
--				// Check if it is a first line break.
--				if !leading_blanks {
--					whitespaces = whitespaces[:0]
--					leading_break = read_line(parser, leading_break)
--					leading_blanks = true
--				} else {
--					trailing_breaks = read_line(parser, trailing_breaks)
--				}
--			}
--			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
--				return false
--			}
--		}
--
--		// Check the indentation level.
--		if parser.flow_level == 0 && parser.mark.column < indent {
--			break
--		}
--	}
--
--	// Create a token.
--	*token = yaml_token_t{
--		typ:        yaml_SCALAR_TOKEN,
--		start_mark: start_mark,
--		end_mark:   end_mark,
--		value:      s,
--		style:      yaml_PLAIN_SCALAR_STYLE,
--	}
--
--	// Note that we change the 'simple_key_allowed' flag.
--	if leading_blanks {
--		parser.simple_key_allowed = true
--	}
--	return true
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/sorter.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/sorter.go
-deleted file mode 100644
-index 5958822..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/sorter.go
-+++ /dev/null
-@@ -1,104 +0,0 @@
--package yaml
--
--import (
--	"reflect"
--	"unicode"
--)
--
--type keyList []reflect.Value
--
--func (l keyList) Len() int      { return len(l) }
--func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
--func (l keyList) Less(i, j int) bool {
--	a := l[i]
--	b := l[j]
--	ak := a.Kind()
--	bk := b.Kind()
--	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
--		a = a.Elem()
--		ak = a.Kind()
--	}
--	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
--		b = b.Elem()
--		bk = b.Kind()
--	}
--	af, aok := keyFloat(a)
--	bf, bok := keyFloat(b)
--	if aok && bok {
--		if af != bf {
--			return af < bf
--		}
--		if ak != bk {
--			return ak < bk
--		}
--		return numLess(a, b)
--	}
--	if ak != reflect.String || bk != reflect.String {
--		return ak < bk
--	}
--	ar, br := []rune(a.String()), []rune(b.String())
--	for i := 0; i < len(ar) && i < len(br); i++ {
--		if ar[i] == br[i] {
--			continue
--		}
--		al := unicode.IsLetter(ar[i])
--		bl := unicode.IsLetter(br[i])
--		if al && bl {
--			return ar[i] < br[i]
--		}
--		if al || bl {
--			return bl
--		}
--		var ai, bi int
--		var an, bn int64
--		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
--			an = an*10 + int64(ar[ai]-'0')
--		}
--		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
--			bn = bn*10 + int64(br[bi]-'0')
--		}
--		if an != bn {
--			return an < bn
--		}
--		if ai != bi {
--			return ai < bi
--		}
--		return ar[i] < br[i]
--	}
--	return len(ar) < len(br)
--}
--
--// keyFloat returns a float value for v if it is a number/bool
--// and whether it is a number/bool or not.
--func keyFloat(v reflect.Value) (f float64, ok bool) {
--	switch v.Kind() {
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		return float64(v.Int()), true
--	case reflect.Float32, reflect.Float64:
--		return v.Float(), true
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		return float64(v.Uint()), true
--	case reflect.Bool:
--		if v.Bool() {
--			return 1, true
--		}
--		return 0, true
--	}
--	return 0, false
--}
--
--// numLess returns whether a < b.
--// a and b must necessarily have the same kind.
--func numLess(a, b reflect.Value) bool {
--	switch a.Kind() {
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		return a.Int() < b.Int()
--	case reflect.Float32, reflect.Float64:
--		return a.Float() < b.Float()
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		return a.Uint() < b.Uint()
--	case reflect.Bool:
--		return !a.Bool() && b.Bool()
--	}
--	panic("not a number")
--}
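
For reference, a plain-string sketch of the digit-aware comparison used by keyList.Less above, with the reflect handling stripped out; it only shows why a key such as "doc2" sorts ahead of "doc10" (lessNatural is an illustrative name, not part of the package):

package main

import (
	"fmt"
	"sort"
	"unicode"
)

// lessNatural mirrors the string branch of keyList.Less: letters compare
// normally, while runs of digits compare by their numeric value.
func lessNatural(a, b string) bool {
	ar, br := []rune(a), []rune(b)
	for i := 0; i < len(ar) && i < len(br); i++ {
		if ar[i] == br[i] {
			continue
		}
		al := unicode.IsLetter(ar[i])
		bl := unicode.IsLetter(br[i])
		if al && bl {
			return ar[i] < br[i]
		}
		if al || bl {
			return bl
		}
		var ai, bi int
		var an, bn int64
		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
			an = an*10 + int64(ar[ai]-'0')
		}
		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
			bn = bn*10 + int64(br[bi]-'0')
		}
		if an != bn {
			return an < bn
		}
		if ai != bi {
			return ai < bi
		}
		return ar[i] < br[i]
	}
	return len(ar) < len(br)
}

func main() {
	keys := []string{"doc10", "doc2", "doc1"}
	sort.Slice(keys, func(i, j int) bool { return lessNatural(keys[i], keys[j]) })
	fmt.Println(keys) // [doc1 doc2 doc10]
}
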
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/suite_test.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/suite_test.go
-deleted file mode 100644
-index c5cf1ed..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/suite_test.go
-+++ /dev/null
-@@ -1,12 +0,0 @@
--package yaml_test
--
--import (
--	. "gopkg.in/check.v1"
--	"testing"
--)
--
--func Test(t *testing.T) { TestingT(t) }
--
--type S struct{}
--
--var _ = Suite(&S{})
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/writerc.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/writerc.go
-deleted file mode 100644
-index 190362f..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/writerc.go
-+++ /dev/null
-@@ -1,89 +0,0 @@
--package yaml
--
--// Set the writer error and return false.
--func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
--	emitter.error = yaml_WRITER_ERROR
--	emitter.problem = problem
--	return false
--}
--
--// Flush the output buffer.
--func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
--	if emitter.write_handler == nil {
--		panic("write handler not set")
--	}
--
--	// Check if the buffer is empty.
--	if emitter.buffer_pos == 0 {
--		return true
--	}
--
--	// If the output encoding is UTF-8, we don't need to recode the buffer.
--	if emitter.encoding == yaml_UTF8_ENCODING {
--		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
--			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
--		}
--		emitter.buffer_pos = 0
--		return true
--	}
--
--	// Recode the buffer into the raw buffer.
--	var low, high int
--	if emitter.encoding == yaml_UTF16LE_ENCODING {
--		low, high = 0, 1
--	} else {
--		high, low = 1, 0
--	}
--
--	pos := 0
--	for pos < emitter.buffer_pos {
--		// See the "reader.c" code for more details on UTF-8 encoding.  Note
--		// that we assume that the buffer contains a valid UTF-8 sequence.
--
--		// Read the next UTF-8 character.
--		octet := emitter.buffer[pos]
--
--		var w int
--		var value rune
--		switch {
--		case octet&0x80 == 0x00:
--			w, value = 1, rune(octet&0x7F)
--		case octet&0xE0 == 0xC0:
--			w, value = 2, rune(octet&0x1F)
--		case octet&0xF0 == 0xE0:
--			w, value = 3, rune(octet&0x0F)
--		case octet&0xF8 == 0xF0:
--			w, value = 4, rune(octet&0x07)
--		}
--		for k := 1; k < w; k++ {
--			octet = emitter.buffer[pos+k]
--			value = (value << 6) + (rune(octet) & 0x3F)
--		}
--		pos += w
--
--		// Write the character.
--		if value < 0x10000 {
--			var b [2]byte
--			b[high] = byte(value >> 8)
--			b[low] = byte(value & 0xFF)
--			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
--		} else {
--			// Write the character using a surrogate pair (check "reader.c").
--			var b [4]byte
--			value -= 0x10000
--			b[high] = byte(0xD8 + (value >> 18))
--			b[low] = byte((value >> 10) & 0xFF)
--			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
--			b[low+2] = byte(value & 0xFF)
--			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
--		}
--	}
--
--	// Write the raw buffer.
--	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
--		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
--	}
--	emitter.buffer_pos = 0
--	emitter.raw_buffer = emitter.raw_buffer[:0]
--	return true
--}
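
For reference, a small cross-check: the standard library's unicode/utf16 computes the surrogate pairs that the hand-rolled byte math above is aiming for, which can be handy when sanity-checking UTF-16 output:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	r := rune(0x1F600) // above the BMP, so it needs a surrogate pair
	hi, lo := utf16.EncodeRune(r)
	fmt.Printf("U+%04X -> high surrogate %04X, low surrogate %04X\n", r, hi, lo)

	// Byte order for the yaml_UTF16LE_ENCODING case (low byte first).
	le := []byte{byte(hi), byte(hi >> 8), byte(lo), byte(lo >> 8)}
	fmt.Printf("UTF-16LE bytes: % X\n", le)
}
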
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/yaml.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/yaml.go
-deleted file mode 100644
-index 5d1b86c..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/yaml.go
-+++ /dev/null
-@@ -1,334 +0,0 @@
--// Package yaml implements YAML support for the Go language.
--//
--// Source code and other details for the project are available at GitHub:
--//
--//   https://github.com/go-yaml/yaml
--//
--package yaml
--
--import (
--	"errors"
--	"fmt"
--	"reflect"
--	"strings"
--	"sync"
--)
--
--// MapSlice encodes and decodes as a YAML map.
--// The order of keys is preserved when encoding and decoding.
--type MapSlice []MapItem
--
--// MapItem is an item in a MapSlice.
--type MapItem struct {
--	Key, Value interface{}
--}
--
--// The Unmarshaler interface may be implemented by types to customize their
--// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
--// method receives a function that may be called to unmarshal the original
--// YAML value into a field or variable. It is safe to call the unmarshal
--// function parameter more than once if necessary.
--type Unmarshaler interface {
--	UnmarshalYAML(unmarshal func(interface{}) error) error
--}
--
--// The Marshaler interface may be implemented by types to customize their
--// behavior when being marshaled into a YAML document. The returned value
--// is marshaled in place of the original value implementing Marshaler.
--//
--// If an error is returned by MarshalYAML, the marshaling procedure stops
--// and returns with the provided error.
--type Marshaler interface {
--	MarshalYAML() (interface{}, error)
--}
--
--// Unmarshal decodes the first document found within the in byte slice
--// and assigns decoded values into the out value.
--//
--// Maps and pointers (to a struct, string, int, etc) are accepted as out
--// values. If an internal pointer within a struct is not initialized,
--// the yaml package will initialize it if necessary for unmarshalling
--// the provided data. The out parameter must not be nil.
--//
--// The type of the decoded values should be compatible with the respective
--// values in out. If one or more values cannot be decoded due to type
--// mismatches, decoding continues partially until the end of the YAML
--// content, and a *yaml.TypeError is returned with details for all
--// missed values.
--//
--// Struct fields are only unmarshalled if they are exported (have an
--// upper case first letter), and are unmarshalled using the field name
--// lowercased as the default key. Custom keys may be defined via the
--// "yaml" name in the field tag: the content preceding the first comma
--// is used as the key, and the following comma-separated options are
--// used to tweak the marshalling process (see Marshal).
--// Conflicting names result in a runtime error.
--//
--// For example:
--//
--//     type T struct {
--//         F int `yaml:"a,omitempty"`
--//         B int
--//     }
--//     var t T
--//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
--//
--// See the documentation of Marshal for the format of tags and a list of
--// supported tag options.
--//
--func Unmarshal(in []byte, out interface{}) (err error) {
--	defer handleErr(&err)
--	d := newDecoder()
--	p := newParser(in)
--	defer p.destroy()
--	node := p.parse()
--	if node != nil {
--		v := reflect.ValueOf(out)
--		if v.Kind() == reflect.Ptr && !v.IsNil() {
--			v = v.Elem()
--		}
--		d.unmarshal(node, v)
--	}
--	if d.terrors != nil {
--		return &TypeError{d.terrors}
--	}
--	return nil
--}
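
A minimal usage sketch of the Unmarshal behaviour documented above, assuming the published gopkg.in/yaml.v2 import path rather than this vendored Godeps copy; on a type mismatch the error should be a *yaml.TypeError while the remaining fields still decode:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Name     string `yaml:"name"`
	Replicas int    `yaml:"replicas"`
}

func main() {
	data := []byte("name: etcd\nreplicas: not-a-number\n")

	var cfg Config
	err := yaml.Unmarshal(data, &cfg)
	if terr, ok := err.(*yaml.TypeError); ok {
		// Decoding continued past the mismatch, so Name is already set.
		fmt.Println("partial decode:", cfg.Name, terr.Errors)
	} else if err != nil {
		fmt.Println("fatal:", err)
	}
}
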
--
--// Marshal serializes the value provided into a YAML document. The structure
--// of the generated document will reflect the structure of the value itself.
--// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
--//
--// Struct fields are only marshalled if they are exported (have an upper case
--// first letter), and are marshalled using the field name lowercased as the
--// default key. Custom keys may be defined via the "yaml" name in the field
--// tag: the content preceding the first comma is used as the key, and the
--// following comma-separated options are used to tweak the marshalling process.
--// Conflicting names result in a runtime error.
--//
--// The field tag format accepted is:
--//
--//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
--//
--// The following flags are currently supported:
--//
--//     omitempty    Only include the field if it's not set to the zero
--//                  value for the type or to empty slices or maps.
--//                  Does not apply to zero valued structs.
--//
--//     flow         Marshal using a flow style (useful for structs,
--//                  sequences and maps).
--//
--//     inline       Inline the struct it's applied to, so its fields
--//                  are processed as if they were part of the outer
--//                  struct.
--//
--// In addition, if the key is "-", the field is ignored.
--//
--// For example:
--//
--//     type T struct {
--//         F int "a,omitempty"
--//         B int
--//     }
--//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
--//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
--//
--func Marshal(in interface{}) (out []byte, err error) {
--	defer handleErr(&err)
--	e := newEncoder()
--	defer e.destroy()
--	e.marshal("", reflect.ValueOf(in))
--	e.finish()
--	out = e.out
--	return
--}
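
A companion sketch for Marshal and the "flow"/"omitempty" tag options documented above, again assuming the gopkg.in/yaml.v2 import path; the expected output shape is noted in the comments:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Point struct {
	X int `yaml:"x"`
	Y int `yaml:"y"`
}

type Shape struct {
	Name   string `yaml:"name"`
	Center Point  `yaml:"center,flow"`
	Note   string `yaml:"note,omitempty"` // dropped while empty
}

func main() {
	out, err := yaml.Marshal(&Shape{Name: "circle", Center: Point{X: 1, Y: 2}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape of the output (flow style for center, note omitted):
	//   name: circle
	//   center: {x: 1, y: 2}
}
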
--
--func handleErr(err *error) {
--	if v := recover(); v != nil {
--		if e, ok := v.(yamlError); ok {
--			*err = e.err
--		} else {
--			panic(v)
--		}
--	}
--}
--
--type yamlError struct {
--	err error
--}
--
--func fail(err error) {
--	panic(yamlError{err})
--}
--
--func failf(format string, args ...interface{}) {
--	panic(yamlError{fmt.Errorf("yaml: " + format, args...)})
--}
--
--// A TypeError is returned by Unmarshal when one or more fields in
--// the YAML document cannot be properly decoded into the requested
--// types. When this error is returned, the value is still
--// unmarshaled partially.
--type TypeError struct {
--	Errors []string
--}
--
--func (e *TypeError) Error() string {
--	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
--}
--
--// --------------------------------------------------------------------------
--// Maintain a mapping of keys to structure field indexes
--
--// The code in this section was copied from mgo/bson.
--
--// structInfo holds details for the serialization of fields of
--// a given struct.
--type structInfo struct {
--	FieldsMap  map[string]fieldInfo
--	FieldsList []fieldInfo
--
--	// InlineMap is the number of the field in the struct that
--	// contains an ,inline map, or -1 if there's none.
--	InlineMap int
--}
--
--type fieldInfo struct {
--	Key       string
--	Num       int
--	OmitEmpty bool
--	Flow      bool
--
--	// Inline holds the field index if the field is part of an inlined struct.
--	Inline []int
--}
--
--var structMap = make(map[reflect.Type]*structInfo)
--var fieldMapMutex sync.RWMutex
--
--func getStructInfo(st reflect.Type) (*structInfo, error) {
--	fieldMapMutex.RLock()
--	sinfo, found := structMap[st]
--	fieldMapMutex.RUnlock()
--	if found {
--		return sinfo, nil
--	}
--
--	n := st.NumField()
--	fieldsMap := make(map[string]fieldInfo)
--	fieldsList := make([]fieldInfo, 0, n)
--	inlineMap := -1
--	for i := 0; i != n; i++ {
--		field := st.Field(i)
--		if field.PkgPath != "" {
--			continue // Private field
--		}
--
--		info := fieldInfo{Num: i}
--
--		tag := field.Tag.Get("yaml")
--		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
--			tag = string(field.Tag)
--		}
--		if tag == "-" {
--			continue
--		}
--
--		inline := false
--		fields := strings.Split(tag, ",")
--		if len(fields) > 1 {
--			for _, flag := range fields[1:] {
--				switch flag {
--				case "omitempty":
--					info.OmitEmpty = true
--				case "flow":
--					info.Flow = true
--				case "inline":
--					inline = true
--				default:
--					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
--				}
--			}
--			tag = fields[0]
--		}
--
--		if inline {
--			switch field.Type.Kind() {
--			// TODO: Implement support for inline maps.
--			//case reflect.Map:
--			//	if inlineMap >= 0 {
--			//		return nil, errors.New("Multiple ,inline maps in struct " + st.String())
--			//	}
--			//	if field.Type.Key() != reflect.TypeOf("") {
--			//		return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
--			//	}
--			//	inlineMap = info.Num
--			case reflect.Struct:
--				sinfo, err := getStructInfo(field.Type)
--				if err != nil {
--					return nil, err
--				}
--				for _, finfo := range sinfo.FieldsList {
--					if _, found := fieldsMap[finfo.Key]; found {
--						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
--						return nil, errors.New(msg)
--					}
--					if finfo.Inline == nil {
--						finfo.Inline = []int{i, finfo.Num}
--					} else {
--						finfo.Inline = append([]int{i}, finfo.Inline...)
--					}
--					fieldsMap[finfo.Key] = finfo
--					fieldsList = append(fieldsList, finfo)
--				}
--			default:
--				//return nil, errors.New("Option ,inline needs a struct value or map field")
--				return nil, errors.New("Option ,inline needs a struct value field")
--			}
--			continue
--		}
--
--		if tag != "" {
--			info.Key = tag
--		} else {
--			info.Key = strings.ToLower(field.Name)
--		}
--
--		if _, found = fieldsMap[info.Key]; found {
--			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
--			return nil, errors.New(msg)
--		}
--
--		fieldsList = append(fieldsList, info)
--		fieldsMap[info.Key] = info
--	}
--
--	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
--
--	fieldMapMutex.Lock()
--	structMap[st] = sinfo
--	fieldMapMutex.Unlock()
--	return sinfo, nil
--}
--
--func isZero(v reflect.Value) bool {
--	switch v.Kind() {
--	case reflect.String:
--		return len(v.String()) == 0
--	case reflect.Interface, reflect.Ptr:
--		return v.IsNil()
--	case reflect.Slice:
--		return v.Len() == 0
--	case reflect.Map:
--		return v.Len() == 0
--	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
--		return v.Int() == 0
--	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
--		return v.Uint() == 0
--	case reflect.Bool:
--		return !v.Bool()
--	}
--	return false
--}
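
A minimal sketch of the ",inline" option handled by getStructInfo above, assuming the gopkg.in/yaml.v2 import path; the inlined struct's fields should appear at the top level of the mapping rather than nested under their own key:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Meta struct {
	Name      string `yaml:"name"`
	Namespace string `yaml:"namespace"`
}

type Object struct {
	Kind string `yaml:"kind"`
	Meta Meta   `yaml:",inline"`
}

func main() {
	out, err := yaml.Marshal(&Object{Kind: "Pod", Meta: Meta{Name: "web", Namespace: "default"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected: kind, name and namespace all at the top level.
}
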
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlh.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlh.go
-deleted file mode 100644
-index 4b020b1..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlh.go
-+++ /dev/null
-@@ -1,716 +0,0 @@
--package yaml
--
--import (
--	"io"
--)
--
--// The version directive data.
--type yaml_version_directive_t struct {
--	major int8 // The major version number.
--	minor int8 // The minor version number.
--}
--
--// The tag directive data.
--type yaml_tag_directive_t struct {
--	handle []byte // The tag handle.
--	prefix []byte // The tag prefix.
--}
--
--type yaml_encoding_t int
--
--// The stream encoding.
--const (
--	// Let the parser choose the encoding.
--	yaml_ANY_ENCODING yaml_encoding_t = iota
--
--	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
--	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
--	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
--)
--
--type yaml_break_t int
--
--// Line break types.
--const (
--	// Let the parser choose the break type.
--	yaml_ANY_BREAK yaml_break_t = iota
--
--	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
--	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
--	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
--)
--
--type yaml_error_type_t int
--
--// Many bad things could happen with the parser and emitter.
--const (
--	// No error is produced.
--	yaml_NO_ERROR yaml_error_type_t = iota
--
--	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
--	yaml_READER_ERROR   // Cannot read or decode the input stream.
--	yaml_SCANNER_ERROR  // Cannot scan the input stream.
--	yaml_PARSER_ERROR   // Cannot parse the input stream.
--	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
--	yaml_WRITER_ERROR   // Cannot write to the output stream.
--	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
--)
--
--// The pointer position.
--type yaml_mark_t struct {
--	index  int // The position index.
--	line   int // The position line.
--	column int // The position column.
--}
--
--// Node Styles
--
--type yaml_style_t int8
--
--type yaml_scalar_style_t yaml_style_t
--
--// Scalar styles.
--const (
--	// Let the emitter choose the style.
--	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
--
--	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
--	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
--	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
--	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
--	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
--)
--
--type yaml_sequence_style_t yaml_style_t
--
--// Sequence styles.
--const (
--	// Let the emitter choose the style.
--	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
--
--	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
--	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
--)
--
--type yaml_mapping_style_t yaml_style_t
--
--// Mapping styles.
--const (
--	// Let the emitter choose the style.
--	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
--
--	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
--	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
--)
--
--// Tokens
--
--type yaml_token_type_t int
--
--// Token types.
--const (
--	// An empty token.
--	yaml_NO_TOKEN yaml_token_type_t = iota
--
--	yaml_STREAM_START_TOKEN // A STREAM-START token.
--	yaml_STREAM_END_TOKEN   // A STREAM-END token.
--
--	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
--	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
--	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
--	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
--
--	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
--	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
--	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
--
--	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
--	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
--	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
--	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
--
--	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
--	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
--	yaml_KEY_TOKEN         // A KEY token.
--	yaml_VALUE_TOKEN       // A VALUE token.
--
--	yaml_ALIAS_TOKEN  // An ALIAS token.
--	yaml_ANCHOR_TOKEN // An ANCHOR token.
--	yaml_TAG_TOKEN    // A TAG token.
--	yaml_SCALAR_TOKEN // A SCALAR token.
--)
--
--func (tt yaml_token_type_t) String() string {
--	switch tt {
--	case yaml_NO_TOKEN:
--		return "yaml_NO_TOKEN"
--	case yaml_STREAM_START_TOKEN:
--		return "yaml_STREAM_START_TOKEN"
--	case yaml_STREAM_END_TOKEN:
--		return "yaml_STREAM_END_TOKEN"
--	case yaml_VERSION_DIRECTIVE_TOKEN:
--		return "yaml_VERSION_DIRECTIVE_TOKEN"
--	case yaml_TAG_DIRECTIVE_TOKEN:
--		return "yaml_TAG_DIRECTIVE_TOKEN"
--	case yaml_DOCUMENT_START_TOKEN:
--		return "yaml_DOCUMENT_START_TOKEN"
--	case yaml_DOCUMENT_END_TOKEN:
--		return "yaml_DOCUMENT_END_TOKEN"
--	case yaml_BLOCK_SEQUENCE_START_TOKEN:
--		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
--	case yaml_BLOCK_MAPPING_START_TOKEN:
--		return "yaml_BLOCK_MAPPING_START_TOKEN"
--	case yaml_BLOCK_END_TOKEN:
--		return "yaml_BLOCK_END_TOKEN"
--	case yaml_FLOW_SEQUENCE_START_TOKEN:
--		return "yaml_FLOW_SEQUENCE_START_TOKEN"
--	case yaml_FLOW_SEQUENCE_END_TOKEN:
--		return "yaml_FLOW_SEQUENCE_END_TOKEN"
--	case yaml_FLOW_MAPPING_START_TOKEN:
--		return "yaml_FLOW_MAPPING_START_TOKEN"
--	case yaml_FLOW_MAPPING_END_TOKEN:
--		return "yaml_FLOW_MAPPING_END_TOKEN"
--	case yaml_BLOCK_ENTRY_TOKEN:
--		return "yaml_BLOCK_ENTRY_TOKEN"
--	case yaml_FLOW_ENTRY_TOKEN:
--		return "yaml_FLOW_ENTRY_TOKEN"
--	case yaml_KEY_TOKEN:
--		return "yaml_KEY_TOKEN"
--	case yaml_VALUE_TOKEN:
--		return "yaml_VALUE_TOKEN"
--	case yaml_ALIAS_TOKEN:
--		return "yaml_ALIAS_TOKEN"
--	case yaml_ANCHOR_TOKEN:
--		return "yaml_ANCHOR_TOKEN"
--	case yaml_TAG_TOKEN:
--		return "yaml_TAG_TOKEN"
--	case yaml_SCALAR_TOKEN:
--		return "yaml_SCALAR_TOKEN"
--	}
--	return "<unknown token>"
--}
--
--// The token structure.
--type yaml_token_t struct {
--	// The token type.
--	typ yaml_token_type_t
--
--	// The start/end of the token.
--	start_mark, end_mark yaml_mark_t
--
--	// The stream encoding (for yaml_STREAM_START_TOKEN).
--	encoding yaml_encoding_t
--
--	// The alias/anchor/scalar value or tag/tag directive handle
--	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
--	value []byte
--
--	// The tag suffix (for yaml_TAG_TOKEN).
--	suffix []byte
--
--	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
--	prefix []byte
--
--	// The scalar style (for yaml_SCALAR_TOKEN).
--	style yaml_scalar_style_t
--
--	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
--	major, minor int8
--}
--
--// Events
--
--type yaml_event_type_t int8
--
--// Event types.
--const (
--	// An empty event.
--	yaml_NO_EVENT yaml_event_type_t = iota
--
--	yaml_STREAM_START_EVENT   // A STREAM-START event.
--	yaml_STREAM_END_EVENT     // A STREAM-END event.
--	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
--	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
--	yaml_ALIAS_EVENT          // An ALIAS event.
--	yaml_SCALAR_EVENT         // A SCALAR event.
--	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
--	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
--	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
--	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
--)
--
--// The event structure.
--type yaml_event_t struct {
--
--	// The event type.
--	typ yaml_event_type_t
--
--	// The start and end of the event.
--	start_mark, end_mark yaml_mark_t
--
--	// The document encoding (for yaml_STREAM_START_EVENT).
--	encoding yaml_encoding_t
--
--	// The version directive (for yaml_DOCUMENT_START_EVENT).
--	version_directive *yaml_version_directive_t
--
--	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
--	tag_directives []yaml_tag_directive_t
--
--	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
--	anchor []byte
--
--	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
--	tag []byte
--
--	// The scalar value (for yaml_SCALAR_EVENT).
--	value []byte
--
--	// Is the document start/end indicator implicit, or the tag optional?
--	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
--	implicit bool
--
--	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
--	quoted_implicit bool
--
--	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
--	style yaml_style_t
--}
--
--func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
--func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
--func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
--
--// Nodes
--
--const (
--	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
--	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
--	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
--	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
--	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
--	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
--
--	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
--	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
--
--	// Not in original libyaml.
--	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
--	yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
--
--	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
--	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
--	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
--)
--
--type yaml_node_type_t int
--
--// Node types.
--const (
--	// An empty node.
--	yaml_NO_NODE yaml_node_type_t = iota
--
--	yaml_SCALAR_NODE   // A scalar node.
--	yaml_SEQUENCE_NODE // A sequence node.
--	yaml_MAPPING_NODE  // A mapping node.
--)
--
--// An element of a sequence node.
--type yaml_node_item_t int
--
--// An element of a mapping node.
--type yaml_node_pair_t struct {
--	key   int // The key of the element.
--	value int // The value of the element.
--}
--
--// The node structure.
--type yaml_node_t struct {
--	typ yaml_node_type_t // The node type.
--	tag []byte           // The node tag.
--
--	// The node data.
--
--	// The scalar parameters (for yaml_SCALAR_NODE).
--	scalar struct {
--		value  []byte              // The scalar value.
--		length int                 // The length of the scalar value.
--		style  yaml_scalar_style_t // The scalar style.
--	}
--
--	// The sequence parameters (for yaml_SEQUENCE_NODE).
--	sequence struct {
--		items_data []yaml_node_item_t    // The stack of sequence items.
--		style      yaml_sequence_style_t // The sequence style.
--	}
--
--	// The mapping parameters (for yaml_MAPPING_NODE).
--	mapping struct {
--		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
--		pairs_start *yaml_node_pair_t    // The beginning of the stack.
--		pairs_end   *yaml_node_pair_t    // The end of the stack.
--		pairs_top   *yaml_node_pair_t    // The top of the stack.
--		style       yaml_mapping_style_t // The mapping style.
--	}
--
--	start_mark yaml_mark_t // The beginning of the node.
--	end_mark   yaml_mark_t // The end of the node.
--
--}
--
--// The document structure.
--type yaml_document_t struct {
--
--	// The document nodes.
--	nodes []yaml_node_t
--
--	// The version directive.
--	version_directive *yaml_version_directive_t
--
--	// The list of tag directives.
--	tag_directives_data  []yaml_tag_directive_t
--	tag_directives_start int // The beginning of the tag directives list.
--	tag_directives_end   int // The end of the tag directives list.
--
--	start_implicit int // Is the document start indicator implicit?
--	end_implicit   int // Is the document end indicator implicit?
--
--	// The start/end of the document.
--	start_mark, end_mark yaml_mark_t
--}
--
--// The prototype of a read handler.
--//
--// The read handler is called when the parser needs to read more bytes from the
--// source. The handler should write not more than size bytes to the buffer.
--// The number of written bytes should be set to the size_read variable.
--//
--// [in,out]   data        A pointer to an application data specified by
--//                        yaml_parser_set_input().
--// [out]      buffer      The buffer to write the data from the source.
--// [in]       size        The size of the buffer.
--// [out]      size_read   The actual number of bytes read from the source.
--//
--// On success, the handler should return 1.  If the handler failed,
--// the returned value should be 0. On EOF, the handler should set the
--// size_read to 0 and return 1.
--type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
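
For reference, a loose sketch of the read-handler contract described above, adapted to a plain io.Reader since yaml_parser_t is internal to this vendored package; fill is an illustrative stand-in, not an API of the library:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// fill copies up to len(buffer) bytes from src, the way a read handler feeds
// the parser's working buffer; n reports how much was actually written.
func fill(src io.Reader, buffer []byte) (n int, err error) {
	return src.Read(buffer)
}

func main() {
	src := bytes.NewBufferString("key: value\n")
	buf := make([]byte, 4)
	for {
		n, err := fill(src, buf)
		if n > 0 {
			fmt.Printf("fed %d bytes: %q\n", n, buf[:n])
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}
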
--
--// This structure holds information about a potential simple key.
--type yaml_simple_key_t struct {
--	possible     bool        // Is a simple key possible?
--	required     bool        // Is a simple key required?
--	token_number int         // The number of the token.
--	mark         yaml_mark_t // The position mark.
--}
--
--// The states of the parser.
--type yaml_parser_state_t int
--
--const (
--	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
--
--	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
--	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
--	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
--	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
--	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
--	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
--	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
--	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
--	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
--	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
--	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
--	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
--	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
--	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
--	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
--	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
--	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
--	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
--	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
--	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
--	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
--	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
--	yaml_PARSE_END_STATE                               // Expect nothing.
--)
--
--func (ps yaml_parser_state_t) String() string {
--	switch ps {
--	case yaml_PARSE_STREAM_START_STATE:
--		return "yaml_PARSE_STREAM_START_STATE"
--	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
--		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
--	case yaml_PARSE_DOCUMENT_START_STATE:
--		return "yaml_PARSE_DOCUMENT_START_STATE"
--	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
--		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
--	case yaml_PARSE_DOCUMENT_END_STATE:
--		return "yaml_PARSE_DOCUMENT_END_STATE"
--	case yaml_PARSE_BLOCK_NODE_STATE:
--		return "yaml_PARSE_BLOCK_NODE_STATE"
--	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
--		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
--	case yaml_PARSE_FLOW_NODE_STATE:
--		return "yaml_PARSE_FLOW_NODE_STATE"
--	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
--		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
--	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
--		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
--	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
--		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
--	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
--		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
--	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
--		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
--	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
--		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
--	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
--		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
--		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
--		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
--		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
--	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
--		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
--	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
--		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
--	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
--		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
--	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
--		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
--	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
--		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
--	case yaml_PARSE_END_STATE:
--		return "yaml_PARSE_END_STATE"
--	}
--	return "<unknown parser state>"
--}
--
--// This structure holds aliases data.
--type yaml_alias_data_t struct {
--	anchor []byte      // The anchor.
--	index  int         // The node id.
--	mark   yaml_mark_t // The anchor mark.
--}
--
--// The parser structure.
--//
--// All members are internal. Manage the structure using the
--// yaml_parser_ family of functions.
--type yaml_parser_t struct {
--
--	// Error handling
--
--	error yaml_error_type_t // Error type.
--
--	problem string // Error description.
--
--	// The byte about which the problem occurred.
--	problem_offset int
--	problem_value  int
--	problem_mark   yaml_mark_t
--
--	// The error context.
--	context      string
--	context_mark yaml_mark_t
--
--	// Reader stuff
--
--	read_handler yaml_read_handler_t // Read handler.
--
--	input_file io.Reader // File input data.
--	input      []byte    // String input data.
--	input_pos  int
--
--	eof bool // EOF flag
--
--	buffer     []byte // The working buffer.
--	buffer_pos int    // The current position of the buffer.
--
--	unread int // The number of unread characters in the buffer.
--
--	raw_buffer     []byte // The raw buffer.
--	raw_buffer_pos int    // The current position of the buffer.
--
--	encoding yaml_encoding_t // The input encoding.
--
--	offset int         // The offset of the current position (in bytes).
--	mark   yaml_mark_t // The mark of the current position.
--
--	// Scanner stuff
--
--	stream_start_produced bool // Have we started to scan the input stream?
--	stream_end_produced   bool // Have we reached the end of the input stream?
--
--	flow_level int // The number of unclosed '[' and '{' indicators.
--
--	tokens          []yaml_token_t // The tokens queue.
--	tokens_head     int            // The head of the tokens queue.
--	tokens_parsed   int            // The number of tokens fetched from the queue.
--	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
--
--	indent  int   // The current indentation level.
--	indents []int // The indentation levels stack.
--
--	simple_key_allowed bool                // May a simple key occur at the current position?
--	simple_keys        []yaml_simple_key_t // The stack of simple keys.
--
--	// Parser stuff
--
--	state          yaml_parser_state_t    // The current parser state.
--	states         []yaml_parser_state_t  // The parser states stack.
--	marks          []yaml_mark_t          // The stack of marks.
--	tag_directives []yaml_tag_directive_t // The list of TAG directives.
--
--	// Dumper stuff
--
--	aliases []yaml_alias_data_t // The alias data.
--
--	document *yaml_document_t // The currently parsed document.
--}
--
--// Emitter Definitions
--
--// The prototype of a write handler.
--//
--// The write handler is called when the emitter needs to flush the accumulated
--// characters to the output.  The handler should write @a size bytes of the
--// @a buffer to the output.
--//
--// @param[in,out]   data        A pointer to an application data specified by
--//                              yaml_emitter_set_output().
--// @param[in]       buffer      The buffer with bytes to be written.
--// @param[in]       size        The size of the buffer.
--//
--// @returns On success, the handler should return @c 1.  If the handler failed,
--// the returned value should be @c 0.
--//
--type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
--
--type yaml_emitter_state_t int
--
--// The emitter states.
--const (
--	// Expect STREAM-START.
--	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
--
--	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
--	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
--	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
--	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
--	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
--	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
--	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
--	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
--	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
--	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
--	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
--	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
--	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
--	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
--	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
--	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
--	yaml_EMIT_END_STATE                        // Expect nothing.
--)
--
--// The emitter structure.
--//
--// All members are internal.  Manage the structure using the @c yaml_emitter_
--// family of functions.
--type yaml_emitter_t struct {
--
--	// Error handling
--
--	error   yaml_error_type_t // Error type.
--	problem string            // Error description.
--
--	// Writer stuff
--
--	write_handler yaml_write_handler_t // Write handler.
--
--	output_buffer *[]byte   // String output data.
--	output_file   io.Writer // File output data.
--
--	buffer     []byte // The working buffer.
--	buffer_pos int    // The current position of the buffer.
--
--	raw_buffer     []byte // The raw buffer.
--	raw_buffer_pos int    // The current position of the buffer.
--
--	encoding yaml_encoding_t // The stream encoding.
--
--	// Emitter stuff
--
--	canonical   bool         // Is the output in the canonical style?
--	best_indent int          // The number of indentation spaces.
--	best_width  int          // The preferred width of the output lines.
--	unicode     bool         // Allow unescaped non-ASCII characters?
--	line_break  yaml_break_t // The preferred line break.
--
--	state  yaml_emitter_state_t   // The current emitter state.
--	states []yaml_emitter_state_t // The stack of states.
--
--	events      []yaml_event_t // The event queue.
--	events_head int            // The head of the event queue.
--
--	indents []int // The stack of indentation levels.
--
--	tag_directives []yaml_tag_directive_t // The list of tag directives.
--
--	indent int // The current indentation level.
--
--	flow_level int // The current flow level.
--
--	root_context       bool // Is it the document root context?
--	sequence_context   bool // Is it a sequence context?
--	mapping_context    bool // Is it a mapping context?
--	simple_key_context bool // Is it a simple mapping key context?
--
--	line       int  // The current line.
--	column     int  // The current column.
--	whitespace bool // Was the last character a whitespace?
--	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
--	open_ended bool // Is an explicit document end required?
--
--	// Anchor analysis.
--	anchor_data struct {
--		anchor []byte // The anchor value.
--		alias  bool   // Is it an alias?
--	}
--
--	// Tag analysis.
--	tag_data struct {
--		handle []byte // The tag handle.
--		suffix []byte // The tag suffix.
--	}
--
--	// Scalar analysis.
--	scalar_data struct {
--		value                 []byte              // The scalar value.
--		multiline             bool                // Does the scalar contain line breaks?
--		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
--		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
--		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
--		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
--		style                 yaml_scalar_style_t // The output style.
--	}
--
--	// Dumper stuff
--
--	opened bool // Was the stream already opened?
--	closed bool // Was the stream already closed?
--
--	// The information associated with the document nodes.
--	anchors *struct {
--		references int  // The number of references.
--		anchor     int  // The anchor id.
--		serialized bool // Has the node been emitted?
--	}
--
--	last_anchor_id int // The last assigned anchor id.
--
--	document *yaml_document_t // The currently emitted document.
--}
-diff --git a/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlprivateh.go
-deleted file mode 100644
-index 8110ce3..0000000
---- a/Godeps/_workspace/src/gopkg.in/v2/yaml/yamlprivateh.go
-+++ /dev/null
-@@ -1,173 +0,0 @@
--package yaml
--
--const (
--	// The size of the input raw buffer.
--	input_raw_buffer_size = 512
--
--	// The size of the input buffer.
--	// It should be possible to decode the whole raw buffer.
--	input_buffer_size = input_raw_buffer_size * 3
--
--	// The size of the output buffer.
--	output_buffer_size = 128
--
--	// The size of the output raw buffer.
--	// It should be possible to encode the whole output buffer.
--	output_raw_buffer_size = (output_buffer_size*2 + 2)
--
--	// The size of other stacks and queues.
--	initial_stack_size  = 16
--	initial_queue_size  = 16
--	initial_string_size = 16
--)
--
--// Check if the character at the specified position is an alphabetical
--// character, a digit, '_', or '-'.
--func is_alpha(b []byte, i int) bool {
--	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
--}
--
--// Check if the character at the specified position is a digit.
--func is_digit(b []byte, i int) bool {
--	return b[i] >= '0' && b[i] <= '9'
--}
--
--// Get the value of a digit.
--func as_digit(b []byte, i int) int {
--	return int(b[i]) - '0'
--}
--
--// Check if the character at the specified position is a hex-digit.
--func is_hex(b []byte, i int) bool {
--	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
--}
--
--// Get the value of a hex-digit.
--func as_hex(b []byte, i int) int {
--	bi := b[i]
--	if bi >= 'A' && bi <= 'F' {
--		return int(bi) - 'A' + 10
--	}
--	if bi >= 'a' && bi <= 'f' {
--		return int(bi) - 'a' + 10
--	}
--	return int(bi) - '0'
--}
--
--// Check if the character is ASCII.
--func is_ascii(b []byte, i int) bool {
--	return b[i] <= 0x7F
--}
--
--// Check if the character at the start of the buffer can be printed unescaped.
--func is_printable(b []byte, i int) bool {
--	return ((b[i] == 0x0A) || // . == #x0A
--		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
--		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
--		(b[i] > 0xC2 && b[i] < 0xED) ||
--		(b[i] == 0xED && b[i+1] < 0xA0) ||
--		(b[i] == 0xEE) ||
--		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
--			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
--			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
--}
--
--// Check if the character at the specified position is NUL.
--func is_z(b []byte, i int) bool {
--	return b[i] == 0x00
--}
--
--// Check if the beginning of the buffer is a BOM.
--func is_bom(b []byte, i int) bool {
--	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
--}
--
--// Check if the character at the specified position is space.
--func is_space(b []byte, i int) bool {
--	return b[i] == ' '
--}
--
--// Check if the character at the specified position is tab.
--func is_tab(b []byte, i int) bool {
--	return b[i] == '\t'
--}
--
--// Check if the character at the specified position is blank (space or tab).
--func is_blank(b []byte, i int) bool {
--	//return is_space(b, i) || is_tab(b, i)
--	return b[i] == ' ' || b[i] == '\t'
--}
--
--// Check if the character at the specified position is a line break.
--func is_break(b []byte, i int) bool {
--	return (b[i] == '\r' || // CR (#xD)
--		b[i] == '\n' || // LF (#xA)
--		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
--}
--
--func is_crlf(b []byte, i int) bool {
--	return b[i] == '\r' && b[i+1] == '\n'
--}
--
--// Check if the character is a line break or NUL.
--func is_breakz(b []byte, i int) bool {
--	//return is_break(b, i) || is_z(b, i)
--	return (        // is_break:
--	b[i] == '\r' || // CR (#xD)
--		b[i] == '\n' || // LF (#xA)
--		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
--		// is_z:
--		b[i] == 0)
--}
--
--// Check if the character is a line break, space, or NUL.
--func is_spacez(b []byte, i int) bool {
--	//return is_space(b, i) || is_breakz(b, i)
--	return ( // is_space:
--	b[i] == ' ' ||
--		// is_breakz:
--		b[i] == '\r' || // CR (#xD)
--		b[i] == '\n' || // LF (#xA)
--		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
--		b[i] == 0)
--}
--
--// Check if the character is a line break, space, tab, or NUL.
--func is_blankz(b []byte, i int) bool {
--	//return is_blank(b, i) || is_breakz(b, i)
--	return ( // is_blank:
--	b[i] == ' ' || b[i] == '\t' ||
--		// is_breakz:
--		b[i] == '\r' || // CR (#xD)
--		b[i] == '\n' || // LF (#xA)
--		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
--		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
--		b[i] == 0)
--}
--
--// Determine the width of the character.
--func width(b byte) int {
--	// Don't replace these by a switch without first
--	// confirming that it is being inlined.
--	if b&0x80 == 0x00 {
--		return 1
--	}
--	if b&0xE0 == 0xC0 {
--		return 2
--	}
--	if b&0xF0 == 0xE0 {
--		return 3
--	}
--	if b&0xF8 == 0xF0 {
--		return 4
--	}
--	return 0
--
--}
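
For reference, a small cross-check: the lead-byte width test in width() above should agree with unicode/utf8 for well-formed input (leadWidth is an illustrative copy of that helper):

package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth mirrors width() above: it classifies a UTF-8 lead byte by its high bits.
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, s := range []string{"a", "é", "€", "😀"} {
		b := []byte(s)
		_, n := utf8.DecodeRune(b)
		fmt.Printf("%q lead byte 0x%02X width=%d utf8 size=%d\n", s, b[0], leadWidth(b[0]), n)
	}
}
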
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/LICENSE b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/LICENSE
-deleted file mode 100644
-index efa1aa1..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/LICENSE
-+++ /dev/null
-@@ -1,57 +0,0 @@
--Copyright (c) 2012 Péter Surányi. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--
------------------------------------------------------------------------
--Portions of inf.Dec's source code have been derived from Go and are
--covered by the following license:
------------------------------------------------------------------------
--
--Copyright (c) 2009 The Go Authors. All rights reserved.
--
--Redistribution and use in source and binary forms, with or without
--modification, are permitted provided that the following conditions are
--met:
--
--   * Redistributions of source code must retain the above copyright
--notice, this list of conditions and the following disclaimer.
--   * Redistributions in binary form must reproduce the above
--copyright notice, this list of conditions and the following disclaimer
--in the documentation and/or other materials provided with the
--distribution.
--   * Neither the name of Google Inc. nor the names of its
--contributors may be used to endorse or promote products derived from
--this software without specific prior written permission.
--
--THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
--"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
--LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
--A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
--OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
--SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
--LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
--DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
--THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
--(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
--OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/benchmark_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/benchmark_test.go
-deleted file mode 100644
-index 27071da..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/benchmark_test.go
-+++ /dev/null
-@@ -1,210 +0,0 @@
--package inf
--
--import (
--	"fmt"
--	"math/big"
--	"math/rand"
--	"sync"
--	"testing"
--)
--
--const maxcap = 1024 * 1024
--const bits = 256
--const maxscale = 32
--
--var once sync.Once
--
--var decInput [][2]Dec
--var intInput [][2]big.Int
--
--var initBench = func() {
--	decInput = make([][2]Dec, maxcap)
--	intInput = make([][2]big.Int, maxcap)
--	max := new(big.Int).Lsh(big.NewInt(1), bits)
--	r := rand.New(rand.NewSource(0))
--	for i := 0; i < cap(decInput); i++ {
--		decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)).
--			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
--		decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)).
--			SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale)))
--	}
--	for i := 0; i < cap(intInput); i++ {
--		intInput[i][0].Rand(r, max)
--		intInput[i][1].Rand(r, max)
--	}
--}
--
--func doBenchmarkDec1(b *testing.B, f func(z *Dec)) {
--	once.Do(initBench)
--	b.ResetTimer()
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		f(&decInput[i%maxcap][0])
--	}
--}
--
--func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) {
--	once.Do(initBench)
--	b.ResetTimer()
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		f(&decInput[i%maxcap][0], &decInput[i%maxcap][1])
--	}
--}
--
--func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) {
--	once.Do(initBench)
--	b.ResetTimer()
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		f(&intInput[i%maxcap][0])
--	}
--}
--
--func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) {
--	once.Do(initBench)
--	b.ResetTimer()
--	b.StartTimer()
--	for i := 0; i < b.N; i++ {
--		f(&intInput[i%maxcap][0], &intInput[i%maxcap][1])
--	}
--}
--
--func Benchmark_Dec_String(b *testing.B) {
--	doBenchmarkDec1(b, func(x *Dec) {
--		x.String()
--	})
--}
--
--func Benchmark_Dec_StringScan(b *testing.B) {
--	doBenchmarkDec1(b, func(x *Dec) {
--		s := x.String()
--		d := new(Dec)
--		fmt.Sscan(s, d)
--	})
--}
--
--func Benchmark_Dec_GobEncode(b *testing.B) {
--	doBenchmarkDec1(b, func(x *Dec) {
--		x.GobEncode()
--	})
--}
--
--func Benchmark_Dec_GobEnDecode(b *testing.B) {
--	doBenchmarkDec1(b, func(x *Dec) {
--		g, _ := x.GobEncode()
--		new(Dec).GobDecode(g)
--	})
--}
--
--func Benchmark_Dec_Add(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		ys := y.Scale()
--		y.SetScale(x.Scale())
--		_ = new(Dec).Add(x, y)
--		y.SetScale(ys)
--	})
--}
--
--func Benchmark_Dec_AddMixed(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		_ = new(Dec).Add(x, y)
--	})
--}
--
--func Benchmark_Dec_Sub(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		ys := y.Scale()
--		y.SetScale(x.Scale())
--		_ = new(Dec).Sub(x, y)
--		y.SetScale(ys)
--	})
--}
--
--func Benchmark_Dec_SubMixed(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		_ = new(Dec).Sub(x, y)
--	})
--}
--
--func Benchmark_Dec_Mul(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		_ = new(Dec).Mul(x, y)
--	})
--}
--
--func Benchmark_Dec_Mul_QuoExact(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		v := new(Dec).Mul(x, y)
--		_ = new(Dec).QuoExact(v, y)
--	})
--}
--
--func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		_ = new(Dec).QuoRound(x, y, 0, RoundDown)
--	})
--}
--
--func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) {
--	doBenchmarkDec2(b, func(x, y *Dec) {
--		_ = new(Dec).QuoRound(x, y, 0, RoundHalfUp)
--	})
--}
--
--func Benchmark_Int_String(b *testing.B) {
--	doBenchmarkInt1(b, func(x *big.Int) {
--		x.String()
--	})
--}
--
--func Benchmark_Int_StringScan(b *testing.B) {
--	doBenchmarkInt1(b, func(x *big.Int) {
--		s := x.String()
--		d := new(big.Int)
--		fmt.Sscan(s, d)
--	})
--}
--
--func Benchmark_Int_GobEncode(b *testing.B) {
--	doBenchmarkInt1(b, func(x *big.Int) {
--		x.GobEncode()
--	})
--}
--
--func Benchmark_Int_GobEnDecode(b *testing.B) {
--	doBenchmarkInt1(b, func(x *big.Int) {
--		g, _ := x.GobEncode()
--		new(big.Int).GobDecode(g)
--	})
--}
--
--func Benchmark_Int_Add(b *testing.B) {
--	doBenchmarkInt2(b, func(x, y *big.Int) {
--		_ = new(big.Int).Add(x, y)
--	})
--}
--
--func Benchmark_Int_Sub(b *testing.B) {
--	doBenchmarkInt2(b, func(x, y *big.Int) {
--		_ = new(big.Int).Sub(x, y)
--	})
--}
--
--func Benchmark_Int_Mul(b *testing.B) {
--	doBenchmarkInt2(b, func(x, y *big.Int) {
--		_ = new(big.Int).Mul(x, y)
--	})
--}
--
--func Benchmark_Int_Quo(b *testing.B) {
--	doBenchmarkInt2(b, func(x, y *big.Int) {
--		_ = new(big.Int).Quo(x, y)
--	})
--}
--
--func Benchmark_Int_QuoRem(b *testing.B) {
--	doBenchmarkInt2(b, func(x, y *big.Int) {
--		_, _ = new(big.Int).QuoRem(x, y, new(big.Int))
--	})
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec.go
-deleted file mode 100644
-index d17ad94..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec.go
-+++ /dev/null
-@@ -1,615 +0,0 @@
--// Package inf (type inf.Dec) implements "infinite-precision" decimal
--// arithmetic.
--// "Infinite precision" describes two characteristics: practically unlimited
--// precision for decimal number representation and no support for calculating
--// with any specific fixed precision.
--// (Although there is no practical limit on precision, inf.Dec can only
--// represent finite decimals.)
--//
--// This package is currently in experimental stage and the API may change.
--//
--// This package does NOT support:
--//  - rounding to specific precisions (as opposed to specific decimal positions)
--//  - the notion of context (each rounding must be explicit)
--//  - NaN and Inf values, and distinguishing between positive and negative zero
--//  - conversions to and from float32/64 types
--//
--// Features considered for possible addition:
--//  + formatting options
--//  + Exp method
--//  + combined operations such as AddRound/MulAdd etc
--//  + exchanging data in decimal32/64/128 formats
--//
--package inf
--
--// TODO:
--//  - avoid excessive deep copying (quo and rounders)
--
--import (
--	"fmt"
--	"io"
--	"math/big"
--	"strings"
--)
--
--// A Dec represents a signed arbitrary-precision decimal.
--// It is a combination of a sign, an arbitrary-precision integer coefficient
--// value, and a signed fixed-precision exponent value.
--// The sign and the coefficient value are handled together as a signed value
--// and referred to as the unscaled value.
--// (Positive and negative zero values are not distinguished.)
--// Since the exponent is most commonly non-positive, it is handled in negated
--// form and referred to as scale.
--//
--// The mathematical value of a Dec equals:
--//
--//  unscaled * 10**(-scale)
--//
--// Note that different Dec representations may have equal mathematical values.
--//
--//  unscaled  scale  String()
--//  -------------------------
--//         0      0    "0"
--//         0      2    "0.00"
--//         0     -2    "0"
--//         1      0    "1"
--//       100      2    "1.00"
--//        10      0   "10"
--//         1     -1   "10"
--//
--// The zero value for a Dec represents the value 0 with scale 0.
--//
--// Operations are typically performed through the *Dec type.
--// The semantics of the assignment operation "=" for "bare" Dec values is
--// undefined and should not be relied on.
--//
--// Methods are typically of the form:
--//
--//	func (z *Dec) Op(x, y *Dec) *Dec
--//
--// and implement operations z = x Op y with the result as receiver; if it
--// is one of the operands it may be overwritten (and its memory reused).
--// To enable chaining of operations, the result is also returned. Methods
--// returning a result other than *Dec take one of the operands as the receiver.
--//
--// A "bare" Quo method (quotient / division operation) is not provided, as the
--// result is not always a finite decimal and thus in general cannot be
--// represented as a Dec.
--// Instead, in the common case when rounding is (potentially) necessary,
--// QuoRound should be used with a Scale and a Rounder.
--// QuoExact or QuoRound with RoundExact can be used in the special cases when it
--// is known that the result is always a finite decimal.
--//
--type Dec struct {
--	unscaled big.Int
--	scale    Scale
--}
--
--// Scale represents the type used for the scale of a Dec.
--type Scale int32
--
--const scaleSize = 4 // bytes in a Scale value
--
--// Scaler represents a method for obtaining the scale to use for the result of
--// an operation on x and y.
--type scaler interface {
--	Scale(x *Dec, y *Dec) Scale
--}
--
--var bigInt = [...]*big.Int{
--	big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
--	big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
--	big.NewInt(10),
--}
--
--var exp10cache [64]big.Int = func() [64]big.Int {
--	e10, e10i := [64]big.Int{}, bigInt[1]
--	for i, _ := range e10 {
--		e10[i].Set(e10i)
--		e10i = new(big.Int).Mul(e10i, bigInt[10])
--	}
--	return e10
--}()
--
--// NewDec allocates and returns a new Dec set to the given int64 unscaled value
--// and scale.
--func NewDec(unscaled int64, scale Scale) *Dec {
--	return new(Dec).SetUnscaled(unscaled).SetScale(scale)
--}
--
--// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
--// value and scale.
--func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
--	return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
--}
--
--// Scale returns the scale of x.
--func (x *Dec) Scale() Scale {
--	return x.scale
--}
--
--// Unscaled returns the unscaled value of x for u and true for ok when the
--// unscaled value can be represented as int64; otherwise it returns an undefined
--// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
--// checking the validity of the value when the check is known to be redundant.
--func (x *Dec) Unscaled() (u int64, ok bool) {
--	u = x.unscaled.Int64()
--	var i big.Int
--	ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
--	return
--}
--
--// UnscaledBig returns the unscaled value of x as *big.Int.
--func (x *Dec) UnscaledBig() *big.Int {
--	return &x.unscaled
--}
--
--// SetScale sets the scale of z, with the unscaled value unchanged, and returns
--// z.
--// The mathematical value of the Dec changes as if it was multiplied by
--// 10**(oldscale-scale).
--func (z *Dec) SetScale(scale Scale) *Dec {
--	z.scale = scale
--	return z
--}
--
--// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
--// returns z.
--func (z *Dec) SetUnscaled(unscaled int64) *Dec {
--	z.unscaled.SetInt64(unscaled)
--	return z
--}
--
--// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
--// returns z.
--func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
--	z.unscaled.Set(unscaled)
--	return z
--}
--
--// Set sets z to the value of x and returns z.
--// It does nothing if z == x.
--func (z *Dec) Set(x *Dec) *Dec {
--	if z != x {
--		z.SetUnscaledBig(x.UnscaledBig())
--		z.SetScale(x.Scale())
--	}
--	return z
--}
--
--// Sign returns:
--//
--//	-1 if x <  0
--//	 0 if x == 0
--//	+1 if x >  0
--//
--func (x *Dec) Sign() int {
--	return x.UnscaledBig().Sign()
--}
--
--// Neg sets z to -x and returns z.
--func (z *Dec) Neg(x *Dec) *Dec {
--	z.SetScale(x.Scale())
--	z.UnscaledBig().Neg(x.UnscaledBig())
--	return z
--}
--
--// Cmp compares x and y and returns:
--//
--//   -1 if x <  y
--//    0 if x == y
--//   +1 if x >  y
--//
--func (x *Dec) Cmp(y *Dec) int {
--	xx, yy := upscale(x, y)
--	return xx.UnscaledBig().Cmp(yy.UnscaledBig())
--}
--
--// Abs sets z to |x| (the absolute value of x) and returns z.
--func (z *Dec) Abs(x *Dec) *Dec {
--	z.SetScale(x.Scale())
--	z.UnscaledBig().Abs(x.UnscaledBig())
--	return z
--}
--
--// Add sets z to the sum x+y and returns z.
--// The scale of z is the greater of the scales of x and y.
--func (z *Dec) Add(x, y *Dec) *Dec {
--	xx, yy := upscale(x, y)
--	z.SetScale(xx.Scale())
--	z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
--	return z
--}
--
--// Sub sets z to the difference x-y and returns z.
--// The scale of z is the greater of the scales of x and y.
--func (z *Dec) Sub(x, y *Dec) *Dec {
--	xx, yy := upscale(x, y)
--	z.SetScale(xx.Scale())
--	z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
--	return z
--}
--
--// Mul sets z to the product x*y and returns z.
--// The scale of z is the sum of the scales of x and y.
--func (z *Dec) Mul(x, y *Dec) *Dec {
--	z.SetScale(x.Scale() + y.Scale())
--	z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
--	return z
--}
--
--// Round sets z to the value of x rounded to Scale s using Rounder r, and
--// returns z.
--func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
--	return z.QuoRound(x, NewDec(1, 0), s, r)
--}
--
--// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
--// specified scale.
--//
--// If the rounder is RoundExact but the result can not be expressed exactly at
--// the specified scale, QuoRound returns nil, and the value of z is undefined.
--//
--// There is no corresponding Div method; the equivalent can be achieved through
--// the choice of Rounder used.
--//
--func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
--	return z.quo(x, y, sclr{s}, r)
--}
--
--func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
--	scl := s.Scale(x, y)
--	var zzz *Dec
--	if r.UseRemainder() {
--		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
--		zzz = r.Round(new(Dec), zz, rA, rB)
--	} else {
--		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
--		zzz = r.Round(new(Dec), zz, nil, nil)
--	}
--	if zzz == nil {
--		return nil
--	}
--	return z.Set(zzz)
--}
--
--// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
--// decimal. Otherwise it returns nil and the value of z is undefined.
--//
--// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
--// calculated so that the remainder will be zero whenever x/y is a finite
--// decimal.
--func (z *Dec) QuoExact(x, y *Dec) *Dec {
--	return z.quo(x, y, scaleQuoExact{}, RoundExact)
--}
--
--// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
--// it sets remNum and remDen to the numerator and denominator of the remainder.
--// It returns z, remNum and remDen.
--//
--// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
--// that is, the results satisfy the following equation:
--//
--//  x / y = z + (remNum/remDen) * 10**(-z.Scale())
--//
--// See Rounder for more details about rounding.
--//
--func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
--	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
--	// difference (required adjustment) compared to "canonical" result scale
--	shift := s - (x.Scale() - y.Scale())
--	// pointers to adjusted unscaled dividend and divisor
--	var ix, iy *big.Int
--	switch {
--	case shift > 0:
--		// increased scale: decimal-shift dividend left
--		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
--		iy = y.UnscaledBig()
--	case shift < 0:
--		// decreased scale: decimal-shift divisor left
--		ix = x.UnscaledBig()
--		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
--	default:
--		ix = x.UnscaledBig()
--		iy = y.UnscaledBig()
--	}
--	// save a copy of iy in case it to be overwritten with the result
--	iy2 := iy
--	if iy == z.UnscaledBig() {
--		iy2 = new(big.Int).Set(iy)
--	}
--	// set scale
--	z.SetScale(s)
--	// set unscaled
--	if useRem {
--		// Int division
--		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
--		// set remainder
--		remNum.Set(intr)
--		remDen.Set(iy2)
--	} else {
--		z.UnscaledBig().Quo(ix, iy)
--	}
--	return z, remNum, remDen
--}
--
--type sclr struct{ s Scale }
--
--func (s sclr) Scale(x, y *Dec) Scale {
--	return s.s
--}
--
--type scaleQuoExact struct{}
--
--func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
--	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
--	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
--	var f10 Scale
--	if f2 > f5 {
--		f10 = Scale(f2)
--	} else {
--		f10 = Scale(f5)
--	}
--	return x.Scale() - y.Scale() + f10
--}
--
--func factor(n *big.Int, p *big.Int) int {
--	// could be improved for large factors
--	d, f := n, 0
--	for {
--		dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
--		if dm.Sign() == 0 {
--			f++
--			d = dd
--		} else {
--			break
--		}
--	}
--	return f
--}
--
--func factor2(n *big.Int) int {
--	// could be improved for large factors
--	f := 0
--	for ; n.Bit(f) == 0; f++ {
--	}
--	return f
--}
--
--func upscale(a, b *Dec) (*Dec, *Dec) {
--	if a.Scale() == b.Scale() {
--		return a, b
--	}
--	if a.Scale() > b.Scale() {
--		bb := b.rescale(a.Scale())
--		return a, bb
--	}
--	aa := a.rescale(b.Scale())
--	return aa, b
--}
--
--func exp10(x Scale) *big.Int {
--	if int(x) < len(exp10cache) {
--		return &exp10cache[int(x)]
--	}
--	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
--}
--
--func (x *Dec) rescale(newScale Scale) *Dec {
--	shift := newScale - x.Scale()
--	switch {
--	case shift < 0:
--		e := exp10(-shift)
--		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
--	case shift > 0:
--		e := exp10(shift)
--		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
--	}
--	return x
--}
--
--var zeros = []byte("00000000000000000000000000000000" +
--	"00000000000000000000000000000000")
--var lzeros = Scale(len(zeros))
--
--func appendZeros(s []byte, n Scale) []byte {
--	for i := Scale(0); i < n; i += lzeros {
--		if n > i+lzeros {
--			s = append(s, zeros...)
--		} else {
--			s = append(s, zeros[0:n-i]...)
--		}
--	}
--	return s
--}
--
--func (x *Dec) String() string {
--	if x == nil {
--		return "<nil>"
--	}
--	scale := x.Scale()
--	s := []byte(x.UnscaledBig().String())
--	if scale <= 0 {
--		if scale != 0 && x.unscaled.Sign() != 0 {
--			s = appendZeros(s, -scale)
--		}
--		return string(s)
--	}
--	negbit := Scale(-((x.Sign() - 1) / 2))
--	// scale > 0
--	lens := Scale(len(s))
--	if lens-negbit <= scale {
--		ss := make([]byte, 0, scale+2)
--		if negbit == 1 {
--			ss = append(ss, '-')
--		}
--		ss = append(ss, '0', '.')
--		ss = appendZeros(ss, scale-lens+negbit)
--		ss = append(ss, s[negbit:]...)
--		return string(ss)
--	}
--	// lens > scale
--	ss := make([]byte, 0, lens+1)
--	ss = append(ss, s[:lens-scale]...)
--	ss = append(ss, '.')
--	ss = append(ss, s[lens-scale:]...)
--	return string(ss)
--}
--
--// Format is a support routine for fmt.Formatter. It accepts the decimal
--// formats 'd' and 'f', and handles both equivalently.
--// Width, precision, flags and bases 2, 8, 16 are not supported.
--func (x *Dec) Format(s fmt.State, ch rune) {
--	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
--		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
--		return
--	}
--	fmt.Fprintf(s, x.String())
--}
--
--func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
--	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
--	dp, dg := -1, -1                 // indexes of decimal point, first digit
--loop:
--	for {
--		ch, _, err := r.ReadRune()
--		if err == io.EOF {
--			break loop
--		}
--		if err != nil {
--			return nil, err
--		}
--		switch {
--		case ch == '+' || ch == '-':
--			if len(unscaled) > 0 || dp >= 0 { // must be first character
--				r.UnreadRune()
--				break loop
--			}
--		case ch == '.':
--			if dp >= 0 {
--				r.UnreadRune()
--				break loop
--			}
--			dp = len(unscaled)
--			continue // don't add to unscaled
--		case ch >= '0' && ch <= '9':
--			if dg == -1 {
--				dg = len(unscaled)
--			}
--		default:
--			r.UnreadRune()
--			break loop
--		}
--		unscaled = append(unscaled, byte(ch))
--	}
--	if dg == -1 {
--		return nil, fmt.Errorf("no digits read")
--	}
--	if dp >= 0 {
--		z.SetScale(Scale(len(unscaled) - dp))
--	} else {
--		z.SetScale(0)
--	}
--	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
--	if !ok {
--		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
--	}
--	return z, nil
--}
--
--// SetString sets z to the value of s, interpreted as a decimal (base 10),
--// and returns z and a boolean indicating success. The scale of z is the
--// number of digits after the decimal point (including any trailing 0s),
--// or 0 if there is no decimal point. If SetString fails, the value of z
--// is undefined but the returned value is nil.
--func (z *Dec) SetString(s string) (*Dec, bool) {
--	r := strings.NewReader(s)
--	_, err := z.scan(r)
--	if err != nil {
--		return nil, false
--	}
--	_, _, err = r.ReadRune()
--	if err != io.EOF {
--		return nil, false
--	}
--	// err == io.EOF => scan consumed all of s
--	return z, true
--}
--
--// Scan is a support routine for fmt.Scanner; it sets z to the value of
--// the scanned number. It accepts the decimal formats 'd' and 'f', and
--// handles both equivalently. Bases 2, 8, 16 are not supported.
--// The scale of z is the number of digits after the decimal point
--// (including any trailing 0s), or 0 if there is no decimal point.
--func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
--	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
--		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
--	}
--	s.SkipSpace()
--	_, err := z.scan(s)
--	return err
--}
--
--// Gob encoding version
--const decGobVersion byte = 1
--
--func scaleBytes(s Scale) []byte {
--	buf := make([]byte, scaleSize)
--	i := scaleSize
--	for j := 0; j < scaleSize; j++ {
--		i--
--		buf[i] = byte(s)
--		s >>= 8
--	}
--	return buf
--}
--
--func scale(b []byte) (s Scale) {
--	for j := 0; j < scaleSize; j++ {
--		s <<= 8
--		s |= Scale(b[j])
--	}
--	return
--}
--
--// GobEncode implements the gob.GobEncoder interface.
--func (x *Dec) GobEncode() ([]byte, error) {
--	buf, err := x.UnscaledBig().GobEncode()
--	if err != nil {
--		return nil, err
--	}
--	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
--	return buf, nil
--}
--
--// GobDecode implements the gob.GobDecoder interface.
--func (z *Dec) GobDecode(buf []byte) error {
--	if len(buf) == 0 {
--		return fmt.Errorf("Dec.GobDecode: no data")
--	}
--	b := buf[len(buf)-1]
--	if b != decGobVersion {
--		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
--	}
--	l := len(buf) - scaleSize - 1
--	err := z.UnscaledBig().GobDecode(buf[:l])
--	if err != nil {
--		return err
--	}
--	z.SetScale(scale(buf[l : l+scaleSize]))
--	return nil
--}
--
--// MarshalText implements the encoding.TextMarshaler interface.
--func (x *Dec) MarshalText() ([]byte, error) {
--	return []byte(x.String()), nil
--}
--
--// UnmarshalText implements the encoding.TextUnmarshaler interface.
--func (z *Dec) UnmarshalText(data []byte) error {
--	_, ok := z.SetString(string(data))
--	if !ok {
--		return fmt.Errorf("invalid inf.Dec")
--	}
--	return nil
--}
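
The doc comments above cover the whole inf.Dec API that this patch stops bundling. As a quick orientation, a minimal usage sketch mirroring the package's own examples (it assumes the library is fetched separately, e.g. through Godeps, rather than carried in-tree):

package main

import (
	"fmt"

	"speter.net/go/exp/math/dec/inf" // assumed to be fetched separately, not bundled
)

func main() {
	x := inf.NewDec(10, 0) // unscaled 10, scale 0
	y := inf.NewDec(3, 0)  // unscaled 3, scale 0

	sum := new(inf.Dec).Add(x, y) // result scale is the greater of the two scales
	fmt.Println(sum)              // 13

	// 10/3 has no finite decimal form, so QuoRound needs a Scale and a Rounder.
	q := new(inf.Dec).QuoRound(x, y, 2, inf.RoundHalfUp)
	fmt.Println(q) // 3.33
}
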
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_go1_2_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_go1_2_test.go
-deleted file mode 100644
-index 5df0f7b..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_go1_2_test.go
-+++ /dev/null
-@@ -1,33 +0,0 @@
--// +build go1.2
--
--package inf
--
--import (
--	"encoding"
--	"encoding/json"
--	"testing"
--)
--
--var _ encoding.TextMarshaler = new(Dec)
--var _ encoding.TextUnmarshaler = new(Dec)
--
--type Obj struct {
--	Val *Dec
--}
--
--func TestDecJsonMarshalUnmarshal(t *testing.T) {
--	o := Obj{Val: NewDec(123, 2)}
--	js, err := json.Marshal(o)
--	if err != nil {
--		t.Fatalf("json.Marshal(%v): got %v, want ok", o, err)
--	}
--	o2 := &Obj{}
--	err = json.Unmarshal(js, o2)
--	if err != nil {
--		t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err)
--	}
--	if o.Val.Scale() != o2.Val.Scale() ||
--		o.Val.UnscaledBig().Cmp(o2.Val.UnscaledBig()) != 0 {
--		t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", o, o, o2)
--	}
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_internal_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_internal_test.go
-deleted file mode 100644
-index d4fbe3e..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_internal_test.go
-+++ /dev/null
-@@ -1,40 +0,0 @@
--package inf
--
--import (
--	"math/big"
--	"testing"
--)
--
--var decQuoRemZZZ = []struct {
--	z, x, y  *Dec
--	r        *big.Rat
--	srA, srB int
--}{
--	// basic examples
--	{NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
--	{NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1},
--	{NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1},
--	{NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
--	{NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1},
--	{NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1},
--
--	// examples from the Go Language Specification
--	{NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1},
--	{NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1},
--	{NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1},
--	{NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1},
--}
--
--func TestDecQuoRem(t *testing.T) {
--	for i, a := range decQuoRemZZZ {
--		z, rA, rB := new(Dec), new(big.Int), new(big.Int)
--		s := scaleQuoExact{}.Scale(a.x, a.y)
--		z.quoRem(a.x, a.y, s, true, rA, rB)
--		if a.z.Cmp(z) != 0 || a.r.Cmp(new(big.Rat).SetFrac(rA, rB)) != 0 {
--			t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, z, rA, rB, a.z, a.r)
--		}
--		if a.srA != rA.Sign() || a.srB != rB.Sign() {
--			t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, rA.Sign(), rB.Sign(), a.srA, a.srB)
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_test.go
-deleted file mode 100644
-index 01ac771..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/dec_test.go
-+++ /dev/null
-@@ -1,379 +0,0 @@
--package inf_test
--
--import (
--	"bytes"
--	"encoding/gob"
--	"fmt"
--	"math/big"
--	"strings"
--	"testing"
--
--	"speter.net/go/exp/math/dec/inf"
--)
--
--type decFunZZ func(z, x, y *inf.Dec) *inf.Dec
--type decArgZZ struct {
--	z, x, y *inf.Dec
--}
--
--var decSumZZ = []decArgZZ{
--	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
--	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
--	{inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)},
--	{inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)},
--	{inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)},
--	{inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)},
--	{inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)},
--}
--
--var decProdZZ = []decArgZZ{
--	{inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)},
--	{inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)},
--	{inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)},
--	{inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)},
--	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
--	{inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)},
--	{inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)},
--}
--
--func TestDecSignZ(t *testing.T) {
--	var zero inf.Dec
--	for _, a := range decSumZZ {
--		s := a.z.Sign()
--		e := a.z.Cmp(&zero)
--		if s != e {
--			t.Errorf("got %d; want %d for z = %v", s, e, a.z)
--		}
--	}
--}
--
--func TestDecAbsZ(t *testing.T) {
--	var zero inf.Dec
--	for _, a := range decSumZZ {
--		var z inf.Dec
--		z.Abs(a.z)
--		var e inf.Dec
--		e.Set(a.z)
--		if e.Cmp(&zero) < 0 {
--			e.Sub(&zero, &e)
--		}
--		if z.Cmp(&e) != 0 {
--			t.Errorf("got z = %v; want %v", z, e)
--		}
--	}
--}
--
--func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) {
--	var z inf.Dec
--	f(&z, a.x, a.y)
--	if (&z).Cmp(a.z) != 0 {
--		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
--	}
--}
--
--func TestDecSumZZ(t *testing.T) {
--	AddZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) }
--	SubZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) }
--	for _, a := range decSumZZ {
--		arg := a
--		testDecFunZZ(t, "AddZZ", AddZZ, arg)
--
--		arg = decArgZZ{a.z, a.y, a.x}
--		testDecFunZZ(t, "AddZZ symmetric", AddZZ, arg)
--
--		arg = decArgZZ{a.x, a.z, a.y}
--		testDecFunZZ(t, "SubZZ", SubZZ, arg)
--
--		arg = decArgZZ{a.y, a.z, a.x}
--		testDecFunZZ(t, "SubZZ symmetric", SubZZ, arg)
--	}
--}
--
--func TestDecProdZZ(t *testing.T) {
--	MulZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) }
--	for _, a := range decProdZZ {
--		arg := a
--		testDecFunZZ(t, "MulZZ", MulZZ, arg)
--
--		arg = decArgZZ{a.z, a.y, a.x}
--		testDecFunZZ(t, "MulZZ symmetric", MulZZ, arg)
--	}
--}
--
--var decUnscaledTests = []struct {
--	d  *inf.Dec
--	u  int64 // ignored when ok == false
--	ok bool
--}{
--	{new(inf.Dec), 0, true},
--	{inf.NewDec(-1<<63, 0), -1 << 63, true},
--	{inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true},
--	{new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false},
--	{new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false},
--}
--
--func TestDecUnscaled(t *testing.T) {
--	for i, tt := range decUnscaledTests {
--		u, ok := tt.d.Unscaled()
--		if ok != tt.ok {
--			t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tt.ok)
--		} else if ok && u != tt.u {
--			t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tt.u)
--		}
--	}
--}
--
--var decRoundTests = [...]struct {
--	in  *inf.Dec
--	s   inf.Scale
--	r   inf.Rounder
--	exp *inf.Dec
--}{
--	{inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)},
--	{inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)},
--	{inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)},
--	{inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, inf.NewDec(1844674407370955162, -1)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)},
--	{inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)},
--}
--
--func TestDecRound(t *testing.T) {
--	for i, tt := range decRoundTests {
--		z := new(inf.Dec).Round(tt.in, tt.s, tt.r)
--		if tt.exp.Cmp(z) != 0 {
--			t.Errorf("#%d Round got %v; expected %v", i, z, tt.exp)
--		}
--	}
--}
--
--var decStringTests = []struct {
--	in     string
--	out    string
--	val    int64
--	scale  inf.Scale // skip SetString if negative
--	ok     bool
--	scanOk bool
--}{
--	{in: "", ok: false, scanOk: false},
--	{in: "a", ok: false, scanOk: false},
--	{in: "z", ok: false, scanOk: false},
--	{in: "+", ok: false, scanOk: false},
--	{in: "-", ok: false, scanOk: false},
--	{in: "g", ok: false, scanOk: false},
--	{in: ".", ok: false, scanOk: false},
--	{in: ".-0", ok: false, scanOk: false},
--	{in: ".+0", ok: false, scanOk: false},
--	// Scannable but not SetStringable
--	{"0b", "ignored", 0, 0, false, true},
--	{"0x", "ignored", 0, 0, false, true},
--	{"0xg", "ignored", 0, 0, false, true},
--	{"0.0g", "ignored", 0, 1, false, true},
--	// examples from godoc for Dec
--	{"0", "0", 0, 0, true, true},
--	{"0.00", "0.00", 0, 2, true, true},
--	{"ignored", "0", 0, -2, true, false},
--	{"1", "1", 1, 0, true, true},
--	{"1.00", "1.00", 100, 2, true, true},
--	{"10", "10", 10, 0, true, true},
--	{"ignored", "10", 1, -1, true, false},
--	// other tests
--	{"+0", "0", 0, 0, true, true},
--	{"-0", "0", 0, 0, true, true},
--	{"0.0", "0.0", 0, 1, true, true},
--	{"0.1", "0.1", 1, 1, true, true},
--	{"0.", "0", 0, 0, true, true},
--	{"-10", "-10", -1, -1, true, true},
--	{"-1", "-1", -1, 0, true, true},
--	{"-0.1", "-0.1", -1, 1, true, true},
--	{"-0.01", "-0.01", -1, 2, true, true},
--	{"+0.", "0", 0, 0, true, true},
--	{"-0.", "0", 0, 0, true, true},
--	{".0", "0.0", 0, 1, true, true},
--	{"+.0", "0.0", 0, 1, true, true},
--	{"-.0", "0.0", 0, 1, true, true},
--	{"0.0000000000", "0.0000000000", 0, 10, true, true},
--	{"0.0000000001", "0.0000000001", 1, 10, true, true},
--	{"-0.0000000000", "0.0000000000", 0, 10, true, true},
--	{"-0.0000000001", "-0.0000000001", -1, 10, true, true},
--	{"-10", "-10", -10, 0, true, true},
--	{"+10", "10", 10, 0, true, true},
--	{"00", "0", 0, 0, true, true},
--	{"023", "23", 23, 0, true, true},      // decimal, not octal
--	{"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal
--}
--
--func TestDecGetString(t *testing.T) {
--	z := new(inf.Dec)
--	for i, test := range decStringTests {
--		if !test.ok {
--			continue
--		}
--		z.SetUnscaled(test.val)
--		z.SetScale(test.scale)
--
--		s := z.String()
--		if s != test.out {
--			t.Errorf("#%da got %s; want %s", i, s, test.out)
--		}
--
--		s = fmt.Sprintf("%d", z)
--		if s != test.out {
--			t.Errorf("#%db got %s; want %s", i, s, test.out)
--		}
--	}
--}
--
--func TestDecSetString(t *testing.T) {
--	tmp := new(inf.Dec)
--	for i, test := range decStringTests {
--		if test.scale < 0 {
--			// SetString only supports scale >= 0
--			continue
--		}
--		// initialize to a non-zero value so that issues with parsing
--		// 0 are detected
--		tmp.Set(inf.NewDec(1234567890, 123))
--		n1, ok1 := new(inf.Dec).SetString(test.in)
--		n2, ok2 := tmp.SetString(test.in)
--		expected := inf.NewDec(test.val, test.scale)
--		if ok1 != test.ok || ok2 != test.ok {
--			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
--			continue
--		}
--		if !ok1 {
--			if n1 != nil {
--				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
--			}
--			continue
--		}
--		if !ok2 {
--			if n2 != nil {
--				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
--			}
--			continue
--		}
--
--		if n1.Cmp(expected) != 0 {
--			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
--		}
--		if n2.Cmp(expected) != 0 {
--			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
--		}
--	}
--}
--
--func TestDecScan(t *testing.T) {
--	tmp := new(inf.Dec)
--	for i, test := range decStringTests {
--		if test.scale < 0 {
--			// SetString only supports scale >= 0
--			continue
--		}
--		// initialize to a non-zero value so that issues with parsing
--		// 0 are detected
--		tmp.Set(inf.NewDec(1234567890, 123))
--		n1, n2 := new(inf.Dec), tmp
--		nn1, err1 := fmt.Sscan(test.in, n1)
--		nn2, err2 := fmt.Sscan(test.in, n2)
--		if !test.scanOk {
--			if err1 == nil || err2 == nil {
--				t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk)
--			}
--			continue
--		}
--		expected := inf.NewDec(test.val, test.scale)
--		if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil {
--			t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2)
--			continue
--		}
--		if n1.Cmp(expected) != 0 {
--			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
--		}
--		if n2.Cmp(expected) != 0 {
--			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
--		}
--	}
--}
--
--var decScanNextTests = []struct {
--	in   string
--	ok   bool
--	next rune
--}{
--	{"", false, 0},
--	{"a", false, 'a'},
--	{"z", false, 'z'},
--	{"+", false, 0},
--	{"-", false, 0},
--	{"g", false, 'g'},
--	{".", false, 0},
--	{".-0", false, '-'},
--	{".+0", false, '+'},
--	{"0b", true, 'b'},
--	{"0x", true, 'x'},
--	{"0xg", true, 'x'},
--	{"0.0g", true, 'g'},
--}
--
--func TestDecScanNext(t *testing.T) {
--	for i, test := range decScanNextTests {
--		rdr := strings.NewReader(test.in)
--		n1 := new(inf.Dec)
--		nn1, _ := fmt.Fscan(rdr, n1)
--		if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) {
--			t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok)
--			continue
--		}
--		r := rune(0)
--		nn2, err := fmt.Fscanf(rdr, "%c", &r)
--		if test.next != r {
--			t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err)
--		}
--	}
--}
--
--var decGobEncodingTests = []string{
--	"0",
--	"1",
--	"2",
--	"10",
--	"42",
--	"1234567890",
--	"298472983472983471903246121093472394872319615612417471234712061",
--}
--
--func TestDecGobEncoding(t *testing.T) {
--	var medium bytes.Buffer
--	enc := gob.NewEncoder(&medium)
--	dec := gob.NewDecoder(&medium)
--	for i, test := range decGobEncodingTests {
--		for j := 0; j < 2; j++ {
--			for k := inf.Scale(-5); k <= 5; k++ {
--				medium.Reset() // empty buffer for each test case (in case of failures)
--				stest := test
--				if j != 0 {
--					// negative numbers
--					stest = "-" + test
--				}
--				var tx inf.Dec
--				tx.SetString(stest)
--				tx.SetScale(k) // test with positive, negative, and zero scale
--				if err := enc.Encode(&tx); err != nil {
--					t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err)
--				}
--				var rx inf.Dec
--				if err := dec.Decode(&rx); err != nil {
--					t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err)
--				}
--				if rx.Cmp(&tx) != 0 {
--					t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx)
--				}
--			}
--		}
--	}
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/example_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/example_test.go
-deleted file mode 100644
-index 52029e0..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/example_test.go
-+++ /dev/null
-@@ -1,62 +0,0 @@
--package inf_test
--
--import (
--	"fmt"
--	"log"
--)
--
--import "speter.net/go/exp/math/dec/inf"
--
--func ExampleDec_SetString() {
--	d := new(inf.Dec)
--	d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept
--	fmt.Println(d)
--	// Output: 12345.67890
--}
--
--func ExampleDec_Scan() {
--	// The Scan function is rarely used directly;
--	// the fmt package recognizes it as an implementation of fmt.Scanner.
--	d := new(inf.Dec)
--	_, err := fmt.Sscan("184467440.73709551617", d)
--	if err != nil {
--		log.Println("error scanning value:", err)
--	} else {
--		fmt.Println(d)
--	}
--	// Output: 184467440.73709551617
--}
--
--func ExampleDec_QuoRound_scale2RoundDown() {
--	// 10 / 3 is an infinite decimal; it has no exact Dec representation
--	x, y := inf.NewDec(10, 0), inf.NewDec(3, 0)
--	// use 2 digits beyond the decimal point, round towards 0
--	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown)
--	fmt.Println(z)
--	// Output: 3.33
--}
--
--func ExampleDec_QuoRound_scale2RoundCeil() {
--	// -42 / 400 is an finite decimal with 3 digits beyond the decimal point
--	x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0)
--	// use 2 digits beyond decimal point, round towards positive infinity
--	z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil)
--	fmt.Println(z)
--	// Output: -0.10
--}
--
--func ExampleDec_QuoExact_ok() {
--	// 1 / 25 is a finite decimal; it has exact Dec representation
--	x, y := inf.NewDec(1, 0), inf.NewDec(25, 0)
--	z := new(inf.Dec).QuoExact(x, y)
--	fmt.Println(z)
--	// Output: 0.04
--}
--
--func ExampleDec_QuoExact_fail() {
--	// 1 / 3 is an infinite decimal; it has no exact Dec representation
--	x, y := inf.NewDec(1, 0), inf.NewDec(3, 0)
--	z := new(inf.Dec).QuoExact(x, y)
--	fmt.Println(z)
--	// Output: <nil>
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder.go
-deleted file mode 100644
-index 3a97ef5..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder.go
-+++ /dev/null
-@@ -1,145 +0,0 @@
--package inf
--
--import (
--	"math/big"
--)
--
--// Rounder represents a method for rounding the (possibly infinite decimal)
--// result of a division to a finite Dec. It is used by Dec.Round() and
--// Dec.Quo().
--//
--// See the Example for results of using each Rounder with some sample values.
--//
--type Rounder rounder
--
--// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
--// definitions of these rounding modes.
--var (
--	RoundDown     Rounder // towards 0
--	RoundUp       Rounder // away from 0
--	RoundFloor    Rounder // towards -infinity
--	RoundCeil     Rounder // towards +infinity
--	RoundHalfDown Rounder // to nearest; towards 0 if same distance
--	RoundHalfUp   Rounder // to nearest; away from 0 if same distance
--	RoundHalfEven Rounder // to nearest; even last digit if same distance
--)
--
--// RoundExact is to be used in the case when rounding is not necessary.
--// When used with Quo or Round, it returns the result verbatim when it can be
--// expressed exactly with the given precision, and it returns nil otherwise.
--// QuoExact is a shorthand for using Quo with RoundExact.
--var RoundExact Rounder
--
--type rounder interface {
--
--	// When UseRemainder() returns true, the Round() method is passed the
--	// remainder of the division, expressed as the numerator and denominator of
--	// a rational.
--	UseRemainder() bool
--
--	// Round sets the rounded value of a quotient to z, and returns z.
--	// quo is rounded down (truncated towards zero) to the scale obtained from
--	// the Scaler in Quo().
--	//
--	// When the remainder is not used, remNum and remDen are nil.
--	// When used, the remainder is normalized between -1 and 1; that is:
--	//
--	//  -|remDen| < remNum < |remDen|
--	//
--	// remDen has the same sign as y, and remNum is zero or has the same sign
--	// as x.
--	Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
--}
--
--type rndr struct {
--	useRem bool
--	round  func(z, quo *Dec, remNum, remDen *big.Int) *Dec
--}
--
--func (r rndr) UseRemainder() bool {
--	return r.useRem
--}
--
--func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
--	return r.round(z, quo, remNum, remDen)
--}
--
--var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
--
--func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
--	return func(z, q *Dec, rA, rB *big.Int) *Dec {
--		z.Set(q)
--		brA, brB := rA.BitLen(), rB.BitLen()
--		if brA < brB-1 {
--			// brA < brB-1 => |rA| < |rB/2|
--			return z
--		}
--		roundUp := false
--		srA, srB := rA.Sign(), rB.Sign()
--		s := srA * srB
--		if brA == brB-1 {
--			rA2 := new(big.Int).Lsh(rA, 1)
--			if s < 0 {
--				rA2.Neg(rA2)
--			}
--			roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
--		} else {
--			// brA > brB-1 => |rA| > |rB/2|
--			roundUp = true
--		}
--		if roundUp {
--			z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
--		}
--		return z
--	}
--}
--
--func init() {
--	RoundExact = rndr{true,
--		func(z, q *Dec, rA, rB *big.Int) *Dec {
--			if rA.Sign() != 0 {
--				return nil
--			}
--			return z.Set(q)
--		}}
--	RoundDown = rndr{false,
--		func(z, q *Dec, rA, rB *big.Int) *Dec {
--			return z.Set(q)
--		}}
--	RoundUp = rndr{true,
--		func(z, q *Dec, rA, rB *big.Int) *Dec {
--			z.Set(q)
--			if rA.Sign() != 0 {
--				z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
--			}
--			return z
--		}}
--	RoundFloor = rndr{true,
--		func(z, q *Dec, rA, rB *big.Int) *Dec {
--			z.Set(q)
--			if rA.Sign()*rB.Sign() < 0 {
--				z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
--			}
--			return z
--		}}
--	RoundCeil = rndr{true,
--		func(z, q *Dec, rA, rB *big.Int) *Dec {
--			z.Set(q)
--			if rA.Sign()*rB.Sign() > 0 {
--				z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
--			}
--			return z
--		}}
--	RoundHalfDown = rndr{true, roundHalf(
--		func(c int, odd uint) bool {
--			return c > 0
--		})}
--	RoundHalfUp = rndr{true, roundHalf(
--		func(c int, odd uint) bool {
--			return c >= 0
--		})}
--	RoundHalfEven = rndr{true, roundHalf(
--		func(c int, odd uint) bool {
--			return c > 0 || c == 0 && odd == 1
--		})}
--}
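
To make the Rounder definitions above concrete, a small sketch dividing 1 by 8 (exactly 0.125) to two decimal places, where the half-way case shows how the modes differ (again assuming the library is available outside the bundled tree):

package main

import (
	"fmt"

	"speter.net/go/exp/math/dec/inf"
)

func main() {
	x, y := inf.NewDec(1, 0), inf.NewDec(8, 0) // 1/8 = 0.125 exactly
	modes := []struct {
		name    string
		rounder inf.Rounder
	}{
		{"RoundDown", inf.RoundDown},         // towards 0          -> 0.12
		{"RoundCeil", inf.RoundCeil},         // towards +infinity  -> 0.13
		{"RoundHalfUp", inf.RoundHalfUp},     // half away from 0   -> 0.13
		{"RoundHalfEven", inf.RoundHalfEven}, // half to even digit -> 0.12
	}
	for _, m := range modes {
		z := new(inf.Dec).QuoRound(x, y, 2, m.rounder)
		fmt.Printf("%-13s %s\n", m.name, z)
	}
}
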
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_example_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_example_test.go
-deleted file mode 100644
-index 5c5e4df..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_example_test.go
-+++ /dev/null
-@@ -1,72 +0,0 @@
--package inf_test
--
--import (
--	"fmt"
--	"os"
--	"text/tabwriter"
--
--	"speter.net/go/exp/math/dec/inf"
--)
--
--// This example displays the results of Dec.Round with each of the Rounders.
--//
--func ExampleRounder() {
--	var vals = []struct {
--		x string
--		s inf.Scale
--	}{
--		{"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1},
--		{"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1},
--		{"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1},
--		{"0.12", 1}, {"0.15", 1}, {"0.18", 1},
--	}
--
--	var rounders = []struct {
--		name    string
--		rounder inf.Rounder
--	}{
--		{"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp},
--		{"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor},
--		{"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp},
--		{"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact},
--	}
--
--	fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n")
--	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight)
--	fmt.Fprint(w, "x\ts\t|\t")
--	for _, r := range rounders {
--		fmt.Fprintf(w, "%s\t", r.name[5:])
--	}
--	fmt.Fprintln(w)
--	for _, v := range vals {
--		fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s)
--		for _, r := range rounders {
--			x, _ := new(inf.Dec).SetString(v.x)
--			z := new(inf.Dec).Round(x, v.s, r.rounder)
--			fmt.Fprintf(w, "%d\t", z)
--		}
--		fmt.Fprintln(w)
--	}
--	w.Flush()
--
--	// Output:
--	// The results of new(inf.Dec).Round(x, s, inf.RoundXXX):
--	//
--	//      x s | Down   Up Ceil Floor HalfDown HalfUp HalfEven Exact
--	//  -0.18 1 | -0.1 -0.2 -0.1  -0.2     -0.2   -0.2     -0.2 <nil>
--	//  -0.15 1 | -0.1 -0.2 -0.1  -0.2     -0.1   -0.2     -0.2 <nil>
--	//  -0.12 1 | -0.1 -0.2 -0.1  -0.2     -0.1   -0.1     -0.1 <nil>
--	//  -0.10 1 | -0.1 -0.1 -0.1  -0.1     -0.1   -0.1     -0.1  -0.1
--	//  -0.08 1 |  0.0 -0.1  0.0  -0.1     -0.1   -0.1     -0.1 <nil>
--	//  -0.05 1 |  0.0 -0.1  0.0  -0.1      0.0   -0.1      0.0 <nil>
--	//  -0.02 1 |  0.0 -0.1  0.0  -0.1      0.0    0.0      0.0 <nil>
--	//   0.00 1 |  0.0  0.0  0.0   0.0      0.0    0.0      0.0   0.0
--	//   0.02 1 |  0.0  0.1  0.1   0.0      0.0    0.0      0.0 <nil>
--	//   0.05 1 |  0.0  0.1  0.1   0.0      0.0    0.1      0.0 <nil>
--	//   0.08 1 |  0.0  0.1  0.1   0.0      0.1    0.1      0.1 <nil>
--	//   0.10 1 |  0.1  0.1  0.1   0.1      0.1    0.1      0.1   0.1
--	//   0.12 1 |  0.1  0.2  0.2   0.1      0.1    0.1      0.1 <nil>
--	//   0.15 1 |  0.1  0.2  0.2   0.1      0.1    0.2      0.2 <nil>
--	//   0.18 1 |  0.1  0.2  0.2   0.1      0.2    0.2      0.2 <nil>
--
--}
-diff --git a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_test.go b/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_test.go
-deleted file mode 100644
-index 757ab97..0000000
---- a/Godeps/_workspace/src/speter.net/go/exp/math/dec/inf/rounder_test.go
-+++ /dev/null
-@@ -1,109 +0,0 @@
--package inf_test
--
--import (
--	"math/big"
--	"testing"
--
--	"speter.net/go/exp/math/dec/inf"
--)
--
--var decRounderInputs = [...]struct {
--	quo    *inf.Dec
--	rA, rB *big.Int
--}{
--	// examples from go language spec
--	{inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)},   //  5 /  3
--	{inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 /  3
--	{inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, //  5 / -3
--	{inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3
--	// examples from godoc
--	{inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)},
--	{inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)},
--	{inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)},
--	{inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)},
--	{inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)},
--	{inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)},
--	{inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)},
--	{inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)},
--}
--
--var decRounderResults = [...]struct {
--	rounder inf.Rounder
--	results [len(decRounderInputs)]*inf.Dec
--}{
--	{inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil,
--		nil, nil, nil, nil, nil, nil,
--		inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}},
--	{inf.RoundDown, [...]*inf.Dec{
--		inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0),
--		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
--	{inf.RoundUp, [...]*inf.Dec{
--		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
--		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
--		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
--		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
--	{inf.RoundHalfDown, [...]*inf.Dec{
--		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
--		inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
--		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
--		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}},
--	{inf.RoundHalfUp, [...]*inf.Dec{
--		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
--		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
--		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
--		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
--	{inf.RoundHalfEven, [...]*inf.Dec{
--		inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0),
--		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1),
--		inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1),
--		inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
--	{inf.RoundFloor, [...]*inf.Dec{
--		inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0),
--		inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1),
--		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}},
--	{inf.RoundCeil, [...]*inf.Dec{
--		inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0),
--		inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1),
--		inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1),
--		inf.NewDec(0, 1),
--		inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1),
--		inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}},
--}
--
--func TestDecRounders(t *testing.T) {
--	for i, a := range decRounderResults {
--		for j, input := range decRounderInputs {
--			q := new(inf.Dec).Set(input.quo)
--			rA, rB := new(big.Int).Set(input.rA), new(big.Int).Set(input.rB)
--			res := a.rounder.Round(new(inf.Dec), q, rA, rB)
--			if a.results[j] == nil && res == nil {
--				continue
--			}
--			if (a.results[j] == nil && res != nil) ||
--				(a.results[j] != nil && res == nil) ||
--				a.results[j].Cmp(res) != 0 {
--				t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, res, a.results[j])
--			}
--		}
--	}
--}
--- 
-2.1.0
-
diff --git a/kubernetes.spec b/kubernetes.spec
index fc4dbfd..7781e1e 100644
--- a/kubernetes.spec
+++ b/kubernetes.spec
@@ -39,7 +39,8 @@ BuildRequires:	etcd
 BuildRequires:	hostname
 
 %if 0%{?fedora}
-Patch1: 0001-patch.patch
+Patch1: rename-import-paths.patch
+Patch2: update-tests-to-etcd-2.0.patch
 
 # needed for go cover.  Not available in RHEL/CentOS (available in Fedora/EPEL)
 BuildRequires:	golang-cover
@@ -359,6 +360,12 @@ install -p -m 644 docs/man/man1/* %{buildroot}%{_mandir}/man1
 # install the place the kubelet defaults to put volumes
 install -d %{buildroot}/var/lib/kubelet
 
+# install devel source codes
+install -d %{buildroot}/%{gopath}/src/%{import_path}
+for d in build cluster cmd contrib examples hack pkg plugin test; do
+    cp -rpav $d %{buildroot}/%{gopath}/src/%{import_path}/
+done
+
 %files
 %doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
 %{_mandir}/man1/*
@@ -383,6 +390,11 @@ install -d %{buildroot}/var/lib/kubelet
 %config(noreplace) %{_sysconfdir}/%{name}/kubelet
 %config(noreplace) %{_sysconfdir}/%{name}/scheduler
 
+%files devel
+%doc README.md LICENSE CONTRIB.md CONTRIBUTING.md DESIGN.md
+%dir %{gopath}/src/%{provider}.%{provider_tld}/%{project}
+%{gopath}/src/%{import_path}
+
 %pre
 getent group kube >/dev/null || groupadd -r kube
 getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \
@@ -399,6 +411,7 @@ getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin \
 %changelog
 * Tue Jan 27 2015 jchaloup <jchaloup at redhat.com> - 0.9.1-0.1.git3623a01
 - Bump to upstream 3623a01bf0e90de6345147eef62894057fe04b29
+- update tests for etcd-2.0
 
 * Thu Jan 22 2015 jchaloup <jchaloup at redhat.com> - 0.8.2-571.gitb2f287c
 +- Bump to upstream b2f287c259d856f4c08052a51cd7772c563aff77
-- 
cgit v0.10.2


	http://pkgs.fedoraproject.org/cgit/kubernetes.git/commit/?h=f21&id=187bdc7c7b77faa5b8217b632e1b0c4bae4d6d62

