2 commits - t/basic template.c
by Jim Meyering
t/basic | 12 ++++++++++++
template.c | 2 +-
2 files changed, 13 insertions(+), 1 deletion(-)
New commits:
commit 8a121adcdc378af16f7250b8d0e9643434aeabd4
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 22 11:09:56 2011 +0100
correct typo in object_attr URL: s,http:/,http://,
* template.c (xml_obj_entry): Fix typo.
* t/basic (bucket): Print all attributes of an object.
For now, the object_attr_list and object_attr URLs are
invalid, but that will be fixed shortly.
diff --git a/t/basic b/t/basic
index 808d62a..82005e2 100644
--- a/t/basic
+++ b/t/basic
@@ -172,6 +172,18 @@ curl -d _key=attr_post -d shape=round -d size=big $bucket || fail=1
test "$(curl $bucket/attr_post/shape)" = round || fail=1
test "$(curl $bucket/attr_post/size)" = big || fail=1
+# Print all attributes of an object:
+curl -d op=parts $bucket/attr_post > out-attr || fail=1
+cat <<EOF > exp-attr
+<object>
+ <object_body path="$bucket/attr_post/body"/>
+ <object_attr_list path="$bucket/attr_post/attrs"/>
+ <object_attr name="shape" path="$bucket/attr_post/attr_shape"/>
+ <object_attr name="size" path="$bucket/attr_post/attr_size"/>
+</object>
+EOF
+compare out-attr exp-attr || fail=1
+
# Ensure that an attempt to add reserved object name fails.
# FIXME: keep this list in sync with the one in rest.c:
# grep '^static.*reserved_name' rest.c
diff --git a/template.c b/template.c
index b3ce671..019dac6 100644
--- a/template.c
+++ b/template.c
@@ -80,7 +80,7 @@ static const char xml_obj_header[] = "\
static const char xml_obj_entry[] = "\
\n\
- <object_attr name=\"%s\" path=\"http:/%s/%s/%s/attr_%s\"/>\
+ <object_attr name=\"%s\" path=\"http://%s/%s/%s/attr_%s\"/>\
";
static const char xml_obj_footer[] = "\
commit 13fe8151f1c86ac2b731f2722780cfa82dec860d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 22 10:47:07 2011 +0100
do not emit invalid xml for an attribute name
* template.c (xml_obj_entry): For each "<object_attr ",
emit a closing "/>".
diff --git a/template.c b/template.c
index bb0a42a..b3ce671 100644
--- a/template.c
+++ b/template.c
@@ -80,7 +80,7 @@ static const char xml_obj_header[] = "\
static const char xml_obj_entry[] = "\
\n\
- <object_attr name=\"%s\" path=\"http:/%s/%s/%s/attr_%s\"\
+ <object_attr name=\"%s\" path=\"http:/%s/%s/%s/attr_%s\"/>\
";
static const char xml_obj_footer[] = "\
13 years, 2 months
Makefile.am
by Jim Meyering
Makefile.am | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
New commits:
commit 24fed793fe235405549761ac92599f739ed78d23
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 17 13:17:37 2011 +0100
build: remove unnecessary uses of xml-related cflags and ldflags
Thus, no result of the m4/libxml2.m4 test is used. However, we'll
leave the configure-time test as well as the spec-file "BuildRequires",
since some soon-to-be-added RHEV-M-related code will use it.
* Makefile.am (iwhd_CPPFLAGS): Remove $(LIBXML_CFLAGS).
It is unnecessary. While iwhd uses libxml2, it does so
only through hail.
(iwhd_LDADD): Remove $(LIBXML_LIBS).
Thanks to Pete Zaitcev for pointing out that $(LIBXML_CFLAGS)
was unnecessary.
2011-02-17 Jim Meyering <meyering(a)redhat.com>
diff --git a/Makefile.am b/Makefile.am
index e93545a..d6a63b0 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -89,7 +89,7 @@ rpm: dist iwhd.spec
*) rpmbuild -ta $(distdir).tar.gz ;; \
esac
-iwhd_CPPFLAGS = $(HAIL_CFLAGS) $(LIBXML_CFLAGS) -I$(top_srcdir)/lib
+iwhd_CPPFLAGS = $(HAIL_CFLAGS) -I$(top_srcdir)/lib
iwhd_LDADD = \
lib/libiwhd.a \
-lgc -lpthread \
@@ -100,7 +100,6 @@ iwhd_LDADD = \
$(JANSSON_LIB) \
$(UHTTPD_LIB) \
$(PTHREAD_LIB) \
- $(LIBXML_LIBS) \
$(HAIL_LIBS)
MOSTLYCLEANFILES += qlexer.c
13 years, 2 months
configure.ac
by Jim Meyering
configure.ac | 1 +
1 file changed, 1 insertion(+)
New commits:
commit 7a8ab6fafc11eed4b267f09d2d5262cf2ac91238
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 17 09:36:48 2011 +0100
turn off -Winline to avoid compiler warning/error on rawhide
* configure.ac (WERROR_CFLAGS): Turn off -Winline
to avoid triggering warning/error on rawhide due to
setup.h's kv_hash_insert_new definition.
diff --git a/configure.ac b/configure.ac
index 84cdf2d..e124f8d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -122,6 +122,7 @@ if test "$gl_gcc_warnings" = yes; then
nw="$nw -Wstrict-overflow" # in bison-generated code
nw="$nw -Wunsafe-loop-optimizations" # in bison-generated code
nw="$nw -Wmissing-noreturn" # yy_fatal_error in flex-generated code
+ nw="$nw -Winline" # setup.h's kv_hash_insert_new
gl_MANYWARN_ALL_GCC([ws])
gl_MANYWARN_COMPLEMENT([ws], [$ws], [$nw])
13 years, 2 months
Makefile.am configure.ac m4/libxml2.m4
by Jim Meyering
Makefile.am | 4 ++--
configure.ac | 5 +----
m4/libxml2.m4 | 46 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 49 insertions(+), 6 deletions(-)
New commits:
commit 86463799ee38c006f2751f9eb69b7edc3398df74
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Feb 16 20:28:03 2011 +0100
configure.ac did not honor xml2-config's settings
In particular, before this change, configure did not use any CFLAGS
for libxml2, and would fail to use the CFLAGS and ld options
recommended (sometimes required) by libxml2.
* m4/libxml2.m4 (gl_LIBXML2): New file.
* configure.ac: Use new macro, gl_LIBXML2.
* Makefile.am (iwhd_CPPFLAGS): Add $(LIBXML_CFLAGS)
(iwhd_LDADD): Use new variable, $(LIBXML_LIBS), not $(XML2_LIB).
diff --git a/Makefile.am b/Makefile.am
index 334ad02..e93545a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -89,7 +89,7 @@ rpm: dist iwhd.spec
*) rpmbuild -ta $(distdir).tar.gz ;; \
esac
-iwhd_CPPFLAGS = $(HAIL_CFLAGS) -I$(top_srcdir)/lib
+iwhd_CPPFLAGS = $(HAIL_CFLAGS) $(LIBXML_CFLAGS) -I$(top_srcdir)/lib
iwhd_LDADD = \
lib/libiwhd.a \
-lgc -lpthread \
@@ -100,7 +100,7 @@ iwhd_LDADD = \
$(JANSSON_LIB) \
$(UHTTPD_LIB) \
$(PTHREAD_LIB) \
- $(XML2_LIB) \
+ $(LIBXML_LIBS) \
$(HAIL_LIBS)
MOSTLYCLEANFILES += qlexer.c
diff --git a/configure.ac b/configure.ac
index e4a6014..84cdf2d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -70,10 +70,7 @@ PKG_CHECK_MODULES([HAIL],[libhail >= 0.8])
AC_SUBST([HAIL_LIBS])
AC_SUBST([HAIL_CFLAGS])
-AC_CHECK_LIB([xml2], [xmlInitParser],
- [XML2_LIB=-lxml2],
- [AC_MSG_ERROR([Missing required XML2 lib])])
-AC_SUBST([XML2_LIB])
+gl_LIBXML2([2.6.0])
AC_CHECK_HEADER([gc.h], ,
[AC_MSG_ERROR([Missing GC development library: gc-devel or libgc-dev])])
diff --git a/m4/libxml2.m4 b/m4/libxml2.m4
new file mode 100644
index 0000000..2ec2538
--- /dev/null
+++ b/m4/libxml2.m4
@@ -0,0 +1,46 @@
+dnl ==========================================================================
+dnl find libxml2 compile and link flags, derived from code in libvirt
+dnl ==========================================================================
+
+# gl_LIBXML2([MINIMUM_VERSION])
+# -----------------------------
+AC_DEFUN([gl_LIBXML2],
+[
+ gl_xml_min_ver=$1
+ gl_xml_config=xml2-config
+ gl_libxml_found=no
+ LIBXML_CFLAGS=
+ LIBXML_LIBS=
+
+ AC_ARG_WITH([libxml], AC_HELP_STRING([--with-libxml=@<:@PFX@:>@],
+ [libxml2 location]))
+ if test "x$with_libxml" = "xno"; then
+ AC_MSG_CHECKING(for libxml2 libraries >= $gl_xml_min_ver)
+ AC_MSG_ERROR([libxml2 >= $gl_xml_min_ver is required])
+ fi
+
+ if test "$gl_libxml_found" = no; then
+ if test "x$with_libxml" != x; then
+ gl_xml_config=$with_libxml/bin/$gl_xml_config
+ fi
+ AC_MSG_CHECKING([libxml2 $gl_xml_config >= $gl_xml_min_ver])
+ if ! $gl_xml_config --version > /dev/null 2>&1; then
+ AC_MSG_ERROR([Could not find libxml2 (see config.log for details).])
+ fi
+ gl_ver=`$gl_xml_config --version |
+ awk -F. '{ printf "%d", ([$]1 * 1000 + [$]2) * 1000 + [$]3}'`
+ gl_min_ver=`echo $gl_xml_min_ver |
+ awk -F. '{ printf "%d", ([$]1 * 1000 + [$]2) * 1000 + [$]3}'`
+ if test "$gl_ver" -ge "$gl_min_ver"; then
+ LIBXML_LIBS="`$gl_xml_config --libs`"
+ LIBXML_CFLAGS="`$gl_xml_config --cflags`"
+ gl_libxml_found=yes
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_ERROR([You need at least libxml2 $gl_xml_min_ver])
+ fi
+ fi
+
+ AC_SUBST([LIBXML_CFLAGS])
+ AC_SUBST([LIBXML_LIBS])
+])
13 years, 2 months
Makefile.am backend.c configure.ac iwhd.spec.in replica.c rest.c setup.h state_defs.h
by Jim Meyering
Makefile.am | 4 ----
backend.c | 1 -
configure.ac | 5 -----
iwhd.spec.in | 1 -
replica.c | 17 +++++++++++------
rest.c | 1 -
setup.h | 1 -
state_defs.h | 1 -
8 files changed, 11 insertions(+), 20 deletions(-)
New commits:
commit 2acf91ee633f14e39c9844e5769b23d34aa4d019
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 15 16:52:55 2011 +0100
remove final vestiges of glib
* Makefile.am (AM_CPPFLAGS): Remove definition with
hard-coded, version-dependent glib directory names.
(iwhd_LDADD): Remove GLIB2_LIB.
* configure.ac: Don't check for glib.
* iwhd.spec.in (BuildRequires): Remove glib2-devel.
* backend.c: Don't include <glib.h>.
* rest.c: Likewise.
* setup.h: Likewise.
* state_defs.h: Likewise.
* replica.c: (atomic_get, atomic_inc, atomic_get): Define.
Use these instead of glib's g_-prefixed names.
(rep_count): Change type from "gint" to "int".
diff --git a/Makefile.am b/Makefile.am
index fc767f0..334ad02 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -15,9 +15,6 @@
AM_CFLAGS = $(WARN_CFLAGS) $(WERROR_CFLAGS)
-# FIXME: don't hard-code 2.0
-AM_CPPFLAGS = -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include
-
iwhd_YFLAGS = -d
SUBDIRS = lib . gnulib-tests t man
@@ -104,7 +101,6 @@ iwhd_LDADD = \
$(UHTTPD_LIB) \
$(PTHREAD_LIB) \
$(XML2_LIB) \
- $(GLIB2_LIB) \
$(HAIL_LIBS)
MOSTLYCLEANFILES += qlexer.c
diff --git a/backend.c b/backend.c
index 003406f..b44c8c0 100644
--- a/backend.c
+++ b/backend.c
@@ -33,7 +33,6 @@
#include <microhttpd.h>
#include <curl/curl.h>
#include <hstor.h>
-#include <glib.h>
#define GLOBALS_IMPL
#include "iwh.h"
diff --git a/configure.ac b/configure.ac
index e874281..e4a6014 100644
--- a/configure.ac
+++ b/configure.ac
@@ -66,11 +66,6 @@ AC_CHECK_LIB([pthread], [pthread_create],
[AC_MSG_ERROR([Missing required pthread lib])])
AC_SUBST([PTHREAD_LIB])
-AC_CHECK_LIB([glib-2.0], [g_hash_table_new_full],
- [GLIB2_LIB=-lglib-2.0],
- [AC_MSG_ERROR([Missing required glib2 lib])])
-AC_SUBST([GLIB2_LIB])
-
PKG_CHECK_MODULES([HAIL],[libhail >= 0.8])
AC_SUBST([HAIL_LIBS])
AC_SUBST([HAIL_CFLAGS])
diff --git a/iwhd.spec.in b/iwhd.spec.in
index 4b26dce..629dd50 100644
--- a/iwhd.spec.in
+++ b/iwhd.spec.in
@@ -18,7 +18,6 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildRequires: boost-devel
BuildRequires: boost-filesystem
BuildRequires: gc-devel
-BuildRequires: glib2-devel
BuildRequires: hail-devel
BuildRequires: jansson-devel
BuildRequires: libcurl-devel
diff --git a/replica.c b/replica.c
index ee1034a..c7bcff6 100644
--- a/replica.c
+++ b/replica.c
@@ -36,6 +36,11 @@
#include "meta.h"
#include "replica.h"
+// FIXME-maybe: These are gcc-specific.
+#define atomic_get(p) __sync_fetch_and_add (p, 0)
+#define atomic_inc(p) __sync_fetch_and_add (p, 1)
+#define atomic_dec(p) __sync_fetch_and_sub (p, 1)
+
typedef enum {
REPL_PUT, /* store an object */
REPL_ODELETE, /* delete an object */
@@ -62,7 +67,7 @@ static repl_item *queue_head = NULL;
static repl_item *queue_tail = NULL;
static pthread_mutex_t queue_lock;
static sem_t queue_sema;
-static volatile gint rep_count = 0;
+static volatile int rep_count = 0;
static void *
proxy_repl_prod (void *ctx)
@@ -197,8 +202,8 @@ repl_worker (void *notused ATTRIBUTE_UNUSED)
error(0,0,"bad repl type %d (url=%s) skipped",
item->type, item->path);
}
- /* No atomic dec without test? Lame. */
- (void)g_atomic_int_dec_and_test(&rep_count);
+
+ atomic_dec(&rep_count);
}
}
@@ -345,7 +350,7 @@ replicate (const char *url, size_t size, const char *policy, my_state *ms)
}
queue_tail = item;
pthread_mutex_unlock(&queue_lock);
- g_atomic_int_inc(&rep_count);
+ atomic_inc(&rep_count);
sem_post(&queue_sema);
}
}
@@ -393,7 +398,7 @@ replicate_namespace_action (const char *name, repl_t action, my_state *ms)
}
queue_tail = item;
pthread_mutex_unlock(&queue_lock);
- g_atomic_int_inc(&rep_count);
+ atomic_inc(&rep_count);
sem_post(&queue_sema);
}
}
@@ -433,5 +438,5 @@ follow_link (char *object, const char *key)
int
get_rep_count (void)
{
- return g_atomic_int_get(&rep_count);
+ return atomic_get (&rep_count);
}
diff --git a/rest.c b/rest.c
index 1fb1aa8..3192915 100644
--- a/rest.c
+++ b/rest.c
@@ -33,7 +33,6 @@
#include <microhttpd.h>
#include <hstor.h> /* only for ARRAY_SIZE at this point */
#include <curl/curl.h>
-#include <glib.h>
#include "dirname.h"
#include "iwh.h"
diff --git a/setup.h b/setup.h
index b0b6882..ca04931 100644
--- a/setup.h
+++ b/setup.h
@@ -16,7 +16,6 @@
#if !defined(_SETUP_H)
#define _SETUP_H
-#include <glib.h>
#include <curl/curl.h> /* needed by stuff in state_defs.h (from backend.h) */
#include <microhttpd.h> /* ditto */
#include <assert.h>
diff --git a/state_defs.h b/state_defs.h
index 21ecbcd..70c62a9 100644
--- a/state_defs.h
+++ b/state_defs.h
@@ -16,7 +16,6 @@
#if !defined(_STATE_DEFS_H)
#define _STATE_DEFS_H
-#include <glib.h>
#include <microhttpd.h>
#include "hash.h"
#include "mpipe.h"
13 years, 2 months
[repo.or.cz] iwhd.git branch master updated: v0.91-6-gc2384ae
by Jim Meyering
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project iwhd.git.
The branch, master has been updated
via c2384ae5548ef20ba86adf863caa633a8d2479eb (commit)
via c6d9afe4fdeabf17c10839a0cd7999d18c26903d (commit)
from 4cb41b49a2b69509658d4fb90c21efde29476d9d (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
http://repo.or.cz/w/iwhd.git/commit/c2384ae5548ef20ba86adf863caa633a8d2479eb
commit c2384ae5548ef20ba86adf863caa633a8d2479eb
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 15 11:22:39 2011 +0100
point to the new repository
diff --git a/README b/README
new file mode 100644
index 0000000..b02da96
--- /dev/null
+++ b/README
@@ -0,0 +1,7 @@
+The new repository is here:
+ http://git.fedorahosted.org/git/?p=iwhd.git
+
+its clone URLs:
+ git://git.fedorahosted.org/iwhd.git
+ ssh://git.fedorahosted.org/git/iwhd.git
+ http://git.fedorahosted.org/git/iwhd.git
http://repo.or.cz/w/iwhd.git/commit/c6d9afe4fdeabf17c10839a0cd7999d18c26903d
commit c6d9afe4fdeabf17c10839a0cd7999d18c26903d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 15 11:21:48 2011 +0100
remove all files so no one thinks this repo is active; repository moved
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index f95dca3..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-*.o
-*/.deps
-*~
-.#*
-.deps
-/GNUmakefile
-/aclocal.m4
-/autom4te.cache/
-/build-aux
-/config.h
-/config.hin
-/config.log
-/config.status
-/configure
-/gnulib-tests
-/iwhd
-/iwhd-*.tar.gz
-/iwhd-qparser.c
-/iwhd-qparser.h
-/iwhd.spec
-/m4/.gitignore
-/maint.mk
-/man/iwhd.8
-/qlexer.c
-/stamp-h1
-/t/*.log
-/t/parser
-/t/parser.c
-ChangeLog
-Makefile
-Makefile.in
-#*#
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index acb2669..0000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "gnulib"]
- path = gnulib
- url = git://git.sv.gnu.org/gnulib.git
diff --git a/.prev-version b/.prev-version
deleted file mode 100644
index 0ac647c..0000000
--- a/.prev-version
+++ /dev/null
@@ -1 +0,0 @@
-0.91
diff --git a/.x-sc_trailing_blank b/.x-sc_trailing_blank
deleted file mode 100644
index 9726d11..0000000
--- a/.x-sc_trailing_blank
+++ /dev/null
@@ -1 +0,0 @@
-^doc/image_repo.odt$
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index e69de29..0000000
diff --git a/COPYING b/COPYING
deleted file mode 100644
index 94a9ed0..0000000
--- a/COPYING
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile.am b/Makefile.am
deleted file mode 100644
index fc767f0..0000000
--- a/Makefile.am
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (C) 2010-2011 Red Hat, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-AM_CFLAGS = $(WARN_CFLAGS) $(WERROR_CFLAGS)
-
-# FIXME: don't hard-code 2.0
-AM_CPPFLAGS = -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include
-
-iwhd_YFLAGS = -d
-
-SUBDIRS = lib . gnulib-tests t man
-ACLOCAL_AMFLAGS = -I m4
-
-# iwhd is short for Image WareHouse Daemon.
-bin_PROGRAMS = iwhd
-
-EXTRA_DIST = \
-  iwhd-qparser.h \
-  iwhd.spec \
-  iwhd.spec.in
-
-BUILT_SOURCES = iwhd-qparser.h qlexer.c
-
-MOSTLYCLEANFILES =
-MAINTAINERCLEANFILES =
-
-iwhd_SOURCES = \
-  auto.c \
-  backend.c \
-  backend.h \
-  gc-wrap.h \
-  iwh.h \
-  meta.cpp \
-  meta.h \
-  mpipe.c \
-  mpipe.h \
-  replica.c \
-  replica.h \
-  qparser.y \
-  query.h \
-  rest.c \
-  setup.c \
-  setup.h \
-  state_defs.h \
-  template.c \
-  template.h
-
-EXTRA_iwhd_SOURCES = qlexer.l
-
-VERSION_no_hyphen = $$(echo $(VERSION)|tr - _)
-
-MOSTLYCLEANFILES += iwhd.spec
-MAINTAINERCLEANFILES += iwhd.spec
-iwhd.spec: iwhd.spec.in Makefile
- rm -f $@-t $@
-	v=$$(echo $(VERSION)|tr - .); \
-	  sed 's/[@]VERSION@/'"$$v"/ $< > $@-t
- chmod a=r $@-t
- mv $@-t $@
-
-# The following rule accommodates pre-release version number strings
-# like "0.0.273-1621" that contain a "-". Repack the tarball
-# to one with "." substituted for any - in the version string.
-.PHONY: rpm
-rpm: dist iwhd.spec
- chmod 644 $(distdir).tar.gz
-	case $(VERSION) in \
-	  *-*) \
-	    v=$$(echo $(VERSION)|tr - .); \
-	    base=$(PACKAGE)-$$v; \
-	    tgz=$$base.tar.gz; \
-	    tar xf $(distdir).tar.gz \
-	      && rm -rf $$base $$tgz \
-	      && mv $(distdir) $$base \
-	      && tar -czf - $$base > $$tgz \
-	      && rpmbuild -ta $$tgz; e=$$? \
-	    rm -rf $$base $$tgz; \
-	    exit $$e \
-	    ;; \
-	  *) rpmbuild -ta $(distdir).tar.gz ;; \
-	esac
-
-iwhd_CPPFLAGS = $(HAIL_CFLAGS) -I$(top_srcdir)/lib
-iwhd_LDADD = \
-  lib/libiwhd.a \
-  -lgc -lpthread \
-  -lmongoclient \
-  $(BOOST_SYSTEM_LIB) \
-  $(BOOST_THREAD_LIB) \
-  $(CURL_LIB) \
-  $(JANSSON_LIB) \
-  $(UHTTPD_LIB) \
-  $(PTHREAD_LIB) \
-  $(XML2_LIB) \
-  $(GLIB2_LIB) \
-  $(HAIL_LIBS)
-
-MOSTLYCLEANFILES += qlexer.c
-MAINTAINERCLEANFILES += qlexer.c
-EXTRA_DIST += qlexer.c
diff --git a/NEWS b/NEWS
deleted file mode 100644
index 9a380f9..0000000
--- a/NEWS
+++ /dev/null
@@ -1,28 +0,0 @@
-iwhd NEWS -*- outline -*-
-
-* Noteworthy changes in release ?.? (????-??-??) [?]
-
-
-* Noteworthy changes in release 0.91 (2011-02-10) [stable]
-
-** Bug fixes
-
- not itemized, this time
-
-** New features
-
- new option: --autostart (-a) to automatically start back-end services
-
-** New APIs
-
- Change the primary provider to P (an existing provider name):
- curl -X PUT http://_providers/P/_set_primary
-
- Get primary provider name:
- http://host:$port/_providers/_primary
-
-** Infrastructure
-
- use gnulib
-
- use libgc for garbage collection
diff --git a/README b/README
deleted file mode 100644
index e69de29..0000000
diff --git a/README-hacking b/README-hacking
deleted file mode 100644
index 377e14f..0000000
--- a/README-hacking
+++ /dev/null
@@ -1,106 +0,0 @@
--*- outline -*-
-
-These notes intend to help people working on the checked-out sources.
-These requirements do not apply when building from a distribution tarball.
-
-* Requirements
-
-We've opted to keep only the highest-level sources in the GIT repository.
-This eases our maintenance burden, (fewer merges etc.), but imposes more
-requirements on anyone wishing to build from the just-checked-out sources.
-Note the requirements to build the released archive are much less and
-are just the requirements of the standard ./configure && make procedure.
-Specific development tools and versions will be checked for and listed by
-the bootstrap script. See README-prereq for specific notes on obtaining
-these prerequisite tools.
-
-Valgrind <http://valgrind.org/> is also highly recommended, if
-Valgrind supports your architecture. See also README-valgrind.
-
-While building from a just-cloned source tree may require installing a
-few prerequisites, later, a plain `git pull && make' should be sufficient.
-
-* First GIT checkout
-
-You can get a copy of the source repository like this:
-
- $ git clone git://repo.or.cz/iwhd.git
- $ cd iwhd
-
-As an optional step, if you already have a copy of the gnulib git
-repository on your hard drive, then you can use it as a reference to
-reduce download time and disk space requirements:
-
- $ export GNULIB_SRCDIR=/path/to/gnulib
-
-The next step is to get and check other files needed to build,
-which are extracted from other source packages:
-
- $ ./bootstrap
-
-To use the most-recent gnulib (as opposed to the gnulib version that
-the package last synchronized to), do this next:
-
- $ git submodule foreach git pull origin master
- $ git commit -m 'build: update gnulib submodule to latest' gnulib
-
-And there you are! Just
-
- $ ./configure --quiet #[--enable-gcc-warnings] [*]
- $ make
- $ make check
-
-At this point, there should be no difference between your local copy,
-and the GIT master copy:
-
- $ git diff
-
-should output no difference.
-
-Enjoy!
-
-[*] The --enable-gcc-warnings option is useful only with glibc
-and with a very recent version of gcc. You'll probably also have
-to use recent system headers. If you configure with this option,
-and spot a problem, please be sure to send the report to the bug
-reporting address of this package, and not to that of gnulib, even
-if the problem seems to originate in a gnulib-provided file.
-
-* Submitting patches
-
-If you develop a fix or a new feature, please send it to the
-appropriate bug-reporting address as reported by the --help option of
-each program. One way to do this is to use vc-dwim
-<http://www.gnu.org/software/vc-dwim/>), as follows.
-
- Run the command "vc-dwim --help", copy its definition of the
- "git-changelog-symlink-init" function into your shell, and then run
- this function at the top-level directory of the package.
-
- Edit the ChangeLog file that this command creates, creating a
- properly-formatted entry according to the GNU coding standards
- <http://www.gnu.org/prep/standards/html_node/Change-Logs.html>.
-
- Run the command "vc-dwim" and make sure its output looks good.
-
- Run "vc-dwim --commit".
-
- Run the command "git format-patch --stdout -1", and email its output
- in, using the output's subject line.
-
------
-
-Copyright (C) 2002-2011 Free Software Foundation, Inc.
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/README-prereq b/README-prereq
deleted file mode 100644
index ec9b141..0000000
--- a/README-prereq
+++ /dev/null
@@ -1,47 +0,0 @@
-This gives some notes on obtaining the tools required for development.
-I.E. the tools checked for by the bootstrap script and include:
-
-- Autoconf <http://www.gnu.org/software/autoconf/>
-- Automake <http://www.gnu.org/software/automake/>
-- Bison <http://www.gnu.org/software/bison/>
-- Gettext <http://www.gnu.org/software/gettext/>
-- Git <http://git.or.cz/>
-- Gperf <http://www.gnu.org/software/gperf/>
-- Gzip <http://www.gnu.org/software/gzip/>
-- Rsync <http://samba.anu.edu.au/rsync/>
-- Tar <http://www.gnu.org/software/tar/>
-
-Note please try to install/build official packages for your system.
-If these programs are not available use the following instructions
-to build them and install the results into a directory that you will
-then use when building this package.
-
-Even if the official version of a package for your system is too old,
-please install it, as it may be required to build the newer versions.
-The examples below install into $HOME/coreutils/deps/, so if you are
-going to follow these instructions, first ensure that your $PATH is
-set correctly by running this command:
-
- prefix=$HOME/coreutils/deps
- export PATH=$prefix/bin:$PATH
-
-* autoconf *
-
- # Note Autoconf 2.62 or newer is needed to build automake-1.11.1
- git clone --depth=1 git://git.sv.gnu.org/autoconf.git
- git checkout v2.62
- autoreconf -vi
- ./configure --prefix=$prefix
- make install
-
-* automake *
-
- # Note help2man is required to build automake fully
- git clone git://git.sv.gnu.org/automake.git
- cd automake
- git checkout v1.11.1
- ./bootstrap
- ./configure --prefix=$prefix
- make install
-
-Now you can build this package as described in README-hacking.
diff --git a/auto.c b/auto.c
deleted file mode 100644
index 50eb1c5..0000000
--- a/auto.c
+++ /dev/null
@@ -1,297 +0,0 @@
-#include <config.h>
-
-#include <errno.h>
-#include <error.h>
-#include <fcntl.h>
-#include <netdb.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include <stdarg.h> /* for microhttpd.h (bug in there) */
-#include <stdint.h> /* for microhttpd.h (bug in there) */
-#include <jansson.h>
-
-#include "iwh.h"
-#include "state_defs.h"
-
-static int auto_db_port;
-static char auto_arg_port[10];
-
-static const char *const auto_arg_mongod[] = {
- "mongod",
- "--port", auto_arg_port,
- "--dbpath", AUTO_DIR_DB,
- /* "--fork", */ /* chdirs god knows where, we cannot use this. */
- /* "--logpath", AUTO_MONGOD_LOG, */ /* required by --fork */
- /* "--logappend", */
- "--pidfilepath", "mongo.pid",
- NULL
-};
-
-/* The --quiet option in mongod is useless, so redirect instead. */
-static const char *const auto_arg_mongod_quiet[] = {
- "mongod",
- "--port", auto_arg_port,
- "--dbpath", AUTO_DIR_DB,
- "--logpath", AUTO_MONGOD_LOG,
- "--pidfilepath", "mongo.pid",
- NULL
-};
-
-static int auto_pid_mongod;
-
-static int
-auto_mkdir (const char *name)
-{
- struct stat statb;
-
- if (mkdir(name, 0777) < 0) {
- if (errno == EEXIST) {
- if (stat(name, &statb) < 0) {
- error (0, errno, "stat %s failed", name);
- return -1;
- }
- if (!S_ISDIR(statb.st_mode)) {
- error (0, 0, "path %s is not a directory",name);
- return -1;
- }
- return 0;
- }
- error(0, errno, "Cannot create %s", name);
- return -1;
- }
- return 0;
-}
-
-static int
-auto_prepare_area (void)
-{
-
- if (auto_mkdir(AUTO_DIR_FS) < 0) {
- return -1;
- }
- if (auto_mkdir(AUTO_DIR_DB) < 0) {
- return -1;
- }
- return 0;
-}
-
-static void
-auto_kill_mongod (int sig)
-{
- if (auto_pid_mongod) {
- kill(auto_pid_mongod, sig);
- }
-}
-
-static int
-auto_spawn (const char *prog, char *argv[])
-{
- struct stat statb;
- pid_t pid;
-
- /*
- * The stat check is purely so that common errors, such as ENOENT
- * if the program is not available, were printed before the fork.
- * This serves no security purpose but only makes stderr more tidy.
- */
- if (stat(prog, &statb) < 0) {
- error (0, errno, "stat %s failed", prog);
- return -1;
- }
- if (!S_ISREG(statb.st_mode)) {
- error (0, 0, "path %s is not a regular file", prog);
- return -1;
- }
-
- pid = fork();
- if (pid < 0) {
- error (0, errno, "fork failed");
- return -1;
- }
-
- if (pid == 0) {
- execvp(prog, argv);
- error (EXIT_FAILURE, errno, "failed to run command %s", prog);
- }
-
- /*
- * This is where you'd normally run waitpid for your daemon, so that
- * argument check failures were caught at least. In case of mongod,
- * daemonizing it is a whole can of worms, so we do not. On the
- * upside, it stays on our session (and process group) and dies
- * cleanly on keyboard interrupt.
- */
-
- return pid;
-}
-
-static int
-auto_test_mongod(void)
-{
- union {
- struct sockaddr_in a4;
- struct sockaddr a;
- } addr;
- int sfd;
- int rc;
-
- /*
- * We hardcode IPv4 because Mongo often listens on IPv4 only.
- */
- memset(&addr, 0, sizeof(addr));
- addr.a4.sin_family = AF_INET;
- addr.a4.sin_port = htons(auto_db_port);
- addr.a4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-
- DPRINTF("trying to connect to mongod (host 127.0.0.1 port %u) ...\n",
- auto_db_port);
-
- sfd = socket(addr.a.sa_family, SOCK_STREAM, 0);
- if (sfd < 0) {
- error(0, errno, "socket");
- return -1;
- }
-
- rc = connect(sfd, &addr.a, sizeof(addr.a4));
- if (rc != 0) {
- DPRINTF("connect: %s\n", strerror(errno));
- close(sfd);
- return 1;
- }
-
- close(sfd);
- return 0;
-}
-
-static int
-auto_wait_mongod(void)
-{
- struct timespec ts;
- time_t start_time;
- int rc;
-
- start_time = time(NULL);
- for (;;) {
- rc = auto_test_mongod();
- if (rc == 0)
- break;
- if (time(NULL) >= start_time + 20) {
- error(0, 0, "failed to verify mongod using port %s",
- auto_arg_port);
- return -1;
- }
-
- ts.tv_sec = 1;
- ts.tv_nsec = 0;
- nanosleep(&ts, NULL);
- }
- DPRINTF("mongod went up after %ld s\n", (long)time(NULL) - start_time);
-
- return 0;
-}
-
-static void
-auto_action (int sig, siginfo_t *info, void *uctx)
-{
- (void) info;
- (void) uctx;
-
- if (sig == SIGSEGV || sig == SIGILL || sig == SIGFPE || sig == SIGBUS) {
- auto_kill_mongod(SIGTERM);
- }
- else {
- auto_kill_mongod(info->si_signo);
- }
-}
-
-static int
-auto_set_sig (void)
-{
- struct sigaction actb;
-
- memset(&actb, 0, sizeof(struct sigaction));
- actb.sa_flags |= SA_SIGINFO;
- actb.sa_sigaction = auto_action;
-
- /* Not trapping SIGINT or SIGHUP since mongo is in our session. */
- if (sigaction(SIGTERM, &actb, NULL) ||
- sigaction(SIGSEGV, &actb, NULL) ||
- sigaction(SIGILL, &actb, NULL) ||
- sigaction(SIGFPE, &actb, NULL) ||
- sigaction(SIGBUS, &actb, NULL) ||
- sigaction(SIGABRT, &actb, NULL)) {
- error(0, errno, "sigaction");
- return -1;
- }
- return 0;
-}
-
-static void
-auto_stop (void)
-{
- auto_kill_mongod(SIGTERM);
-}
-
-int
-auto_start (int dbport)
-{
- int rc;
- char **earg;
- int pid;
-
- auto_db_port = dbport;
- snprintf(auto_arg_port, sizeof(auto_arg_port), "%u", dbport);
-
- if (auto_prepare_area() < 0)
- return -1;
-
- rc = auto_test_mongod();
- if (rc < 0)
- return -1;
-
- /*
- * This is a trick. The auto_test_mongod() merely connects to a TCP
- * port, and does not execute a NO-OP in Mongo. Therefore, it succeeds
- * if a foreign application is listening on our private port.
- * We abort because we do not want anyone listening there.
- */
- if (rc == 0) {
- error (0, 0, "something is listening on port %s,"
- " not auto-starting Mongo", auto_arg_port);
- return -1;
- }
-
- DPRINTF("auto-starting mongod\n");
- earg = (verbose
- ? (char **) auto_arg_mongod
- : (char **) auto_arg_mongod_quiet);
- pid = auto_spawn(AUTO_BIN_MONGOD, earg);
- if (pid < 0)
- return -1;
- auto_pid_mongod = pid;
- if (auto_wait_mongod() < 0) {
- auto_kill_mongod(SIGTERM);
- return -1;
- }
-
- if (auto_set_sig() < 0) {
- auto_kill_mongod(SIGTERM);
- return -1;
- }
- if (atexit(auto_stop) != 0) {
- error (0, 0, "atexit failed for auto_stop");
- auto_kill_mongod(SIGTERM);
- return -1;
- }
-
- DPRINTF("mongod listens on port %u\n", dbport);
- return 0;
-}
diff --git a/autogen.sh b/autogen.sh
deleted file mode 100755
index 56f9b9e..0000000
--- a/autogen.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-./bootstrap "$@"
diff --git a/backend.c b/backend.c
deleted file mode 100644
index 003406f..0000000
--- a/backend.c
+++ /dev/null
@@ -1,1375 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include <config.h>
-
-#include <fcntl.h>
-#include <poll.h>
-#include <pthread.h>
-#include <regex.h>
-#include <semaphore.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/wait.h>
-#include <errno.h>
-#include <error.h>
-
-#include <microhttpd.h>
-#include <curl/curl.h>
-#include <hstor.h>
-#include <glib.h>
-
-#define GLOBALS_IMPL
-#include "iwh.h"
-#include "meta.h"
-#include "setup.h"
-#include "template.h"
-#include "mpipe.h"
-#include "backend.h"
-#include "state_defs.h"
-
-struct hstor_client *hstor;
-
-/***** Generic module stuff, not specific to one back end *****/
-
-/*
- * Sizes for internal string buffers. In general, ADDR_SIZE needs to be
- * big enough to hold a hostname, a port number, a bucket and key (each
- * MAX_FIELD_LEN=64) and some punctuation. Header size needs to be big
- * enough to hold the header name plus a CF token (32 bytes).
- */
-#define ADDR_SIZE 256
-#define HEADER_SIZE 64
-
-#define S3_IMAGE_PATTERN "^IMAGE[[:blank:]]+([^[:space:]]+)"
-#define S3_ERROR_PATTERN "^ERROR[[:blank:]]+([^[:space:]]+)"
-
-regex_t s3_success_pat;
-regex_t s3_failure_pat;
-int regex_ok = FALSE;
-
-void
-backend_init (void)
-{
- regex_ok = TRUE;
-
- if (regcomp(&s3_success_pat,S3_IMAGE_PATTERN,REG_EXTENDED) != 0){
- DPRINTF("could not compile S3 success pattern\n");
- regex_ok = FALSE;
- }
-
- if (regcomp(&s3_failure_pat,S3_ERROR_PATTERN,REG_EXTENDED) != 0){
- DPRINTF("could not compile S3 failure pattern\n");
- regex_ok = FALSE;
- }
-}
-
-/***** Stub functions for unimplemented stuff. *****/
-
-static void
-bad_init (provider_t *prov)
-{
- (void)prov;
-
- DPRINTF("*** bad call to %sn",__func__);
-}
-
-static void *
-bad_get_child (void * ctx)
-{
- backend_thunk_t *tp = (backend_thunk_t *)ctx;
- my_state *ms = tp->parent;
-
- DPRINTF("*** bad call to %sn",__func__);
- pipe_prod_siginit(&ms->pipe,-1);
- return NULL;
-}
-
-static void *
-bad_put_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
-
- DPRINTF("*** bad call to %sn",__func__);
- pipe_cons_siginit(ps, -1);
- free(pp);
- return THREAD_FAILED;
-}
-
-static void *
-bad_cache_child (void * ctx)
-{
- (void)ctx;
-
- DPRINTF("*** bad call to %sn",__func__);
- return NULL;
-}
-
-static int
-bad_delete (const provider_t *prov, const char *bucket, const char *key,
- const char *url)
-{
- (void)prov;
- (void)bucket;
- (void)key;
- (void)url;
-
- DPRINTF("*** bad call to %sn",__func__);
- return MHD_HTTP_BAD_REQUEST;
-}
-
-static int
-bad_bcreate (const provider_t *prov, const char *bucket)
-{
- (void)prov;
- (void)bucket;
-
- DPRINTF("*** bad call to %sn",__func__);
- return MHD_HTTP_NOT_IMPLEMENTED;
-}
-
-static int
-bad_register (my_state *ms, const provider_t *prov, const char *next,
- Hash_table *args)
-{
- (void)ms;
- (void)prov;
- (void)next;
- (void)args;
-
- DPRINTF("*** bad call to %sn",__func__);
- return MHD_HTTP_NOT_IMPLEMENTED;
-}
-
-/***** Generic functions shared by the HTTP back ends. */
-
-/* Invoked from S3/CURL/CF. */
-static size_t
-http_get_prod (void *ptr, size_t size, size_t nmemb, void *stream)
-{
- size_t total = size * nmemb;
- pipe_shared *ps = stream;
-
- DPRINTF("producer posting %zu bytes as %ldn",total,ps->sequence+1);
- pipe_prod_signal(ps,ptr,total);
-
- DPRINTF("producer finished chunkn");
- return total;
-}
-
-/* Invoked from S3/CURL/CF. */
-static size_t
-http_put_cons (void *ptr, size_t size, size_t nmemb, void *stream)
-{
- size_t total = size * nmemb;
- pipe_private *pp = stream;
- pipe_shared *ps = pp->shared;
- size_t done;
-
- DPRINTF("consumer asked to read %zun",total);
-
- if (!pipe_cons_wait(pp)) {
- return 0;
- }
-
- DPRINTF("consumer offset %zu into %zun",
- pp->offset, ps->data_len);
- done = ps->data_len - pp->offset;
- if (done > total) {
- done = total;
- }
- memcpy(ptr,(char *)(ps->data_ptr)+pp->offset,done);
- pp->offset += done;
- DPRINTF("consumer copied %zu, new offset %zun",
- done, pp->offset);
- if (pp->offset == ps->data_len) {
- DPRINTF("consumer finished chunkn");
- pipe_cons_signal(pp, 0);
- }
-
- return done;
-}
-
-/***** S3-specific functions *****/
-
-static void
-s3_init (provider_t *prov)
-{
- char svc_acc[128];
- int chars;
-
- chars = snprintf(svc_acc,sizeof(svc_acc),"%s:%u",prov->host,prov->port);
- if (chars >= (int)sizeof(svc_acc)) {
- error(0,0,"hostname %s too long in %s",prov->host,__func__);
- return;
- }
- hstor = hstor_new(svc_acc,prov->host,prov->username,prov->password);
- if (hstor) {
- if (verbose) {
- hstor->verbose = 1;
- }
- }
- else {
- DPRINTF("could not create S3 clientn");
- }
-}
-
-/* Start an S3 _producer_. */
-static void *
-s3_get_child (void * ctx)
-{
- backend_thunk_t *tp = (backend_thunk_t *)ctx;
- my_state *ms = tp->parent;
-
- /* TBD: check existence before calling siginit */
- pipe_prod_siginit(&ms->pipe,0);
-
- hstor_get(hstor,ms->bucket,ms->key,http_get_prod,&ms->pipe,0);
- /* TBD: check return value */
-
- pipe_prod_finish(&ms->pipe);
-
- DPRINTF("producer exitingn");
- return NULL;
-}
-
-/* Start an S3 _consumer_. */
-static void *
-s3_put_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- curl_off_t llen;
- const char *clen;
- bool rcb;
-
- llen = (curl_off_t)MHD_SIZE_UNKNOWN;
- if (ms->be_flags & BACKEND_GET_SIZE) {
- clen = MHD_lookup_connection_value(
- ms->conn, MHD_HEADER_KIND, "Content-Length");
- if (clen) {
- llen = strtoll(clen,NULL,10);
- }
- else {
- error (0, 0, "missing Content-Length");
- }
- }
-
- pipe_cons_siginit(ps, 0);
- rcb = hstor_put(hstor,ms->bucket,ms->key,http_put_cons,llen,pp,NULL);
- if (!rcb) {
- DPRINTF("%s returning with errorn",__func__);
- pipe_cons_siginit(ps, -1);
- free(pp);
- return THREAD_FAILED;
- }
-
- DPRINTF("%s returningn",__func__);
- free(pp);
- return NULL;
-}
-
-static int
-s3_delete (const provider_t *prov, const char *bucket, const char *key,
- const char *url)
-{
- (void)prov;
- (void)url;
-
- hstor_del(hstor,bucket,key);
- /* TBD: check return value */
-
- return MHD_HTTP_OK;
-}
-
-static int
-s3_bcreate (const provider_t *prov, const char *bucket)
-{
- (void)prov;
-
- DPRINTF("creating bucket %sn",bucket);
-
- if (!hstor_add_bucket(hstor,bucket)) {
- DPRINTF(" bucket create failedn");
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- return MHD_HTTP_OK;
-}
-
-static const char *
-s3_init_tmpfile (const char *value)
-{
- char *path;
- int fd;
- size_t len;
- ssize_t written;
-
- /* FIXME: do not hard-code /tmp. */
- path = strdup("/tmp/iwtmp.XXXXXX");
- if (!path) {
- return NULL;
- }
-
- fd = mkstemp(path);
- if (fd < 0) {
- error (0, errno, "%s: failed to create file from template", path);
- free(path);
- return NULL;
- }
-
- len = strlen(value);
- if (len > 0) {
- written = write(fd,value,len);
- close(fd);
- if (written != (ssize_t)len) {
- if (written < 0) {
- error (0, errno, "failed to write to %s", path);
- }
- else {
- error (0, errno,
- "invalid write length %zd in %s",
- written, __func__);
- }
- unlink(path);
- free(path);
- return NULL;
- }
- }
-
- return path;
-}
-
-static int
-s3_register (my_state *ms, const provider_t *prov, const char *next,
- Hash_table *args)
-{
- char *kernel = kv_hash_lookup(args,"kernel");
- char *ramdisk = kv_hash_lookup(args,"ramdisk");
- char *api_key;
- char *api_secret;
- const char *ami_cert;
- const char *ami_key;
- const char *ami_uid;
- const char *argv[12];
- int argc = 0;
- pid_t pid;
- int organ[2];
- FILE *fp;
- char buf[ADDR_SIZE];
- char *cval = NULL;
- char *kval = NULL;
- int rc = MHD_HTTP_BAD_REQUEST;
- char *ami_bkt;
- char ami_id_buf[64];
- regmatch_t match[2];
-
- if (!regex_ok) {
- return MHD_HTTP_BAD_REQUEST;
- }
-
- if (next) {
- DPRINTF("S3 register with next!=NULLn");
- goto cleanup;
- }
-
- DPRINTF("*** register %s/%s via %s (%s:%d)n",
- ms->bucket, ms->key, prov->name, prov->host, prov->port);
- if (kernel) {
- DPRINTF(" (using kernel %s)n",kernel);
- }
- if (ramdisk) {
- DPRINTF(" (using ramdisk %s)n",ramdisk);
- }
-
- api_key = kv_hash_lookup(args,"api-key");
- if (!api_key) {
- api_key = (char *)prov->username;
- if (!api_key) {
- error (0, 0, "missing EC2 API key");
- goto cleanup;
- }
- }
-
- api_secret = kv_hash_lookup(args,"api-secret");
- if (!api_secret) {
- api_secret = (char *)prov->password;
- if (!prov->password) {
- error (0, 0, "missing EC2 API secret");
- goto cleanup;
- }
- }
-
- cval = kv_hash_lookup(args,"ami-cert");
- if (cval) {
- ami_cert = s3_init_tmpfile(cval);
- if (!ami_cert) {
- goto cleanup;
- }
- }
- else {
- ami_cert = get_provider_value(prov,"ami-cert");
- if (!ami_cert) {
- error (0, 0, "missing EC2 AMI cert");
- goto cleanup;
- }
- }
-
- kval = kv_hash_lookup(args,"ami-key");
- if (kval) {
- ami_key = s3_init_tmpfile(kval);
- if (!ami_cert) {
- goto cleanup;
- }
- }
- else {
- ami_key = get_provider_value(prov,"ami-key");
- if (!ami_key) {
- error (0, 0, "missing EC2 AMI key");
- goto cleanup;
- }
- }
-
- ami_uid = kv_hash_lookup(args,"ami-uid");
- if (!ami_uid) {
- ami_uid = get_provider_value(prov,"ami-uid");
- if (!ami_uid) {
- error (0, 0, "missing EC2 AMI uid");
- goto cleanup;
- }
- }
-
- ami_bkt = kv_hash_lookup(args,"ami-bkt");
- if (!ami_bkt) {
- ami_bkt = ms->bucket;
- }
-
- /*
- * This is the point where we go from validation to execution. If we
- * were double-forking so this could all be asynchronous, or for that
- * matter to return an early 100-continue, this would probably be the
- * place to do it. Even without that, we set the ami-id here so that
- * the caller can know things are actually in progress.
- */
- sprintf(ami_id_buf,"pending %lld",(long long)time(NULL));
- DPRINTF("temporary ami-id = \"%s\"\n",ami_id_buf);
- (void)meta_set_value(ms->bucket,ms->key,"ami-id",ami_id_buf);
- rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
-
- const char *cmd = "dc-register-image";
- argv[argc++] = cmd;
- argv[argc++] = ms->bucket;
- argv[argc++] = ms->key;
- argv[argc++] = api_key;
- argv[argc++] = api_secret;
- argv[argc++] = ami_cert;
- argv[argc++] = ami_key;
- argv[argc++] = ami_uid;
- argv[argc++] = ami_bkt;
- argv[argc++] = kernel ? kernel : "_default_";
- argv[argc++] = ramdisk ? ramdisk : "_default_";
- argv[argc] = NULL;
-
- DPRINTF("api-key = %sn",api_key);
- DPRINTF("api-secret = %sn",api_secret);
- DPRINTF("ami-cert = %sn",ami_cert);
- DPRINTF("ami-key = %sn",ami_key);
- DPRINTF("ami-uid = %sn",ami_uid);
- DPRINTF("ami-bkt = %sn",ami_bkt);
-
- if (pipe(organ) < 0) {
- error (0, errno, "pipe creation failed");
- goto cleanup;
- }
-
- pid = fork();
- if (pid < 0) {
- error (0, errno, "fork failed");
- close(organ[0]);
- close(organ[1]);
- goto cleanup;
- }
-
- if (pid == 0) {
- (void)dup2(organ[1],STDOUT_FILENO);
- (void)dup2(organ[1],STDERR_FILENO);
- execvp(cmd, (char* const*)argv);
- error (EXIT_FAILURE, errno, "failed to run command %s", cmd);
- }
-
- DPRINTF("waiting for child...n");
- if (waitpid(pid,NULL,0) < 0) {
- error (0, errno, "waitpid failed");
- }
- /* TBD: check identity/status from waitpid */
- DPRINTF("...child exitedn");
-
- close(organ[1]);
- fp = fdopen(organ[0],"r");
- if (!fp) {
- DPRINTF("could not open parent pipen");
- close(organ[0]);
- goto cleanup;
- }
- while (fgets(buf,sizeof(buf)-1,fp)) {
- buf[sizeof(buf)-1] = '\0';
- if (regexec(&s3_success_pat,buf,2,match,0) == 0) {
- buf[match[1].rm_eo] = '\0';
- DPRINTF("found AMI ID: %s\n",buf+match[1].rm_so);
- sprintf(ami_id_buf,"OK %.60s",buf+match[1].rm_so);
- rc = MHD_HTTP_OK;
- }
- else if (regexec(&s3_failure_pat,buf,2,match,0) == 0) {
- buf[match[1].rm_eo] = '\0';
- DPRINTF("found error marker: %s\n",buf+match[1].rm_so);
- sprintf(ami_id_buf,"failed %.56s",buf+match[1].rm_so);
- rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- else {
- DPRINTF("ignoring line: <%s>\n",buf);
- }
- }
- fclose(fp);
-
-cleanup:
- /*
- * This is a bit tricky. If we found the cert in the HTTP request and
- * succeeded in creating a temp file, then this condition will succeed.
- * If we failed to create the temp file, or never found a cert
- * anywhere, there will be no ami_cert to clean up. If we got a cert
- * from the config, then ami_cert will be set but we'll (correctly)
- * skip cleanup because cval is null.
- */
- if (cval && ami_cert) {
- unlink(ami_cert);
- free((char *)ami_cert);
- }
- /* Same reasoning as above, with kval/ami_key. */
- if (kval && ami_key) {
- unlink(ami_key);
- free((char *)ami_key);
- }
- (void)meta_set_value(ms->bucket,ms->key,"ami-id",ami_id_buf);
-
- return rc;
-}
-
-/***** CURL-specific functions *****/
-
-static void
-curl_init (provider_t *prov)
-{
- (void)prov;
-}
-
-/* Start a CURL _producer_. */
-static void *
-curl_get_child (void * ctx)
-{
- char fixed[ADDR_SIZE];
- backend_thunk_t *tp = (backend_thunk_t *)ctx;
- my_state *ms = tp->parent;
- provider_t *prov = tp->prov;
- CURL *curl;
- int chars;
-
- curl = curl_easy_init();
- if (!curl) {
- pipe_prod_siginit(&ms->pipe,-1);
- return NULL; /* TBD: flag error somehow */
- }
- if (ms->from_master) {
- chars = snprintf(fixed,sizeof(fixed),"http://%s:%u%s",
- master_host, master_port, ms->url);
- }
- else {
- chars = snprintf(fixed,sizeof(fixed),"http://%s:%u%s",
- prov->host, prov->port, ms->url);
- }
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_WRITEFUNCTION, http_get_prod);
- curl_easy_setopt(curl,CURLOPT_WRITEDATA,&ms->pipe);
- pipe_prod_siginit(&ms->pipe,0);
-
- curl_easy_perform(curl);
- curl_easy_getinfo(curl,CURLINFO_RESPONSE_CODE,&ms->rc);
- pipe_prod_finish(&ms->pipe);
-
- DPRINTF("producer exitingn");
- curl_easy_cleanup(curl);
- return NULL;
-}
-
-/* Start a CURL _consumer_. */
-static void *
-curl_put_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- provider_t *prov = pp->prov;
- curl_off_t llen;
- char fixed[ADDR_SIZE];
- CURL *curl;
- const char *clen;
- struct curl_slist *slist = NULL;
- int chars;
-
- llen = (curl_off_t)MHD_SIZE_UNKNOWN;
- if (ms->be_flags & BACKEND_GET_SIZE) {
- clen = MHD_lookup_connection_value(
- ms->conn, MHD_HEADER_KIND, "Content-Length");
- if (clen) {
- llen = strtoll(clen,NULL,10);
- }
- else {
- error (0, 0, "missing Content-Length");
- }
- }
-
- /*
- * This is how the iwhd at the other end knows this is a replication
- * request and not just a PUT from some random user.
- * TBD: add some auth* for this.
- */
- slist = curl_slist_append(slist,"X-redhat-role: master");
-
- curl = curl_easy_init();
- if (!curl) {
- pipe_cons_siginit(ps, -1);
- free(pp);
- return THREAD_FAILED;
- }
- chars = snprintf(fixed,sizeof(fixed),
- "http://%s:%u/%s/%s",prov->host,prov->port,ms->bucket,ms->key);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_UPLOAD,1);
- curl_easy_setopt(curl,CURLOPT_INFILESIZE_LARGE,llen);
- curl_easy_setopt(curl,CURLOPT_READFUNCTION,http_put_cons);
- curl_easy_setopt(curl,CURLOPT_READDATA,pp);
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
- pipe_cons_siginit(ps, 0);
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
-
- DPRINTF("%s returningn",__func__);
- free(pp);
- return NULL;
-}
-
-/* Start a CURL cache consumer. */
-static void *
-curl_cache_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- provider_t *prov = pp->prov;
- char fixed[ADDR_SIZE];
- CURL *curl;
- char *slash;
- char *my_url = strdup(ms->url);
- int chars;
-
- if (!my_url) {
- return THREAD_FAILED;
- }
-
- curl = curl_easy_init();
- if (!curl) {
- free(my_url);
- pipe_cons_siginit(ps,-1);
- return THREAD_FAILED;
- }
- chars = snprintf(fixed,sizeof(fixed),
- "http://%s:%u%s",prov->host,prov->port,ms->url);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_UPLOAD,1);
- curl_easy_setopt(curl,CURLOPT_INFILESIZE_LARGE,
- (curl_off_t)MHD_SIZE_UNKNOWN);
- curl_easy_setopt(curl,CURLOPT_READFUNCTION,http_put_cons);
- curl_easy_setopt(curl,CURLOPT_READDATA,pp);
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
-
- slash = strchr(my_url+1,'/');
- if (slash) {
- *slash = '\0';
- meta_got_copy(my_url+1,slash+1,me);
- }
-
- free(my_url);
- return NULL;
-}
-
-static int
-curl_delete (const provider_t *prov, const char *bucket, const char *key,
- const char *url)
-{
- CURL *curl;
- char fixed[ADDR_SIZE];
- int chars;
-
- (void)bucket;
- (void)key;
-
- curl = curl_easy_init();
- if (!curl) {
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- chars = snprintf(fixed,sizeof(fixed),
- "http://%s:%u%s",prov->host,prov->port,url);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"DELETE");
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
-
- return MHD_HTTP_OK;
-}
-
-static int
-curl_bcreate (const provider_t *prov, const char *bucket)
-{
- char addr[ADDR_SIZE];
- int chars;
- CURL *curl;
-
- chars = snprintf(addr,sizeof(addr),"http://%s:%d/%s",
- prov->host,prov->port,bucket);
- if (chars >= (int)sizeof(addr)) {
- error(0,0,"path too long in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- curl = curl_easy_init();
- if (!curl) {
- error(0,errno,"no memory in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- curl_easy_setopt(curl,CURLOPT_URL,addr);
- curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"PUT");
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
- return MHD_HTTP_OK;
-}
-
-/*
- * We can proxy through any number of CURL/HTTP warehouses, but the chain
- * eventually has to terminate at an S3 back end.
- */
-
-static int
-curl_register (my_state *ms, const provider_t *prov, const char *next,
- Hash_table *args)
-{
- char fixed[ADDR_SIZE];
- CURL *curl;
- struct curl_httppost *first = NULL;
- struct curl_httppost *last = NULL;
- char *kernel = kv_hash_lookup(args,"kernel");
- char *ramdisk = kv_hash_lookup(args,"ramdisk");
- int chars;
-
- if (!next) {
- DPRINTF("CURL register with next==NULLn");
- return MHD_HTTP_BAD_REQUEST;
- }
-
- DPRINTF("*** PROXY registration request for %s/%s to %s (%s:%d)n",
- ms->bucket, ms->key, prov->name, prov->host, prov->port);
-
- curl = curl_easy_init();
- if (!curl) {
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- chars = snprintf(fixed,sizeof(fixed),"http://%s:%d/%s/%s",
- prov->host,prov->port, ms->bucket, ms->key);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_formadd(&first,&last,
- CURLFORM_COPYNAME, "op",
- CURLFORM_COPYCONTENTS, "register",
- CURLFORM_END);
- curl_formadd(&first,&last,
- CURLFORM_COPYNAME, "site",
- CURLFORM_COPYCONTENTS, next,
- CURLFORM_END);
- if (kernel) {
- curl_formadd(&first,&last,
- CURLFORM_COPYNAME, "kernel",
- CURLFORM_COPYCONTENTS, kernel,
- CURLFORM_END);
- }
- if (ramdisk) {
- curl_formadd(&first,&last,
- CURLFORM_COPYNAME, "ramdisk",
- CURLFORM_COPYCONTENTS, ramdisk,
- CURLFORM_END);
- }
- curl_easy_setopt(curl,CURLOPT_HTTPPOST,first);
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
-
- return MHD_HTTP_OK;
-}
-
-/***** CF-specific functions *****/
-
-/*
- * TBD: refactor to maximize common code. Despite the de-duplication between
- * this module and replica.c, there's still a lot more that could be done to
- * combine xxx_yyy_child for xxx={http,cf} and yyy={put,cache}. A rough
- * outline might be:
- *
- * if xxx=cf, call CF-specific routine to add CF auth header
- * do common curl setup and execution
- * if yyy=cache, call meta_got_copy
- *
- * There might even be an opportunity to combine code for put and bucket
- * create in some cases, since the only difference is the URL and the
- * lack of a data transfer in the bucket-create case.
- */
-
-static size_t
-cf_writer (void *ptr ATTRIBUTE_UNUSED, size_t size, size_t nmemb,
- void *stream ATTRIBUTE_UNUSED)
-{
- return size * nmemb;
-}
-
-static size_t
-cf_header (void *ptr, size_t size, size_t nmemb, void *stream)
-{
- char *next;
- char *sctx;
- provider_t *prov = (provider_t *)stream;
-
- next = strtok_r(ptr,":",&sctx);
- if (next) {
- if (!strcasecmp(next,"X-Storage-Url")) {
- next = strtok_r(NULL," \n\r",&sctx);
- if (next) {
- DPRINTF("got CF URL %s\n",next);
- /* NB: after this, original "host" is gone. */
- free((char *)prov->host);
- prov->host = strdup(next);
- }
- }
- else if (!strcasecmp(next,"X-Storage-Token")) {
- next = strtok_r(NULL," \n\r",&sctx);
- if (next) {
- DPRINTF("got CF token %s\n",next);
- prov->token = strdup(next);
- }
- }
- }
- return size * nmemb;
-}
-
-static struct curl_slist *
-cf_add_token (struct curl_slist *in_slist, const char *token)
-{
- int chars;
- char auth_hdr[HEADER_SIZE];
-
- if (!token) {
- return in_slist;
- }
-
- chars = snprintf(auth_hdr,sizeof(auth_hdr),"X-Auth-Token: %s",token);
- if (chars >= (int)sizeof(auth_hdr)) {
- error(0,0,"auth_hdr too long");
- return in_slist;
- }
-
- return curl_slist_append(NULL,auth_hdr);
-}
-
-static void
-cf_init (provider_t *prov)
-{
- CURL *curl;
- char addr[ADDR_SIZE];
- char auth_user[HEADER_SIZE];
- char auth_key[HEADER_SIZE];
- struct curl_slist *slist;
- int chars;
-
- if (prov->token) {
- return;
- }
-
- chars = snprintf(addr,sizeof(addr),"https://%s:%u/v1.0",
- prov->host, prov->port);
- if (chars >= (int)sizeof(addr)) {
- error(0,0,"API URL too long in %s",__func__);
- return;
- }
-
- chars = snprintf(auth_user,sizeof(auth_user),"X-Auth-User: %s",
- prov->username);
- if (chars >= (int)sizeof(auth_user)) {
- error(0,0,"auth_user too long in %s",__func__);
- return;
- }
-
- chars = snprintf(auth_key,sizeof(auth_key),"X-Auth-Key: %s",
- prov->password);
- if (chars >= (int)sizeof(auth_key)) {
- error(0,0,"auth_key too long in %s",__func__);
- return;
- }
-
- curl = curl_easy_init();
- curl_easy_setopt(curl,CURLOPT_URL,addr);
- curl_easy_setopt(curl,CURLOPT_WRITEFUNCTION,cf_writer);
- curl_easy_setopt(curl,CURLOPT_HEADERFUNCTION,cf_header);
- curl_easy_setopt(curl,CURLOPT_WRITEHEADER,prov);
- slist = curl_slist_append(NULL,auth_user);
- slist = curl_slist_append(slist,auth_key);
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
- curl_easy_perform(curl);
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
-
- DPRINTF("CF token = %s\n",prov->token);
-}
-
-/* Start a CloudFiles _producer_. */
-static void *
-cf_get_child (void * ctx)
-{
- char fixed[ADDR_SIZE];
- backend_thunk_t *tp = (backend_thunk_t *)ctx;
- my_state *ms = tp->parent;
- provider_t *prov = tp->prov;
- CURL *curl;
- struct curl_slist *slist = NULL;
- int chars;
-
- slist = cf_add_token(slist,prov->token);
- if (!slist) {
- return THREAD_FAILED;
- }
- /*
- * Rackspace doesn't clearly document that you'll get
- * 412 (Precondition Failed) if you omit this.
- */
- slist = curl_slist_append(slist,
- "Content-Type: binary/octet-stream");
-
- curl = curl_easy_init();
- if (!curl) {
- pipe_prod_siginit(&ms->pipe,-1);
- curl_slist_free_all(slist);
- return NULL; /* TBD: flag error somehow */
- }
- chars = snprintf(fixed,sizeof(fixed),"%s%s", prov->host, ms->url);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_WRITEFUNCTION, http_get_prod);
- curl_easy_setopt(curl,CURLOPT_WRITEDATA,&ms->pipe);
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
- pipe_prod_siginit(&ms->pipe,0);
-
- curl_easy_perform(curl);
- curl_easy_getinfo(curl,CURLINFO_RESPONSE_CODE,&ms->rc);
- pipe_prod_finish(&ms->pipe);
-
- DPRINTF("producer exitingn");
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
- return NULL;
-}
-
-/* Start a CloudFiles _consumer_. */
-static void *
-cf_put_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- provider_t *prov = pp->prov;
- curl_off_t llen;
- char fixed[ADDR_SIZE];
- CURL *curl;
- const char *clen;
- struct curl_slist *slist = NULL;
- int chars;
-
- slist = cf_add_token(slist,prov->token);
- if (!slist) {
- return THREAD_FAILED;
- }
-
- llen = (curl_off_t)MHD_SIZE_UNKNOWN;
- if (ms->be_flags & BACKEND_GET_SIZE) {
- clen = MHD_lookup_connection_value(
- ms->conn, MHD_HEADER_KIND, "Content-Length");
- if (clen) {
- llen = strtoll(clen,NULL,10);
- }
- else {
- error (0, 0, "missing Content-Length");
- }
- }
-
- curl = curl_easy_init();
- if (!curl) {
- pipe_cons_siginit(ps, -1);
- free(pp);
- curl_slist_free_all(slist);
- return THREAD_FAILED;
- }
- chars = snprintf(fixed,sizeof(fixed),
- "%s/%s/%s",prov->host,ms->bucket,ms->key);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_UPLOAD,1);
- curl_easy_setopt(curl,CURLOPT_INFILESIZE_LARGE,llen);
- curl_easy_setopt(curl,CURLOPT_READFUNCTION,http_put_cons);
- curl_easy_setopt(curl,CURLOPT_READDATA,pp);
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
- pipe_cons_siginit(ps, 0);
-
- curl_easy_perform(curl);
- curl_easy_getinfo(curl,CURLINFO_RESPONSE_CODE,&ms->rc);
-
- DPRINTF("%s returningn",__func__);
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
- free(pp);
- return NULL;
-}
-
-static int
-cf_delete (const provider_t *prov,
- const char *bucket ATTRIBUTE_UNUSED,
- const char *key ATTRIBUTE_UNUSED,
- const char *url)
-{
- CURL *curl;
- char fixed[ADDR_SIZE];
- long rc;
- struct curl_slist *slist = NULL;
- int chars;
-
- slist = cf_add_token(slist,prov->token);
- if (!slist) {
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- curl = curl_easy_init();
- if (!curl) {
- curl_slist_free_all(slist);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- chars = snprintf(fixed,sizeof(fixed),"%s%s",prov->host,url);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_CUSTOMREQUEST,"DELETE");
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
-
- curl_easy_perform(curl);
- curl_easy_getinfo(curl,CURLINFO_RESPONSE_CODE,&rc);
- DPRINTF("%s: rc = %ldn",__func__,rc);
-
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
-
- return MHD_HTTP_OK;
-}
-
-static size_t
-cf_null_reader (void *ptr ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED,
- size_t nmemb ATTRIBUTE_UNUSED,
- void *stream ATTRIBUTE_UNUSED)
-{
- return 0;
-}
-
-static int
-cf_bcreate (const provider_t *prov, const char *bucket)
-{
- char fixed[ADDR_SIZE];
- CURL *curl;
- long rc;
- struct curl_slist *slist = NULL;
- int chars;
-
- slist = cf_add_token(slist,prov->token);
- if (!slist) {
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- curl = curl_easy_init();
- if (!curl) {
- curl_slist_free_all(slist);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- chars = snprintf(fixed,sizeof(fixed),"%s/%s",prov->host,bucket);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- curl_easy_setopt(curl,CURLOPT_URL,fixed);
- curl_easy_setopt(curl,CURLOPT_UPLOAD,1);
- curl_easy_setopt(curl,CURLOPT_INFILESIZE_LARGE,
- (curl_off_t)MHD_SIZE_UNKNOWN);
- curl_easy_setopt(curl,CURLOPT_READFUNCTION,cf_null_reader);
- curl_easy_setopt(curl,CURLOPT_HTTPHEADER,slist);
-
- curl_easy_perform(curl);
- curl_easy_getinfo(curl,CURLINFO_RESPONSE_CODE,&rc);
- DPRINTF("%s: rc = %ldn",__func__,rc);
-
- DPRINTF("%s returningn",__func__);
- curl_easy_cleanup(curl);
- curl_slist_free_all(slist);
- return MHD_HTTP_OK;
-}
-
-/***** FS-specific functions *****/
-
-static void
-fs_init (provider_t *prov)
-{
- DPRINTF("changing directory to %sn",prov->path);
- if (chdir(prov->path) < 0) {
- error(0,errno,"chdir failed, unsafe to continue");
- exit(!0); /* Value doesn't matter, as long as it's not zero. */
- }
-}
-
-/* Start an FS _producer_. */
-static void *
-fs_get_child (void * ctx)
-{
- backend_thunk_t *tp = (backend_thunk_t *)ctx;
- my_state *ms = tp->parent;
- int fd;
- char buf[1<<16];
- ssize_t bytes;
- char *file = ms->url+1;
-
- fd = open(file, O_RDONLY);
- if (fd < 0) {
- pipe_prod_siginit(&ms->pipe,-1);
- pipe_prod_finish(&ms->pipe);
- return THREAD_FAILED;
- }
-
- pipe_prod_siginit(&ms->pipe,0);
-
- for (;;) {
- bytes = read(fd,buf,sizeof(buf));
- if (bytes <= 0) {
- if (bytes < 0) {
- error (0, errno, "%s: read failed", file);
- }
- break;
- }
- pipe_prod_signal(&ms->pipe,buf,bytes);
- }
-
- close(fd);
- pipe_prod_finish(&ms->pipe);
-
- DPRINTF("producer exitingn");
- return NULL;
-}
-
-/* Start an FS _consumer_. */
-static void *
-fs_put_child (void * ctx)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- int fd;
- ssize_t bytes;
- size_t offset;
- char fixed[ADDR_SIZE];
- int chars;
-
- chars = snprintf(fixed,sizeof(fixed),"%s/%s",ms->bucket,ms->key);
- if (chars >= (int)sizeof(fixed)) {
- error(0,0,"path too long in %s",__func__);
- return NULL;
- }
- if (unlink(fixed) < 0) {
- error(0,errno,"unlink failed for %s (non-fatal)",fixed);
- }
- fd = open(fixed,O_WRONLY|O_CREAT|O_EXCL,0666);
- if (fd < 0) {
- pipe_cons_siginit(ps, errno);
- free(pp);
- return THREAD_FAILED;
- }
-
- pipe_cons_siginit(ps, 0);
-
- while (pipe_cons_wait(pp)) {
- for (offset = 0; offset < ps->data_len; offset += bytes) {
- bytes = write(fd,
- (char *)(ps->data_ptr)+offset,
- ps->data_len-offset);
- if (bytes <= 0) {
- if (bytes < 0) {
- error (0, errno, "%s: write failed",
- fixed);
- pipe_cons_signal(pp, errno);
- }
- else {
- pipe_cons_signal(pp, ENOSPC);
- }
- break;
- }
- }
- pipe_cons_signal(pp, 0);
- }
-
- close(fd);
-
- DPRINTF("%s returningn",__func__);
- free(pp);
- return NULL;
-}
-
-static int
-fs_delete (const provider_t *prov, const char *bucket, const char *key,
- const char *url)
-{
- (void)prov;
- (void)bucket;
- (void)key;
-
- if (unlink(url+1) < 0) {
- error (0, errno, "%s: failed to unlink", url+1);
- return MHD_HTTP_NOT_FOUND;
- }
-
- return MHD_HTTP_OK;
-}
-
-static int
-fs_bcreate (const provider_t *prov, const char *bucket)
-{
- (void)prov;
-
- DPRINTF("creating bucket %sn",bucket);
-
- if (mkdir(bucket,0700) < 0) {
- error (0, errno, "%s: failed to create directory", bucket);
- return MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
-
- return MHD_HTTP_OK;
-}
-
-/***** Function tables. ****/
-
-backend_func_tbl bad_func_tbl = {
- "uninitialized",
- bad_init,
- bad_get_child,
- bad_put_child,
- bad_cache_child,
- bad_delete,
- bad_bcreate,
- bad_register,
-};
-
-backend_func_tbl s3_func_tbl = {
- "S3",
- s3_init,
- s3_get_child,
- s3_put_child,
- bad_cache_child,
- s3_delete,
- s3_bcreate,
- s3_register,
-};
-
-backend_func_tbl curl_func_tbl = {
- "HTTP",
- curl_init,
- curl_get_child,
- curl_put_child,
- curl_cache_child,
- curl_delete,
- curl_bcreate,
- curl_register,
-};
-
-backend_func_tbl cf_func_tbl = {
- "CF",
- cf_init,
- cf_get_child,
- cf_put_child,
- bad_cache_child,
- cf_delete,
- cf_bcreate,
- bad_register,
-};
-
-backend_func_tbl fs_func_tbl = {
- "FS",
- fs_init,
- fs_get_child,
- fs_put_child,
- bad_cache_child,
- fs_delete,
- fs_bcreate,
- bad_register,
-};
diff --git a/backend.h b/backend.h
deleted file mode 100644
index 7edae08..0000000
--- a/backend.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#if !defined(_BACKEND_H)
-#define _BACKEND_H
-
-#include "state_defs.h"
-#include "hash.h"
-
-typedef void init_func_t (struct _provider *prov);
-/* Get provider from passed backend_thunk. */
-typedef void *get_child_func_t (void *);
-/* Get provider from passed pipe_private. */
-typedef void *put_child_func_t (void *);
-typedef void *cache_child_func_t (void *);
-/* Get provider as an argument. */
-typedef int delete_func_t (const struct _provider *prov,
- const char *bucket, const char *key,
- const char *url);
-typedef int bcreate_func_t (const struct _provider *prov,
- const char *bucket);
-typedef int register_func_t (my_state *ms,
- const struct _provider *prov,
- const char *next, Hash_table *args);
-
-typedef struct {
- const char *name;
- init_func_t *init_func;
- get_child_func_t *get_child_func;
- put_child_func_t *put_child_func;
- cache_child_func_t *cache_child_func;
- delete_func_t *delete_func;
- bcreate_func_t *bcreate_func;
- register_func_t *register_func;
-} backend_func_tbl;
-
-#define THREAD_FAILED ((void *)(-1))
-
-void backend_init (void);
-
-#endif
diff --git a/bootstrap b/bootstrap
deleted file mode 100755
index e9ec11e..0000000
--- a/bootstrap
+++ /dev/null
@@ -1,946 +0,0 @@
-#! /bin/sh
-# Print a version string.
-scriptversion=2011-01-21.16; # UTC
-
-# Bootstrap this package from checked-out sources.
-
-# Copyright (C) 2003-2011 Free Software Foundation, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Originally written by Paul Eggert. The canonical version of this
-# script is maintained as build-aux/bootstrap in gnulib, however, to
-# be useful to your project, you should place a copy of it under
-# version control in the top-level directory of your project. The
-# intent is that all customization can be done with a bootstrap.conf
-# file also maintained in your version control; gnulib comes with a
-# template build-aux/bootstrap.conf to get you started.
-
-# Please report bugs or propose patches to bug-gnulib(a)gnu.org.
-
-nl='
-'
-
-# Ensure file names are sorted consistently across platforms.
-LC_ALL=C
-export LC_ALL
-
-local_gl_dir=gl
-
-# Temporary directory names.
-bt='._bootmp'
-bt_regex=`echo "$bt"| sed 's/./[.]/g'`
-bt2=${bt}2
-me=$0
-
-usage() {
- cat <<EOF
-Usage: $me [OPTION]...
-Bootstrap this package from the checked-out sources.
-
-Options:
- --gnulib-srcdir=DIRNAME specify the local directory where gnulib
- sources reside. Use this if you already
- have gnulib sources on your machine, and
- do not want to waste your bandwidth downloading
- them again. Defaults to $GNULIB_SRCDIR
- --bootstrap-sync if this bootstrap script is not identical to
- the version in the local gnulib sources,
- update this script, and then restart it with
- /bin/sh or the shell $CONFIG_SHELL
- --no-bootstrap-sync do not check whether bootstrap is out of sync
- --copy copy files instead of creating symbolic links
- --force attempt to bootstrap even if the sources seem
- not to have been checked out
- --no-git do not use git to update gnulib. Requires that
- --gnulib-srcdir point to a correct gnulib snapshot
- --skip-po do not download po files
-
-If the file $me.conf exists in the same directory as this script, its
-contents are read as shell variables to configure the bootstrap.
-
-For build prerequisites, environment variables like $AUTOCONF and $AMTAR
-are honored.
-
-Running without arguments will suffice in most cases.
-EOF
-}
-
-# Configuration.
-
-# Name of the Makefile.am
-gnulib_mk=gnulib.mk
-
-# List of gnulib modules needed.
-gnulib_modules=
-
-# Any gnulib files needed that are not in modules.
-gnulib_files=
-
-# A function to be called to edit gnulib.mk right after it's created.
-# Override it via your own definition in bootstrap.conf.
-gnulib_mk_hook() { :; }
-
-# A function to be called after everything else in this script.
-# Override it via your own definition in bootstrap.conf.
-bootstrap_epilogue() { :; }
-
-# The command to download all .po files for a specified domain into
-# a specified directory. Fill in the first %s is the domain name, and
-# the second with the destination directory. Use rsync's -L and -r
-# options because the latest/%s directory and the .po files within are
-# all symlinks.
-po_download_command_format=-"rsync --delete --exclude '*.s1' -Lrtvz - 'translationproject.org::tp/latest/%s/' '%s'"
-
-extract_package_name='
- /^AC_INIT(/{
- /.*,.*,.*, */{
- s///
- s/[][]//g
- s/)$//
- p
- q
- }
- s/AC_INIT([*//
- s/]*,.*//
- s/^GNU //
- y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/
- s/[^A-Za-z0-9_]/-/g
- p
- }
-'
-package=`sed -n "$extract_package_name" configure.ac` || exit
-gnulib_name=lib$package
-
-build_aux=build-aux
-source_base=lib
-m4_base=m4
-doc_base=doc
-tests_base=tests
-
-# Extra files from gnulib, which override files from other sources.
-gnulib_extra_files="
- $build_aux/install-sh
- $build_aux/missing
- $build_aux/mdate-sh
- $build_aux/texinfo.tex
- $build_aux/depcomp
- $build_aux/config.guess
- $build_aux/config.sub
- doc/INSTALL
-"
-
-# Additional gnulib-tool options to use. Use "newline" to break lines.
-gnulib_tool_option_extras=
-
-# Other locale categories that need message catalogs.
-EXTRA_LOCALE_CATEGORIES=
-
-# Additional xgettext options to use. Use "
ewline" to break lines.
-XGETTEXT_OPTIONS='\- --flag=_:1:pass-c-format\- --flag=N_:1:pass-c-format\- --flag=error:3:c-format --flag=error_at_line:5:c-format\-'
-
-# Package bug report address and copyright holder for gettext files
-COPYRIGHT_HOLDER='Free Software Foundation, Inc.'
-MSGID_BUGS_ADDRESS=bug-$package(a)gnu.org
-
-# Files we don't want to import.
-excluded_files=
-
-# File that should exist in the top directory of a checked out hierarchy,
-# but not in a distribution tarball.
-checkout_only_file=README-hacking
-
-# Whether to use copies instead of symlinks.
-copy=false
-
-# Set this to '.cvsignore .gitignore' in bootstrap.conf if you want
-# those files to be generated in directories like lib/, m4/, and po/.
-# Or set it to 'auto' to make this script select which to use based
-# on which version control system (if any) is used in the source directory.
-vc_ignore=auto
-
-# Set this to true in bootstrap.conf to enable --bootstrap-sync by
-# default.
-bootstrap_sync=false
-
-# Use git to update gnulib sources
-use_git=true
-
-# find_tool ENVVAR NAMES...
-# -------------------------
-# Search for a required program. Use the value of ENVVAR, if set,
-# otherwise find the first of the NAMES that can be run (i.e.,
-# supports --version). If found, set ENVVAR to the program name,
-# die otherwise.
-find_tool ()
-{
- find_tool_envvar=$1
- shift
- find_tool_names=$@
- eval "find_tool_res=$$find_tool_envvar"
- if test x"$find_tool_res" = x; then
- for i
- do
- if ($i --version </dev/null) >/dev/null 2>&1; then
- find_tool_res=$i
- break
- fi
- done
- else
- find_tool_error_prefix="$$find_tool_envvar: "
- fi
- if test x"$find_tool_res" = x; then
- echo >&2 "$me: one of these is required: $find_tool_names"
- exit 1
- fi
- ($find_tool_res --version </dev/null) >/dev/null 2>&1 || {
- echo >&2 "$me: ${find_tool_error_prefix}cannot run $find_tool_res --version"
- exit 1
- }
- eval "$find_tool_envvar=$find_tool_res"
- eval "export $find_tool_envvar"
-}
-
-# Find sha1sum, named gsha1sum on MacPorts, and shasum on MacOS 10.6.
-find_tool SHA1SUM sha1sum gsha1sum shasum
-
-# Override the default configuration, if necessary.
-# Make sure that bootstrap.conf is sourced from the current directory
-# if we were invoked as "sh bootstrap".
-case "$0" in
- */*) test -r "$0.conf" && . "$0.conf" ;;
- *) test -r "$0.conf" && . ./"$0.conf" ;;
-esac
-
-
-if test "$vc_ignore" = auto; then
- vc_ignore=
- test -d .git && vc_ignore=.gitignore
- test -d CVS && vc_ignore="$vc_ignore .cvsignore"
-fi
-
-# Translate configuration into internal form.
-
-# Parse options.
-
-for option
-do
- case $option in
- --help)
- usage
- exit;;
- --gnulib-srcdir=*)
- GNULIB_SRCDIR=`expr "X$option" : 'X--gnulib-srcdir=(.*)'`;;
- --skip-po)
- SKIP_PO=t;;
- --force)
- checkout_only_file=;;
- --copy)
- copy=true;;
- --bootstrap-sync)
- bootstrap_sync=true;;
- --no-bootstrap-sync)
- bootstrap_sync=false;;
- --no-git)
- use_git=false;;
- *)
- echo >&2 "$0: $option: unknown option"
- exit 1;;
- esac
-done
-
-if $use_git || test -d "$GNULIB_SRCDIR"; then
- :
-else
- echo "$0: Error: --no-git requires --gnulib-srcdir" >&2
- exit 1
-fi
-
-if test -n "$checkout_only_file" && test ! -r "$checkout_only_file"; then
- echo "$0: Bootstrapping from a non-checked-out distribution is risky." >&2
- exit 1
-fi
-
-# If $STR is not already on a line by itself in $FILE, insert it,
-# sorting the new contents of the file and replacing $FILE with the result.
-insert_sorted_if_absent() {
- file=$1
- str=$2
- test -f $file || touch $file
- echo "$str" | sort -u - $file | cmp - $file > /dev/null - || echo "$str" | sort -u - $file -o $file - || exit 1
-}
-
-# Adjust $PATTERN for $VC_IGNORE_FILE and insert it with
-# insert_sorted_if_absent.
-insert_vc_ignore() {
- vc_ignore_file="$1"
- pattern="$2"
- case $vc_ignore_file in
- *.gitignore)
- # A .gitignore entry that does not start with `/' applies
- # recursively to subdirectories, so prepend `/' to every
- # .gitignore entry.
- pattern=`echo "$pattern" | sed s,^,/,`;;
- esac
- insert_sorted_if_absent "$vc_ignore_file" "$pattern"
-}
-
-# Die if there is no AC_CONFIG_AUX_DIR($build_aux) line in configure.ac.
-found_aux_dir=no
-grep '^[ ]*AC_CONFIG_AUX_DIR(['"$build_aux"'])' configure.ac - >/dev/null && found_aux_dir=yes
-grep '^[ ]*AC_CONFIG_AUX_DIR('"$build_aux"')' configure.ac - >/dev/null && found_aux_dir=yes
-if test $found_aux_dir = no; then
- echo "$0: expected line not found in configure.ac. Add the following:" >&2
- echo " AC_CONFIG_AUX_DIR([$build_aux])" >&2
- exit 1
-fi
-
-# If $build_aux doesn't exist, create it now, otherwise some bits
-# below will malfunction. If creating it, also mark it as ignored.
-if test ! -d $build_aux; then
- mkdir $build_aux
- for dot_ig in x $vc_ignore; do
- test $dot_ig = x && continue
- insert_vc_ignore $dot_ig $build_aux
- done
-fi
-
-# Note this deviates from the version comparison in automake
-# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a
-# but this should suffice as we won't be specifying old
-# version formats or redundant trailing .0 in bootstrap.conf.
-# If we did want full compatibility then we should probably
-# use m4_version_compare from autoconf.
-sort_ver() { # sort -V is not generally available
- ver1="$1"
- ver2="$2"
-
- # split on '.' and compare each component
- i=1
- while : ; do
- p1=$(echo "$ver1" | cut -d. -f$i)
- p2=$(echo "$ver2" | cut -d. -f$i)
- if [ ! "$p1" ]; then
- echo "$1 $2"
- break
- elif [ ! "$p2" ]; then
- echo "$2 $1"
- break
- elif [ ! "$p1" = "$p2" ]; then
- if [ "$p1" -gt "$p2" ] 2>/dev/null; then # numeric comparison
- echo "$2 $1"
- elif [ "$p2" -gt "$p1" ] 2>/dev/null; then # numeric comparison
- echo "$1 $2"
- else # numeric, then lexicographic comparison
- lp=$(printf "$p1n$p2n" | LANG=C sort -n | tail -n1)
- if [ "$lp" = "$p2" ]; then
- echo "$1 $2"
- else
- echo "$2 $1"
- fi
- fi
- break
- fi
- i=$(($i+1))
- done
-}
-
-get_version() {
- app=$1
-
- $app --version >/dev/null 2>&1 || return 1
-
- $app --version 2>&1 |
- sed -n '# Move version to start of line.
- s/.*[v ]([0-9])/1/
-
- # Skip lines that do not start with version.
- /^[0-9]/!d
-
- # Remove characters after the version.
- s/[^.a-z0-9-].*//
-
- # The first component must be digits only.
- s/^([0-9]*)[a-z-].*/1/
-
- #the following essentially does s/5.005/5.5/
- s/.0*([1-9])/.1/g
- p
- q'
-}
-
-check_versions() {
- ret=0
-
- while read app req_ver; do
- # We only need libtoolize from the libtool package.
- if test "$app" = libtool; then
- app=libtoolize
- fi
- # Exempt git if --no-git is in effect.
- if test "$app" = git; then
- $use_git || continue
- fi
- # Honor $APP variables ($TAR, $AUTOCONF, etc.)
- appvar=`echo $app | tr '[a-z]-' '[A-Z]_'`
- test "$appvar" = TAR && appvar=AMTAR
- eval "app=${$appvar-$app}"
- inst_ver=$(get_version $app)
- if [ ! "$inst_ver" ]; then
- echo "$me: Error: '$app' not found" >&2
- ret=1
- elif [ ! "$req_ver" = "-" ]; then
- latest_ver=$(sort_ver $req_ver $inst_ver | cut -d' ' -f2)
- if [ ! "$latest_ver" = "$inst_ver" ]; then
- echo "$me: Error: '$app' version == $inst_ver is too old" >&2
- echo " '$app' version >= $req_ver is required" >&2
- ret=1
- fi
- fi
- done
-
- return $ret
-}
-
-print_versions() {
- echo "Program Min_version"
- echo "----------------------"
- printf %s "$buildreq"
- echo "----------------------"
- # can't depend on column -t
-}
-
-use_libtool=0
-# We'd like to use grep -E, to see if any of LT_INIT,
-# AC_PROG_LIBTOOL, AM_PROG_LIBTOOL is used in configure.ac,
-# but that's not portable enough (e.g., for Solaris).
-grep '^[ ]*A[CM]_PROG_LIBTOOL' configure.ac >/dev/null - && use_libtool=1
-grep '^[ ]*LT_INIT' configure.ac >/dev/null - && use_libtool=1
-if test $use_libtool = 1; then
- find_tool LIBTOOLIZE glibtoolize libtoolize
-fi
-
-if ! printf "$buildreq" | check_versions; then
- echo >&2
- if test -f README-prereq; then
- echo "$0: See README-prereq for how to get the prerequisite programs" >&2
- else
- echo "$0: Please install the prerequisite programs" >&2
- fi
- exit 1
-fi
-
-echo "$0: Bootstrapping from checked-out $package sources..."
-
-# See if we can use gnulib's git-merge-changelog merge driver.
-if test -d .git && (git --version) >/dev/null 2>/dev/null ; then
- if git config merge.merge-changelog.driver >/dev/null ; then
- :
- elif (git-merge-changelog --version) >/dev/null 2>/dev/null ; then
- echo "$0: initializing git-merge-changelog driver"
- git config merge.merge-changelog.name 'GNU-style ChangeLog merge driver'
- git config merge.merge-changelog.driver 'git-merge-changelog %O %A %B'
- else
- echo "$0: consider installing git-merge-changelog from gnulib"
- fi
-fi
-
-
-cleanup_gnulib() {
- status=$?
- rm -fr "$gnulib_path"
- exit $status
-}
-
-git_modules_config () {
- test -f .gitmodules && git config --file .gitmodules "$@"
-}
-
-gnulib_path=`git_modules_config submodule.gnulib.path`
-test -z "$gnulib_path" && gnulib_path=gnulib
-
-# Get gnulib files.
-
-case ${GNULIB_SRCDIR--} in
--)
- if git_modules_config submodule.gnulib.url >/dev/null; then
- echo "$0: getting gnulib files..."
- git submodule init || exit $?
- git submodule update || exit $?
-
- elif [ ! -d "$gnulib_path" ]; then
- echo "$0: getting gnulib files..."
-
- trap cleanup_gnulib 1 2 13 15
-
- shallow=
- git clone -h 2>&1 | grep -- --depth > /dev/null && shallow='--depth 2'
- git clone $shallow git://git.sv.gnu.org/gnulib "$gnulib_path" ||
- cleanup_gnulib
-
- trap - 1 2 13 15
- fi
- GNULIB_SRCDIR=$gnulib_path
- ;;
-*)
- # Use GNULIB_SRCDIR as a reference.
- if test -d "$GNULIB_SRCDIR"/.git && - git_modules_config submodule.gnulib.url >/dev/null; then
- echo "$0: getting gnulib files..."
- if git submodule -h|grep -- --reference > /dev/null; then
- # Prefer the one-liner available in git 1.6.4 or newer.
- git submodule update --init --reference "$GNULIB_SRCDIR" - "$gnulib_path" || exit $?
- else
- # This fallback allows at least git 1.5.5.
- if test -f "$gnulib_path"/gnulib-tool; then
- # Since file already exists, assume submodule init already complete.
- git submodule update || exit $?
- else
- # Older git can't clone into an empty directory.
- rmdir "$gnulib_path" 2>/dev/null
- git clone --reference "$GNULIB_SRCDIR" - "$(git_modules_config submodule.gnulib.url)" "$gnulib_path" - && git submodule init && git submodule update - || exit $?
- fi
- fi
- GNULIB_SRCDIR=$gnulib_path
- fi
- ;;
-esac
-
-if $bootstrap_sync; then
- cmp -s "$0" "$GNULIB_SRCDIR/build-aux/bootstrap" || {
- echo "$0: updating bootstrap and restarting..."
- exec sh -c - 'cp "$1" "$2" && shift && exec "${CONFIG_SHELL-/bin/sh}" "$@"' - -- "$GNULIB_SRCDIR/build-aux/bootstrap" - "$0" "$@" --no-bootstrap-sync
- }
-fi
-
-gnulib_tool=$GNULIB_SRCDIR/gnulib-tool
-<$gnulib_tool || exit
-
-# Get translations.
-
-download_po_files() {
- subdir=$1
- domain=$2
- echo "$me: getting translations into $subdir for $domain..."
- cmd=`printf "$po_download_command_format" "$domain" "$subdir"`
- eval "$cmd"
-}
-
-# Mirror .po files to $po_dir/.reference and copy only the new
-# or modified ones into $po_dir. Also update $po_dir/LINGUAS.
-# Note po files that exist locally only are left in $po_dir but will
-# not be included in LINGUAS and hence will not be distributed.
-update_po_files() {
- # Directory containing primary .po files.
- # Overwrite them only when we're sure a .po file is new.
- po_dir=$1
- domain=$2
-
- # Mirror *.po files into this dir.
- # Usually contains *.s1 checksum files.
- ref_po_dir="$po_dir/.reference"
-
- test -d $ref_po_dir || mkdir $ref_po_dir || return
- download_po_files $ref_po_dir $domain - && ls "$ref_po_dir"/*.po 2>/dev/null |
- sed 's|.*/||; s|.po$||' > "$po_dir/LINGUAS" || return
-
- langs=`cd $ref_po_dir && echo *.po|sed 's/.po//g'`
- test "$langs" = '*' && langs=x
- for po in $langs; do
- case $po in x) continue;; esac
- new_po="$ref_po_dir/$po.po"
- cksum_file="$ref_po_dir/$po.s1"
- if ! test -f "$cksum_file" ||
- ! test -f "$po_dir/$po.po" ||
- ! $SHA1SUM -c --status "$cksum_file" - < "$new_po" > /dev/null; then
- echo "$me: updated $po_dir/$po.po..."
- cp "$new_po" "$po_dir/$po.po" - && $SHA1SUM < "$new_po" > "$cksum_file"
- fi
- done
-}
-
-case $SKIP_PO in
-'')
- if test -d po; then
- update_po_files po $package || exit
- fi
-
- if test -d runtime-po; then
- update_po_files runtime-po $package-runtime || exit
- fi;;
-esac
-
-symlink_to_dir()
-{
- src=$1/$2
- dst=${3-$2}
-
- test -f "$src" && {
-
- # If the destination directory doesn't exist, create it.
- # This is required at least for "lib/uniwidth/cjk.h".
- dst_dir=`dirname "$dst"`
- if ! test -d "$dst_dir"; then
- mkdir -p "$dst_dir"
-
- # If we've just created a directory like lib/uniwidth,
- # tell version control system(s) it's ignorable.
- # FIXME: for now, this does only one level
- parent=`dirname "$dst_dir"`
- for dot_ig in x $vc_ignore; do
- test $dot_ig = x && continue
- ig=$parent/$dot_ig
- insert_vc_ignore $ig `echo "$dst_dir"|sed 's,.*/,,'`
- done
- fi
-
- if $copy; then
- {
- test ! -h "$dst" || {
- echo "$me: rm -f $dst" &&
- rm -f "$dst"
- }
- } &&
- test -f "$dst" &&
- cmp -s "$src" "$dst" || {
- echo "$me: cp -fp $src $dst" &&
- cp -fp "$src" "$dst"
- }
- else
- test -h "$dst" &&
- src_ls=`ls -diL "$src" 2>/dev/null` && set $src_ls && src_i=$1 &&
- dst_ls=`ls -diL "$dst" 2>/dev/null` && set $dst_ls && dst_i=$1 &&
- test "$src_i" = "$dst_i" || {
- dot_dots=
- case $src in
- /*) ;;
- *)
- case /$dst/ in
- *//* | */../* | */./* | /*/*/*/*/*/)
- echo >&2 "$me: invalid symlink calculation: $src -> $dst"
- exit 1;;
- /*/*/*/*/) dot_dots=../../../;;
- /*/*/*/) dot_dots=../../;;
- /*/*/) dot_dots=../;;
- esac;;
- esac
-
- echo "$me: ln -fs $dot_dots$src $dst" &&
- ln -fs "$dot_dots$src" "$dst"
- }
- fi
- }
-}
-
-cp_mark_as_generated()
-{
- cp_src=$1
- cp_dst=$2
-
- if cmp -s "$cp_src" "$GNULIB_SRCDIR/$cp_dst"; then
- symlink_to_dir "$GNULIB_SRCDIR" "$cp_dst"
- elif cmp -s "$cp_src" "$local_gl_dir/$cp_dst"; then
- symlink_to_dir $local_gl_dir "$cp_dst"
- else
- case $cp_dst in
- *.[ch]) c1='/* '; c2=' */';;
- *.texi) c1='@c '; c2= ;;
- *.m4|*/Make*|Make*) c1='# ' ; c2= ;;
- *) c1= ; c2= ;;
- esac
-
- # If the destination directory doesn't exist, create it.
- # This is required at least for "lib/uniwidth/cjk.h".
- dst_dir=`dirname "$cp_dst"`
- test -d "$dst_dir" || mkdir -p "$dst_dir"
-
- if test -z "$c1"; then
- cmp -s "$cp_src" "$cp_dst" || {
- # Copy the file first to get proper permissions if it
- # doesn't already exist. Then overwrite the copy.
- echo "$me: cp -f $cp_src $cp_dst" &&
- rm -f "$cp_dst" &&
- cp "$cp_src" "$cp_dst-t" &&
- sed "s!$bt_regex/!!g" "$cp_src" > "$cp_dst-t" &&
- mv -f "$cp_dst-t" "$cp_dst"
- }
- else
- # Copy the file first to get proper permissions if it
- # doesn't already exist. Then overwrite the copy.
- cp "$cp_src" "$cp_dst-t" &&
- (
- echo "$c1-*- buffer-read-only: t -*- vi: set ro:$c2" &&
- echo "${c1}DO NOT EDIT! GENERATED AUTOMATICALLY!$c2" &&
- sed "s!$bt_regex/!!g" "$cp_src"
- ) > $cp_dst-t &&
- if cmp -s "$cp_dst-t" "$cp_dst"; then
- rm -f "$cp_dst-t"
- else
- echo "$me: cp $cp_src $cp_dst # with edits" &&
- mv -f "$cp_dst-t" "$cp_dst"
- fi
- fi
- fi
-}
-
-version_controlled_file() {
- dir=$1
- file=$2
- found=no
- if test -d CVS; then
- grep -F "/$file/" $dir/CVS/Entries 2>/dev/null |
- grep '^/[^/]*/[0-9]' > /dev/null && found=yes
- elif test -d .git; then
- git rm -n "$dir/$file" > /dev/null 2>&1 && found=yes
- elif test -d .svn; then
- svn log -r HEAD "$dir/$file" > /dev/null 2>&1 && found=yes
- else
- echo "$me: no version control for $dir/$file?" >&2
- fi
- test $found = yes
-}
-
-slurp() {
- for dir in . `(cd $1 && find * -type d -print)`; do
- copied=
- sep=
- for file in `ls -a $1/$dir`; do
- case $file in
- .|..) continue;;
- # FIXME: should all file names starting with "." be ignored?
- .*) continue;;
- esac
- test -d $1/$dir/$file && continue
- for excluded_file in $excluded_files; do
- test "$dir/$file" = "$excluded_file" && continue 2
- done
- if test $file = Makefile.am && test "X$gnulib_mk" != XMakefile.am; then
- copied=$copied${sep}$gnulib_mk; sep=$nl
- remove_intl='/^[^#].*/intl/s/^/#/;'"s!$bt_regex/!!g"
- sed "$remove_intl" $1/$dir/$file |
- cmp - $dir/$gnulib_mk > /dev/null || {
- echo "$me: Copying $1/$dir/$file to $dir/$gnulib_mk ..." &&
- rm -f $dir/$gnulib_mk &&
- sed "$remove_intl" $1/$dir/$file >$dir/$gnulib_mk &&
- gnulib_mk_hook $dir/$gnulib_mk
- }
- elif { test "${2+set}" = set && test -r $2/$dir/$file; } ||
- version_controlled_file $dir $file; then
- echo "$me: $dir/$file overrides $1/$dir/$file"
- else
- copied=$copied$sep$file; sep=$nl
- if test $file = gettext.m4; then
- echo "$me: patching m4/gettext.m4 to remove need for intl/* ..."
- rm -f $dir/$file
- sed '
- /^AC_DEFUN([AM_INTL_SUBDIR],/,/^]/c- AC_DEFUN([AM_INTL_SUBDIR], [])
- /^AC_DEFUN([gt_INTL_SUBDIR_CORE],/,/^]/c- AC_DEFUN([gt_INTL_SUBDIR_CORE], [])
- $a- AC_DEFUN([gl_LOCK_EARLY], [])
- ' $1/$dir/$file >$dir/$file
- else
- cp_mark_as_generated $1/$dir/$file $dir/$file
- fi
- fi || exit
- done
-
- for dot_ig in x $vc_ignore; do
- test $dot_ig = x && continue
- ig=$dir/$dot_ig
- if test -n "$copied"; then
- insert_vc_ignore $ig "$copied"
- # If an ignored file name ends with .in.h, then also add
- # the name with just ".h". Many gnulib headers are generated,
- # e.g., stdint.in.h -> stdint.h, dirent.in.h ->..., etc.
- # Likewise for .gperf -> .h, .y -> .c, and .sin -> .sed
- f=`echo "$copied" |
- sed '
- s/.in.h$/.h/
- s/.sin$/.sed/
- s/.y$/.c/
- s/.gperf$/.h/
- '
- `
- insert_vc_ignore $ig "$f"
-
- # For files like sys_stat.in.h and sys_time.in.h, record as
- # ignorable the directory we might eventually create: sys/.
- f=`echo "$copied"|sed 's/sys_.*.in.h$/sys/'`
- insert_vc_ignore $ig "$f"
- fi
- done
- done
-}
-
-
-# Create boot temporary directories to import from gnulib and gettext.
-rm -fr $bt $bt2 &&
-mkdir $bt $bt2 || exit
-
-# Import from gnulib.
-
-gnulib_tool_options="- --import- --no-changelog- --aux-dir $bt/$build_aux- --doc-base $bt/$doc_base- --lib $gnulib_name- --m4-base $bt/$m4_base/- --source-base $bt/$source_base/- --tests-base $bt/$tests_base- --local-dir $local_gl_dir- $gnulib_tool_option_extras-"
-if test $use_libtool = 1; then
- case "$gnulib_tool_options " in
- *' --libtool '*) ;;
- *) gnulib_tool_options="$gnulib_tool_options --libtool" ;;
- esac
-fi
-echo "$0: $gnulib_tool $gnulib_tool_options --import ..."
-$gnulib_tool $gnulib_tool_options --import $gnulib_modules &&
-slurp $bt || exit
-
-for file in $gnulib_files; do
- symlink_to_dir "$GNULIB_SRCDIR" $file || exit
-done
-
-
-# Import from gettext.
-with_gettext=yes
-grep '^[ ]*AM_GNU_GETTEXT_VERSION(' configure.ac >/dev/null || - with_gettext=no
-
-if test $with_gettext = yes; then
- echo "$0: (cd $bt2; ${AUTOPOINT-autopoint}) ..."
- cp configure.ac $bt2 &&
- (cd $bt2 && ${AUTOPOINT-autopoint} && rm configure.ac) &&
- slurp $bt2 $bt || exit
-fi
-rm -fr $bt $bt2 || exit
-
-# Remove any dangling symlink matching "*.m4" or "*.[ch]" in some
-# gnulib-populated directories. Such .m4 files would cause aclocal to fail.
-# The following requires GNU find 4.2.3 or newer. Considering the usual
-# portability constraints of this script, that may seem a very demanding
-# requirement, but it should be ok. Ignore any failure, which is fine,
-# since this is only a convenience to help developers avoid the relatively
-# unusual case in which a symlinked-to .m4 file is git-removed from gnulib
-# between successive runs of this script.
-find "$m4_base" "$source_base" - -depth ( -name '*.m4' -o -name '*.[ch]' ) - -type l -xtype l -delete > /dev/null 2>&1
-
-# Reconfigure, getting other files.
-
-# Skip autoheader if it's not needed.
-grep -E '^[ ]*AC_CONFIG_HEADERS?>' configure.ac >/dev/null ||
- AUTOHEADER=true
-
-for command in - libtool - "${ACLOCAL-aclocal} --force -I m4 $ACLOCAL_FLAGS" - "${AUTOCONF-autoconf} --force" - "${AUTOHEADER-autoheader} --force" - "${AUTOMAKE-automake} --add-missing --copy --force-missing"
-do
- if test "$command" = libtool; then
- test $use_libtool = 0 - && continue
- command="${LIBTOOLIZE-libtoolize} -c -f"
- fi
- echo "$0: $command ..."
- $command || exit
-done
-
-
-# Get some extra files from gnulib, overriding existing files.
-for file in $gnulib_extra_files; do
- case $file in
- */INSTALL) dst=INSTALL;;
- build-aux/*) dst=$build_aux/`expr "$file" : 'build-aux/(.*)'`;;
- *) dst=$file;;
- esac
- symlink_to_dir "$GNULIB_SRCDIR" $file $dst || exit
-done
-
-if test $with_gettext = yes; then
- # Create gettext configuration.
- echo "$0: Creating po/Makevars from po/Makevars.template ..."
- rm -f po/Makevars
- sed '
- /^EXTRA_LOCALE_CATEGORIES *=/s/=.*/= '"$EXTRA_LOCALE_CATEGORIES"'/
- /^COPYRIGHT_HOLDER *=/s/=.*/= '"$COPYRIGHT_HOLDER"'/
- /^MSGID_BUGS_ADDRESS *=/s|=.*|= '"$MSGID_BUGS_ADDRESS"'|
- /^XGETTEXT_OPTIONS *=/{
- s/$/ \/
- a- '"$XGETTEXT_OPTIONS"' $${end_of_xgettext_options+}
- }
- ' po/Makevars.template >po/Makevars || exit 1
-
- if test -d runtime-po; then
- # Similarly for runtime-po/Makevars, but not quite the same.
- rm -f runtime-po/Makevars
- sed '
- /^DOMAIN *=.*/s/=.*/= '"$package"'-runtime/
- /^subdir *=.*/s/=.*/= runtime-po/
- /^MSGID_BUGS_ADDRESS *=/s/=.*/= bug-'"$package"'@gnu.org/
- /^XGETTEXT_OPTIONS *=/{
- s/$/ \/
- a- '"$XGETTEXT_OPTIONS_RUNTIME"' $${end_of_xgettext_options+}
- }
- ' po/Makevars.template >runtime-po/Makevars || exit 1
-
- # Copy identical files from po to runtime-po.
- (cd po && cp -p Makefile.in.in *-quot *.header *.sed *.sin ../runtime-po)
- fi
-fi
-
-bootstrap_epilogue
-
-echo "$0: done. Now you can run './configure'."
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "scriptversion="
-# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-time-zone: "UTC"
-# time-stamp-end: "; # UTC"
-# End:
diff --git a/bootstrap.conf b/bootstrap.conf
deleted file mode 100644
index eb7ab94..0000000
--- a/bootstrap.conf
+++ /dev/null
@@ -1,106 +0,0 @@
-# Bootstrap configuration.
-
-# Copyright (C) 2006-2011 Free Software Foundation, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# gnulib modules used by this package.
-gnulib_modules='
-announce-gen
-c-ctype
-calloc-posix
-close
-closeout
-dirname
-do-release-commit-and-tag
-dup2
-error
-getopt-gnu
-gettext-h
-git-version-gen
-gitlog-to-changelog
-gnu-web-doc-update
-gnupload
-hash
-hash-pjw
-malloc-gnu
-maintainer-makefile
-manywarnings
-mkstemp
-pipe-posix
-progname
-quotearg
-realloc-gnu
-ssize_t
-stddef
-stdlib
-stpcpy
-strerror
-string
-strstr
-strtok_r
-strtol
-strtoul
-strtoull
-strtoumax
-unistd
-unlink
-unlocked-io
-update-copyright
-useless-if-before-free
-version-etc-fsf
-xalloc
-xstrtoumax
-'
-
-gnulib_name=libiwhd
-
-checkout_only_file=
-MSGID_BUGS_ADDRESS=iwhd-devel(a)fedorahosted.org
-
-# Additional xgettext options to use. Use "
ewline" to break lines.
-XGETTEXT_OPTIONS=$XGETTEXT_OPTIONS'\- --from-code=UTF-8\- --flag=asprintf:2:c-format --flag=vasprintf:2:c-format\- --flag=asnprintf:3:c-format --flag=vasnprintf:3:c-format\- --flag=wrapf:1:c-format\- --flag=error:3:c-format --flag=error_at_line:5:c-format\-'
-
-# Automake requires that ChangeLog exist.
-test -f ChangeLog || touch ChangeLog || exit 1
-
-gnulib_tool_option_extras="--tests-base=$bt/gnulib-tests --with-tests $avoided_gnulib_modules"
-
-# Build prerequisites
-buildreq="-autoconf 2.62
-automake 1.11.1
-autopoint -
-bison -
-gettext -
-git 1.4.4
-gzip -
-makeinfo -
-rsync -
-tar -
-"
-
-bootstrap_epilogue()
-{
- # Change paths in gnulib-tests/gnulib.mk from "../.." to "..".
- m=gnulib-tests/gnulib.mk
- sed 's,../..,..,g' $m > $m-t
- mv -f $m-t $m
-}
diff --git a/cfg.mk b/cfg.mk
deleted file mode 100644
index 84fce00..0000000
--- a/cfg.mk
+++ /dev/null
@@ -1,66 +0,0 @@
-# Customize maint.mk -*- makefile -*-
-# Copyright (C) 2009-2011 Free Software Foundation, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Used in maint.mk's web-manual rule
-manual_title = GNU Grep: Print lines matching a pattern
-
-# Fixing these can wait.
-skip_low_priority =			\
-  sc_bindtextdomain			\
-  sc_texinfo_acronym			\
-  sc_prohibit_atoi_atof		\
-  sc_prohibit_tab_based_indentation	\
-  sc_prohibit_strcmp			\
-  sc_unmarked_diagnostics		\
-  sc_error_message_uppercase		\
-  sc_cast_of_argument_to_free		\
-  sc_file_system
-
-# Tests not to run as part of "make distcheck".
-local-checks-to-skip =		\
-  sc_program_name		\
-  sc_space_tab			\
-  sc_useless_cpp_parens	\
-  $(skip_low_priority)
-
-# Tools used to bootstrap this package, used for "announcement".
-bootstrap-tools = autoconf,automake,gnulib
-
-# Now that we have better tests, make this the default.
-export VERBOSE = yes
-
-old_NEWS_hash = dba674b8d2d0a340da7654d16cced91e
-
-sc_prohibit_echo_minus_en:
-	@prohibit='\<echo -[en]'					\
-	halt='do not use echo ''-e or echo ''-n; use printf instead'	\
-	  $(_sc_search_regexp)
-
-# Indent only with spaces.
-sc_prohibit_tab_based_indentation:
-	@prohibit='^ *	'						\
-	halt='TAB in indentation; use only spaces'			\
-	  $(_sc_search_regexp)
-
-# Don't use "indent-tabs-mode: nil" anymore. No longer needed.
-sc_prohibit_emacs__indent_tabs_mode__setting:
-	@prohibit='^( *[*#] *)?indent-tabs-mode:'			\
-	halt='use of emacs indent-tabs-mode: setting'			\
-	  $(_sc_search_regexp)
-
-update-copyright-env =			\
-  UPDATE_COPYRIGHT_USE_INTERVALS=1	\
-  UPDATE_COPYRIGHT_MAX_LINE_LENGTH=79
diff --git a/config.h.in b/config.h.in
deleted file mode 100644
index e69de29..0000000
diff --git a/configure.ac b/configure.ac
deleted file mode 100644
index e874281..0000000
--- a/configure.ac
+++ /dev/null
@@ -1,182 +0,0 @@
-# -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-# Copyright (C) 1991, 1993-2011 Red Hat, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-AC_PREREQ([2.65])
-AC_INIT([iwhd],
- m4_esyscmd([build-aux/git-version-gen .tarball-version]),
- [iwhd-devel(a)fedorahosted.org])
-AC_CONFIG_AUX_DIR([build-aux])
-
-AC_CONFIG_HEADERS([config.h:config.hin])
-AM_INIT_AUTOMAKE([1.11.1 dist-xz color-tests parallel-tests])
-AM_SILENT_RULES([yes]) # make --enable-silent-rules the default.
-
-# Checks for programs.
-AC_PROG_CXX
-AC_PROG_CC
-AM_PROG_CC_C_O
-gl_EARLY
-
-# Maintainer note - comment this line out if you plan to rerun
-# GNULIB_POSIXCHECK testing to see if M4 should be using more modules.
-# Leave it uncommented for normal releases, for faster ./configure.
-gl_ASSERT_NO_GNULIB_POSIXCHECK
-
-AC_PROG_RANLIB
-AC_TYPE_UINT64_T
-
-gl_INIT
-
-AC_PROG_YACC
-AC_PROG_LEX
-AM_MISSING_PROG([HELP2MAN], [help2man])
-
-AC_CHECK_LIB([curl], [curl_easy_init],
- [CURL_LIB=-lcurl],
- [AC_MSG_ERROR([Missing required CURL lib])])
-AC_SUBST([CURL_LIB])
-
-AC_CHECK_LIB([jansson], [json_object_get],
- [JANSSON_LIB=-ljansson],
- [AC_MSG_ERROR([Missing required Jansson lib])])
-AC_SUBST([JANSSON_LIB])
-
-AC_CHECK_LIB([microhttpd], [MHD_start_daemon],
- [UHTTPD_LIB=-lmicrohttpd],
- [AC_MSG_ERROR([Missing required microhttpd lib])])
-AC_SUBST([UHTTPD_LIB])
-
-AC_CHECK_LIB([pthread], [pthread_create],
- [PTHREAD_LIB=-lpthread],
- [AC_MSG_ERROR([Missing required pthread lib])])
-AC_SUBST([PTHREAD_LIB])
-
-AC_CHECK_LIB([glib-2.0], [g_hash_table_new_full],
- [GLIB2_LIB=-lglib-2.0],
- [AC_MSG_ERROR([Missing required glib2 lib])])
-AC_SUBST([GLIB2_LIB])
-
-PKG_CHECK_MODULES([HAIL],[libhail >= 0.8])
-AC_SUBST([HAIL_LIBS])
-AC_SUBST([HAIL_CFLAGS])
-
-AC_CHECK_LIB([xml2], [xmlInitParser],
- [XML2_LIB=-lxml2],
- [AC_MSG_ERROR([Missing required XML2 lib])])
-AC_SUBST([XML2_LIB])
-
-AC_CHECK_HEADER([gc.h], ,
- [AC_MSG_ERROR([Missing GC development library: gc-devel or libgc-dev])])
-
-# from http://www.gnu.org/software/autoconf-archive/
-AX_BOOST_BASE
-AX_BOOST_SYSTEM
-AX_BOOST_THREAD
-
-AC_ARG_ENABLE([gcc-warnings],
- [AS_HELP_STRING([--enable-gcc-warnings],
- [turn on lots of GCC warnings (for developers)])],
- [case $enableval in
- yes|no) ;;
- *) AC_MSG_ERROR([bad value $enableval for gcc-warnings option]) ;;
- esac
- gl_gcc_warnings=$enableval],
- [gl_gcc_warnings=no]
-)
-
-if test "$gl_gcc_warnings" = yes; then
- gl_WARN_ADD([-Werror], [WERROR_CFLAGS])
- AC_SUBST([WERROR_CFLAGS])
-
- nw=
- # This, $nw, is the list of warnings we disable.
- nw="$nw -Wdeclaration-after-statement" # too useful to forbid
- nw="$nw -Waggregate-return" # anachronistic
- nw="$nw -Wlong-long" # C90 is anachronistic (lib/gethrxtime.h)
- nw="$nw -Wc++-compat" # We don't care about C++ compilers
- nw="$nw -Wundef" # Warns on '#if GNULIB_FOO' etc in gnulib
- nw="$nw -Wtraditional" # Warns on #elif which we use often
- nw="$nw -Wcast-qual" # Too many warnings for now
- nw="$nw -Wconversion" # Too many warnings for now
- nw="$nw -Wsystem-headers" # Don't let system headers trigger warnings
- nw="$nw -Wsign-conversion" # Too many warnings for now
- nw="$nw -Wtraditional-conversion" # Too many warnings for now
- nw="$nw -Wunreachable-code" # Too many warnings for now
- nw="$nw -Wunused-macros" # bison-generated
- nw="$nw -Wpadded" # Our structs are not padded
- nw="$nw -Wredundant-decls" # openat.h declares e.g., mkdirat
- nw="$nw -Wlogical-op" # any use of fwrite provokes this
- nw="$nw -Wformat-nonliteral" # who.c and pinky.c strftime uses
- nw="$nw -Wvla" # warnings in gettext.h
- nw="$nw -Wnested-externs" # use of XARGMATCH/verify_function__
- nw="$nw -Wswitch-enum" # Too many warnings for now
- nw="$nw -Wswitch-default" # Too many warnings for now
- nw="$nw -Wstack-protector" # not worth working around
- nw="$nw -Wstrict-overflow" # in bison-generated code
- nw="$nw -Wunsafe-loop-optimizations" # in bison-generated code
- nw="$nw -Wmissing-noreturn" # yy_fatal_error in flex-generated code
-
- gl_MANYWARN_ALL_GCC([ws])
- gl_MANYWARN_COMPLEMENT([ws], [$ws], [$nw])
- for w in $ws; do
- gl_WARN_ADD([$w])
- done
- gl_WARN_ADD([-Wno-unused])
- gl_WARN_ADD([-Wno-missing-field-initializers]) # We need this one
- gl_WARN_ADD([-Wno-sign-compare]) # Too many warnings for now
- gl_WARN_ADD([-Wno-unused-parameter]) # Too many warnings for now
-
- # In spite of excluding -Wlogical-op above, it is enabled, as of
- # gcc 4.5.0 20090517, and it provokes warnings in cat.c, dd.c, truncate.c
- gl_WARN_ADD([-Wno-logical-op])
-
- gl_WARN_ADD([-fdiagnostics-show-option])
- gl_WARN_ADD([-funit-at-a-time])
-
- AC_SUBST([WARN_CFLAGS])
-
- AC_DEFINE([lint], [1], [Define to 1 if the compiler is checking for lint.])
- AC_DEFINE([_FORTIFY_SOURCE], [2],
- [enable compile-time and run-time bounds-checking, and some warnings])
- AC_DEFINE([GNULIB_PORTCHECK], [1], [enable some gnulib portability checks])
-
- # We use a slightly smaller set of warning options for lib/.
- # Remove the following and save the result in GNULIB_WARN_CFLAGS.
- nw=
- nw="$nw -Wuninitialized"
- nw="$nw -Wmissing-prototypes"
- nw="$nw -Wold-style-definition"
- gl_MANYWARN_COMPLEMENT([GNULIB_WARN_CFLAGS], [$WARN_CFLAGS], [$nw])
- AC_SUBST([GNULIB_WARN_CFLAGS])
-
- # For gnulib-tests, the set is slightly smaller still.
- nw=
- nw="$nw -Wstrict-prototypes"
- gl_MANYWARN_COMPLEMENT([GNULIB_TEST_WARN_CFLAGS],
- [$GNULIB_WARN_CFLAGS], [$nw])
- AC_SUBST([GNULIB_TEST_WARN_CFLAGS])
-fi
-
-AC_CONFIG_FILES([
-Makefile
-gnulib-tests/Makefile
-lib/Makefile
-man/Makefile
-t/Makefile
-])
-AC_OUTPUT
diff --git a/dc-register-image b/dc-register-image
deleted file mode 100755
index 0e9ed18..0000000
--- a/dc-register-image
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/bash
-
-# Copyright (C) 2010-2011 Red Hat, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Uncomment for debugging.
-#ECHO=echo
-
-# $bucket is iwhd bucket (not S3 bucket, see $ami_bkt below).
-bucket=$1; shift
-# $object is iwhd's image name, which also serves as a basename in S3.
-# So, if $object=foo.img, then we end uploading foo.img.manifest.xml and
-# a bunch of foo.img.part.NN files.
-# The foo.img itself is an uncompressed root filesystem, a device image.
-# Finally, $object is in local directory $bucket/ (as fs back-end works).
-object=$1; shift
-# $api_key is "key ID" for S3.
-api_key=$1; shift
-# $api_secret is "secret" for S3.
-api_secret=$1; shift
-# $cert_file contains X.509 certificate for EC2 (cert-foo.pem).
-cert_file=$1; shift
-# $key_file contains private key for EC2 (pk-foo.pem).
-key_file=$1; shift
-# $api_uid is AWS account ID, but without dashes.
-api_uid=$1; shift
-# $ami_bkt is S3 bucket into which we upload foo.img.manifest.xml etc.
-ami_bkt=$1; shift
-# $kernel is an aki-xxxxxxxx ID (e.g. aki-99a0f1dc is pvgrub)
-kernel=$1; shift
-# $ramdisk is an ari-xxxxxxxx ID or "_default_" (most of the time for pvgrub)
-ramdisk=$1; shift
-
-# We do not set JAVA_HOME and EC2_HOME because they are not necessary
-# if pre-packaged versions of ec2-api-tools and ec2-ami-tools are installed
-# from RPMfusion and Amazon respectively. This also allows the user to
-# set these locations in case an alternative set of tools is desired.
-#
-# However, we look for each missing tool in commonly overlooked locations.
-
-which_it () {
- cmd=$1
- ret=$(/usr/bin/which $cmd 2>/dev/null)
- if [ $? != 0 ]; then
- if [ -x /usr/local/bin/$cmd ]; then
- ret=/usr/local/bin/$cmd
- elif [ -x ~/bin/$cmd ]; then
- ret=~/bin/$cmd
- fi
- fi
- if [ -z "$ret" ]; then
- echo "ERROR missing_$cmd" >&2
- exit 1
- fi
- echo $ret
-}
-
-ec2_bundle_image=$(which_it ec2-bundle-image)
-ec2_upload_bundle=$(which_it ec2-upload-bundle)
-ec2_register=$(which_it ec2-register)
-[ -z "$ec2_bundle_image" -o -z "$ec2_upload_bundle" -o -z "$ec2_register" ] && exit 1
-
-# XXX Is this safe against running 2 requests simultaneously?
-tmpdir=$(mktemp -d -p $PWD/$bucket) || exit 1
-trap "rm -rf $tmpdir" EXIT
-
-bundle_args="--batch --arch x86_64 -c $cert_file -k $key_file -u $api_uid"
-if [ "$kernel" != "_default_" ]; then
- bundle_args="$bundle_args --kernel $kernel"
-fi
-if [ "$ramdisk" != "_default_" ]; then
- bundle_args="$bundle_args --ramdisk $ramdisk"
-fi
-$ECHO $ec2_bundle_image -i $bucket/$object -d $tmpdir $bundle_args
-if [ $? != 0 ]; then
- echo "ERROR bundling_failed" >&2
- echo $ec2_bundle_image -i $bucket/$object -d $tmpdir $bundle_args >&2
- exit 1
-fi
-
-upload_args="--batch --retry -b $ami_bkt -a $api_key -s $api_secret"
-$ECHO $ec2_upload_bundle -m $tmpdir/$object.manifest.xml $upload_args
-if [ $? != 0 ]; then
- echo "ERROR uploading_failed" >&2
- echo $ec2_upload_bundle -m $tmpdir/$object.manifest.xml $upload_args >&2
- exit 1
-fi
-
-register_args="-C $cert_file -K $key_file"
-$ECHO $ec2_register $register_args $ami_bkt/$object.manifest.xml -n $object
-if [ $? != 0 ]; then
- echo "ERROR registration_failed" >&2
- echo $ec2_register $register_args $ami_bkt/$object.manifest.xml -n $object >&2
- exit 1
-fi
diff --git a/doc/.gitignore b/doc/.gitignore
deleted file mode 100644
index e7a8672..0000000
--- a/doc/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/gendocs_template
-gendocs_template
diff --git a/doc/image_repo.odt b/doc/image_repo.odt
deleted file mode 100644
index b77f1f8..0000000
Binary files a/doc/image_repo.odt and /dev/null differ
diff --git a/doc/warehouse_api_final.md b/doc/warehouse_api_final.md
deleted file mode 100644
index b5d7294..0000000
--- a/doc/warehouse_api_final.md
+++ /dev/null
@@ -1,324 +0,0 @@
-Image Warehouse API
-===================
-
-Version 0.8
------------
-
-This is my attempt to document the current state of the REST API
-to the image warehouse, in its proposed final state and (in an
-appendix) in its current messy state. It does not cover authentication,
-fully dynamic configuration, or lesser items such as
-
-* reverse replication (pull from slave/downstream warehouse);
-
-* direct copy;
-
-* cache control;
-
-* HTTP chunked encoding.
-
-
-
-In general, data other than object bodies can be returned in either
-XML or JSON format, defaulting to XML unless an "Accept" header
-containing "/json" is present.
-
-For examples, the convention in this document is for the first
-line of an indented block to be the command you would issue, while
-the remainder is the output you might expect.
-
-API Root Operations
--------------------
-
-The only operation for the API root is to fetch information about
-other API components, including buckets and special endpoints
-such as the provider list. In other words, the
-
- $ curl http://fserver-1:9090
- <api service="image_warehouse" version="1.0">
- <bucket_factory path="http://fserver-1:9090/_new"/>
- <provider_list path="http://fserver-1:9090/_providers"/>
- <bucket path="http://fserver-1:9090/junk2"/>
- <bucket path="http://fserver-1:9090/data"/>
- </api>
-
-The "service" and "version" attributes identify this version
-of the warehouse API. Special API endpoints are distinguished
-by a leading underscore, as with the "bucket_factory" endpoint
-for creating new buckets and the "providers" endpoint for manipulating
-cloud-provider information. The remainder are actual buckets.
-
-Provider Operations
--------------------
-
-It is possible to list providers, and to change login credentials
-for those providers. Listing is very simple:
-
- $ curl http://fserver-1:9090/_providers
- <providers>
- <provider name="my tabled">
- <type>s3</type>
- <port>80</port>
- <username>foo</username>
- <password>bar</password>
- </provider>
- <provider name="backup">
- <type>http</type>
- <host>localhost</host>
- <port>9091</port>
- </provider>
- </providers>
-
-This shows two providers, named "my tabled" (our primary/local
-store) and "backup" (a secondary/remote store). The types can
-be:
-
-* http: our own API as described in this document
-
-* s3: S3 - includes Amazon S3, tabled, Walrus, ParkPlace, Google
- Storage
-
-* cf: CloudFiles or OpenStack Storage ("swift")
-
-
-
-For the time being, "s3" is the only fully functional type for
-a primary store, while any type can be used for a secondary store.
-Slave stores can also be started with the "-f" flag which uses
-a directory as a primary store but does no metadata/replication
-operations. Eventually, all of these options - including a directory
-on a local or distributed filesystem - will be supported as either
-primary or secondary stores.
-
-The only modifying operation for providers is an update of the
-username and password (must be both at once). For example:
-
- $ curl -d provider="my tabled" -d username=yyy -d password=zzz -http://fserver-1:9090/_providers
-
-Bucket Operations
------------------
-
-Buckets can be created, listed, and deleted. The create command
-is like this (using POST).
-
- $ curl -d name=my_bucket http://fserver-1:9090/_new
-
-Deletion requires that the bucket be empty, but is similarly
-simple.
-
- $ curl -X DELETE http://fserver-1:9090/my_bucket
-
-Here's a listing of a bucket's contents, using JSON just for variety.
-
- $ curl -H "Accept: */json" http://fserver-1:9090/my_bucket
- [
- {
- "type": "query",
- "path": "http://fserver-1:9090/my_bucket/_query"
- {
- "type": "object",
- "name": "file1",
- "path": "http://fserver-1:9090/my_bucket/file1"
- },
- {
- "type": "object",
- "name": "file2",
- "path": "http://fserver-1:9090/my_bucket/file1"
- }
- ]
-
-The query object is used to do complex queries, which will be described
-later. The remainder are regular objects.
-
-Object and Attribute Operations
--------------------------------
-
-Objects are represented as small directory trees, with several
-elements as shown here:
-
- $ curl http://fserver-1:9090/my_bucket/file1
- <object>
- <object_body path="http://fserver-1:9090/my_bucket/file1/body"/>
- <object_attr_list path="http://fserver-1:9090/my_bucket/file1/attrs"/>
- <object_attr name="xyz" path="http://fserver-1:9090/my_bucket/file1/attr_xyz"/>
- </object>
-
-The object body can be stored and retrieved using PUT and GET respectively,
-and can have any HTTP/MIME type. The attribute-list element
-can be used to fetch or set multiple attributes - including values
-- at once. To fetch:
-
- $ curl http://fserver-1:9090/my_bucket/file1/attrs
- <attributes>
- <attribute name="color">blue</attribute>
- <attribute name="flavor">lemon</attribute>
- </attributes>
-
-To set both of these attributes at once:
-
- $ curl -d color="blue" -d flavor="lemon" http://fserver-1:9090/my_bucket/file1/attrs
-
-Single-attribute operations are also supported. To fetch a
-single attribute:
-
- $ curl http://fserver-1:9090/my_bucket/file1/attr_color
- <attribute name="color">blue</attribute>
-
-The attribute can also be set with a PUT to the same URL.
-
- $ printf green | curl -T - http://fserver-1:9090/my_bucket/file1/attr_color
-
-Lastly, objects and attributes can be deleted (object deletes
-are propagated to secondary warehouses).
-
- $ curl -X DELETE http://fserver-1:9090/my_bucket/file2
-
-Queries
--------
-
-Queries are supported as in the design doc. Queries can contain
-the following features, which are also supported for evaluating
-replication policies:
-
-* Literal integers, strings, and dates
-
-* Object-attribute access: $attr
-
-* Indirect object-attribute access: @link_on_cur_obj.link_target_attr
-
-* Site-attribute access (for replication policies only):
- #attr
-
-* Comparisons: <, <=, ==, !=, >=, >
-
-* Booleans: &&, ||, !
-
-
-
-The syntax to issue a query is as follows.
-
- $ curl -d '($color == "green") && ($flavor == "lemon")' \
-   http://fserver-1:9090/my_bucket/_query
- <objects>
- <object>
- <bucket>my_bucket</bucket>
- <key>file1</key>
- </object>
- </objects>
-
-Replication Policies
--------------------------------------------
-
-Replication policies are stored as "_policy" attributes on
-objects. To set a policy, use the same mechanism as for other attributes.
-
- $ printf '$color == "green"' | curl -T - http://fserver-1:9090/my_bucket/file1/_policy
-
-This will cause the warehouse daemon to replicate to all secondary
-warehouses whenever the object is changed (including attribute
-changes) subsequently. You probably want to set the policy first,
-before sending the body, and this is entirely allowable using
-any of the attribute-setting mechanisms described above; this
-would result in an empty object being created, then the subsequent
-body PUT will be replicated. The above example is probably not
-what you want for two other reasons:
-
-1. Because the policy only refers to object attributes, it will
- replicate to all secondary warehouses.
-
-2. It's cumbersome and inefficient to set separate replication
- policies for every object individually.
-
-
-
-To specify selective replication, matching object attributes
-with secondary-warehouse attributes, you would do this instead.
-
- $ printf '$color == #color' | curl -T - http://fserver-1:9090/my_bucket/file1/_policy
-
-To set a default replication policy for all objects within a bucket,
-use the "_default" pseudo-object.
-
- $ printf '$color == #color' | curl -T - http://fserver-1:9090/my_bucket/_default/_policy
-
-This will cause any modification to a green object to be replicated
-to green remote warehouses any time they are changed, but will
-not affect blue objects or purple warehouses. Note that the default
-replication policy for a bucket is overridden by any specific
-per-object policy.
-
-Appendix 1: Major Divergences
------------------------------
-
-The current code doesn't implement exactly the API described
-above. There are many differences in the exact format of data
-returned for the API root, provider list, or object listings.
-More importantly, the actual URLs and methods used for various
-operations are still pending reconciliation with what's described
-here. Here are the current equivalents, in approximately the
-same order as mentioned above:
-
-* bucket creation: PUT on .../my_bucket
-
-* object-body fetch: GET on .../my_bucket/file1
-
-* object-body store: PUT on .../my_bucket/file1
-
-* multi-attribute set: POST on .../my_bucket with key=file1
-
-* bucket and attribute deletes are not yet implemented
-
-
-
-There are also a couple of special control operations, implemented
-as POST methods on the object. The first of these is to force re-evaluation
-of the relevant replication policies and trigger re-replication
-to appropriate remote warehouses (equivalent to a PUT on the
-object body except that there's no data transfer from the client).
-
- $ curl -d op=push http://fserver-1:9090/my_bucket/file1
-
-The second control operation is used to determine whether replication
-to a specific remote warehouse has finished.
-
- $ curl -d op=check -d loc=backup http://fserver-1:9090/my_bucket/file1
-
-This will return a 404 (Not Found) if the object has not been replicated
-to that location, or a 200 (OK) if it has.
-
-Appendix 2: JSON Configuration Format
--------------------------------------
-
-The initial configuration for the image warehouse is pulled
-from a JSON configuration file, repo.json in the current directory
-by default. This defines a set of required attributes plus any
-others that the user might want to use in replication policies.
-Here's an example:
-
- [
- {
- "name": "my tabled",
- "type": "s3",
- "host": "localhost",
- "port": 80,
- "key": "foo",
- "secret": "bar",
- "color": "blue"
- },
- {
- "name": "backup",
- "type": "http",
- "host": "localhost",
- "port": 9091
- }
- ]
-
-This defines a primary (local) warehouse named "my tabled" which
-is using S3 on localhost. In this case the user name and password
-are required - named "key" and "secret" in the file for legacy
-reasons. We also have a secondary (remote) warehouse named "backup"
-that we'll replicate to, and we don't care what back end it uses.
-Since our interface to it is our own HTTP-based protocol, we don't
-(currently) need a user name and password. Lastly, we've defined
-our own "color" attribute to be used in making replication decisions.
diff --git a/gc-wrap.h b/gc-wrap.h
deleted file mode 100644
index 70c4b9d..0000000
--- a/gc-wrap.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <string.h>
-#define GC_THREADS
-#include "gc.h"
-
-#ifndef __cplusplus
-# define malloc(n) GC_MALLOC(n)
-# define calloc(m,n) GC_MALLOC((m)*(n))
-# define free(p) GC_FREE(p)
-# define realloc(p,n) GC_REALLOC((p),(n))
-#endif
-
-static inline char *
-my_strdup (char const *s)
-{
- size_t len = strlen (s);
- void *t = GC_MALLOC (len + 1);
- if (t == NULL)
- return NULL;
- return (char *) memcpy (t, s, len + 1);
-}
-# undef strdup
-# define strdup(s) my_strdup(s)
-
-static inline char *
-my_strndup (char const *s, size_t n)
-{
- size_t len = strnlen (s, n);
- char *t = (char *) GC_MALLOC (len + 1);
- if (t == NULL)
- return NULL;
- t[len] = '\0';
- return (char *) memcpy (t, s, len);
-}
-# undef strndup
-# define strndup(s, n) my_strndup(s, n)
diff --git a/gnulib b/gnulib
deleted file mode 160000
index d9f5da6..0000000
--- a/gnulib
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit d9f5da66f7c95f84b6b28b17cfa4c5248ad2b591
diff --git a/gnulib-tests/.gitignore b/gnulib-tests/.gitignore
deleted file mode 100644
index 259f316..0000000
--- a/gnulib-tests/.gitignore
+++ /dev/null
@@ -1,267 +0,0 @@
-/alloca.h
-/alloca.in.h
-/anytostr.c
-/asnprintf.c
-/binary-io.h
-/dup2.c
-/fcntl.h
-/fcntl.in.h
-/float+.h
-/float.h
-/float.in.h
-/getpagesize.c
-/gnulib.mk
-/ignore-value.h
-/imaxtostr.c
-/init.sh
-/inttostr.c
-/inttostr.h
-/lstat.c
-/macros.h
-/malloca.c
-/malloca.h
-/malloca.valgrind
-/offtostr.c
-/open.c
-/pathmax.h
-/printf-args.c
-/printf-args.h
-/printf-parse.c
-/printf-parse.h
-/priv-set.c
-/priv-set.h
-/putenv.c
-/same-inode.h
-/setenv.c
-/signature.h
-/size_max.h
-/snprintf.c
-/stat.c
-/stdio-write.c
-/stdio.h
-/stdio.in.h
-/symlink.c
-/sys
-/sys_stat.h
-/sys_stat.in.h
-/test-alloca-opt.c
-/test-binary-io.c
-/test-binary-io.sh
-/test-bitrotate.c
-/test-c-ctype.c
-/test-dirname.c
-/test-dup2.c
-/test-environ.c
-/test-errno.c
-/test-fcntl-h.c
-/test-fpending.c
-/test-fpending.sh
-/test-getopt.c
-/test-getopt.h
-/test-getopt_long.h
-/test-gettimeofday.c
-/test-hash.c
-/test-ignore-value.c
-/test-inttostr.c
-/test-inttypes.c
-/test-lstat.c
-/test-lstat.h
-/test-malloc-gnu.c
-/test-malloca.c
-/test-mbrtowc.c
-/test-mbrtowc1.sh
-/test-mbrtowc2.sh
-/test-mbrtowc3.sh
-/test-mbrtowc4.sh
-/test-mbsinit.c
-/test-mbsinit.sh
-/test-memchr.c
-/test-open.c
-/test-open.h
-/test-pipe.c
-/test-priv-set.c
-/test-quotearg-simple.c
-/test-quotearg.h
-/test-realloc-gnu.c
-/test-setenv.c
-/test-snprintf.c
-/test-stat.c
-/test-stat.h
-/test-stdbool.c
-/test-stddef.c
-/test-stdint.c
-/test-stdio.c
-/test-stdlib.c
-/test-strerror.c
-/test-string.c
-/test-strnlen.c
-/test-strstr.c
-/test-symlink.c
-/test-symlink.h
-/test-sys_stat.c
-/test-sys_time.c
-/test-sys_wait.h
-/test-time.c
-/test-unistd.c
-/test-unlink.c
-/test-unlink.h
-/test-unsetenv.c
-/test-update-copyright.sh
-/test-vasnprintf.c
-/test-vc-list-files-cvs.sh
-/test-vc-list-files-git.sh
-/test-verify.c
-/test-verify.sh
-/test-version-etc.c
-/test-version-etc.sh
-/test-wchar.c
-/test-wctype-h.c
-/test-wctype.c
-/test-xalloc-die.c
-/test-xalloc-die.sh
-/test-xstrtol.c
-/test-xstrtol.sh
-/test-xstrtoul.c
-/test-xstrtoumax.c
-/test-xstrtoumax.sh
-/time.h
-/time.in.h
-/uinttostr.c
-/umaxtostr.c
-/unlinkdir.c
-/unlinkdir.h
-/unsetenv.c
-/vasnprintf.c
-/vasnprintf.h
-/wctob.c
-/xsize.h
-/zerosize-ptr.h
-alloca.h
-alloca.in.h
-anytostr.c
-asnprintf.c
-binary-io.h
-dup2.c
-fcntl.h
-fcntl.in.h
-float+.h
-float.h
-float.in.h
-getpagesize.c
-gnulib.mk
-hash-pjw.c
-hash-pjw.h
-ignore-value.h
-imaxtostr.c
-init.sh
-inttostr.c
-inttostr.h
-lstat.c
-macros.h
-malloca.c
-malloca.h
-malloca.valgrind
-offtostr.c
-open.c
-pathmax.h
-printf-args.c
-printf-args.h
-printf-parse.c
-printf-parse.h
-putenv.c
-same-inode.h
-setenv.c
-signature.h
-size_max.h
-snprintf.c
-stat.c
-stdio-write.c
-stdio.h
-stdio.in.h
-symlink.c
-sys
-sys_stat.h
-sys_stat.in.h
-test-alloca-opt.c
-test-binary-io.c
-test-binary-io.sh
-test-bitrotate.c
-test-c-ctype.c
-test-dirname.c
-test-dup2.c
-test-environ.c
-test-errno.c
-test-fcntl-h.c
-test-fpending.c
-test-fpending.sh
-test-getopt.c
-test-getopt.h
-test-getopt_long.h
-test-hash.c
-test-ignore-value.c
-test-inttostr.c
-test-inttypes.c
-test-lstat.c
-test-lstat.h
-test-malloc-gnu.c
-test-malloca.c
-test-mbrtowc.c
-test-mbrtowc1.sh
-test-mbrtowc2.sh
-test-mbrtowc3.sh
-test-mbrtowc4.sh
-test-mbsinit.c
-test-mbsinit.sh
-test-memchr.c
-test-open.c
-test-open.h
-test-quotearg-simple.c
-test-quotearg.h
-test-realloc-gnu.c
-test-setenv.c
-test-snprintf.c
-test-stat.c
-test-stat.h
-test-stdbool.c
-test-stddef.c
-test-stdint.c
-test-stdio.c
-test-stdlib.c
-test-strerror.c
-test-string.c
-test-strnlen.c
-test-symlink.c
-test-symlink.h
-test-sys_stat.c
-test-sys_wait.c
-test-sys_wait.h
-test-time.c
-test-unistd.c
-test-unsetenv.c
-test-update-copyright.sh
-test-vasnprintf.c
-test-vc-list-files-cvs.sh
-test-vc-list-files-git.sh
-test-verify.c
-test-verify.sh
-test-version-etc.c
-test-version-etc.sh
-test-wchar.c
-test-wctype.c
-test-xalloc-die.c
-test-xalloc-die.sh
-test-xstrtol.c
-test-xstrtol.sh
-test-xstrtoul.c
-test-xstrtoumax.c
-test-xstrtoumax.sh
-time.h
-time.in.h
-uinttostr.c
-umaxtostr.c
-unsetenv.c
-vasnprintf.c
-vasnprintf.h
-wctob.c
-xsize.h
-zerosize-ptr.h
diff --git a/gnulib-tests/Makefile.am b/gnulib-tests/Makefile.am
deleted file mode 100644
index c3a48e8..0000000
--- a/gnulib-tests/Makefile.am
+++ /dev/null
@@ -1,3 +0,0 @@
-include gnulib.mk
-
-AM_CFLAGS = $(GNULIB_WARN_CFLAGS) # $(WERROR_CFLAGS)
diff --git a/iwh.h b/iwh.h
deleted file mode 100644
index e8cf710..0000000
--- a/iwh.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#define MY_PORT 9090
-
-#if defined(GLOBALS_IMPL)
-#define GLOBAL(type,name,value) type name = value
-#else
-#define GLOBAL(type,name,value) extern type name
-#endif
-
-GLOBAL(int, verbose, 0);
-GLOBAL(const char *, master_host, NULL);
-GLOBAL(unsigned short, master_port, MY_PORT);
-GLOBAL(const char *, db_host, "localhost");
-GLOBAL(unsigned short, db_port, 0);
-GLOBAL(const char *, me, "here");
-
-#define DPRINTF(fmt,args...) do {			\
-	if (verbose) {					\
-		printf("%d " fmt,getpid(),##args);	\
-		fflush(stdout);				\
-	}						\
-} while (0)
-
-#ifndef __attribute__
-# if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 8)
-# define __attribute__(x) /* empty */
-# endif
-#endif
-
-#ifndef ATTRIBUTE_UNUSED
-# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
-#endif
-
-#ifndef ATTRIBUTE_NORETURN
-# define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
-#endif
-
-/*
- * Common parts of autostart
- *
- * Directories are relative so they are based off local_path.
- * Notice that we continue to use the underscore convention even though
- * buckets are inside the AUTO_DIR_FS and do not conflict. Visual help:
- * you can see what to delete right away.
- *
- * We want our own Mongo instance for autostart. Mongo does not have
- * a feature "listen on port 0 and tell us what you got" (like Hail),
- * so we define a port and hope it's not in use...
- */
-#define AUTO_HOST "localhost"
-#define AUTO_DIR_FS "_fs"
-#define AUTO_DIR_DB "_db"
-#define AUTO_BIN_MONGOD "/usr/bin/mongod"
-#define AUTO_MONGOD_LOG "_mongod.log"
-#define AUTO_MONGOD_PORT 27018
-
-int auto_start (int dbport);
-
-#include "gc-wrap.h"
diff --git a/iwhd.spec.in b/iwhd.spec.in
deleted file mode 100644
index 4b26dce..0000000
--- a/iwhd.spec.in
+++ /dev/null
@@ -1,67 +0,0 @@
-Name: iwhd
-Version: @VERSION@
-Release: 1%{?dist}
-Summary: Image WareHouse Daemon
-
-Group: System Environment/Libraries
-License: GPLv3
-
-# FIXME: this is just the gitweb URL. Do we need more?
-URL: http://repo.or.cz/w/iwhd.git
-
-# pulled from upstream git,
-# to recreate tarball, check out commit, then run "make dist"
-Source0: iwhd-%{version}.tar.gz
-
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-
-BuildRequires: boost-devel
-BuildRequires: boost-filesystem
-BuildRequires: gc-devel
-BuildRequires: glib2-devel
-BuildRequires: hail-devel
-BuildRequires: jansson-devel
-BuildRequires: libcurl-devel
-BuildRequires: libmicrohttpd-devel
-BuildRequires: libxml2-devel
-BuildRequires: mongodb-devel
-BuildRequires: bison
-BuildRequires: flex
-BuildRequires: autoconf
-BuildRequires: automake
-BuildRequires: help2man
-
-# mongodb-server is required at build time so make check succeeds
-BuildRequires: mongodb-server
-
-%description
-Deltacloud image-warehouse daemon
-
-%prep
-%setup -q
-
-%build
-%configure
-make %{?_smp_mflags}
-
-%install
-rm -rf %{buildroot}
-make install DESTDIR=%{buildroot}
-
-%check
-make -s check
-
-%clean
-rm -rf %{buildroot}
-
-%post
-
-%files
-%defattr(-,root,root,-)
-%doc AUTHORS COPYING README NEWS
-%{_bindir}/iwhd
-%{_mandir}/man8/iwhd.8*
-
-%changelog
-* Wed Oct 7 2010 Jim Meyering <meyering(a)redhat.com> - 0.90-1
-- Initial release.
diff --git a/lib/.gitignore b/lib/.gitignore
deleted file mode 100644
index af6d24d..0000000
--- a/lib/.gitignore
+++ /dev/null
@@ -1,131 +0,0 @@
-/arg-nonnull.h
-/basename-lgpl.c
-/basename.c
-/bitrotate.h
-/c++defs.h
-/c-ctype.c
-/c-ctype.h
-/calloc.c
-/charset.alias
-/close-hook.c
-/close-hook.h
-/close-stream.c
-/close-stream.h
-/close.c
-/closeout.c
-/closeout.h
-/config.charset
-/configmake.h
-/dirname-lgpl.c
-/dirname.c
-/dirname.h
-/dup2.c
-/errno.h
-/errno.in.h
-/error.c
-/error.h
-/exitfail.c
-/exitfail.h
-/fclose.c
-/fpending.c
-/fpending.h
-/getopt.c
-/getopt.h
-/getopt.in.h
-/getopt1.c
-/getopt_int.h
-/gettext.h
-/gettimeofday.c
-/gnulib.mk
-/hash-pjw.c
-/hash-pjw.h
-/hash.c
-/hash.h
-/intprops.h
-/inttypes.h
-/inttypes.in.h
-/iswblank.c
-/libiwhd.a
-/localcharset.c
-/localcharset.h
-/lstat.c
-/malloc.c
-/mbrtowc.c
-/mbsinit.c
-/memchr.c
-/memchr.valgrind
-/mkstemp.c
-/pipe.c
-/progname.c
-/progname.h
-/quotearg.c
-/quotearg.h
-/realloc.c
-/ref-add.sed
-/ref-add.sin
-/ref-del.sed
-/ref-del.sin
-/stat.c
-/stdarg.h
-/stdarg.in.h
-/stdbool.h
-/stdbool.in.h
-/stddef.h
-/stddef.in.h
-/stdint.h
-/stdint.in.h
-/stdio-write.c
-/stdio.h
-/stdio.in.h
-/stdlib.h
-/stdlib.in.h
-/stpcpy.c
-/str-two-way.h
-/streq.h
-/strerror.c
-/string.h
-/string.in.h
-/stripslash.c
-/strndup.c
-/strnlen.c
-/strstr.c
-/strtoimax.c
-/strtok_r.c
-/strtol.c
-/strtoll.c
-/strtoul.c
-/strtoull.c
-/strtoumax.c
-/sys
-/sys_stat.h
-/sys_stat.in.h
-/sys_time.h
-/sys_time.in.h
-/sys_wait.in.h
-/tempname.c
-/tempname.h
-/time.h
-/time.in.h
-/unistd.h
-/unistd.in.h
-/unlink.c
-/unlocked-io.h
-/verify.h
-/version-etc-fsf.c
-/version-etc.c
-/version-etc.h
-/warn-on-use.h
-/wchar.h
-/wchar.in.h
-/wctype.h
-/wctype.in.h
-/xalloc-die.c
-/xalloc.h
-/xmalloc.c
-/xstrndup.c
-/xstrndup.h
-/xstrtol-error.c
-/xstrtol.c
-/xstrtol.h
-/xstrtoul.c
-/xstrtoumax.c
diff --git a/lib/Makefile.am b/lib/Makefile.am
deleted file mode 100644
index 4325180..0000000
--- a/lib/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-## Makefile for gnulib/lib -*-Makefile-*-
-
-# Copyright (C) 1995-2007, 2009-2011 Free Software Foundation, Inc.
-
-## This program is free software: you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation, either version 3 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-include gnulib.mk
-
-AM_CFLAGS += $(GNULIB_WARN_CFLAGS) $(WERROR_CFLAGS)
-
-libiwhd_a_LIBADD += $(LIBOBJS)
-libiwhd_a_DEPENDENCIES += $(LIBOBJS)
diff --git a/m4/ax_boost_base.m4 b/m4/ax_boost_base.m4
deleted file mode 100644
index 1aa3c2f..0000000
--- a/m4/ax_boost_base.m4
+++ /dev/null
@@ -1,252 +0,0 @@
-# ===========================================================================
-# http://www.gnu.org/software/autoconf-archive/ax_boost_base.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
-#
-# DESCRIPTION
-#
-# Test for the Boost C++ libraries of a particular version (or newer)
-#
-# If no path to the installed boost library is given the macro searchs
-# under /usr, /usr/local, /opt and /opt/local and evaluates the
-# $BOOST_ROOT environment variable. Further documentation is available at
-# <http://randspringer.de/boost/index.html>.
-#
-# This macro calls:
-#
-# AC_SUBST([BOOST_CPPFLAGS]) / AC_SUBST([BOOST_LDFLAGS])
-#
-# And sets:
-#
-# HAVE_BOOST
-#
-# LICENSE
-#
-# Copyright (c) 2008 Thomas Porschberg <thomas(a)randspringer.de>
-# Copyright (c) 2009 Peter Adolphs
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 17
-
-AC_DEFUN([AX_BOOST_BASE],
-[
-AC_ARG_WITH([boost],
- [AS_HELP_STRING([--with-boost@<:@=ARG@:>@],
- [use Boost library from a standard location (ARG=yes),
- from the specified location (ARG=<path>),
- or disable it (ARG=no)
- @<:@ARG=yes@:>@ ])],
- [
- if test "$withval" = "no"; then
- want_boost="no"
- elif test "$withval" = "yes"; then
- want_boost="yes"
- ac_boost_path=""
- else
- want_boost="yes"
- ac_boost_path="$withval"
- fi
- ],
- [want_boost="yes"])
-
-
-AC_ARG_WITH([boost-libdir],
- AS_HELP_STRING([--with-boost-libdir=LIB_DIR],
- [Force given directory for boost libraries. Note that this will overwrite library path detection, so use this parameter only if default library detection fails and you know exactly where your boost libraries are located.]),
- [
- if test -d "$withval"
- then
- ac_boost_lib_path="$withval"
- else
- AC_MSG_ERROR(--with-boost-libdir expected directory name)
- fi
- ],
- [ac_boost_lib_path=""]
-)
-
-if test "x$want_boost" = "xyes"; then
- boost_lib_version_req=ifelse([$1], ,1.20.0,$1)
- boost_lib_version_req_shorten=`expr $boost_lib_version_req : '([[0-9]]*.[[0-9]]*)'`
- boost_lib_version_req_major=`expr $boost_lib_version_req : '([[0-9]]*)'`
- boost_lib_version_req_minor=`expr $boost_lib_version_req : '[[0-9]]*.([[0-9]]*)'`
- boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[[0-9]]*.[[0-9]]*.([[0-9]]*)'`
- if test "x$boost_lib_version_req_sub_minor" = "x" ; then
- boost_lib_version_req_sub_minor="0"
- fi
- WANT_BOOST_VERSION=`expr $boost_lib_version_req_major * 100000 + $boost_lib_version_req_minor * 100 + $boost_lib_version_req_sub_minor`
- AC_MSG_CHECKING(for boostlib >= $boost_lib_version_req)
- succeeded=no
-
- dnl On x86_64 systems check for system libraries in both lib64 and lib.
- dnl The former is specified by FHS, but e.g. Debian does not adhere to
- dnl this (as it rises problems for generic multi-arch support).
- dnl The last entry in the list is chosen by default when no libraries
- dnl are found, e.g. when only header-only libraries are installed!
- libsubdirs="lib"
- if test `uname -m` = x86_64; then
- libsubdirs="lib64 lib lib64"
- fi
-
- dnl first we check the system location for boost libraries
- dnl this location ist chosen if boost libraries are installed with the --layout=system option
- dnl or if you install boost with RPM
- if test "$ac_boost_path" != ""; then
- BOOST_LDFLAGS="-L$ac_boost_path/$libsubdir"
- BOOST_CPPFLAGS="-I$ac_boost_path/include"
- elif test "$cross_compiling" != yes; then
- for ac_boost_path_tmp in /usr /usr/local /opt /opt/local ; do
- if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then
- for libsubdir in $libsubdirs ; do
- if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
- done
- BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir"
- BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include"
- break;
- fi
- done
- fi
-
- dnl overwrite ld flags if we have required special directory with
- dnl --with-boost-libdir parameter
- if test "$ac_boost_lib_path" != ""; then
- BOOST_LDFLAGS="-L$ac_boost_lib_path"
- fi
-
- CPPFLAGS_SAVED="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
-
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_REQUIRE([AC_PROG_CXX])
- AC_LANG_PUSH(C++)
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
- @%:@include <boost/version.hpp>
- ]], [[
- #if BOOST_VERSION >= $WANT_BOOST_VERSION
- // Everything is okay
- #else
- # error Boost version is too old
- #endif
- ]])],[
- AC_MSG_RESULT(yes)
- succeeded=yes
- found_system=yes
- ],[
- ])
- AC_LANG_POP([C++])
-
-
-
- dnl if we found no boost with system layout we search for boost libraries
- dnl built and installed without the --layout=system option or for a staged(not installed) version
- if test "x$succeeded" != "xyes"; then
- _version=0
- if test "$ac_boost_path" != ""; then
- if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
- for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
- _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's//include/boost-//' | sed 's/_/./'`
- V_CHECK=`expr $_version_tmp > $_version`
- if test "$V_CHECK" = "1" ; then
- _version=$_version_tmp
- fi
- VERSION_UNDERSCORE=`echo $_version | sed 's/./_/'`
- BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE"
- done
- fi
- else
- if test "$cross_compiling" != yes; then
- for ac_boost_path in /usr /usr/local /opt /opt/local ; do
- if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then
- for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do
- _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's//include/boost-//' | sed 's/_/./'`
- V_CHECK=`expr $_version_tmp > $_version`
- if test "$V_CHECK" = "1" ; then
- _version=$_version_tmp
- best_path=$ac_boost_path
- fi
- done
- fi
- done
-
- VERSION_UNDERSCORE=`echo $_version | sed 's/./_/'`
- BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
- if test "$ac_boost_lib_path" = ""; then
- for libsubdir in $libsubdirs ; do
- if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
- done
- BOOST_LDFLAGS="-L$best_path/$libsubdir"
- fi
- fi
-
- if test "x$BOOST_ROOT" != "x"; then
- for libsubdir in $libsubdirs ; do
- if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
- done
- if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then
- version_dir=`expr //$BOOST_ROOT : '.*/(.*)'`
- stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
- stage_version_shorten=`expr $stage_version : '([[0-9]]*.[[0-9]]*)'`
- V_CHECK=`expr $stage_version_shorten >= $_version`
- if test "$V_CHECK" = "1" && test "$ac_boost_lib_path" = "" ; then
- AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT)
- BOOST_CPPFLAGS="-I$BOOST_ROOT"
- BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
- fi
- fi
- fi
- fi
-
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_LANG_PUSH(C++)
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
- @%:@include <boost/version.hpp>
- ]], [[
- #if BOOST_VERSION >= $WANT_BOOST_VERSION
- // Everything is okay
- #else
- # error Boost version is too old
- #endif
- ]])],[
- AC_MSG_RESULT(yes)
- succeeded=yes
- found_system=yes
- ],[
- ])
- AC_LANG_POP([C++])
- fi
-
- if test "$succeeded" != "yes" ; then
- if test "$_version" = "0" ; then
- AC_MSG_NOTICE([[We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify $BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation.]])
- else
- AC_MSG_NOTICE([Your boost libraries seems to old (version $_version).])
- fi
- # execute ACTION-IF-NOT-FOUND (if present):
- ifelse([$3], , :, [$3])
- else
- AC_SUBST([BOOST_CPPFLAGS])
- AC_SUBST([BOOST_LDFLAGS])
- AC_DEFINE([HAVE_BOOST],,[define if the Boost library is available])
- # execute ACTION-IF-FOUND (if present):
- ifelse([$2], , :, [$2])
- fi
-
- CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
-fi
-
-])
diff --git a/m4/ax_boost_system.m4 b/m4/ax_boost_system.m4
deleted file mode 100644
index 612d6a6..0000000
--- a/m4/ax_boost_system.m4
+++ /dev/null
@@ -1,120 +0,0 @@
-# ===========================================================================
-# http://www.gnu.org/software/autoconf-archive/ax_boost_system.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_BOOST_SYSTEM
-#
-# DESCRIPTION
-#
-# Test for System library from the Boost C++ libraries. The macro requires
-# a preceding call to AX_BOOST_BASE. Further documentation is available at
-# <http://randspringer.de/boost/index.html>.
-#
-# This macro calls:
-#
-# AC_SUBST([BOOST_SYSTEM_LIB])
-#
-# And sets:
-#
-# HAVE_BOOST_SYSTEM
-#
-# LICENSE
-#
-# Copyright (c) 2008 Thomas Porschberg <thomas(a)randspringer.de>
-# Copyright (c) 2008 Michael Tindal
-# Copyright (c) 2008 Daniel Casimiro <dan.casimiro(a)gmail.com>
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 10
-
-AC_DEFUN([AX_BOOST_SYSTEM],
-[
- AC_ARG_WITH([boost-system],
- AS_HELP_STRING([--with-boost-system@<:@=special-lib@:>@],
- [use the System library from boost - it is possible to specify a certain library for the linker
- e.g. --with-boost-system=boost_system-gcc-mt ]),
- [
- if test "$withval" = "no"; then
- want_boost="no"
- elif test "$withval" = "yes"; then
- want_boost="yes"
- ax_boost_user_system_lib=""
- else
- want_boost="yes"
- ax_boost_user_system_lib="$withval"
- fi
- ],
- [want_boost="yes"]
- )
-
- if test "x$want_boost" = "xyes"; then
- AC_REQUIRE([AC_PROG_CC])
- AC_REQUIRE([AC_CANONICAL_BUILD])
- CPPFLAGS_SAVED="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
-
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_CACHE_CHECK(whether the Boost::System library is available,
- ax_cv_boost_system,
- [AC_LANG_PUSH([C++])
- CXXFLAGS_SAVE=$CXXFLAGS
-
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/system/error_code.hpp>]],
- [[boost::system::system_category]])],
- ax_cv_boost_system=yes, ax_cv_boost_system=no)
- CXXFLAGS=$CXXFLAGS_SAVE
- AC_LANG_POP([C++])
- ])
- if test "x$ax_cv_boost_system" = "xyes"; then
- AC_SUBST([BOOST_CPPFLAGS])
-
- AC_DEFINE([HAVE_BOOST_SYSTEM],,[define if the Boost::System library is available])
- BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^/@:>@*//'`
-
- LDFLAGS_SAVE=$LDFLAGS
- if test "x$ax_boost_user_system_lib" = "x"; then
- for libextension in `ls $BOOSTLIBDIR/libboost_system*.so* $BOOSTLIBDIR/libboost_system*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib(boost_system.*).so.*$;1;' -e 's;^lib(boost_system.*).a*$;1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST([BOOST_SYSTEM_LIB]) link_system="yes"; break],
- [link_system="no"])
- done
- if test "x$link_system" != "xyes"; then
- for libextension in `ls $BOOSTLIBDIR/boost_system*.{dll,a}* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^(boost_system.*).dll.*$;1;' -e 's;^(boost_system.*).a*$;1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST([BOOST_SYSTEM_LIB]) link_system="yes"; break],
- [link_system="no"])
- done
- fi
-
- else
- for ax_lib in $ax_boost_user_system_lib boost_system-$ax_boost_user_system_lib; do
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_SYSTEM_LIB="-l$ax_lib"; AC_SUBST([BOOST_SYSTEM_LIB]) link_system="yes"; break],
- [link_system="no"])
- done
-
- fi
- if test "x$ax_lib" = "x"; then
- AC_MSG_ERROR(Could not find a version of the library!)
- fi
- if test "x$link_system" = "xno"; then
- AC_MSG_ERROR(Could not link against $ax_lib !)
- fi
- fi
-
- CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
- fi
-])
diff --git a/m4/ax_boost_thread.m4 b/m4/ax_boost_thread.m4
deleted file mode 100644
index b67b596..0000000
--- a/m4/ax_boost_thread.m4
+++ /dev/null
@@ -1,149 +0,0 @@
-# ===========================================================================
-# http://www.gnu.org/software/autoconf-archive/ax_boost_thread.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-# AX_BOOST_THREAD
-#
-# DESCRIPTION
-#
-# Test for Thread library from the Boost C++ libraries. The macro requires
-# a preceding call to AX_BOOST_BASE. Further documentation is available at
-# <http://randspringer.de/boost/index.html>.
-#
-# This macro calls:
-#
-# AC_SUBST([BOOST_THREAD_LIB])
-#
-# And sets:
-#
-# HAVE_BOOST_THREAD
-#
-# LICENSE
-#
-# Copyright (c) 2009 Thomas Porschberg <thomas(a)randspringer.de>
-# Copyright (c) 2009 Michael Tindal
-#
-# Copying and distribution of this file, with or without modification, are
-# permitted in any medium without royalty provided the copyright notice
-# and this notice are preserved. This file is offered as-is, without any
-# warranty.
-
-#serial 17
-
-AC_DEFUN([AX_BOOST_THREAD],
-[
- AC_ARG_WITH([boost-thread],
- AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@],
- [use the Thread library from boost - it is possible to specify a certain library for the linker
- e.g. --with-boost-thread=boost_thread-gcc-mt ]),
- [
- if test "$withval" = "no"; then
- want_boost="no"
- elif test "$withval" = "yes"; then
- want_boost="yes"
- ax_boost_user_thread_lib=""
- else
- want_boost="yes"
- ax_boost_user_thread_lib="$withval"
- fi
- ],
- [want_boost="yes"]
- )
-
- if test "x$want_boost" = "xyes"; then
- AC_REQUIRE([AC_PROG_CC])
- AC_REQUIRE([AC_CANONICAL_BUILD])
- CPPFLAGS_SAVED="$CPPFLAGS"
- CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
- export CPPFLAGS
-
- LDFLAGS_SAVED="$LDFLAGS"
- LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
- export LDFLAGS
-
- AC_CACHE_CHECK(whether the Boost::Thread library is available,
- ax_cv_boost_thread,
- [AC_LANG_PUSH([C++])
- CXXFLAGS_SAVE=$CXXFLAGS
-
- if test "x$build_os" = "xsolaris" ; then
- CXXFLAGS="-pthreads $CXXFLAGS"
- elif test "x$build_os" = "xming32" ; then
- CXXFLAGS="-mthreads $CXXFLAGS"
- else
- CXXFLAGS="-pthread $CXXFLAGS"
- fi
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/thread/thread.hpp>]],
- [[boost::thread_group thrds;
- return 0;]])],
- ax_cv_boost_thread=yes, ax_cv_boost_thread=no)
- CXXFLAGS=$CXXFLAGS_SAVE
- AC_LANG_POP([C++])
- ])
- if test "x$ax_cv_boost_thread" = "xyes"; then
- if test "x$build_os" = "xsolaris" ; then
- BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS"
- elif test "x$build_os" = "xming32" ; then
- BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS"
- else
- BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS"
- fi
-
- AC_SUBST([BOOST_CPPFLAGS])
-
- AC_DEFINE([HAVE_BOOST_THREAD],,[define if the Boost::Thread library is available])
- BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^/@:>@*//'`
-
- LDFLAGS_SAVE=$LDFLAGS
- case "x$build_os" in
- *bsd* )
- LDFLAGS="-pthread $LDFLAGS"
- break;
- ;;
- esac
- if test "x$ax_boost_user_thread_lib" = "x"; then
- for libextension in `ls $BOOSTLIBDIR/libboost_thread*.so* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib(boost_thread.*).so.*$;1;'` `ls $BOOSTLIBDIR/libboost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib(boost_thread.*).a*$;1;'`; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST([BOOST_THREAD_LIB]) link_thread="yes"; break],
- [link_thread="no"])
- done
- if test "x$link_thread" != "xyes"; then
- for libextension in `ls $BOOSTLIBDIR/boost_thread*.dll* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^(boost_thread.*).dll.*$;1;'` `ls $BOOSTLIBDIR/boost_thread*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^(boost_thread.*).a*$;1;'` ; do
- ax_lib=${libextension}
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST([BOOST_THREAD_LIB]) link_thread="yes"; break],
- [link_thread="no"])
- done
- fi
-
- else
- for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do
- AC_CHECK_LIB($ax_lib, exit,
- [BOOST_THREAD_LIB="-l$ax_lib"; AC_SUBST([BOOST_THREAD_LIB]) link_thread="yes"; break],
- [link_thread="no"])
- done
-
- fi
- if test "x$ax_lib" = "x"; then
- AC_MSG_ERROR(Could not find a version of the library!)
- fi
- if test "x$link_thread" = "xno"; then
- AC_MSG_ERROR(Could not link against $ax_lib !)
- else
- case "x$build_os" in
- *bsd* )
- BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS"
- break;
- ;;
- esac
-
- fi
- fi
-
- CPPFLAGS="$CPPFLAGS_SAVED"
- LDFLAGS="$LDFLAGS_SAVED"
- fi
-])
diff --git a/man/Makefile.am b/man/Makefile.am
deleted file mode 100644
index 76e1c2a..0000000
--- a/man/Makefile.am
+++ /dev/null
@@ -1,26 +0,0 @@
-# Automakefile for GNU diffutils man pages
-
-# Copyright (C) 2002, 2009-2011 Free Software Foundation, Inc.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-dist_man8_MANS = iwhd.8
-MAINTAINERCLEANFILES = $(dist_man8_MANS)
-
-# Depend on configure.ac to get version number changes.
-iwhd.8: $(top_srcdir)/configure.ac $(top_srcdir)/rest.c
- $(AM_V_GEN) - (echo '[NAME]' && sed 's@/* *@@; s/-/\-/; q' $(top_srcdir)/rest.c)|- $(HELP2MAN) -i - -S '$(PACKAGE) $(VERSION)' ../iwhd | - sed 's/^.B info .*/.B info FIXME/' > $@-t && mv $@-t $@
diff --git a/meta.cpp b/meta.cpp
deleted file mode 100644
index 110a7c6..0000000
--- a/meta.cpp
+++ /dev/null
@@ -1,685 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include <config.h>
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <sys/time.h>
-#include <iostream>
-#include "iwh.h"
-#include "meta.h"
-#include "query.h"
-
-using namespace std;
-
-/* Mongo (rather antisocially) tries to define this itself. */
-#if defined(VERSION)
-#undef VERSION
-#endif
-
-#include <mongo/client/dbclient.h>
-using namespace mongo;
-
-/* TBD: parameterize */
-#define MAIN_TBL "repo.main"
-
-/*
- * Since the client isn't inherently MT-safe, we serialize access to it
- * ourselves. Fortunately, none of our metadata operations should be very
- * long-lived; if they are it probably means our connection is FUBAR and other
- * threads will be affected anyway.
- */
-
-#define SHOW_CONTENTION
-
-pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
-#if defined(SHOW_CONTENTION)
-#define CLIENT_LOCK do { - if (pthread_mutex_trylock(&client_lock) != 0) { - cout << "contention in " << __func__ << endl; - pthread_mutex_lock(&client_lock); - } -} while (0)
-#else
-#define CLIENT_LOCK pthread_mutex_lock(&client_lock)
-#endif
-#define CLIENT_UNLOCK pthread_mutex_unlock(&client_lock)
-
-void
-dbl_to_str (double *foo, char *optr)
-{
- unsigned int i;
- unsigned char *iptr = (unsigned char *)foo;
-
- for (i = 0; i < sizeof(*foo); ++i) {
- optr += sprintf(optr,"%02x",*(iptr++));
- }
-}
-
-class RepoMeta;
-class RepoQuery;
-
-class RepoMeta {
-
-public:
- RepoMeta ();
- ~RepoMeta ();
-
- DBClientConnection client;
- char addr[128];
-
- char * DidPut (const char *bucket, const char *key,
- const char *loc, size_t size);
- void GotCopy (const char *bucket, const char *key,
- const char *loc);
- char * HasCopy (const char *bucket, const char *key,
- const char *loc);
- int SetValue (const char *bucket, const char *key,
- const char *mkey, const char * mvalue);
- int GetValue (const char *bucket, const char *key,
- const char *mkey, char ** mvalue);
- RepoQuery * NewQuery (const char *bucket, const char *key,
- const char * expr);
- auto_ptr<DBClientCursor> GetCursor (Query &q);
- void Delete (const char *bucket, const char *key);
- size_t GetSize (const char *bucket, const char *key);
- int Check (const char *bucket, const char *key,
- const char *depot);
- void * GetAttrList (const char *bucket, const char *key);
-};
-
-class RepoQuery {
- RepoMeta & parent;
- DBClientCursor * curs;
- value_t * expr;
-public:
- RepoQuery (const char *, const char *, const char *,
- RepoMeta &);
- ~RepoQuery ();
- bool Next (void);
- char *bucket;
- char *key;
- getter_t getter;
-};
-
-static RepoMeta *it;
-
-RepoMeta::RepoMeta ()
-{
- if (!verbose) {
- cout.rdbuf(0);
- cout << "bite me" << endl;
- }
-
- // TBD: assemble this string properly
- sprintf(addr,"%s:%u",db_host,db_port);
- try {
- client.connect(addr);
- }
- catch (ConnectException &ce) {
- cerr << "server down, no metadata access" << endl;
- }
-}
-
-extern "C" void
-meta_init (void)
-{
- it = new RepoMeta();
-}
-
-RepoMeta::~RepoMeta ()
-{
-}
-
-extern "C" void
-meta_fini (void)
-{
- delete it;
-}
-
-auto_ptr<DBClientCursor>
-RepoMeta::GetCursor (Query &q)
-{
- auto_ptr<DBClientCursor> curs;
- bool looping = false;
-
- for (;;) {
- if (!client.isFailed()) {
- curs = client.query(MAIN_TBL,q);
- if (curs.get()) {
- break;
- }
- }
- if (looping) {
- break;
- }
- try {
- client.connect(addr);
- }
- catch (ConnectException &ce) {
- cerr << "reconnection to " << addr << " failed"
- << endl;
- }
- looping = true;
- }
-
- return curs;
-}
-
-char *
-RepoMeta::DidPut (const char *bucket, const char *key, const char *loc,
- size_t size)
-{
- BSONObjBuilder bb;
- struct timeval now_tv;
- double now;
- auto_ptr<DBClientCursor> curs;
- Query q;
- char now_str[sizeof(now)*2+1];
-
- gettimeofday(&now_tv,NULL);
- now = (double)now_tv.tv_sec + (double)now_tv.tv_usec / 1000000.0;
- dbl_to_str(&now,now_str);
- cout << "now_str = " << now_str << endl;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key);
- curs = GetCursor(q);
- if (!curs.get()) {
- cerr << "DidPut failed for " << bucket << "/" << key << endl;
- return NULL;
- }
- if (curs->more()) {
- /* Nice functionality, but what an ugly syntax! */
- client.update(MAIN_TBL,q,BSON(
- "$set"<<BSON("_loc"<<BSON_ARRAY(loc))
- << "$set"<<BSON("_date"<<now)
- << "$set"<<BSON("_etag"<<now_str)
- << "$set"<<BSON("_size"<<(long long)size)));
-#if 0
- client.update(MAIN_TBL,q,
- BSON("$set"<<BSON("_loc"<<BSON_ARRAY(loc))));
- client.update(MAIN_TBL,q,
- BSON("$set"<<BSON("_date"<<now)));
- client.update(MAIN_TBL,q,
- BSON("$set"<<BSON("_etag"<<now_str)));
- client.update(MAIN_TBL,q,
- BSON("$set"<<BSON("_size"<<(long long)size)));
-#endif
- }
- else {
- bb << "_bucket" << bucket << "_key" << key
- << "_loc" << BSON_ARRAY(loc) << "_date" << now
- << "_etag" << now_str << "_size" << (long long)size;
- client.insert(MAIN_TBL,bb.obj());
- }
-
- return strdup(now_str);
-}
-
-extern "C" char *
-meta_did_put (const char *bucket, const char *key, const char *loc, size_t size)
-{
- char *rc;
-
- cout << "meta_did_put(" << bucket << "," << key << "," << loc << ")"
- << endl;
-
- CLIENT_LOCK;
- rc = it->DidPut(bucket,key,loc,size);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-void
-RepoMeta::GotCopy (const char *bucket, const char *key, const char *loc)
-{
- BSONObjBuilder bb;
- auto_ptr<DBClientCursor> curs;
- Query q;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key);
- curs = GetCursor(q);
- if (!curs.get()) {
- cerr << "GotCopy failed for " << bucket << "/" << key << endl;
- return;
- }
- if (curs->more()) {
- /* Nice functionality, but what an ugly syntax! */
- client.update(MAIN_TBL,q,BSON("$addToSet"<<BSON("_loc"<<loc)));
- }
- else {
- cerr << bucket << "/" << key << " not found in GotCopy!" << endl;
- }
-}
-
-extern "C" void
-meta_got_copy (const char *bucket, const char *key, const char *loc)
-{
- CLIENT_LOCK;
- it->GotCopy(bucket,key,loc);
- CLIENT_UNLOCK;
-}
-
-char *
-RepoMeta::HasCopy (const char *bucket, const char *key, const char *loc)
-{
- BSONObjBuilder bb;
- auto_ptr<DBClientCursor> curs;
- Query q;
- const char *value;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key<<"_loc"<<loc);
- curs = GetCursor(q);
- if (!curs.get()) {
- cerr << "HasCopy failed for " << bucket << "/" << key << endl;
- return NULL;
- }
- if (!curs->more()) {
- cout << bucket << "/" << key << " not found at " << loc << endl;
- return (char *)"";
- }
-
- value = curs->next().getStringField("_etag");
- if (!value || !*value) {
- cout << bucket << "/" << key << " no _etag at " << loc << endl;
- return (char *)"";
- }
-
- cout << bucket << "/" << key << " _etag = " << value << endl;
- return strdup(value);
-}
-
-extern "C" char *
-meta_has_copy (const char *bucket, const char *key, const char *loc)
-{
- char *rc;
-
- CLIENT_LOCK;
- rc = it->HasCopy(bucket,key,loc);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-int
-RepoMeta::SetValue (const char *bucket, const char *key, const char *mkey,
- const char * mvalue)
-{
- Query q = QUERY("_bucket"<<bucket<<"_key"<<key);
-
- try {
- client.update(MAIN_TBL,q,BSON("$set"<<BSON(mkey<<mvalue)),1);
- }
- catch (ConnectException &ce) {
- cerr << "SetValue failed for " << bucket << "/" << key << ":"
- << mkey << endl;
- return ENOTCONN;
- }
-
- // TBD: check for and propagate errors.
- return 0;
-}
-
-extern "C" int
-meta_set_value (const char *bucket, const char *key, const char *mkey,
- const char * mvalue)
-{
- int rc;
-
- CLIENT_LOCK;
- rc = it->SetValue(bucket,key,mkey,mvalue);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-int
-RepoMeta::GetValue (const char *bucket, const char *key, const char *mkey,
- char ** mvalue)
-{
- auto_ptr<DBClientCursor> curs;
- Query q;
- BSONObj bo;
- const char * data;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key);
- curs = GetCursor(q);
- if (!curs.get()) {
- cerr << "GetValue failed for " << bucket << "/" << key << ":"
- << mkey << endl;
- return ENOTCONN;
- }
- if (!curs->more()) {
- return ENXIO;
- }
-
- bo = curs->next();
- data = bo.getStringField(mkey);
- if (!data || !*data) {
- return ENXIO;
- }
-
- *mvalue = strdup(data);
- return 0;
-}
-
-extern "C" int
-meta_get_value (const char *bucket, const char *key, const char *mkey,
- char ** mvalue)
-{
- int rc;
-
- CLIENT_LOCK;
- rc = it->GetValue(bucket,key,mkey,mvalue);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-RepoQuery::RepoQuery (const char *bucket, const char *key, const char *qstr,
- RepoMeta &p)
- : parent(p)
-{
- Query q;
- auto_ptr<DBClientCursor> tmp;
-
- if (bucket) {
- cout << "bucket is " << bucket << " and we don't care" << endl;
- q = QUERY("_bucket"<<bucket);
- }
- else if (key) {
- cout << "key is " << key << " and we don't care" << endl;
- q = QUERY("_key"<<key);
- }
- else {
- abort();
- }
-
- /*
- * TBD: we should really convert our query into one of Mongo's,
- * and let them do all the work. Handling the general case
- * would be pretty messy, but we could handle specific cases
- * pretty easily. For example, a very high percentage of
- * queries are likely to be a single field/value comparison.
- * For now just punt, but revisit later.
- */
-
- if (qstr) {
- expr = parse(qstr);
- if (expr) {
- print_value(expr);
- }
- else {
- cout << "could not parse " << qstr << endl;
- }
- }
- else {
- expr = NULL;
- }
-
- curs = parent.GetCursor(q).release();
- bucket = NULL;
- key = NULL;
-}
-
-RepoQuery::~RepoQuery ()
-{
- cout << "in " << __func__ << endl;
-
- delete curs;
-}
-
-extern "C" void
-meta_query_stop (void * qobj)
-{
- CLIENT_LOCK;
- delete (RepoQuery *)qobj;
- CLIENT_UNLOCK;
-}
-
-extern "C" const char *
-query_getter (void *ctx, const char *id)
-{
- BSONObj *cur_bo = (BSONObj *)ctx;
-
- return (char *)cur_bo->getStringField(id);
-}
-
-bool
-RepoQuery::Next (void)
-{
- BSONObj bo;
-
- if (!curs) {
- return false;
- }
-
- while (curs->more()) {
- bo = curs->next();
- if (expr) {
- getter.func = query_getter;
- getter.ctx = (void *)&bo;
- if (eval(expr,&getter,NULL) <= 0) {
- continue;
- }
- }
- bucket = (char *)bo.getStringField("_bucket");
- key = (char *)bo.getStringField("_key");
- return true;
- }
-
- return false;
-}
-
-RepoQuery *
-RepoMeta::NewQuery (const char *bucket, const char *key, const char *expr)
-{
- return new RepoQuery(bucket,key,expr,*this);
-}
-
-extern "C" void *
-meta_query_new (const char *bucket, const char *key, const char *expr)
-{
- void *rc;
-
- if ((bucket && key) || (!bucket && !key)) {
- return NULL;
- }
-
- CLIENT_LOCK;
- rc = it->NewQuery(bucket,key,expr);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-extern "C" int
-meta_query_next (void * qobj, char ** bucket, char ** key)
-{
- RepoQuery * rq = (RepoQuery *)qobj;
-
- CLIENT_LOCK;
- if (!rq->Next()) {
- CLIENT_UNLOCK;
- return 0;
- }
- CLIENT_UNLOCK;
-
- *bucket = rq->bucket;
- *key = rq->key;
- return 1;
-}
-
-#if 0
-char *
-RepoMeta::BucketList (void)
-{
- /*
- * TBD: make this return values instead of producing output.
- * This is just a code fragment showing how to get a list of buckets,
- * in case I forget.
- */
- BSONObj repl;
-
- BSONObj dist = BSON("distinct"<<"main"<<"_key"<<"_bucket");
- if (client.runCommand("repo",dist,repl)) {
- cout << repl.toString() << endl;
- BSONObj elem = repl.getField("values").embeddedObject();
- for (int i = 0; i < elem.nFields(); ++i) {
- cout << elem[i].str() << endl;
- }
- }
-}
-#endif
-
-void
-RepoMeta::Delete (const char *bucket, const char *key)
-{
- Query q = QUERY("_bucket"<<bucket<<"_key"<<key);
-
- try {
- client.remove(MAIN_TBL,q);
- }
- catch (ConnectException &ce) {
- cerr << "Delete failed for " << bucket << "/" << key << endl;
- }
-}
-
-extern "C"
-void
-meta_delete (const char *bucket, const char *key)
-{
- CLIENT_LOCK;
- it->Delete(bucket,key);
- CLIENT_UNLOCK;
-}
-
-size_t
-RepoMeta::GetSize (const char *bucket, const char *key)
-{
- auto_ptr<DBClientCursor> curs;
- Query q;
- BSONObj bo;
- const char * data;
-
- (void)data;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key);
- curs = GetCursor(q);
-
- if (!curs->more()) {
- return 0;
- }
-
- bo = curs->next();
- return bo.getField("_size").numberLong();
-}
-
-extern "C"
-size_t
-meta_get_size (const char *bucket, const char *key)
-{
- size_t rc;
-
- CLIENT_LOCK;
- rc = it->GetSize(bucket,key);
- CLIENT_UNLOCK;
-
- return rc;
-}
-
-class AttrList {
-public:
- AttrList (BSONObj &);
- int Next (const char **, const char **);
- BSONObj obj;
- vector<BSONElement> vec;
- int idx;
-};
-
-AttrList::AttrList (BSONObj &bo)
-{
- obj = bo.copy();
- obj.elems(vec);
- idx = 0;
-}
-
-int
-AttrList::Next (const char **name, const char **value)
-{
- BSONElement elem;
-
- while (idx < vec.size()) {
- elem = vec[idx++];
- if (elem.type() == String) {
- *name = elem.fieldName();
- *value = elem.String().c_str();
- return 1;
- }
- }
-
- return 0;
-}
-
-void *
-RepoMeta::GetAttrList (const char *bucket, const char *key)
-{
- auto_ptr<DBClientCursor> curs;
- Query q;
- BSONObj bo;
-
- q = QUERY("_bucket"<<bucket<<"_key"<<key);
- curs = GetCursor(q);
-
- if (!curs->more()) {
- return NULL;
- }
- bo = curs->next();
-
- return new AttrList(bo);
-}
-extern "C"
-void *
-meta_get_attrs (const char *bucket, const char *key)
-{
- void *poc;
-
- CLIENT_LOCK;
- poc = it->GetAttrList(bucket,key);
- CLIENT_UNLOCK;
-
- return poc;
-}
-
-extern "C"
-int
-meta_attr_next (void *ctx, const char **name, const char **value)
-{
- AttrList *poc = (AttrList *)ctx;
-
- return poc->Next(name,value);
-}
-
-extern "C"
-void
-meta_attr_stop (void *ctx)
-{
- AttrList *poc = (AttrList *)ctx;
-
- delete poc;
-}
diff --git a/meta.h b/meta.h
deleted file mode 100644
index b620178..0000000
--- a/meta.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#if !defined(_META_H)
-#define _META_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-enum { QUERY_BKT_LIST, QUERY_OBJ_LIST, QUERY_FILTER };
-
-void meta_init (void);
-void meta_fini (void);
-char *meta_did_put (const char *bucket, const char *key, const char *loc,
- size_t size);
-void meta_got_copy (const char *bucket, const char *key, const char *loc);
-char *meta_has_copy (const char *bucket, const char *key, const char *loc);
-int meta_set_value (const char *bucket, const char *key, const char *mkey,
- const char *mvalue);
-int meta_get_value (const char *bucket, const char *key, const char *mkey,
- char **mvalue);
-
-void *meta_query_new (const char *bucket, const char *key, const char *expr);
-int meta_query_next (void *qobj, char **bucket, char **key);
-void meta_query_stop (void *qobj);
-void meta_delete (const char *bucket, const char *key);
-size_t meta_get_size (const char *bucket, const char *key);
-void *meta_get_attrs (const char *bucket, const char *key);
-int meta_attr_next (void *aobj, const char **, const char **);
-void meta_attr_stop (void *aobj);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/mpipe.c b/mpipe.c
deleted file mode 100644
index d4ddb7c..0000000
--- a/mpipe.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include <config.h>
-#include <assert.h>
-#include <unistd.h>
-
-#include "iwh.h"
-#include "mpipe.h"
-
-void
-pipe_init_shared (pipe_shared *ps, void *owner, unsigned short ncons)
-{
- ps->owner = owner;
- pthread_mutex_init(&ps->lock,NULL);
- pthread_cond_init(&ps->prod_cond,NULL);
- pthread_cond_init(&ps->cons_cond,NULL);
- pipe_reset(ps,ncons);
-}
-
-void
-pipe_reset (pipe_shared *ps, unsigned short ncons)
-{
- ps->data_ptr = NULL;
- ps->data_len = 0;
- ps->sequence = 0; /* TBD: randomize? */
- ps->cons_total = ncons;
- ps->cons_done = 0;
- ps->cons_error = 0;
- ps->cons_init_done = 0;
- ps->cons_init_error = 0;
- ps->prod_state = PROD_INIT;
-}
-
-pipe_private *
-pipe_init_private (pipe_shared *ps)
-{
- pipe_private *pp;
-
- pp = malloc(sizeof(*pp));
- if (pp) {
- pp->shared = ps;
- /*
- * The producer might already have posted #1, so we can't use
- * ps->sequence+1. This precludes consumers joining
- * mid-stream, but that was never a goal anyway.
- */
- pp->sequence = 1;
- pp->offset = 0;
- }
- return pp;
-}
-
-int
-pipe_cons_wait (pipe_private *pp)
-{
- pipe_shared *ps = pp->shared;
- int rc;
-
- pp->offset = 0;
- pthread_mutex_lock(&ps->lock);
-
- while (ps->sequence != pp->sequence) {
- DPRINTF("consumer about to wait for %lun",pp->sequence);
- pthread_cond_wait(&ps->cons_cond,&ps->lock);
- DPRINTF("consumer done waitingn");
- }
-
- rc = (ps->data_len != 0);
- if (!rc) {
- DPRINTF("consumer saw producer is donen");
- if (++ps->cons_done + ps->cons_error >= ps->cons_total) {
- pthread_cond_signal(&ps->prod_cond);
- }
- rc = 0;
- }
-
- pthread_mutex_unlock(&ps->lock);
- return rc;
-}
-
-void
-pipe_cons_signal (pipe_private *pp, int error)
-{
- pipe_shared *ps = pp->shared;
-
- pthread_mutex_lock(&ps->lock);
- ++pp->sequence;
- pp->offset = 0;
-
- if (error) {
- ++ps->cons_error;
- }
- else {
- ++ps->cons_done;
- }
- if (ps->cons_done + ps->cons_error >= ps->cons_total) {
- DPRINTF("consumer signal, total %u done %u error %un",
- ps->cons_total, ps->cons_done, ps->cons_error);
- pthread_cond_signal(&ps->prod_cond);
- }
- pthread_mutex_unlock(&ps->lock);
-}
-
-void
-pipe_cons_siginit (pipe_shared *ps, int error)
-{
- pthread_mutex_lock(&ps->lock);
- assert ((ps->cons_init_done + ps->cons_init_error) < ps->cons_total);
- if (error) {
- ++ps->cons_init_error;
- }
- else {
- ++ps->cons_init_done;
- }
- pthread_cond_broadcast(&ps->prod_cond);
- DPRINTF("consumer init signal (total %u done %u error %u)n",
- ps->cons_total,ps->cons_init_done,ps->cons_init_error);
- pthread_mutex_unlock(&ps->lock);
-}
-
-/*
- * Return the number of bad children, or -1 if some other error.
- */
-int
-pipe_prod_wait_init (pipe_shared *ps)
-{
- pthread_mutex_lock(&ps->lock);
- DPRINTF("producer initializing (total %u done %u error %u)n",
- ps->cons_total, ps->cons_init_done, ps->cons_init_error);
- while (ps->cons_init_done + ps->cons_init_error < ps->cons_total) {
- pthread_cond_broadcast(&ps->cons_cond);
- pthread_cond_wait(&ps->prod_cond,&ps->lock);
- DPRINTF(" after sleep (total %u done %u error %u)n",
- ps->cons_total,ps->cons_init_done,ps->cons_init_error);
- }
- pthread_mutex_unlock(&ps->lock);
- return ps->cons_init_error;
-}
-
-void
-pipe_prod_signal (pipe_shared *ps, void *ptr, size_t total)
-{
- pthread_mutex_lock(&ps->lock);
- if (ps->cons_error >= ps->cons_total) {
- DPRINTF("producer posting %zu bytes as %ld, no sinks"
- " (total %u error %u)n",
- total,ps->sequence+1, ps->cons_total,ps->cons_error);
- pthread_mutex_unlock(&ps->lock);
- return;
- }
- ps->data_ptr = ptr;
- ps->data_len = total;
- ps->cons_done = ps->cons_error;
- ++ps->sequence;
- DPRINTF("producer posting %zu bytes as %ld (total %u error %u)n",
- total,ps->sequence, ps->cons_total,ps->cons_error);
- while (ps->cons_done + ps->cons_error < ps->cons_total) {
- pthread_cond_broadcast(&ps->cons_cond);
- pthread_cond_wait(&ps->prod_cond,&ps->lock);
- DPRINTF("%u children yet to read (total %u done %u error %u)n",
- ps->cons_total - (ps->cons_done + ps->cons_error),
- ps->cons_total,ps->cons_done,ps->cons_error);
- }
- pthread_mutex_unlock(&ps->lock);
-}
-
-void
-pipe_prod_siginit (pipe_shared *ps, int err)
-{
- pthread_mutex_lock(&ps->lock);
- assert (ps->prod_state == PROD_INIT);
- ps->prod_state = (err >= 0) ? PROD_RUNNING : PROD_ERROR;
- pthread_cond_broadcast(&ps->cons_cond);
- pthread_mutex_unlock(&ps->lock);
-}
-
-int
-pipe_cons_wait_init (pipe_shared *ps)
-{
- pthread_mutex_lock(&ps->lock);
- DPRINTF("consumer initingn");
- while (ps->prod_state == PROD_INIT) {
- pthread_cond_broadcast(&ps->prod_cond);
- pthread_cond_wait(&ps->cons_cond,&ps->lock);
- DPRINTF(" after sleep (state = %u)n",ps->prod_state);
- }
- pthread_mutex_unlock(&ps->lock);
- return (ps->prod_state == PROD_ERROR);
-}
-
-void
-pipe_prod_finish (pipe_shared *ps)
-{
- pthread_mutex_lock(&ps->lock);
- ps->data_len = 0;
- ps->cons_done = ps->cons_error;
- ++ps->sequence;
- DPRINTF("waiting for %u children (total %u error %u)n",
- ps->cons_total - (ps->cons_done + ps->cons_error),
- ps->cons_total,ps->cons_error);
- while (ps->cons_done + ps->cons_error < ps->cons_total) {
- pthread_cond_broadcast(&ps->cons_cond);
- pthread_cond_wait(&ps->prod_cond,&ps->lock);
- DPRINTF("%u children left (total %u done %u error %u)n",
- ps->cons_total - (ps->cons_done + ps->cons_error),
- ps->cons_total,ps->cons_done,ps->cons_error);
- }
- pthread_mutex_unlock(&ps->lock);
- DPRINTF("producer finished with sequence %ldn",ps->sequence);
-}
diff --git a/mpipe.h b/mpipe.h
deleted file mode 100644
index 62edbaa..0000000
--- a/mpipe.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#if !defined(_MPIPE_H)
-#define _MPIPE_H
-
-#include <poll.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdint.h>
-#include <stdio.h>
-
-/*
- * This is an in-memory "pipe" construct with a twist: it lets you have
- * multiple consumers instead of just one. For example, you might want
- * to stream data from a back-end store both to the user and into the
- * local cache, or you might want to replicate out to several back ends
- * simultaneously. The basic flow for the producer is as follows:
- *
- * while data available
- * read a chunk of data
- * lock shared structure
- * update the shared pointer/data/sequence
- * signal the consumer event
- * wait on the producer event
- * unlock shared structure
- * lock shared structure
- * set prod_done
- * signal the consumer event
- * wait on the producer event
- *
- * For consumers, it's a mirror image:
- * lock shared structure
- * loop
- * wait on the consumer event
- * continue if shared sequence != own sequence
- * break if len == 0
- * unlock shared structure
- * write the data somewhere
- * increment own sequence
- * lock shared structure
- * signal producer event if ++cons_done == cons_total
- * do cons_count/producer-event handshake one more time
- *
- * The sequence checking is not strictly necessary, but it makes things a lot
- * easier to debug if there is a bug that causes producer and consumers to get
- * out of sync. Instead of corrupting data and continuing, consumers block
- * waiting for the "right" sequence number while the producer blocks waiting
- * for a signal that will never come.
- *
- * The cons_error is the "deadweight" that only increments. This way the
- * thread ping-pong and zeroing of cons_done are left alone.
- */
-
-typedef struct {
- void *owner;
- pthread_mutex_t lock;
- pthread_cond_t prod_cond;
- pthread_cond_t cons_cond;
- void *data_ptr;
- size_t data_len;
- unsigned long sequence;
- unsigned short cons_total;
- unsigned short cons_init_done;
- unsigned short cons_init_error;
- unsigned short cons_done;
- unsigned short cons_error;
- enum { PROD_INIT, PROD_RUNNING, PROD_ERROR } prod_state;
-} pipe_shared;
-
-typedef struct {
- pipe_shared *shared;
- unsigned long sequence;
- size_t offset;
- void *prov;
-} pipe_private;
-
-
-void pipe_init_shared (pipe_shared *ps,
- void *owner, unsigned short ncons);
-pipe_private *pipe_init_private (pipe_shared *ps);
-int pipe_cons_wait (pipe_private *pp);
-void pipe_cons_signal (pipe_private *pp, int error);
-void pipe_cons_siginit (pipe_shared *ps, int error);
-int pipe_prod_wait_init (pipe_shared *ps);
-void pipe_prod_siginit (pipe_shared *ps, int error);
-int pipe_cons_wait_init (pipe_shared *ps);
-void pipe_prod_signal (pipe_shared *ps,
- void *ptr, size_t total);
-void pipe_prod_finish (pipe_shared *ps);
-void pipe_reset (pipe_shared *ps, unsigned short ncons);
-
-#endif
diff --git a/notes.txt b/notes.txt
deleted file mode 100644
index 92e4646..0000000
--- a/notes.txt
+++ /dev/null
@@ -1,192 +0,0 @@
-Commands:
-
- GET /
- list top-level objects
-
- POST /_new/bucket
- create bucket with attributes
-
- GET /_providers
- list providers
-
- POST /_providers/_new/provider
- create new provider with info
-
- GET /_providers/provider
- show provider information (requires token)
-
- POST /_providers/provider
- set provider information (requires/token)
-
- GET /bucket
- list bucket contents
-
- GET /bucket?query=xxx
- query bucket contents
-
- DELETE /bucket
- delete bucket (must be empty)
-
- POST /bucket/_new/object
- create object with attributes
-
- GET /bucket/object
- get body/attr* list
-
- GET /bucket/object/body
- get object body
-
- PUT /bucket/object/body
- put object body
-
- POST /bucket/object op=push
- trigger re-replication
-
- POST /bucket/object op=pull depot=xxx
- trigger reverse replication
-
- POST /bucket/object op-check depot=xxx
- check availability
-
- DELETE /bucket/object
- delete object
-
- GET /bucket/object/attrs
- get all attributes
-
- POST /bucket/object/attrs
- set multiple attributes
-
- GET /bucket/object/attr_X
- get attribute X
-
- PUT /bucket/object/attr_X
- set attribute X
-
- DELETE /bucket/object/attr_X
- delete attribute X
-
-Formats:
-
- JSON top-level list
- [
- {
- "type": "bucket_factory",
- "path": ".../_new"
- },
- {
- "type": "provider_list",
- "path": ".../_providers"
- },
- {
- "type": "bucket",
- "name": "bucketA",
- "path": ".../bucketA"
- },
- {
- "type": "bucket",
- "name": "bucketB",
- "path": ".../bucketB"
- }
- ]
-
- JSON provider list
- [
- {
- "name": "my primary",
- "type": "cf",
- "host": "swift.usersys.redhat.com",
- "port": 8080,
- "username": "my_cf_username",
- "password": "my_cf_password"
- },
- {
- "name": "my secondary",
- "type": "s3",
- "host": "s3.amazonaws.com",
- "port": 80,
- "username": "my_aws_key",
- "password": "my_aws_secret"
- }
- }
-
- JSON bucket-level list
- [
- {
- "type": "object_factory",
- "path": ".../bucket/_new"
- },
- {
- "type": "object",
- "name": "objectC",
- "path": ".../bucketA/objectC"
- },
- {
- "type": "object",
- "name": "objectD",
- "path": ".../bucketA/objectD"
- }
- ]
-
- JSON object-level list
- [
- {
- "type": "body",
- "path": ".../bucketA/objectC/body"
- },
- {
- "type": "multi_attributes",
- "path": ".../bucketA/objectC/attrs"
- },
- {
- "type": "single_attribute",
- "name": "abc",
- "path": ".../bucketA/objectC/attr_abc"
- }
- {
- "type": "single_attribute",
- "name": "xyz",
- "path": ".../bucketA/objectC/attr_xyz"
- }
- ]
-
-
-To Do - priority (1 highest) work (5 largest) desc:
- 1 1 policy inheritance
- 1 2 delete metadata as well as data
- 1 3 re-replicate on policy change (single object)
- 2 2 manual re-replication trigger
- 2 1 re-replicate on *any* tag change
- 2 3 replication-complete API
- 2 5 basic cred-management API
- 2 3 content types
- --- done
- 2 1 start own MongoDB
- 2 2 reverse-replication API
- 2 4 link-following syntax
- 2 5 reconcile dispatch with commands/format above
- 3 5 fully modular FS/S3/CF driver structure
- 3 5 VMWare back end
- 3 5 EBS back end
- 3 5 RHEV back end (dependency on Ayal)
- * Mark McLoughlin / Eoghan Glynn / rhevm-api
- 3 5 start own Hail
- 3 5 fully dynamic config
- 3 3 fix string handling (eliminate strtok)
- 3 4 re-replicate on policy change (default)
- 3 4 writes proxied upstream
- 4 1 direct-copy API (PUT with special header?)
- 4 2 use MongoDB C driver now that it's supported
- 4 3 chunked encoding
- 4 3 statistics/usage API
- 4 3 enhanced query syntax (limits)
- 4 4 optimize re-replication (check current locs)
- 4 5 dcloud-obj back end
- 4 5 immediate inline replication
- 4 5 generate MongoDB queries from ours
- 4 5 auth
- 5 2 deletes proxied upstream
- 5 3 cache control (TTL?)
- 5 5 replicated DB
- partial-object reads
- partial-object writes?
diff --git a/qlexer.l b/qlexer.l
deleted file mode 100644
index 976939b..0000000
--- a/qlexer.l
+++ /dev/null
@@ -1,93 +0,0 @@
-%{
-#include <config.h>
-#include <stdio.h>
-
-#ifndef FLEX_SCANNER
-# error This scanner must be made using flex, not lex.
-#endif
-
-#include "iwhd-qparser.h"
-#undef YY_DECL
-#define YY_DECL extern int yylex (YYSTYPE *yylval_param, yyscan_t yyscanner)
-%}
-
-%option warn nounput noinput noyywrap
-%option bison-bridge reentrant
-
-%%
-
-"[^"]*" { yytext[yyleng-1] = 0; yytext++;
- yylval_param->str = strdup(yytext);
- return yylval_param->str ? T_STRING : T_INVALID; }
-~[^~]*~ { yylval_param->str = strdup(yytext);
- return yylval_param->str ? T_DATE : T_INVALID; }
-[0-9]+ { yylval_param->str = strdup(yytext);
- return yylval_param->str ? T_NUMBER : T_INVALID; }
-[a-z_]+ { yylval_param->str = strdup(yytext);
- return yylval_param->str ? T_ID : T_INVALID; }
-[$#().] { return yytext[0]; }
-[<] { return T_LT; }
-[<]= { return T_LE; }
-> { return T_GT; }
->= { return T_GE; }
-== { return T_EQ; }
-!= { return T_NE; }
-! { return T_NOT; }
-&& { return T_AND; }
-|| { return T_OR; }
-[ tnr]+ { }
-. { return T_INVALID; }
-
-<<EOF>> { yyterminate (); /* aka return 0; */ }
-
-
-%%
-
-#if defined(UNIT_TEST)
-static void
-yyerror (const char *msg)
-{
- printf("%s: %sn",__func__,msg);
-}
-
-int
-main (int argc, char **argv)
-{
- yyscan_t scanner;
- yylex_init (&scanner);
- YY_BUFFER_STATE buf
- = yy_scan_string (""!@#" ~xxx~ 123 abc $#()<>=!&|.", scanner);
-
- while (1) {
- YYSTYPE v;
- int t = yylex(&v, scanner);
- if (t == 0)
- break;
-
- switch (t) {
- case T_STRING:
- if (!yytext[0]) {
- printf("EOFn");
- break;
- }
- case T_DATE:
- case T_NUMBER:
- case T_ID:
- printf("%d %sn",t,yytext);
- break;
- case T_SPACE:
- break;
- case T_INVALID:
- printf("INVALID %cn",yytext[0]);
- break;
- default:
- printf("%dn",t);
- }
- }
-
- yy_delete_buffer (buf, scanner);
- yylex_destroy (scanner);
-
- return 0;
-}
-#endif
diff --git a/qparser.y b/qparser.y
deleted file mode 100644
index c186890..0000000
--- a/qparser.y
+++ /dev/null
@@ -1,637 +0,0 @@
-%define api.pure
-%error-verbose
-
-%{
-#include <config.h>
-#include "query.h"
-#include "iwhd-qparser.h"
-%}
-
-%union {
- char *str;
- struct value_t *val;
-}
-
-%{
-#include <error.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-
-#include "iwh.h"
-
-/* Return a pointer to this when allocation fails in a value_t-returning
- function. */
-static value_t invalid = { T_INVALID, {0}, NULL };
-
-#define YY_DECL int yylex(YYSTYPE *, void *scanner);
-YY_DECL
-
-/* TBD: use separate function to parse dates differently */
-static value_t *
-make_number (const char *text)
-{
- value_t *tmp = malloc(sizeof(*tmp));
- if (!tmp)
- return &invalid;
-
- tmp->type = T_NUMBER;
- tmp->as_num = strtoll(text,NULL,10);
- tmp->resolved = NULL;
- free ((void *) text);
-
- return tmp;
-}
-
-/* Return a malloc'd value_t buffer, with its type to T and using TEXT
- (already malloc'd) as its string. */
-static value_t *
-make_string (const char *text, type_t t)
-{
- value_t *tmp = malloc(sizeof(*tmp));
- if (!tmp)
- return &invalid;
-
- tmp->type = t;
- tmp->as_str = (char *) text;
- tmp->resolved = NULL;
-
- return tmp;
-}
-
-/* Return a malloc'd tree_t, with type T and branches LEFT and RIGHT.
- LEFT must be non-NULL. RIGHT may be NULL (solely for use in handling
- a negated expression). */
-static value_t *
-make_tree (type_t t, const value_t *left, const value_t *right)
-{
- if (left->type == T_INVALID)
- return (value_t *) left;
- if (t != T_LINK && right && right->type == T_INVALID)
- return (value_t *) right;
- value_t *tmp = malloc(sizeof(*tmp));
-
- if (!tmp)
- return &invalid;
-
- tmp->type = t;
- tmp->as_tree.left = (value_t *) left;
- tmp->as_tree.right = (value_t *) right;
- tmp->resolved = NULL;
-
- return tmp;
-}
-
-/* Return a malloc'd comp_t, with type T and branches LEFT and RIGHT.
- LEFT and RIGHT must both be non-NULL. */
-static value_t *
-make_comp (comp_t c, const value_t *left, const value_t *right)
-{
- if (left->type == T_INVALID)
- return (value_t *) left;
- if (right->type == T_INVALID)
- return (value_t *) right;
- value_t *tmp = make_tree(T_COMP,left,right);
-
- if (!tmp)
- return &invalid;
-
- tmp->as_tree.op = c;
-
- return tmp;
-}
-
-static value_t *
-make_link (const value_t *left, const char *right)
-{
- return make_tree(T_LINK,left,(value_t *)right);
-}
-
-static void
-yyerror (void *scanner ATTRIBUTE_UNUSED,
- value_t **result ATTRIBUTE_UNUSED,
- const char *msg ATTRIBUTE_UNUSED)
-{
- /* empty */
-}
-
-%}
-
-%lex-param { yyscan_t scanner }
-%parse-param { void *scanner }
-%parse-param { value_t **result }
-
-%token <str> T_STRING T_COMP T_DATE T_ID T_LINK T_NUMBER T_OFIELD T_SFIELD
-%token T_EQ T_NE T_NOT T_AND T_OR T_INVALID
-%token T_LT T_GT T_LE T_GE
-
-%type <val> atom bbool_expr comp_expr field
-%type <val> link_field literal paren_expr ubool_expr
-
-%start policy
-
-%%
-
-policy:
- bbool_expr {
- *result = $1;
- };
-
-bbool_expr:
- ubool_expr {
- // printf("promoting ubool_expr to bbool_exprn");
- $$ = $1;
- }|
- bbool_expr T_AND ubool_expr {
- // printf("found AND expressionn");
- $$ = make_tree(T_AND,$1,$3);
- }|
- bbool_expr T_OR ubool_expr {
- // printf("found OR expressionn");
- $$ = make_tree(T_OR,$1,$3);
- };
-
-ubool_expr:
- comp_expr {
- // printf("promoting comp_expr to ubool_exprn");
- $$ = $1;
- }|
- T_NOT comp_expr {
- // printf("found NOT expressionn");
- $$ = make_tree(T_NOT,$2,NULL);
- };
-
-
-comp_expr:
- atom {
- // printf("promoting atom to comp_exprn");
- $$ = $1;
- }|
- atom T_LT atom {
- // printf("found LESS THAN expressionn");
- $$ = make_comp(C_LESSTHAN,$1,$3);
- }|
- atom T_LE atom {
- // printf("found LESS OR EQUAL expressionn");
- $$ = make_comp(C_LESSOREQ,$1,$3);
- }|
- atom T_EQ atom {
- // printf("found EQUAL expressionn");
- $$ = make_comp(C_EQUAL,$1,$3);
- }|
- atom T_NE atom {
- // printf("found NOT EQUAL expressionn");
- $$ = make_comp(C_DIFFERENT,$1,$3);
- }|
- atom T_GE atom {
- // printf("found GREATER OR EQUAL expressionn");
- $$ = make_comp(C_GREATEROREQ,$1,$3);
- }|
- atom T_GT atom {
- // printf("found GREATER THAN expressionn");
- $$ = make_comp(C_GREATERTHAN,$1,$3);
- };
-
-atom:
- link_field {
- // printf("promoting link_field to atomn");
- $$ = $1;
- }|
- literal {
- // printf("promoting literal to atomn");
- $$ = $1;
- }|
- paren_expr {
- // printf("promoting paren_expr to atomn");
- $$ = $1;
- };
-
-link_field:
- field {
- // printf("promoting field to link_fieldn");
- $$ = $1;
- }|
- link_field '.' T_ID {
- // printf("found LINK FIELDn");
- $$ = make_link($1,$3);
- };
-
-field:
- '$' T_ID {
- // printf("found DOLLAR FIELDn");
- $$ = make_string($2,T_OFIELD);
- }|
- '#' T_ID {
- // printf("found WAFFLE FIELDn");
- $$ = make_string($2,T_SFIELD);
- };
-
-literal:
- T_NUMBER {
- // printf("found NUMBER %sn",$1);
- $$ = make_number($1);
- }|
- T_STRING {
- // printf("found STRING %sn",$1);
- $$ = make_string($1,T_STRING);
- }|
- T_DATE {
- // printf("found DATEn");
- $$ = make_string($1,T_DATE);
- }|
- T_ID {
- // printf("found ID %sn",$1);
- $$ = make_string($1,T_ID);
- };
-
-paren_expr:
- '(' bbool_expr ')' {
- // printf("found PAREN expressionn");
- $$ = $2;
- };
-
-%%
-
-#if defined PARSER_UNIT_TEST
-
-#include "xalloc.h"
-
-static const struct { char *name; char *value; } hacked_obj_fields[] = {
- /* Fake object fields for generic unit testing. */
- { "a", "2" }, { "b", "7" }, { "c", "11" },
- /* This one's here to test links (e.g. $template.owner.name). */
- { "template", "templates/the_tmpl" },
- { NULL }
-};
-
-/* Fake out the eval code for unit testing. */
-static const char *
-unit_oget_func (void * notused, const char *text)
-{
- int i;
-
- for (i = 0; hacked_obj_fields[i].name; ++i) {
- if (!strcmp(hacked_obj_fields[i].name,text)) {
- return xstrdup(hacked_obj_fields[i].value);
- }
- }
-
- return NULL;
-}
-static const getter_t unit_oget = { unit_oget_func };
-
-/*
- * Same as above, but the site-field stuff is so similar to the object-field
- * stuff that it's not worth exercising too much separately.
- */
-static const char *
-unit_sget_func (void * notused, const char *text)
-{
- return "never";
-}
-static const getter_t unit_sget = { unit_sget_func };
-
-/* Fake links from an object/key tuple to an object/key string. */
-static const struct { char *obj; char *key; char *value; } hacked_links[] = {
- { "templates/the_tmpl", "owner", "users/the_user" },
- { "users/the_user", "name", "Jeff Darcy" },
- { NULL }
-};
-
-static char *
-follow_link (const char *object, const char *key)
-{
- unsigned int i;
-
- for (i = 0; hacked_links[i].obj; ++i) {
- if (strcmp(object,hacked_links[i].obj)) {
- continue;
- }
- if (strcmp(key,hacked_links[i].key)) {
- continue;
- }
- return hacked_links[i].value;
- }
-
- return NULL;
-}
-#else
-extern char *follow_link (const char *object, const char *key);
-#endif
-
-static void
-_print_value (const value_t *v, int level)
-{
- if (!v) {
- printf("%*sNULLn",level,"");
- return;
- }
-
- switch (v->type) {
- case T_NUMBER:
- printf("%*sNUMBER %lldn",level,"",v->as_num);
- break;
- case T_STRING:
- printf("%*sSTRING %sn",level,"",v->as_str);
- break;
- case T_OFIELD:
-#if defined PARSER_UNIT_TEST
- printf("%*sOBJECT FIELD %s (%s)n",level,"",v->as_str,
- unit_oget_func(NULL,v->as_str));
-#else
- printf("%*sOBJECT FIELD %sn",level,"",v->as_str);
-#endif
- break;
- case T_SFIELD:
-#if defined PARSER_UNIT_TEST
- printf("%*sSERVER FIELD %s (%s)n",level,"",v->as_str,
- unit_sget_func(NULL,v->as_str));
-#else
- printf("%*sSERVER FIELD %sn",level,"",v->as_str);
-#endif
- break;
- case T_COMP:
- printf("%*sCOMPARISONn",level,"");
- _print_value(v->as_tree.left,level+2);
- _print_value(v->as_tree.right,level+2);
- break;
- case T_NOT:
- printf("%*sNOTn",level,"");
- _print_value(v->as_tree.left,level+2);
- break;
- case T_AND:
- printf("%*sANDn",level,"");
- _print_value(v->as_tree.left,level+2);
- _print_value(v->as_tree.right,level+2);
- break;
- case T_OR:
- printf("%*sORn",level,"");
- _print_value(v->as_tree.left,level+2);
- _print_value(v->as_tree.right,level+2);
- break;
- case T_LINK:
- printf("%*sLINKn",level,"");
- _print_value(v->as_tree.left,level+2);
- printf("%*sDEST FIELD %sn",level+2,"",
- (char *)v->as_tree.right);
- break;
- default:
- printf("%*sUNKNOWN %dn",level,"",v->type);
- }
-}
-
-void
-print_value (const value_t *v)
-{
- _print_value(v,0);
-}
-
-#include "qlexer.c"
-
-value_t *
-parse (const char *text)
-{
- yyscan_t scanner;
- if (yylex_init (&scanner))
- error (0, errno, "failed to initialize query parser");
- YY_BUFFER_STATE buf = yy_scan_string (text, scanner);
- value_t *result = NULL;
- value_t *r = yyparse (scanner, &result) == 0 ? result : NULL;
- yy_delete_buffer (buf, scanner);
- yylex_destroy (scanner);
- return r;
-}
-
-/*
- * Return the string value of an expression for comparison or display, iff
- * all component parts are string-valued themselves. That excludes numbers
- * and booleans.
- */
-static const char *
-string_value (value_t *v, const getter_t *oget, const getter_t *sget)
-{
- const char *left;
-
- /* Disable this caching, which seems to be invalid. */
- v->resolved = NULL;
-
- switch (v->type) {
- case T_STRING:
- return v->as_str;
- case T_OFIELD:
- if (!v->resolved) {
- v->resolved = oget ? CALL_GETTER(oget,v->as_str) : NULL;
- }
- return v->resolved;
- case T_SFIELD:
- return sget ? CALL_GETTER(sget,v->as_str) : NULL;
- case T_LINK:
- if (!v->resolved) {
- left = string_value(v->as_tree.left,oget,sget);
- if (left) {
- v->resolved = follow_link((char *)left,
- (char *)v->as_tree.right);
- }
- }
- return v->resolved;
- default:
- return NULL;
- }
-}
-
-/*
- * Check whether a string looks like a simple decimal number. There's
- * probably a library function for this somewhere.
- */
-static int
-is_ok_number (const char *a_str)
-{
- const char *p;
-
- if (!a_str) {
- return 0;
- }
-
- for (p = a_str; *p; ++p) {
- if (!isdigit(*p)) {
- return 0;
- }
- }
-
- return 1;
-}
-
-/*
- * Comparisons are a bit messy. If both sides are numbers, strings that look
- * like numbers, or expressions that evaluate to numbers (booleans evaluate
- * to 0/1), then we do a numeric comparison. Otherwise, if both sides
- * evaluate to strings, we attempt a string comparison. That's the logic,
- * but the code is actually structured a different way to allow re-use of
- * common operator-specific code at the end for both cases.
- */
-static int
-compare (value_t *left, comp_t op, value_t *right,
- const getter_t *oget, const getter_t *sget)
-{
- const char *lstr;
- const char *rstr;
- int lval = 0; // solely to placate gcc
- int rval;
- int num_ok = 1;
-
- lstr = string_value(left,oget,sget);
- rstr = string_value(right,oget,sget);
-
- if (left->type == T_NUMBER) {
- lval = left->as_num;
- }
- else if (lstr) {
- if (is_ok_number(lstr)) {
- lval = strtoll(lstr,NULL,0);
- }
- else {
- num_ok = 0;
- }
- }
- else {
- lval = eval(left,oget,sget);
- if (lval < 0) {
- return lval;
- }
- }
-
- if (right->type == T_NUMBER) {
- rval = right->as_num;
- }
- else if (rstr) {
- if (is_ok_number(rstr)) {
- rval = strtoll(rstr,NULL,0);
- }
- else {
- num_ok = 0;
- }
- }
- else {
- rval = eval(right,oget,sget);
- if (rval < 0) {
- return rval;
- }
- }
-
- /*
- * Strcmp returns -1/0/1, but -1 for us would mean an error and
- * which of 0/1 we return depends on which comparison operatoer
- * we're dealing with. Therefore, we stick the strcmp result on
- * the left side and let the switch below do an operator-appropriate
- * compare against zero on the right.
- */
- if (!num_ok) {
- if (!lstr || !rstr) {
- return -1;
- }
- lval = strcmp(lstr,rstr);
- rval = 0;
- }
-
- switch (op) {
- case C_LESSTHAN: return (lval < rval);
- case C_LESSOREQ: return (lval <= rval);
- case C_EQUAL: return (lval == rval);
- case C_DIFFERENT: return (lval != rval);
- case C_GREATEROREQ: return (lval >= rval);
- case C_GREATERTHAN: return (lval > rval);
- default:
- return -1;
- }
-}
-
-/*
- * Evaluate an AST in the current context to one of:
- * true=1
- * false=0
- * error=-1
- * It's up to the caller whether error is functionally the same as false.
- * Note that even T_NUMBER gets squeezed down to these three values. The
- * only thing numbers are used for is comparing against other numbers to
- * yield a boolean for the query or replication-policy code. If you want
- * something that returns a number, this is the wrong language for it.
- */
-
-int
-eval (const value_t *v, const getter_t *oget, const getter_t *sget)
-{
- int res;
- const char *str;
-
- switch (v->type) {
- case T_NUMBER:
- return v->as_num != 0;
- case T_STRING:
- return v->as_str && *v->as_str;
- case T_OFIELD:
- str = CALL_GETTER(oget,v->as_str);
- return str && *str;
- case T_SFIELD:
- str = CALL_GETTER(sget,v->as_str);
- return str && *str;
- case T_LINK:
- str = string_value(v->as_tree.left,oget,sget);
- if (str) {
- str = follow_link(str,(char *)v->as_tree.right);
- }
- return str && *str;
- case T_COMP:
- return compare(v->as_tree.left,(comp_t)v->as_tree.op,
- v->as_tree.right, oget, sget);
- case T_NOT:
- res = eval(v->as_tree.left,oget,sget);
- return (res >= 0) ? !res : res;
- case T_AND:
- res = eval(v->as_tree.left,oget,sget);
- if (res > 0) {
- res = eval(v->as_tree.right,oget,sget);
- }
- return res;
- case T_OR:
- res = eval(v->as_tree.left,oget,sget);
- if (res > 0) {
- return res;
- }
- return eval(v->as_tree.right,oget,sget);
- default:
- return -1;
- }
-}
-
-#ifdef PARSER_UNIT_TEST
-int
-main (int argc, char **argv)
-{
- int fail = 0;
- unsigned int i;
- GC_INIT ();
- for (i = 1; i < argc; ++i)
- {
- value_t *expr = parse (argv[i]);
- if (!expr)
- {
-	printf ("could not parse '%s'\n", argv[i]);
-	fail = 1;
-	continue;
-	}
-
-	print_value (expr);
-
-	const char *str = string_value (expr, &unit_oget, &unit_sget);
-	if (str)
-	{
-	printf ("s= %s\n", str);
-	continue;
-	}
-	printf ("d= %d\n", eval (expr, &unit_oget, &unit_sget));
- }
-
- return fail;
-}
-#endif
diff --git a/query.h b/query.h
deleted file mode 100644
index ef6729e..0000000
--- a/query.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#ifndef _QUERY_H
-#define _QUERY_H 1
-
-#if defined(__CPLUSPLUS__) || defined(__cplusplus)
-extern "C" {
-#endif
-
-#include "iwhd-qparser.h"
-
-/*
- * Comparisons are all the same type to the parser, but when it comes to
- * evaluation we need to know the difference so we use these subtypes.
- */
-typedef enum {
- C_LESSTHAN, C_LESSOREQ,
- C_EQUAL, C_DIFFERENT,
- C_GREATEROREQ, C_GREATERTHAN
-} comp_t;
-
-/* The actual values are generated by the parser. */
-typedef enum yytokentype type_t;
-
-/*
- * Universal AST object. T_NUMBER uses as_num, and some day T_DATE might as
- * well. Several types (T_STRING, T_ID, T_*FIELD) all use as_str. The rest
- * use as_tree, but there's a caveat. In most cases as_tree.right really is
- * a value_t, but for T_LINK it's a bare string.
- * TBD: use a separate as_link union member for T_LINK.
- */
-typedef struct value_t {
- type_t type;
- union {
- long long as_num;
- char *as_str;
- struct {
- comp_t op;
- struct value_t *left;
- struct value_t *right;
- } as_tree;
- };
- const char *resolved; /* saved result for T_OFIELD/T_SFIELD/T_LINK */
-} value_t;
-
-/*
- * In a higher-level language, this would be a method pointer. It's just
- * a pointer to a function plus a little piece of the caller's context (in
- * the replication-policy case it's the current bucket and key) so that we
- * can do concurrent evaluations with separate contexts.
- */
-typedef struct {
- const char *(*func) (void *, const char *);
- void *ctx;
-} getter_t;
-#define CALL_GETTER(g,x) g->func(g->ctx,x)
-
-/*
- * In the normal case a caller would invoke parse once and eval multiple times.
- * print_value is just for debugging/testing.
- * TBD: make parse reentrant (eval already is).
- * Unfortunately, a quick scan of generated code and information on the web
- * seems to indicate that even a "reentrant" bison parser only encapsulates
- * user state and still relies quite a bit on internal globals. That might
- * mean that we just have to put a lock around it instead.
- */
-int eval (const value_t *expr,
- const getter_t *oget, const getter_t *sget);
-void print_value (const value_t *);
-
-value_t *parse (const char *text);
-
-#if defined(__CPLUSPLUS__) || defined(__cplusplus)
-}
-#endif
-
-#endif
diff --git a/replica.c b/replica.c
deleted file mode 100644
index ee1034a..0000000
--- a/replica.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include <config.h>
-
-#include <errno.h>
-#include <error.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <hstor.h>
-#include <microhttpd.h> /* for HTTP status values */
-
-#include "iwh.h"
-#include "setup.h"
-#include "query.h"
-#include "meta.h"
-#include "replica.h"
-
-typedef enum {
- REPL_PUT, /* store an object */
- REPL_ODELETE, /* delete an object */
- REPL_BCREATE, /* create a bucket */
- /* TBD: bucket deletion, others? */
-} repl_t;
-
-typedef struct _repl_item {
- struct _repl_item *next;
- repl_t type;
- char *path;
- provider_t *server;
- size_t size;
- my_state *ms;
-} repl_item;
-
-typedef struct {
- char *cur_bucket;
- char *cur_key;
- provider_t *cur_server;
-} query_ctx_t;
-
-static repl_item *queue_head = NULL;
-static repl_item *queue_tail = NULL;
-static pthread_mutex_t queue_lock;
-static sem_t queue_sema;
-static volatile gint rep_count = 0;
-
-static void *
-proxy_repl_prod (void *ctx)
-{
- repl_item *item = ctx;
- backend_thunk_t thunk;
- void *result;
-
- thunk.parent = item->ms;
- thunk.prov = get_main_provider();
-
- result = thunk.prov->func_tbl->get_child_func(&thunk);
- return result;
-}
-
-static void *
-proxy_repl_cons (void *ctx)
-{
- repl_item *item = ctx;
- my_state *ms = item->ms;
- pipe_private *pp;
-
- pp = pipe_init_private(&ms->pipe);
- if (!pp) {
- pipe_cons_siginit(&ms->pipe,-1);
- return THREAD_FAILED;
- }
-
- pp->prov = item->server;
- ms->be_flags = 0;
-
- return item->server->func_tbl->put_child_func(pp);
-}
-
-static void
-repl_worker_del (const repl_item *item)
-{
- char *bucket;
- char *key;
- int rc;
-
- bucket = strdup(item->path);
- if (!bucket) {
- error(0,errno,"ran out of memory replicating delete for %s",
- item->path);
- return;
- }
-
- key = strchr(bucket,'/');
- if (!key) {
- error(0,0,"invalid path replicating delete for %s",item->path);
- return;
- }
- ++key;
-
- rc = item->server->func_tbl->delete_func(item->server,
- bucket, key, item->path);
- if (rc != MHD_HTTP_OK) {
- error(0,0,"got status %d replicating delete for %s",
- rc, item->path);
- }
-
-	DPRINTF("finished replicating delete for %s, rc = %d\n",item->path,rc);
-}
-
-static void
-repl_worker_bcreate (repl_item *item)
-{
- int rc;
-
- rc = item->server->func_tbl->bcreate_func(item->server,item->path);
- if (rc != MHD_HTTP_OK) {
- error(0,0,"got status %d replicating bcreate for %s",
- rc, item->path);
- }
-
-	DPRINTF("finished replicating bcreate for %s, rc = %d\n",item->path,rc);
-}
-
-/* Use this to diagnose failed thread creation. */
-#define xpthread_create(thread, start_routine, item, msg) \
-  do { \
-    int err = pthread_create (thread, NULL, start_routine, item); \
-    if (err) { \
-      error (0, err, msg); \
-      return NULL; \
-    } \
-  } while (0)
-
-static void *
-repl_worker (void *notused ATTRIBUTE_UNUSED)
-{
- repl_item *item;
- pthread_t cons;
- pthread_t prod;
- my_state *ms;
-
- for (;;) {
- sem_wait(&queue_sema);
- pthread_mutex_lock(&queue_lock);
- item = queue_head;
- queue_head = item->next;
- if (!queue_head) {
- queue_tail = NULL;
- }
- pthread_mutex_unlock(&queue_lock);
-
- /*
- * Do a full initialization here, not just in the rest. It's
- * necessary in the oddball case where we're re-replicating as
- * a result of an attribute/policy change, and it's not harmful
- * in the normal case where we're actually storing a new file.
- */
- ms = item->ms;
- pipe_init_shared(&ms->pipe,ms,1);
- switch (item->type) {
- case REPL_PUT:
- xpthread_create(&prod,proxy_repl_prod,item,
- "failed to start producer thread");
- xpthread_create(&cons,proxy_repl_cons,item,
- "failed to start consumer thread");
- pthread_join(prod,NULL);
- pthread_join(cons,NULL);
- break;
- case REPL_ODELETE:
- repl_worker_del(item);
- break;
- case REPL_BCREATE:
- repl_worker_bcreate(item);
- break;
- default:
- error(0,0,"bad repl type %d (url=%s) skipped",
- item->type, item->path);
- }
- /* No atomic dec without test? Lame. */
- (void)g_atomic_int_dec_and_test(&rep_count);
- }
-}
-
-void
-repl_init (void)
-{
- pthread_t tid;
-
- sem_init(&queue_sema,0,0);
- pthread_mutex_init(&queue_lock,NULL);
- pthread_create(&tid,NULL,repl_worker,NULL);
-}
-
-static const char *
-repl_oget (void *ctx, const char *id)
-{
- query_ctx_t *qctx = ctx;
- char *cur_value = NULL;
-
- (void)meta_get_value(qctx->cur_bucket,qctx->cur_key,id,&cur_value);
-
- return cur_value;
-}
-
-static const char *
-repl_sget (void *ctx, const char *id)
-{
- query_ctx_t *qctx = ctx;
- provider_t *prov = qctx->cur_server;
-
- if (!strcmp(id,"name")) {
- return prov->name;
- }
- if (!strcmp(id,"type")) {
- return prov->type;
- }
- if (!strcmp(id,"host")) {
- return prov->host;
- }
- if (!strcmp(id,"key")) {
- return prov->username;
- }
- if (!strcmp(id,"secret")) {
- return prov->password;
- }
- if (!strcmp(id,"path")) {
- return prov->path;
- }
-
- struct kv_pair kv;
- kv.key = (char *) id;
- struct kv_pair *p = hash_lookup (prov->attrs, &kv);
-
- return p ? p->val : NULL;
-}
-
-void
-replicate (const char *url, size_t size, const char *policy, my_state *ms)
-{
- repl_item *item;
- value_t *expr;
- int res;
- char *url2;
- char *stctx;
- query_ctx_t qctx;
- getter_t oget;
- getter_t sget;
-
- url2 = strdup(url);
- if (!url2) {
- error(0,0,"could not parse url %s",url);
- return;
- }
- qctx.cur_bucket = strtok_r(url2,"/",&stctx);
- qctx.cur_key = strtok_r(NULL,"/",&stctx);
-
- if (!size) {
- size = meta_get_size(qctx.cur_bucket,qctx.cur_key);
-	DPRINTF("fetched size %zu for %s\n",size,url);
-	}
-
-	if (policy) {
-	DPRINTF("--- policy = %s\n",policy);
- expr = parse(policy);
- }
- else {
- expr = NULL;
- }
-
- oget.func = repl_oget;
- oget.ctx = &qctx;
- sget.func = repl_sget;
- sget.ctx = &qctx;
-
- size_t n_prov;
- provider_t **prov_list = hash_get_prov_list (&n_prov);
- if (prov_list == NULL) {
-	DPRINTF("failed to allocate space for provider list\n");
- return;
- }
-
- size_t i;
- for (i = 0; i < n_prov; i++) {
- provider_t *prov = prov_list[i];
- if (!strcmp(prov->name, me)) {
- continue;
- }
- if (expr) {
- qctx.cur_server = prov;
- res = eval(expr,&oget,&sget);
- }
- else {
- res = 0;
- }
- if (res <= 0) {
-	DPRINTF("skipping %s for %s\n",prov->name,url);
-	continue;
-	}
-	DPRINTF("REPLICATING %s to %s\n",url,prov->name);
- item = malloc(sizeof(*item));
- if (!item) {
- error(0,errno,"could not create repl_item for %s",
- url);
- break;
- }
- item->type = REPL_PUT;
- item->path = strdup(url);
- if (!item->path) {
- error(0,errno,"could not create repl_item for %s",
- url);
- break;
- }
- item->server = prov;
- item->size = size;
- item->ms = ms;
- pthread_mutex_lock(&queue_lock);
- if (queue_tail) {
- item->next = queue_tail->next;
- queue_tail->next = item;
- }
- else {
- item->next = NULL;
- queue_head = item;
- }
- queue_tail = item;
- pthread_mutex_unlock(&queue_lock);
- g_atomic_int_inc(&rep_count);
- sem_post(&queue_sema);
- }
-}
-
-static void
-replicate_namespace_action (const char *name, repl_t action, my_state *ms)
-{
- size_t n_prov;
- provider_t **prov_list = hash_get_prov_list (&n_prov);
- if (prov_list == NULL) {
-	DPRINTF("failed to allocate space for provider list\n");
- return;
- }
-
- size_t i;
- for (i = 0; i < n_prov; i++) {
- provider_t *prov = prov_list[i];
- if (!strcmp(prov->name, me)) {
- continue;
- }
-	DPRINTF("replicating %s(%s) on %s\n",
- (action == REPL_ODELETE ? "delete" : "create"),
- name, prov->name);
- repl_item *item = malloc(sizeof(*item));
- if (!item) {
- error(0,errno,"could not create repl_item for %s",
- name);
- return;
- }
- item->type = action;
- item->path = strdup(name);
- if (!item->path) {
- return;
- }
- item->server = prov;
- item->ms = ms;
- pthread_mutex_lock(&queue_lock);
- if (queue_tail) {
- item->next = queue_tail->next;
- queue_tail->next = item;
- }
- else {
- item->next = NULL;
- queue_head = item;
- }
- queue_tail = item;
- pthread_mutex_unlock(&queue_lock);
- g_atomic_int_inc(&rep_count);
- sem_post(&queue_sema);
- }
-}
-
-void
-replicate_delete (const char *name, my_state *ms)
-{
- replicate_namespace_action(name,REPL_ODELETE,ms);
-}
-
-void
-replicate_bcreate (const char *name, my_state *ms)
-{
- replicate_namespace_action(name,REPL_BCREATE,ms);
-}
-
-/* Part of our API to the query module. */
-char *
-follow_link (char *object, const char *key)
-{
- char *slash;
- char *value = NULL;
-
- slash = strchr(object,'/');
- if (!slash) {
- return NULL;
- }
-
-	*(slash++) = '\0';
-	(void)meta_get_value(object,slash,key,&value);
-	*(--slash) = '/';
-
-	DPRINTF("%s: %s:%s => %s\n",__func__,object,key,value);
- return value;
-}
-
-int
-get_rep_count (void)
-{
- return g_atomic_int_get(&rep_count);
-}
diff --git a/replica.h b/replica.h
deleted file mode 100644
index 4dea07f..0000000
--- a/replica.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#if !defined(_REPLICA_H)
-#define _REPLICA_H
-
-#include "state_defs.h"
-
-void repl_init (void);
-void replicate (const char *url, size_t size,
- const char *policy, my_state *ms);
-void replicate_delete (const char *url, my_state *ms);
-void replicate_bcreate (const char *bucket, my_state *ms);
-int get_rep_count (void);
-
-char *follow_link (char *object, const char *key);
-
-#endif
diff --git a/rest.c b/rest.c
deleted file mode 100644
index 1fb1aa8..0000000
--- a/rest.c
+++ /dev/null
@@ -1,2316 +0,0 @@
-/* Copyright (C) 2010-2011 Red Hat, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include <config.h>
-
-#include <error.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <poll.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <assert.h>
-#include <errno.h>
-
-#include <microhttpd.h>
-#include <hstor.h> /* only for ARRAY_SIZE at this point */
-#include <curl/curl.h>
-#include <glib.h>
-
-#include "dirname.h"
-#include "iwh.h"
-#include "closeout.h"
-#include "hash.h"
-#include "progname.h"
-#include "meta.h"
-#include "backend.h"
-#include "setup.h"
-#include "replica.h"
-#include "template.h"
-#include "mpipe.h"
-#include "state_defs.h"
-
-/* Define-away for now. Eventually, define to gettext. */
-#define _(msgid) (msgid)
-
-#if defined(DEBUG)
-#define MY_MHD_FLAGS MHD_USE_THREAD_PER_CONNECTION | MHD_USE_DEBUG
-//#define MY_MHD_FLAGS MHD_USE_SELECT_INTERNALLY | MHD_USE_DEBUG
-#else
-#define MY_MHD_FLAGS MHD_USE_THREAD_PER_CONNECTION
-#endif
-
-/* Buffer size for MHD_create_post_processor, used to buffer and parse keys. */
-enum { POST_BUF_SIZE = 4096 };
-
-/* Upper bound on the block size used when microhttpd queries
- the callback function (i.e., I/O buffer size). */
-enum { CB_BLOCK_SIZE = 64 * 1024 };
-
-#define gc_register_thread() \
-  { \
-    struct GC_stack_base gc_stack_base; \
-    int st = GC_get_stack_base (&gc_stack_base); \
-    assert (st == GC_SUCCESS); \
-    GC_register_my_thread (&gc_stack_base); \
-  }
-
-typedef enum {
- URL_ROOT=0, URL_BUCKET, URL_OBJECT, URL_ATTR, URL_INVAL,
- URL_QUERY, URL_PROVLIST, URL_PROVIDER, URL_PROVIDER_SET_PRIMARY
-} url_type;
-
-typedef struct {
- const char *method;
- url_type utype;
- MHD_AccessHandlerCallback handler;
-} rule;
-
-static unsigned short my_port = MY_PORT;
-char *cfg_file = NULL;
-
-static const char *const (reserved_name[]) = {"_default", "_new", "_policy", "_query", NULL};
-static const char *const (reserved_attr[]) = {"_bucket", "_date", "_etag", "_key", "_loc", "_size", NULL};
-static const char *const (reserved_bucket_name[]) = {"_new", "_providers", NULL};
-
-static int
-validate_put (struct MHD_Connection *conn)
-{
- const char *mhdr;
-
- mhdr = MHD_lookup_connection_value(conn,MHD_HEADER_KIND,
- "X-redhat-role");
- /*
- * This will fail most obviously in the case where we are not the
- * master, we know we're not the master, and we don't see this
- * header (which is set in master-to-slave replication requests).
- * It will *also* fail, deliberately, if we do see this header when
- * we think we're the master, as it means there's a mismatch between
- * their config and ours. This avoids "strange" behavior in such
- * cases, in favor of a more obvious failure.
- * TBD: this will be less of a problem if/when we identify the
- * master and DB via the config file instead of -m/-d.
- */
- if (master_host) {
- return (mhdr && !strcmp(mhdr,"master"));
- }
- else {
- return !mhdr;
- }
-}
-
-static int
-is_reserved (const char *cand, char const *const *resv_list)
-{
- int i;
-
- for (i = 0; resv_list[i]; ++i) {
- if (!strcmp(cand,resv_list[i])) {
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-static int
-validate_url (const char *url)
-{
- char *slash = strrchr(url,'/');
-
- if (!slash) {
- /* There should be at least one betwixt bucket and key. */
- return 0;
- }
-
- return !is_reserved(slash+1,reserved_name);
-}
-
-/**********
- * The proxy has MHD on one side and CURL on the other. The CURL side is
- * always run in a child thread. Yes, there are both context switches
- * and copies between the threads. Get over it. The focus here is on
- * supporting multi-way replication on PUT, with minimal complexity. These
- * were the two easiest libraries to use, and they both want to allocate
- * their own buffers so we're kind of stuck with the copies unless we want
- * to buffer whole files in memory (which would have to be virtual since
- * they're potentialy bigger than physical) or explicitly ping them through
- * a local filesystem. We could potentially take over scheduling from one
- * or both to avoid some of the context switching, but those interfaces are
- * significantly more error-prone and (at least in CURL's case) buggy.
- *
- * For a GET, the CURL child acts as a producer while the MHD parent acts
- * as consumer. For a PUT, the MHD parent is the producer and the CURL
- * child is the consumer. For GET the MHD component is invoked via a
- * callback set up in the access handler; for PUT it's invoked via repeated
- * calls to the access handler itself. Either way, the producer's job is
- * to post its pointer+length to the my_state structure and then wait for
- * all consumers to check back in saying they've finished it. This might
- * involve multiple passes through each consumer for one pass through the
- * single producer. When the producer is done, it does a similar handshake
- * with the consumers. Each consumer has its own pipe_private structure,
- * containing a pointer to the shared my_state plus a per-consumer offset
- * into the current chunk.
- *
- * Attribute functions don't use CURL, so they do much simpler in-memory
- * buffering. Queries also don't use CURL, but the MHD POST interface
- * introduces some of its own complexity so see below for that.
- **********/
-
-static void
-simple_closer (void *ctx)
-{
- my_state *ms = ctx;
-
-	DPRINTF("%s: cleaning up\n",__func__);
-}
-
-static void
-child_closer (void * ctx)
-{
- pipe_private *pp = ctx;
-
-	DPRINTF("in %s\n",__func__);
-}
-
-/* Invoked from MHD. */
-static ssize_t
-proxy_get_cons (void *ctx, uint64_t pos, char *buf, size_t max)
-{
- pipe_private *pp = ctx;
- pipe_shared *ps = pp->shared;
- my_state *ms = ps->owner;
- ssize_t done;
- void *child_res;
-
- (void)pos;
-
-	DPRINTF("consumer asked to read %zu\n",max);
-
-	if (pipe_cons_wait(pp)) {
-	DPRINTF("consumer offset %zu into %zu\n",
-	pp->offset, ps->data_len);
-	if (ps->data_len < pp->offset)
-	// Warn about bogus offset?
-	done = -1;
-	else {
-	done = ps->data_len - pp->offset;
-	if ((size_t) done > max) {
-	done = max;
-	}
-	memcpy(buf,(char *)(ps->data_ptr)+pp->offset,done);
-	pp->offset += done;
-	DPRINTF("consumer copied %zu, new offset %zu\n",
-	done, pp->offset);
-	if (pp->offset == ps->data_len) {
-	DPRINTF("consumer finished chunk\n");
- pipe_cons_signal(pp, 0);
- }
- }
- }
- else {
- done = -1;
- }
-
- if (done == (-1)) {
- child_res = NULL;
- pthread_join(ms->backend_th,&child_res);
- if (child_res == THREAD_FAILED) {
-	DPRINTF("GET producer failed\n");
- /* Nothing we can do; already sent status. */
- }
- if (ms->from_master) {
- pthread_join(ms->cache_th,NULL);
- /* TBD: do something about cache failure? */
- }
- }
-
- return done;
-}
-
-static int
-proxy_get_data (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp;
- my_state *ms = *rctx;
- pipe_private *pp;
- pipe_private *pp2;
- char *my_etag;
- const char *user_etag;
- int rc;
-
- (void)cctx;
- (void)method;
- (void)version;
- (void)data;
- (void)data_size;
-
-	DPRINTF("PROXY GET DATA %s\n",url);
-
- ms->url = strdup(url);
- if (!ms->url) {
- return MHD_NO;
- }
-
- my_etag = meta_has_copy(ms->bucket,ms->key,me);
- if (!my_etag) {
-	DPRINTF("falling back to local for %s/%s\n",ms->bucket,ms->key);
-	ms->from_master = 0;
-	}
-	else if (*my_etag) {
-	user_etag = MHD_lookup_connection_value(
-	conn, MHD_HEADER_KIND, "If-None-Match");
-	if (user_etag && !strcmp(user_etag,my_etag)) {
-	DPRINTF("ETag match!\n");
- resp = MHD_create_response_from_data(0,NULL,
- MHD_NO,MHD_NO);
- MHD_queue_response(conn,MHD_HTTP_NOT_MODIFIED,resp);
- MHD_destroy_response(resp);
- return MHD_YES;
- }
- ms->from_master = 0;
- }
- else {
-	DPRINTF("%s/%s not found locally\n",ms->bucket,ms->key);
-	if (!master_host) {
-	DPRINTF(" that means it doesn't exist\n");
-	resp = MHD_create_response_from_data(0,NULL,
-	MHD_NO,MHD_NO);
-	MHD_queue_response(conn,MHD_HTTP_NOT_FOUND,resp);
-	MHD_destroy_response(resp);
-	return MHD_YES;
-	}
-	DPRINTF(" will fetch from %s:%u\n", master_host,master_port);
- ms->from_master = 1;
- }
-
- pipe_init_shared(&ms->pipe,ms,ms->from_master+1);
- pp = pipe_init_private(&ms->pipe);
- if (!pp) {
- return MHD_NO;
- }
- provider_t *main_prov = get_main_provider();
- ms->thunk.parent = ms;
- ms->thunk.prov = ms->from_master ? g_master_prov : main_prov;
- pthread_create(&ms->backend_th,NULL,
- ms->thunk.prov->func_tbl->get_child_func,&ms->thunk);
- /* TBD: check return value */
-
- if (ms->from_master) {
- pp2 = pipe_init_private(&ms->pipe);
- if (!pp2) {
- return MHD_NO;
- }
- pp2->prov = main_prov;
- pthread_create(&ms->cache_th,NULL,
- main_prov->func_tbl->cache_child_func,pp2);
- /* TBD: check return value */
- }
- else {
- pp2 = NULL;
- }
-
- rc = pipe_cons_wait_init(&ms->pipe);
- ms->rc = (rc == 0) ? MHD_HTTP_OK : MHD_HTTP_INTERNAL_SERVER_ERROR;
-
- resp = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN,
- CB_BLOCK_SIZE, proxy_get_cons, pp, child_closer);
- if (!resp) {
-	fprintf(stderr,"MHD_crfc failed\n");
- if (pp2) {
- /* TBD: terminate thread */
- }
- child_closer(pp);
- return MHD_NO;
- }
- MHD_queue_response(conn,ms->rc,resp);
- MHD_destroy_response(resp);
-
- return MHD_YES;
-}
-
-static void
-recheck_replication (my_state * ms, char *policy)
-{
- int rc;
- char fixed[MAX_FIELD_LEN];
-
- if (is_reserved(ms->key,reserved_name)) {
-	DPRINTF("declining to replicate reserved object %s\n",ms->key);
-	return;
-	}
-
-	if (!policy && ms->dict) {
-	DPRINTF("using new policy for %s/%s\n",ms->bucket,ms->key);
-	policy = kv_hash_lookup (ms->dict, "_policy");
-	}
-
-	if (!policy) {
-	DPRINTF("fetching policy for %s/%s\n",ms->bucket,ms->key);
-	rc = meta_get_value(ms->bucket,ms->key, "_policy", &policy);
-	}
-
-	if (!policy) {
-	DPRINTF(" inheriting policy from %s\n",ms->bucket);
-	rc = meta_get_value(ms->bucket,
-	"_default", "_policy", &policy);
-	}
-
-	if (policy) {
-	DPRINTF(" implementing policy %s\n",policy);
-	/*
-	 * Can't use ms->url here because it might be a bucket POST
-	 * and in that case ms->url points to the bucket.
-	 */
-	snprintf(fixed,sizeof(fixed),"%s/%s",ms->bucket,ms->key);
-	replicate(fixed,0,policy,ms);
-	}
-	else {
-	DPRINTF(" could not find a policy anywhere!\n");
- }
-}
-
-static int
-proxy_put_data (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp;
- my_state *ms = *rctx;
- pipe_private *pp;
- int rc;
- char *etag = NULL;
- void *child_res;
-
- (void)cctx;
- (void)method;
- (void)version;
-
-	DPRINTF("PROXY PUT DATA %s (%zu)\n",url,*data_size);
-
-	if (ms->state == MS_NEW) {
-	if (!validate_put(conn) || !validate_url(url)) {
-	DPRINTF("rejecting %s\n",url);
- resp = MHD_create_response_from_data(0,NULL,
- MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_FORBIDDEN,resp);
- MHD_destroy_response(resp);
- return MHD_YES;
- }
- ms->state = MS_NORMAL;
- ms->url = strdup(url);
- if (!ms->url) {
- return MHD_NO;
- }
- ms->size = 0;
- pipe_init_shared(&ms->pipe,ms,1);
- pp = pipe_init_private(&ms->pipe);
- if (!pp) {
- return MHD_NO;
- }
- provider_t *main_prov = get_main_provider();
- pp->prov = main_prov;
- ms->be_flags = BACKEND_GET_SIZE;
- pthread_create(&ms->backend_th,NULL,
- main_prov->func_tbl->put_child_func,pp);
- /* TBD: check return value */
-
- /*
- * Do the initial handshake with children. If we return from
- * this callback without an error response, Microhttpd posts
- * the "100 Continue" header and the client starts sending
- * the data. We must report errors here or forever keep
- * our peace.
- */
- rc = pipe_prod_wait_init(&ms->pipe);
- if (rc != 0) {
-	DPRINTF("producer wait failed\n");
- resp = MHD_create_response_from_data(0,NULL,
- MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_INTERNAL_SERVER_ERROR,
- resp);
- MHD_destroy_response(resp);
- } else if (rc > 0) {
- /*
- * Note that we fail here even if 1 of N replicas fail.
- * Might want to fix this when we start looping over
- * pipe_init_private() above.
- */
-	DPRINTF("producer replicas failed (%u of %u)\n",
-	rc, ms->pipe.cons_total);
- resp = MHD_create_response_from_data(0,NULL,
- MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_INTERNAL_SERVER_ERROR,
- resp);
- MHD_destroy_response(resp);
- } else {
-	DPRINTF("producer proceeding\n");
-	}
-	}
-	else if (*data_size) {
-	pipe_prod_signal(&ms->pipe,(void *)data,*data_size);
-	ms->size += *data_size;
-	DPRINTF("producer chunk finished\n");
-	*data_size = 0;
- }
- else {
- pipe_prod_finish(&ms->pipe);
- pthread_join(ms->backend_th,&child_res);
- if (child_res == THREAD_FAILED) {
- DPRINTF("thread failedn");
- rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- else if (ms->pipe.cons_error == ms->pipe.cons_total) {
- DPRINTF("all %u consumers failedn",
- ms->pipe.cons_error);
- rc = MHD_HTTP_INTERNAL_SERVER_ERROR;
- }
- else {
- if (master_host) {
- meta_got_copy(ms->bucket,ms->key,me);
- etag = NULL;
- }
- else {
- etag = meta_did_put(ms->bucket,ms->key,me,
- ms->size);
- }
- DPRINTF("rereplicate (obj PUT)n");
- recheck_replication(ms,NULL);
- rc = MHD_HTTP_OK;
- }
- resp = MHD_create_response_from_data(0,NULL,MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- if (etag) {
- MHD_add_response_header(resp,"ETag",etag);
- }
- MHD_queue_response(conn,rc,resp);
- MHD_destroy_response(resp);
- }
-
- return MHD_YES;
-}
-
-static int
-proxy_get_attr (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp;
- char *fixed;
- my_state *ms = *rctx;
- int rc = MHD_HTTP_NOT_FOUND;
-
- (void)cctx;
- (void)method;
- (void)version;
- (void)data;
- (void)data_size;
-
- DPRINTF("PROXY GET ATTR %sn",url);
-
- if (meta_get_value(ms->bucket,ms->key,ms->attr,&fixed) == 0) {
- resp = MHD_create_response_from_data(strlen(fixed),fixed,
- MHD_NO,MHD_NO);
- rc = MHD_HTTP_OK;
- }
- else {
- resp = MHD_create_response_from_data(0,NULL,MHD_NO,MHD_NO);
- }
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,rc,resp);
- MHD_destroy_response(resp);
-
- return MHD_YES;
-}
-
-static int
-proxy_put_attr (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp;
- my_state *ms = *rctx;
- const char *attrval;
- int send_resp = 0;
-
- (void)cctx;
- (void)method;
- (void)version;
-
- DPRINTF("PROXY PUT ATTR %s (%zu)n",url,*data_size);
-
- if (ms->state == MS_NEW) {
- ms->state = MS_NORMAL;
- ms->url = strdup(url);
- if (!ms->url) {
- return MHD_NO;
- }
- attrval = MHD_lookup_connection_value(conn,MHD_HEADER_KIND,
- "X-redhat-value");
- if (attrval) {
- meta_set_value(ms->bucket,ms->key,ms->attr,
- (char *)attrval);
- send_resp = 1;
- }
- }
- else if (*data_size) {
- if (ms->pipe.data_len) {
- ms->pipe.data_len += *data_size;
- char *p = realloc(ms->pipe.data_ptr,ms->pipe.data_len);
- if (!p) {
- return MHD_NO;
- }
- ms->pipe.data_ptr = p;
- }
- else {
- ms->pipe.data_len = *data_size + 1;
- ms->pipe.data_ptr = malloc(ms->pipe.data_len);
- if (!ms->pipe.data_ptr) {
- return MHD_NO;
- }
- ((char *)ms->pipe.data_ptr)[0] = '0';
- }
- (void)strncat(ms->pipe.data_ptr,data,*data_size);
- /* TBD: check return value */
- *data_size = 0;
- }
- else {
- if (!ms->pipe.data_ptr) {
- return MHD_NO;
- }
- if (is_reserved(ms->attr,reserved_attr)) {
- resp = MHD_create_response_from_data(
- 0,NULL,MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_BAD_REQUEST,
- resp);
- MHD_destroy_response(resp);
- return MHD_YES;
- }
- meta_set_value(ms->bucket,ms->key,ms->attr,ms->pipe.data_ptr);
- /*
- * We should always re-replicate, because the replication
- * policy might refer to this attr.
- */
- DPRINTF("rereplicate (attr PUT)n");
- recheck_replication(ms,NULL);
- send_resp = 1;
- }
-
- if (send_resp) {
- resp = MHD_create_response_from_data(0,NULL,MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_CREATED,resp);
- MHD_destroy_response(resp);
- /*
- * TBD: check if the attribute was a replication policy, and
- * start/stop replication activities as appropriate.
- */
- }
-
- return MHD_YES;
-}
-
-/**********
- * For queries, we have to deal with MHD's post-iterator interface (not
- * quite the same as the data-iteration even though we use it that way) on
- * one side, and a query-iterator interface on the other. Data on both
- * sides could be quite large, so we can't just stick them in header lines.
- * We do still buffer the query in memory, though. Once that's done, we do
- * very simple parsing - it will be more complicated later - and create the
- * query iterator. That's also driven by MHD, this time though the
- * content-callback interface, and repeatedly calls in to the metadata
- * module to fetch one object name at a time.
- **********/
-
-static int
-query_iterator (void *ctx, enum MHD_ValueKind kind, const char *key,
- const char *filename, const char *content_type,
- const char *transfer_encoding, const char *data,
- uint64_t off, size_t size)
-{
- (void)ctx;
- (void)kind;
- (void)key;
- (void)filename;
- (void)content_type;
- (void)transfer_encoding;
- (void)data;
- (void)off;
- (void)size;
-
- /* We actually accumulate the data in proxy_query. */
- return MHD_YES;
-}
-
-/* MHD reader function during queries. Return -1 for EOF. */
-static ssize_t
-proxy_query_func (void *ctx, uint64_t pos, char *buf, size_t max)
-{
- my_state *ms = ctx;
- size_t len;
- char *bucket;
- char *key;
-
- (void)pos;
-
- if (!ms->gen_ctx) {
- const char *accept_hdr
- = MHD_lookup_connection_value(ms->conn, MHD_HEADER_KIND,
- "Accept");
- ms->gen_ctx = tmpl_get_ctx(accept_hdr);
- if (!ms->gen_ctx) {
- return -1;
- }
- len = tmpl_list_header(ms->gen_ctx);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- return len;
- }
-
- if (ms->gen_ctx == TMPL_CTX_DONE) {
- return -1;
- }
-
- for(;;) {
- if (!meta_query_next(ms->query,&bucket,&key)) {
- break;
- }
- if (is_reserved(key,reserved_name)) {
- continue;
- }
- len = tmpl_list_entry(ms->gen_ctx,bucket,key);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- return len;
- }
-
- len = tmpl_list_footer(ms->gen_ctx);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- ms->gen_ctx = TMPL_CTX_DONE;
- return len;
-}
-
-/* Helper used by gc_register_finalizer_ms. */
-static void
-destroy_state_postprocessor (void *ms_v, void *client_data)
-{
- my_state *ms = ms_v;
- if (ms->post)
- MHD_destroy_post_processor (ms->post);
- if (ms->dict)
- hash_free (ms->dict);
- if (ms->query)
- meta_query_stop (ms->query);
- if (ms->aquery)
- meta_query_stop (ms->aquery);
-}
-
-/* Tell the garbage collector that when freeing MS, it must invoke
- destroy_state_postprocessor(MS). This is required for each ms->post
- since they're allocated via MHD_create_post_processor, which is
- in a separate library into which the GC has no view.
- Likewise for ms->dict, ms->query and ms->aquery. */
-static void
-gc_register_finalizer_ms(void *ms)
-{
- if (ms)
- GC_register_finalizer(ms, destroy_state_postprocessor, 0, 0, 0);
-}
-
-static int
-proxy_query (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp;
- my_state *ms = *rctx;
-
- (void)cctx;
- (void)method;
- (void)version;
-
- DPRINTF("PROXY QUERY %s (%zu)n",url,*data_size);
-
- if (ms->state == MS_NEW) {
- ms->state = MS_NORMAL;
- ms->post = MHD_create_post_processor(conn, POST_BUF_SIZE,
- query_iterator,ms);
- if (!ms->post)
- return MHD_NO;
- gc_register_finalizer_ms(ms);
- }
- else if (*data_size) {
- MHD_post_process(ms->post,data,*data_size);
- if (ms->pipe.data_len) {
- ms->pipe.data_len += *data_size;
- char *p = realloc(ms->pipe.data_ptr,ms->pipe.data_len);
- if (!p) {
- return MHD_NO;
- }
- ms->pipe.data_ptr = p;
- }
- else {
- ms->pipe.data_len = *data_size + 1;
- ms->pipe.data_ptr = malloc(ms->pipe.data_len);
- if (!ms->pipe.data_ptr) {
- return MHD_NO;
- }
- ((char *)ms->pipe.data_ptr)[0] = '0';
- }
- (void)strncat(ms->pipe.data_ptr,data,*data_size);
- /* TBD: check return value */
- *data_size = 0;
- }
- else {
- if (!ms->pipe.data_ptr) {
- return MHD_NO;
- }
- ms->query = meta_query_new(ms->bucket,NULL,ms->pipe.data_ptr);
- resp = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN,
- CB_BLOCK_SIZE, proxy_query_func, ms, simple_closer);
- if (!resp) {
- fprintf(stderr,"MHD_crfc failedn");
- simple_closer(ms);
- return MHD_NO;
- }
- MHD_queue_response(conn,MHD_HTTP_OK,resp);
- MHD_destroy_response(resp);
- }
-
- return MHD_YES;
-}
-
-static int
-proxy_list_objs (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- my_state *ms = *rctx;
- struct MHD_Response *resp;
-
- (void)cctx;
- (void)url;
- (void)method;
- (void)version;
- (void)data;
- (void)data_size;
-
- ms->query = meta_query_new((char *)ms->bucket,NULL,NULL);
-
- resp = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN,
- CB_BLOCK_SIZE, proxy_query_func, ms, simple_closer);
- if (!resp) {
- fprintf(stderr,"MHD_crfc failedn");
- simple_closer(ms);
- return MHD_NO;
- }
-
- MHD_queue_response(conn,MHD_HTTP_OK,resp);
- MHD_destroy_response(resp);
- return MHD_YES;
-}
-
-static int
-proxy_delete (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- my_state *ms = *rctx;
- struct MHD_Response *resp;
- char *copied_url;
- char *bucket;
- char *key;
- char *stctx = NULL;
- int rc;
-
- (void)cctx;
- (void)method;
- (void)version;
- (void)data;
- (void)data_size;
-
- DPRINTF("PROXY DELETE %sn",url);
-
- provider_t *main_prov = get_main_provider();
- ms->thunk.parent = ms;
- ms->thunk.prov = main_prov;
- rc = ms->thunk.prov->func_tbl->delete_func(main_prov,
- ms->bucket,ms->key,url);
- if (rc == MHD_HTTP_OK) {
- copied_url = strdup(url);
- assert (copied_url);
- bucket = strtok_r(copied_url,"/",&stctx);
- key = strtok_r(NULL,"/",&stctx);
- meta_delete(bucket,key);
- replicate_delete(url,ms);
- }
-
- resp = MHD_create_response_from_data(0,NULL,MHD_NO,MHD_NO);
- if (!resp) {
- return MHD_NO;
- }
- error (0, 0, "DELETE BUCKET: rc=%d", rc);
- MHD_queue_response(conn,rc,resp);
- MHD_destroy_response(resp);
-
- return MHD_YES;
-}
-
/* TBD: get actual bucket list */
typedef struct {
    const char *rel;   /* link relation advertised in the root listing */
    const char *link;  /* reserved URL component it points at */
} fake_bucket_t;

/* FIXME: ensure that the RHS values here stay in sync with those
   in reserved_bucket_name. */
static const fake_bucket_t fake_bucket_list[] = {
    { .rel = "bucket_factory", .link = "_new"       },
    { .rel = "provider_list",  .link = "_providers" },
};
-
-static ssize_t
-root_blob_generator (void *ctx, uint64_t pos, char *buf, size_t max)
-{
- my_state *ms = ctx;
- const fake_bucket_t *fb;
- size_t len;
- const char *host;
- char *bucket;
- char *key;
-
- (void)pos;
-
- host = MHD_lookup_connection_value(ms->conn,MHD_HEADER_KIND,"Host");
-
- if (!ms->gen_ctx) {
- const char *accept_hdr
- = MHD_lookup_connection_value(ms->conn, MHD_HEADER_KIND,
- "Accept");
- ms->gen_ctx = tmpl_get_ctx(accept_hdr);
- if (!ms->gen_ctx) {
- return -1;
- }
- ms->gen_ctx->base = host;
- len = tmpl_root_header(ms->gen_ctx,"image_warehouse",VERSION);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- return len;
- }
-
- if (ms->gen_ctx == TMPL_CTX_DONE) {
- return -1;
- }
-
- if (ms->gen_ctx->index < ARRAY_SIZE(fake_bucket_list)) {
- fb = fake_bucket_list + ms->gen_ctx->index;
- len = tmpl_root_entry(ms->gen_ctx,fb->rel,fb->link);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- return len;
- }
-
- if (meta_query_next(ms->query,&bucket,&key)) {
- len = tmpl_root_entry(ms->gen_ctx,"bucket",bucket);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- return len;
- }
-
- len = tmpl_root_footer(ms->gen_ctx);
- if (!len) {
- return -1;
- }
- if (len > max) {
- len = max;
- }
- memcpy(buf,ms->gen_ctx->buf,len);
- ms->gen_ctx = TMPL_CTX_DONE;
- return len;
-}
-
-static int
-proxy_api_root (void *cctx, struct MHD_Connection *conn, const char *url,
- const char *method, const char *version, const char *data,
- size_t *data_size, void **rctx)
-{
- struct MHD_Response *resp = NULL;
- unsigned int rc = MHD_HTTP_OK;
- my_state *ms = *rctx;
-
- (void)cctx;
- (void)method;
- (void)version;
- (void)data;
-
- DPRINTF("PROXY API ROOT (%s, %zu)n",url,*data_size);
-
- ms->query = meta_query_new(NULL,"_default",NULL);
- if (!ms->query) {
- return MHD_NO;
- }
- resp = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN,
- CB_BLOCK_SIZE, root_blob_generator, ms, simple_closer);
- if (!resp) {
- return MHD_NO;
- }
- MHD_queue_response(conn,rc,resp);
- MHD_destroy_response(resp);
-
- return MHD_YES;
-
-}
-
-static int
-post_iterator (void *ctx, enum MHD_ValueKind kind, const char *key,
- const char *filename, const char *content_type,
- const char *transfer_encoding, const char *data,
- uint64_t off, size_t size)
-{
- char *old_val;
- size_t old_len;
- char *new_val;
-
- (void)kind;
- (void)filename;
- (void)content_type;
- (void)transfer_encoding;
- (void)off;
-
- printf("adding %s, size=%zun",key,size);
-
- // TBD: don't assume that values are null-terminated strings
- old_val = kv_hash_lookup(ctx,key);
- if (old_val) {
- old_len = strlen(old_val);
- new_val = malloc(old_len+size+1);
- if (!new_val) {
- return MHD_NO;
- }
- memcpy(new_val,old_val,old_len);
- memcpy(new_val+old_len,data,size);
- new_val[old_len+size] = '0';
- }
- else {
- new_val = malloc(size+1);
- if (!new_val) {
- return MHD_NO;
- }
- memcpy(new_val,data,size);
- new_val[size] = '0';
- }
-
- char *k = strdup (key);
- if (!k) {
- free (new_val);
- return MHD_NO;
- }
-
- kv_hash_insert_new (ctx, k, new_val);
-
- return MHD_YES;
-}
-
-/* Returns TRUE if we found an *invalid* key. */
-static bool
-post_find (void *kvv, void *ctx_v)
-{
- struct kv_pair *kv = kvv;
- if (!is_reserved(kv->key,reserved_attr)) {
- return true;
- }
-
- DPRINTF("bad attr %sn", kv->key);
- void **ctx = ctx_v;
- *ctx = kv;
- return false;
-}
-
-static bool
-post_foreach (void *kvv, void *ms_v)
-{
- struct kv_pair *kv = kvv;
- my_state *ms = ms_v;
-
- DPRINTF("setting %s = %s for %s/%sn", kv->key, kv->val,
- ms->bucket, ms->key);
- meta_set_value(ms->bucket, ms->key, kv->key, kv->val);
- return true;
-}
-
-static int
-create_bucket (char *name, my_state *ms)
-{
- int rc;
-
- if (is_reserved(name, reserved_name)
- || is_reserved(name, reserved_bucket_name)) {
- return MHD_HTTP_BAD_REQUEST;
- }
-
- provider_t *main_prov = get_main_provider();
- rc = main_prov->func_tbl->bcreate_func(main_prov,name);
- if (rc == MHD_HTTP_OK) {
- if (meta_set_value(name,"_default", "_policy","0") != 0) {
- DPRINTF("default-policy " "create failedn");
- /* Non-fatal. */
- }
- DPRINTF("created bucket %sn",name);
- /*
- * There's not a whole lot to do about bucket-creation
- * failures on replicas, other than to report them, unless
- * we adopt an "all or nothing" approach and unwind the
...e-mail trimmed, has been too large.
13 years, 2 months
Changes to 'master'
by Jim Meyering
New branch 'master' available with the following commits:
commit 4cb41b49a2b69509658d4fb90c21efde29476d9d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Feb 14 14:14:05 2011 +0100
avoid file descriptor leak in replication
The "pipes[2]" member was set by calling pipe, but those
two descriptors were never closed or even used.
* replica.c (struct _repl_item) [pipes[2]]: Remove member.
(repl_worker): Don't use ->pipes.
commit 25a832f79a7685cecad169206df7a235547b81f8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Feb 14 13:44:23 2011 +0100
tweak diagnostics
commit 9e39dc2a1855c7c313e39f643c1681f1bd32f141
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 15:31:35 2011 +0100
maint: also create xz-compressed tarballs
* configure.ac (AM_INIT_AUTOMAKE): Also make xz-compressed tarballs.
They are more than 30% smaller.
commit 684918a88245adbbb80cd3b317d80877d1f9a861
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:58:50 2011 +0100
post-release administrivia
* NEWS: Add header line for next release.
* .prev-version: Record previous version.
* cfg.mk (old_NEWS_hash): Auto-update.
commit 8f68e88ae81db999209f0cf16bba47bd9df9cd14
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:56:52 2011 +0100
version 0.91
* NEWS: Record release date.
commit 600ca43a8cd39dae9aa1256a670da9a243678207
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:52:35 2011 +0100
maint: record previous release tag name
* .prev-version: Record tag name of previous release.
This is used (and automatically advanced) when making a release
via e.g., "build-aux/do-release-commit-and-tag 0.91 stable".
commit 8c22e73ad9f889f528a0acb7d761bf15d3d91c95
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:45:48 2011 +0100
doc: add to NEWS
* NEWS: Update
commit fa248d5fdb71e0c52b0257c59e2c28a9dd82e451
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 8 16:00:37 2011 +0100
maint: speed up configure
* configure.ac (gl_ASSERT_NO_GNULIB_POSIXCHECK): Speed up normal
configure runs.
commit e41b7a771670ffdd08bcfe0550810632c6a466de
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Feb 8 15:56:11 2011 +0100
maint: build via make CFLAGS='-DGNULIB_POSIXCHECK=1'; address warnings
* bootstrap.conf: Add most of the recommended modules:
calloc-posix close dup2 mkstemp pipe-posix strstr strtok_r unlink
commit f47eb186d1fb2d2fc6b7698427295f437a39c83b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 25 11:12:05 2011 +0100
tests: reenable excluded gnulib test; run gnulib-tests first
* bootstrap.conf: Don't disable malloca-test. It has been fixed
so it is no longer so slow.
* gnulib: Update to latest.
* Makefile.am (SUBDIRS): Run gnulib-tests before ours,
so the results of ours aren't displaced as gnulib's scroll by.
commit e598b6675690fb79fc9c74ffb11be97f8f258421
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Jan 21 16:05:22 2011 +0100
don't pass NULL buffer to formatter in provider list generation
* rest.c (prov_list_generator): Pre-allocate a reasonably-large
buffer, rather than starting with a 0-length buffer and relying on
the ~doubling/realloc loop to make the buffer large enough.
commit 80a978acd30fc420ce4d4139caa955d43ec04ac7
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 20 14:30:31 2011 +0100
protect remaining uses of prov_hash against concurrent access
* setup.c (hash_get_prov_list): New function.
* setup.h: Declare it.
* replica.c (replicate, replicate_namespace_action): Use
hash_get_prov_list to get all provider pointers at once, and which
locks the hash table before accessing it.
With hash_get_first_prov and hash_get_next_prov that was not possible.
* setup.c (hash_get_first_prov, hash_get_next_prov): Remove functions.
* setup.h: Remove declarations.
commit e73b7221f26dc46b07575a18e207836eaed6d7e7
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 20 14:29:04 2011 +0100
remove dead code
* rest.c (prov_list_generator): Don't store into unused member,
ms->prov_iter.
* state_defs.h (_my_state) [prov_iter]: Remove now-unused member.
commit 1f415f5dfaa403b3ef86945806fb93f24335bc20
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 19 18:16:19 2011 +0100
use symbolic names in place of more hard-coded constants
* rest.c (POST_BUF_SIZE, CB_BLOCK_SIZE): Define constants.
(proxy_get_data): Use them in place of hard-coded constants.
(proxy_query, proxy_list_objs, proxy_api_root): Likewise.
(control_api_root, proxy_bucket_post, show_parts): Likewise.
(proxy_object_post, proxy_list_provs, proxy_add_prov): Likewise.
Reported by Jeff Darcy.
commit 9b8036c2cdae166c532a6c510017f2de485e489c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 19 17:41:18 2011 +0100
use SMALL_PRIME in place of literal 13 (initial hash table size)
* setup.h (SMALL_PRIME): Define.
* setup.c: s/13/SMALL_PRIME/
* rest.c: Likewise.
Reported by Jeff Darcy.
commit 0a5ec6af13b82ad1dd4b1314f98f209660820d06
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 19 17:15:38 2011 +0100
build: make configure fail if gc-devel (aka libgc-dev) is not installed
* configure.ac: Check for <gc.h>.
Reported by Jeff Darcy.
commit facb15d171c3d99112291511137d92e300698950
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 19 17:08:42 2011 +0100
plug a potential leak
This tells GC that when finalizing an "ma" pointer, it must
also release a few more members of that structure.
* rest.c (destroy_state_postprocessor): Also free ms->query
and ms->aquery.
(gc_register_finalizer_ms): Update comment to reflect reality.
commit 0b0c8f73f794cf6c9b5427013aacda31e6ed4a54
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Jan 17 21:58:38 2011 +0100
remove gnulib hash.c diff hack
Rather than compiling hash.c differently, treat it more like
part of the library that it is, and instead arrange to free
things via our GC finalize handler.
Remove kv_free and all uses.
* setup.h (kv_free): Remove definition. Now unused.
* gl/lib/hash.c.diff: Remove file. Not needed.
* rest.c (destroy_state_postprocessor): Also call hash_free.
* setup.c: Remove uses of kv_free. No need.
commit 16dd48f463abbb2905afe5b74f4905932eb85ca4
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 18 15:13:29 2011 +0100
tests: also check JSON provider lists
* t/init.cfg (emit_provider): Add parameter, is-last, so that
we know whether to print the final comma for JSON output.
* t/basic: Update emit_trivial_provider_list use.
* t/provider: Check both XML and JSON formats.
commit fff9a6a6445c089b2a8f557176d89819aac2a3ec
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 18 19:16:16 2011 +0100
list providers: avoid syntax error in JSON output
Without this change, we would print "[,\n" at the beginning
of the list of JSON-formatted providers.
* template.c (tmpl_prov_entry): Don't emit the leading ","
on the first entry.
* template.h: Adjust prototype.
* rest.c: Update sole caller.
Spotted by Jeff Darcy.
commit ba44b876ad572f2861114a0ba01124c6611a9da8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 18 13:05:20 2011 +0100
sort provider list on "name"
* rest.c: Sort provider list output on name:
(struct plist): Define.
(prov_get, prov_name_compare): New functions.
(prov_list_generator): Rather than emitting provider XML/JSON
in arbitrary (hash-traversal) order, first gather an array of
provider_t* pointers, sort them, and *then* emit listing.
* t/init.cfg (emit_provider_list_prefix): New helper.
(emit_provider_list_suffix): Likewise.
(emit_trivial_provider_list): Renamed from emit_provider.
(emit_provider): Emit output for a single provider.
I.e., do not emit prefix and suffix.
* t/basic: s/emit_provider/emit_trivial_provider_list/
* t/provider: Compare full output with expected output,
rather than just grepping for a summary.
This is feasible, now that the provider list is sorted.
commit 33dea68259d6ea08253163373f4c36400f6535ca
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 18 10:28:55 2011 +0100
tests: prepare for improved provider checks in t/provider
* t/basic (emit_provider, emit_bucket_list): Move helper functions...
* t/init.cfg: ...to here.
commit 8eccf807a16213425c463630b9bdae0c6734ee52
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 18 08:51:59 2011 +0100
microhttpd may also spawn threads to call prov_list_generator; tell GC
Just as done for access_handler (registered via MHD_start_daemon),
we have to tell the garbage collector about the thread that runs
MHD_connection_handle_idle and calls prov_list_generator.
The symptom is an abort when the MHD_connection_handle_idle runs.
To diagnose, invoke gdb on the resulting core file, then type
"thread apply all bt" and note the functions at the base of the
stack on the losing thread. That's the one that hasn't yet been
registered for GC.
* rest.c (gc_register_thread): New macro, factored out of...
(access_handler): ...here.
(prov_list_generator): Use it here, too.
commit f08ff1ceab77a8e2e06d1170b4a46fb47920d29b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Jan 14 22:13:27 2011 +0100
also mutex-protect the provider-iterator used in listing
* setup.c (prov_do_for_each): Guard with a mutex.
commit 549b989ddd8b189cb3b798d602ee30502aea473a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Jan 14 21:49:09 2011 +0100
rewrite provider-listing code so we can protect it with a mutex:
The above is the primary goal, but this change also avoids
printing invalid output for pathologically-long provider records.
When iterating through the provider hash, we must prevent insertion.
Rather than getting/formatting a new provider for each callback
(which would mean holding a mutex for way too long -- and hard to
know if/when to release it), iterate over all providers the first
time and save all output in an allocated buffer. Then serve up
bite-sized pieces of that buffer until it's all output.
* rest.c (a2nrealloc): New function, derived from gnulib's x2nrealloc.
(prov_fmt): New function.
(prov_list_generator): Rewrite, prov_do_for_each and the above.
Assert that header fits in our buffer, rather than silently
truncating it and thus producing invalid output.
Do the same for the footer.
* template.c (tmpl_prov_entry): Rewrite not to use a fixed-size buffer.
Now, this is just a thin layer around snprintf.
* template.h (tmpl_prov_entry): New prototype.
* state_defs.h [struct _my_state] (buf, buf_n_alloc, size_t buf_n_used):
New members.
* setup.c (prov_do_for_each): New function.
* setup.h: Declare it.
commit 9414f266a267f5e8a1860ef87ad2a59a3022ff24
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Jan 14 17:47:42 2011 +0100
avoid unnecessary MHD_lookup_connection_value calls
Move "accept_hdr" decl and definition into sole block where it's used
This avoids the unnecessary call to MHD_lookup_connection_value on
all but the first call to each of these event-handling functions.
* rest.c (proxy_query_func, root_blob_generator, parts_callback):
(prov_list_generator): As above.
commit ac3e583f5cf58a38f261d12abba558e40b1e7873
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Jan 14 17:11:10 2011 +0100
handle hash_initialize and MHD_create_post_processor failure
* rest.c (proxy_query, control_api_root, proxy_bucket_post):
(proxy_object_post, proxy_add_prov): Return MHD_NO, rather than
ignoring the failures.
commit b247691b9ea2f7dacbcf1edbedef28206eb2a722
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 13 22:17:00 2011 +0100
avoid a leak via ms->post = MHD_create_post_processor(...
* rest.c (gc_register_finalizer_ms): New function.
(proxy_query, control_api_root, proxy_bucket_post): Use it.
(proxy_object_post, proxy_add_prov): Likewise.
(destroy_state_postprocessor): New function.
commit d4de957c8a10b97952b18c705369f305393b6b16
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 13 18:26:25 2011 +0100
t/provider: warn-then-sleep on failure -- eases debugging
commit a2eee0a0daabc99ff2b2db49948adfdead278c63
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 12 18:36:27 2011 +0100
tell GC about the thread spawned by MHD_start_daemon
rest.c's main program calls MHD_start_daemon to register
access_handler as a function that it will call from the thread
it creates. Normally, the garbage collector learns of pthread_create
calls because they're cpp-wrapped. However, when it's called from
3rd-party libraries as in this case, we can't very well recompile,
so have to use a different approach:
* rest.c (access_handler): Call GC_register_my_thread to inform the
garbage collector of this new thread.
commit eb90a1c8aedc31c4c6587a9c9b79fc3ba6b4fc54
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Jan 12 18:29:17 2011 +0100
insinuate GC into gnulib's hash-related code
* bootstrap.conf: Disable the hash-test, which would otherwise
get link failures due to unresolved GC_malloc, etc.
* gl/lib/hash.c.diff: New file. This patch is automatically
applied to gnulib's hash.c at bootstrap time.
commit f941f9f0fac64fcadb87dec827964e0ca3f28316
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Jan 10 20:00:45 2011 +0100
convert all remaining uses of g_hash_* functions
g_hash_table_insert
g_hash_table_remove
g_hash_table_foreach
g_hash_table_find
g_hash_table_iter_init
g_hash_table_iter_next
commit ef8b8713dd64dce7c32a9b3a3eca2a2c1e1ff7ef
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Jan 10 15:42:54 2011 +0100
convert remaining g_hash_table_lookup functions to kv_hash_lookup
commit d2ab5e735f780571599111bac2cf726c2f9ae62b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 6 22:49:53 2011 +0100
begin converting hash tables from glib to gnulib
commit f5febefdfefc5ebff68388e4899d690d55d83a72
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 6 23:13:38 2011 +0100
remove more tests of in-place provider changing
commit eb66edbe487f746d8bb6c244973bd86bb782a34a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 6 23:03:25 2011 +0100
do not allow "updating" a provider in place -- now, you must remove and then re-add
commit a7de76399efe41f1040259ade74837d2eb8736c3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 6 20:16:24 2011 +0100
guard provider-addition with a mutex; tighten provider test
* rest.c: Remove some small, now-unnecessary free calls.
* setup.c (add_provider): Guard with new file-global mutex.
* t/provider: Remove ulimit on core size, so that if one is
dumped, we get something that's usable.
Sleep upon failure.
commit d1aa322fb7750dd52d893f73d499f4f0e27f0c06
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 4 12:59:23 2011 +0100
tests: add dynamic-provider test
* t/provider: New file.
commit f0877f6496e4c7f1df2834bb01681161f379a4a0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 23 22:04:41 2010 +0100
remove functions and struct members that are no longer needed
* rest.c (free_ms): Remove function and all uses.
* setup.c (delete_provider): Remove function and sole use.
* setup.h (refcnt): Remove struct member and all uses.
* state_defs.h (cleanup, refcnt): Remove struct members.
Remove all uses.
* replica.c: Remove uses of the above.
commit 7ec84dde8be9610f33c46b3fbd9dc5607f13510b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 23 18:36:38 2010 +0100
garbage-collection fix-up
Do not instruct libraries to free things that they do not allocate,
since they would use the system "free" function.
* setup.c, rest.c: Do not tell g_hash_table_new_full to free anything.
* rest.c (proxy_get_attr): Do not tell MHD to free anything.
commit b98c47711c707bdd25f3d8d08f725321e25924f2
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Dec 22 17:25:19 2010 +0100
use garbage collection
Add -lgc when linking.
* gc-wrap.h: New file, to map malloc, realloc, free,
etc. to GC'd equivalents.
* iwh.h: Include it.
* template.c: Include it.
* Makefile.am (iwhd_LDADD): Add -lgc and -lpthread.
* t/Makefile.am (parser_LDADD): Likewise.
* Makefile.am (iwhd_SOURCES): Add gc-wrap.h.
(TESTS): Move the simpler parser-test to precede all others.
* iwhd.spec.in (BuildRequires): Require gc-devel.
* qparser.y (free_value): Remove function.
* meta.cpp, replica.c: Remove all uses.
* query.h: Remove declaration.
* rest.c (main): Call GC_INIT.
* qparser.y (main) [PARSER_UNIT_TEST]: Likewise.
* mpipe.c: Include unistd.h here, ...
* mpipe.h: ...not here.
Don't include the following, either: fcntl.h, stdlib.h, string.h,
strings.h, sys/stat.h. They were not used, and got in the way
of gc-wrap's redefinitions.
commit cee9beecc22c373814a7a8e56b2860eacd3a8acd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Dec 18 11:08:25 2010 +0100
maint: rename file-scoped global s/main_prov/g_main_prov/, and...
* setup.c (g_main_prov): Rename from main_prov, to avoid confusion
between this file-scoped global and the locals of the same name
in other compilation units.
(g_master_prov): Rename from master_prov, for consistency.
Though this one is truly global...
* setup.h (g_master_prov): Update here,...
* rest.c (g_master_prov): ...and here.
commit 9e059c3626429c46b6990bc1892605fc7f3a5740
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Dec 18 10:47:58 2010 +0100
rename s/_set_primary/_primary/: more RESTful
commit 44ee9894f3b04e3d8baa65b2579d5ca938af6e7e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Dec 17 17:07:47 2010 +0100
new interface: curl -X PUT http://_providers/PROVIDER/_set_primary
* rest.c: Include <errno.h>.
(proxy_set_primary): New function.
(parse_url): Handle new type: URL_PROVIDER_SET_PRIMARY.
* setup.c (set_main_provider): New function.
* setup.h (set_main_provider): Declare it.
* t/basic: Exercise the new functionality.
commit 2e9196c2628eade109cc2bcffb695f3575c7c067
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Dec 17 15:37:14 2010 +0100
get primary provider name via http://host:$port/_providers/_primary
Get a URL of the form http://.../_providers/_primary
to obtain the name of the primary provider.
* rest.c (proxy_primary_prov): Implement it.
(proxy_add_prov): Prohibit addition of a provider
with the reserved name, "_primary".
* t/basic: Exercise the new functionality.
commit a04b76ca821f27765659fa52a96be620fb8fc431
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 18:27:10 2010 +0100
tests: clean up provider-deletion test
commit 8187979f677652a5c617dd435cb6644ad360a9e5
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Dec 15 12:45:28 2010 +0100
use new function, get_main_provider, rather than global "main_prov"
* setup.c (get_main_provider): New function.
(main_prov): Declare static.
* setup.h (main_prov): Remove global decl.
(get_main_provider): Declare.
* rest.c, replica.c: Update all uses of "main_prov".
commit d209a1391ecf2737693f061b8401513599806515
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Dec 11 15:58:18 2010 +0100
reject an attempt to add a provider with "name" parameter
The "name" is specified as part of the URL, not via a parameter.
* rest.c (proxy_add_prov): Handle undesired "name" parameter properly.
* t/basic: Exercise the above.
commit 65df8ac6ad67633bbf42171dd40bfc132368459f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Dec 10 21:11:27 2010 +0100
add provider ref-counting; FIXME: partial impl. (i.e., no incr)
delete_provider: New function.
commit e2ecc0a75108543bcde4e3509202a5ac511ef1fe
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 23 18:17:23 2010 +0100
don't use xstrndup via base_name
* rest.c (url_to_provider_name): Rewrite not to use base_name, since
that function uses xstrndup, which exits on OOM.
commit 530db87554e7c801813dce2f4cbb0108334c21de
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Nov 15 17:50:53 2010 +0100
allow dynamic addition/deletion of providers
* setup.h (struct _provider) [deleted]: New member.
* setup.c (validate_provider): New function.
(json_validate_server): Renamed from validate_server.
(convert_provider): Initialize new "deleted" member.
(add_provider, find_provider): New functions.
Declare new functions.
* rest.c (prov_list_generator): Don't list a provider that
is marked as deleted.
(url_to_provider_name): New function.
(proxy_delete_prov, proxy_add_prov): New functions.
(my_rules): Add corresponding entries in this table.
* t/basic: Add minimal tests of new functionality.
commit 5685b973f295453871778372a6d1bad9f1b75750
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Jan 10 18:51:20 2011 +0100
fix an unchecked strdup
commit 186b8eca47f80c8cb97c429bcdb7594028880fa2
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Feb 7 19:34:57 2011 +0100
build: update gnulib submodule to latest
commit 114b3cb856c58f24af540c6e9153f42e833c67d2
Author: Pete Zaitcev <zaitcev(a)redhat.com>
Date: Fri Feb 4 16:53:26 2011 -0700
avoid hang when creating an object in non-existing bucket
This hang occurs when doing something like the following, without
creating "templates" first:
echo hello | curl -T - http://lembas:9090/templates/my_file
This bug appears to have been introduced due to an incomplete
change during the cons_error/cons_init_error split.
* mpipe.c (pipe_cons_siginit): Use cons_init_done, not cons_init.
(pipe_prod_wait_init): Use cons_init_error, not cons_init.
* t/basic: add test for hang-no-parent bug
commit f22452b530a85b36a50cc8f7dc9fb9ea8387389b
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Feb 4 17:33:30 2011 +0100
don't segfault on a simple query
* qparser.y (free_value): Don't free v->resolved.
(string_value): Disable caching of v->resolved.
* rest.c (proxy_query): Don't free ms here. It's still in use.
* t/basic: Add test case to trigger crash reported by Steve Loranz.
commit 6d0ab2da3a3fb3c92dde97a4072a317a8a63bba3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 25 11:12:05 2011 +0100
tests: reenable excluded gnulib test; run gnulib-tests first
* bootstrap.conf: Don't disable malloca-test. It has been fixed
so it is no longer so slow.
* Makefile.am (SUBDIRS): Run gnulib-tests before ours,
so the results of ours aren't displaced as gnulib's scroll by.
commit ad4a71c5af22dd71c159181b37e63fc9ff16a31b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Feb 4 09:17:17 2011 +0100
build: update gnulib submodule to latest
commit 1bd814bab002ff719896c7ce6717f5758a73791d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Feb 4 09:17:01 2011 +0100
maint: update files copied from gnulib
* t/init.sh: Update from gnulib.
* bootstrap: Likewise.
commit 923ba16951c01909de4ef456baf9bc410844f055
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 11 16:03:04 2011 +0100
build: update gnulib submodule to latest
commit 27463b0ae9a121f196035a016e81039b2fd6d66e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Jan 6 20:20:33 2011 +0100
maint: update copyright year ranges to include 2011
Run "make update-copyright", so "make syntax-check" works in 2011.
Also run this to handle the Red Hat ones:
git ls-files |UPDATE_COPYRIGHT_HOLDER='Red Hat, Inc.' \
UPDATE_COPYRIGHT_USE_INTERVALS=1 \
UPDATE_COPYRIGHT_MAX_LINE_LENGTH=79 xargs gnulib/build-aux/update-copyright
commit e61482884574cd72831bbfd2935657ef34893b6e
Author: Chris Lalancette <clalance(a)redhat.com>
Date: Tue Jan 4 17:50:57 2011 +0100
maint: add a comment: BuildRequires vs. mongodb-server
* iwhd.spec.in: Add a comment about why iwhd "BuildRequires"
mongodb-server.
commit b1cf0514518d23e2c347a63eae971b31f567bed7
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 4 17:02:05 2011 +0100
revert "build: iwhd "Requires" mongodb-server (rather than BuildRequires)"
This reverts commit 1912f449225f380f82968780ccffadfab156ef66.
The use of mongodb-server in "make check" counts as a build
requirement. Pointed out by Pete Zaitcev.
commit 1912f449225f380f82968780ccffadfab156ef66
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Jan 4 11:45:29 2011 +0100
build: iwhd "Requires" mongodb-server (rather than BuildRequires)
* iwhd.spec.in (Requires): mongodb-server is a run-time
requirement, not a build requirement.
commit 4d64b213464a20f4c28921b3ed68c0240d9c99dd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 16:55:54 2010 +0100
build: make the "rpm" rule work once again
* Makefile.am (rpm): Accommodate a version number string like
"0.0.273-1621", that contains a hyphen: s/-/./
(iwhd.spec): Depend on Makefile, not Makefile.am, so that
a version number change provokes an update.
commit 162167233c1f92809060154dbed4be3146c5f504
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 13:41:29 2010 +0100
maint: make autogen.sh invoke bootstrap
* autogen.sh: Invoke bootstrap.
commit 7ffa21ac39d711bfab5da7b847445c1b5db3e9e8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 12:23:25 2010 +0100
maint: avoid const-related warnings
* auto.c: Impossible to avoid without casts. Add const attributes
and then add two casts.
* setup.c (dup_json_string): Add const attribute to each parameter.
commit c4597f7fc4990f0dc4dc0ae6c5be116e9ea1dd8b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 12:20:55 2010 +0100
maint: avoid warning about "noreturn" function
* iwh.h (ATTRIBUTE_NORETURN): Define
* rest.c (usage): Use it here, to avoid a warning.
commit 8dfe3f897c05855f3d39fd30e7e047ce952b5531
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 12:18:10 2010 +0100
maint: avoid theoretical risk of signed overflow
* rest.c (main): Declare "autostart" as bool and set to true or false
rather than incrementing or setting to 0.
commit c059c0e95df17316d60bd70974f3b029602738bd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 12:13:23 2010 +0100
maint: accommodate new, stricter warnings
* rest.c (struct): Avoid const-related warnings.
commit 1fbd0757294489f70137770e44ab20fdb4f467c4
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 11:58:46 2010 +0100
maint: declare follow_link in replica.h
* replica.h (follow_link): Declare.
commit 371b2b03a156deb7c0190b4a463eaf164f609bbb
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 11:56:46 2010 +0100
maint: declare cf_put_child to be static
* backend.c (cf_put_child): Make "static" to avoid a warning.
commit 90fdaa34458e2ae186ff5f2820c6fa087fb10951
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 11:54:16 2010 +0100
maint: remove unused definitions
* replica.c (ADDR_SIZE): Remove unused definition.
(SVC_ACC_SIZE, HEADER_SIZE): Likewise.
* setup.c (NEED_NONE): Likewise.
commit 83aef2f0bce333bff50105af9fd86562ef112275
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 11:52:30 2010 +0100
do not perform arithmetic on void* pointers
* backend.c (http_put_cons, fs_put_child): Cast to (char*) first.
* rest.c (proxy_get_cons): Likewise.
commit 5876b06bee5a9412ba372f0d72b4bb629abacc6a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Dec 16 11:39:41 2010 +0100
build: support configure-time --enable-gcc-warnings option
* configure.ac: Handle new --enable-gcc-warnings option.
* Makefile.am (AM_CFLAGS): Use configure-determined
$(WARN_CFLAGS) $(WERROR_CFLAGS), rather than hard-coded ones.
commit b25dcd02204c034521a47bd0678a591004638868
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Dec 15 16:01:07 2010 +0100
clean-up: don't define xmalloc, xstrdup, etc. -- use gnulib's definitions
* qparser.y [PARSER_UNIT_TEST]: Include "xalloc.h".
(xalloc_die, xmalloc, xmemdup, xstrdup): Remove definitions.
* t/Makefile.am (parser_CPPFLAGS): Append for xalloc.h.
(parser_LDADD): Link with gnulib.
commit 652d185244963c54f102c1b106fab3a5bb46259e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Dec 15 08:23:52 2010 +0100
bootstrap.conf: mention bison
With this, an attempt to build from cloned-sources without "bison"
will evoke a nice error from bootstrap. Without this, you'd get
a relatively cryptic diagnostic much further down the build road.
commit 25a002a745a999ea9a4e58def2bf1298737d8643
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 16:35:03 2010 +0200
skip the malloca-tests module; too slow
commit 1c518138da2b987619c76eac0eb3020894e1becb
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 16:27:19 2010 +0200
avoid redundant const -- move it to the right of "*"
commit 2af22e15f0a95fb1e9b32557a4097688794fc532
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 16:26:03 2010 +0200
disable sc_cast_of_argument_to_free check
commit 0d9cd613e081776874dabcf298b450dde95c093e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 18:21:08 2010 +0200
remove GNUmakefile -- now it is pulled from gnulib
commit 75c31581176c3d405fb075e997f80a4f51f1b00a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 15:47:43 2010 +0200
use gnulib's closeout module
commit 8cf7615fec61f889635255bf8158c22ba64dc392
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 15:06:56 2010 +0200
use first gnulib module: progname
* cfg.mk: Enable program_name check.
* Makefile.am (iwhd_CPPFLAGS): Use -I$(top_srcdir)/lib.
(iwhd_LDADD): Link with gnulib, aka lib/libiwhd.a.
* rest.c (main): Use set_program_name.
commit c615729c5fb2877fea6e0383fb2bda29db570f23
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:59:28 2010 +0200
enable no-blank-lines-at-EOF rule
commit 693fc8a86fc54bd2f29c31ab2cefa1d86add8767
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:57:45 2010 +0200
enable "test -a/-o" prohibition
commit 839172bc2f35f8ba37ab59bdd25ea482a6e86c15
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:56:30 2010 +0200
enable "echo -n/-e" prohibition
commit 72b4c4ff6037f506c78b839a48aa88a73fad5a95
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:54:56 2010 +0200
enable m4 quoting check
commit c8d0be9f4c70775fca231996d1420e717628cc6c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:50:16 2010 +0200
maint: remove unused #include directives spotted by "make syntax-check"
commit 2bbe861de39bc6e0e3220fefcbbb40dbeaa84e1d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 14:41:02 2010 +0200
maint: enable no-trailing blanks prohibition
commit 88b1d8ae3d489a1df5d687a3422627d8cfc31f7b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 30 16:44:11 2010 +0200
Use gnulib
Add gnulib as a submodule.
Move ax/*.m4 to m4/*.
* bootstrap: New file. Copied from gnulib.
Fill in blanks in bootstrap.conf template.
New file, cfg.mk, is very similar to what's used in other projects.
* configure.ac: use gl_INIT and gl_EARLY, per documentation.
Generate a Makefile for each of the new lib/ and gnulib-tests/
directories.
* Makefile.am (SUBDIRS): Update so we build in lib before "."
and so that "make check" runs the tests in gnulib-tests.
* rest.c: Do not hard-code 1.0 as version number.
* .gitignore: Ignore gnulib-tests.
commit df00ac3954bf66d4f6026f6dc787514f6e76a96c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Dec 10 21:34:54 2010 +0100
tests: wait for up to 5 seconds for start-up, not just 3s
* t/replication: When running via valgrind wrappers,
3 seconds was not enough.
* t/basic: Likewise.
commit 2f9bb3adf0e201fd00c1b1abbd95dee892a67995
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Dec 14 18:09:20 2010 +0100
use strchr and strrchr, not index or rindex
* backend.c (curl_cache_child): s/index/strchr/
* rest.c (validate_url, register_image): Likewise and s/rindex/strrchr/
commit d0ceff3457d861291f2139af3cad17fb423cddee
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Dec 15 18:47:09 2010 +0100
fix trivial comment typo
commit 604fc112cbda1ab89d8c94554055e447ab225c6d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Dec 11 15:41:52 2010 +0100
plug a leak
rest.c (proxy_put_data): Plug a leak exposed by reserved-object-name
testing. Without this, "make -C t check TESTS=basic" with valgrind-
wrapped iwhd would leak one "ms" object per reserved word test here:
for obj_name in _default _new _policy _query; do
echo reserved-obj-name | curl -f -T - $bucket/$obj_name \
2> bad_oname.err
...
2,240 bytes in 4 blocks are definitely lost in loss record 311 of 316
at 0x4A04896: calloc (vg_replace_malloc.c:418)
by 0x42EA18: access_handler (rest.c:1967)
by 0x4E405F8: ??? (in /usr/lib64/libmicrohttpd.so.10.0.0)
by 0x4E4105F: MHD_connection_handle_idle (in /usr/lib64/libmicrohttpd.so.10.0.0)
by 0x4E43986: ??? (in /usr/lib64/libmicrohttpd.so.10.0.0)
by 0x3F49806D5A: start_thread (pthread_create.c:301)
by 0x3F48CE4AAC: clone (clone.S:115)
commit ddf6a9609e1663eb5444232b77446ae2c5b1da0a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Dec 11 17:43:35 2010 +0100
plug leaks-after-OOM-failure
* rest.c (proxy_object_post, proxy_update_prov): Call free_ms
before returning.
commit 56bccd9c5ee7dfce955b38519f89b34b5a14e9ec
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Dec 7 13:36:09 2010 +0100
avoid warnings from new/better microhttpd.h signature for...
MHD_ContentReaderCallback, e.g.,
/usr/include/microhttpd.h:1138:22: note: expected 'MHD_ContentReaderCallback'
but argument is of type 'int (*)(void *, uint64_t, char *, int)'
The new signature returns ssize_t (not int) and its last parameter is
size_t, not int.
commit efe01cecf78c8e36f7f1fef6f45a1d3a2547454f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Nov 30 18:52:44 2010 +0100
build: don't check for or use -lcrypto; it was not used
* configure.ac: Don't check for -lcrypto; we weren't using it.
* Makefile.am (iwhd_LDADD): Don't use $(CRYPTO_LIB).
commit e83f9b64c1cd304f50dfdd392963f7a18d78b577
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Thu Nov 25 15:29:23 2010 -0700
switch to using installed ec2 tools
Amazon and RPMfusion package perfectly usable ec2-ami-tools and
ec2-api-tools, for Fedora 13 and 14 at least. Let's use them and
drop account-specific environment variables. But be careful to permit
going back, in case (e.g. running on Fedora 15 Rawhide).
Also, document the dc-register-image's parameters.
commit d24b2ce226447b4191ea1fa6d0288b65adc496ff
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Thu Nov 18 08:37:30 2010 -0700
error to stderr
This is useless in production, since backend.c dups the same pipe to both
stderr and stdout. However it is marginally useful in debugging if they
are redirected by a developer.
OK, I admit it: this feels more "correct".
commit 464741d6589ad92dd4086ad04c43199ed8e1801c
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Thu Nov 18 08:36:14 2010 -0700
exit if mkdir fails
One fine way to have this fail is to use a back-end other than fs.
To be fixed for real later, just stub it out for now to prevent
a bunch of a priori incorrect operations that follow.
commit 1478b5d5011e213aa1ecc2549a5bc52e333c8bbd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Nov 10 19:28:06 2010 +0100
GNUmakefile: replace leading spaces with TABs
commit cc80a83104880b098979a27d594734c916497c92
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Nov 5 18:33:54 2010 +0100
convert reserved attribute names to have a leading underscore prefix
Before, they were date, etag, loc, key, bucket, and
now they are _date, _etag, _loc, _key, _bucket.
Add the accidentally omitted "_size" to the new list.
commit a1e1c061a29a840288a7f072afea1a743c45ee7b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Nov 4 19:37:14 2010 +0100
prohibit creation of a bucket with one of the reserved names
commit 30cfcfe2d708e11f027b3d8f3303680ee15b8e65
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Nov 4 16:29:09 2010 +0100
_policy is a reserved object name; reject it, just like the others
* rest.c (reserved_name): Add "_policy" to the list of reserved
object names. Alphabetize.
(reserved_attr): Alphabetize.
Mark each array and members as const and propagate that to the
sole interface, ...
(is_reserved): More const.
* t/basic: Exercise the code that rejects reserved object names.
commit f0c9b98d7c02a01b44e550db62088acb23e5655e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 20:16:00 2010 +0200
s3_register: correct a diagnostic: s/key/secret/
commit 2b25f2693c1cbc2f85be3fd7a152eabff6ae26bb
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 25 19:36:13 2010 +0200
add a reminder not to hard-code /tmp/iwtmp.XXXXXX
We should be able to specify some other directory.
Also add a "const".
commit f74043a7e17d49b1f27e166d88ad8d09e9aafd2a
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Fri Oct 22 15:32:56 2010 -0600
Bump testing timeouts
I have a slow laptop that needs this. Lots of disk activity when testing,
looks like Mongo does not like ext3 much, at least on Fedora 15.
Note that it's preferable for the framework timeout to be greater than
the autostart's own timeout. This way if autostart times out, it kills
mongo that it started, and usually succeeds. But if we let framework
kill things, mongo sometimes escapes.
Also it's weird, but basic usually succeeds quickly enough, so no bump
there - only bump auto and replication tests.
commit b992b5231f12c76a9c33e9cd777a9e1863489d00
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Fri Oct 22 15:32:03 2010 -0600
Lock auto.c to IPv4
As it turned out Mongo manages to listen on IPv4 only even if the
system supports IPv6. The autostart tries to connect over IPv6,
depending which address is returned first, and then fails.
The fix is simply to use IPv4 always, at least for loopback.
commit 39c17c133e63a5c9b339da577a09e0a77babcf9b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 22 16:11:28 2010 +0200
parser: plug more leaks
* qparser.y (parse): Don't leak each expression tree.
(free_value): Handle v == &invalid.
Omit debug printf -- it would cause test failure, now that
we're using free_value more.
(main) [PARSER_UNIT_TEST]: Free "expr".
commit 0cbc1d76117be554a5d60f22bc1883fe9ee3de37
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 12:41:46 2010 -0400
Change test-file names to be descriptive, avoid dups.
commit 2261a70baa183c94dde736e2636c8c00c8f171e8
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 11:51:06 2010 -0400
Add test for truncation when overwriting an object.
Also added test-file-usage info in t/basic and t/replication.
commit 56c3693765e0cdedde5a52b13063470836fb7215
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 11:33:44 2010 -0400
Add test for re-replication when an attribute changes.
commit 7520862b39704ab36ae8ff478a032c3398ef12f5
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 11:24:20 2010 -0400
Fixed X-redhat-role check.
rest.c: validate_put fails if hdr present on master
t/basic: new test for PUT rejected due to role
t/replication: added missing -m to downstream iwhd
commit e2d933c6e5e1d39edc0664d4c21658148e437125
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 21 08:41:59 2010 +0200
plug error path leak
commit f4d05852a6185b36660f5feee44cdb82ccba7b32
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 21 09:08:39 2010 +0200
reduce scope of global to be file-only
* qparser.y (hacked_links): Declare to be "static const".
commit c98f5c1f712ca5b35cbaef83d2ea47015687e68c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 21:36:37 2010 +0200
parser: move x*alloc functions into #if-unit-test block where used
* qparser.y (xalloc_die, xmalloc, xmemdup, xstrdup): Move functions...
[#if PARSER_UNIT_TEST]: ... into this #if-block, since now they're
used only there.
commit e6a29d3e5890a27c8f3afae96b8d5a2cf6cbaed6
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 21:34:08 2010 +0200
parser: remove dead code
* qparser.y (xrealloc): Remove if-0'd function.
commit 064e3f485714996a9a4c85acd32dd514f4481fd6
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 21:33:29 2010 +0200
parser: handle OOM gracefully
Before, a failed malloc or strdup would lead to a NULL dereference.
* qparser.y (invalid): New file-scoped global.
(make_number): Never return NULL. Instead, return &invalid.
(make_string): Likewise.
(make_tree): If an input has type T_INVALID, return it right away.
Upon allocation failure, return &invalid.
(make_comp, make_link): Likewise.
(make_link, make_string): Avoid unnecessary strdup, now that
lexed tokens are malloc'd.
Also, add some per-function comments.
commit 2925ba08e51d0fb17df600da56c70e20627ab8e6
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 10:35:51 2010 -0400
Add unlink/O_EXCL in case new file is shorter than what's there.
commit 70765c12c21e6f433d00bae5ea64f377f6f28f03
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 10:33:02 2010 -0400
Add X-redhat-role header when replicating to ourselves.
commit 26af6493b707f493676f3151557a04c25dd69a8e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 21 10:23:33 2010 -0400
Sprintf/snprintf cleanup.
commit d30160123fa4665414c490461ac55493406c150e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 20 15:22:04 2010 -0400
Fix re-replication on attribute change.
Problem #1 was that the back-end code sometimes used ms->url instead of
ms->bucket and ms->key, causing the other end to see the URL for the
attribute that was changed instead of the URL for the file being
re-replicated. Problem #2 was a memory leak when we re-use ms->pipe
in the replica module.
backend.c: fix functional error #1
rest.c fix leak #2 by freeing ms->pipe.data_ptr in proxy_put_attr
commit 9f30bdb8086d10b45b27ee8d582e81615bf28b22
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 12:03:30 2010 +0200
query parser: avoid 25 reduce/reduce conflicts
* qparser.y (T_SPACE): Remove token and all related rules.
* qlexer.l: Ignore white space rather than returning T_SPACE.
* t/parser-test: Adjust sole failing test: "$ a" was rejected before,
due to ignored white space; now it's interpreted as "$a".
Add one more: "$.a" that does still fail.
commit c7748e96024d9a845f8f7fbd0305a72a1f8bc077
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 11:37:18 2010 +0200
simplify parser: <, >, >=, <=
commit b9c444677501e24e5aa1b2d10d58b8a99faf41fd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 11:34:00 2010 +0200
simplify parser: T_EQ
commit cd22b27d3fc97df06f7e5917f958d3a5660627fe
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 11:32:59 2010 +0200
simplify parser: T_NE
commit 406cc793d1241acbc0c4dcb6bd070209b4289507
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 11:14:53 2010 +0200
mark more unused parameters
commit ded3c1df9c18a30ce4794b12ab7d82297a9e6d2e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 09:43:03 2010 +0200
remove final "static" state variable from the parser
* qparser.y: Remove "syntax_error" decl.
(yyerror): Adjust. Make this function static, too.
<policy>: Adjust the sole rule that used it.
commit 3c5b2840f6ef9d562ef12299f1cd6d7442a3f1a0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 10:20:33 2010 +0200
mark unused parameters as such
commit 0c4fa8c07b57aa79f89a371231d98b01162d3e12
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 20 09:25:33 2010 +0200
remove decls of unused variables
commit 398c516c99ae513d259767af4b6e91701aeec2ad
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 18:27:52 2010 +0200
automatically generate man page
* configure.ac (AC_CONFIG_FILES): Add man/Makefile.
Check for help2man.
* Makefile.am (SUBDIRS): Add "." and man.
* man/Makefile.am: New file.
* .gitignore: Ignore new artifacts and anchor patterns.
* iwhd.spec.in (%files): Add man page file name.
commit 3a8570e05e1e906435c97f456150ec8683b35c91
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 19 21:10:49 2010 -0400
Fix two pipe-initialization races that occur in replication.
Turns out there were two race conditions, either of which could lead to
a replication task hanging. In the first, the consumer thread could
potentially run before the pipe_shared structure was re-initialized in
the producer. In the second, the producer could post the first chunk
before the consumer initialized its pipe_private, so the sequence
numbers would be off. That's kind of ironic, since the sequence number
isn't strictly necessary and is only there to guard against a whole
different class of synchronization problems (for which purpose I think
it should remain). Here's a brief summary.
* replica.c: moved re-initialization of pipe_shared to be before thread
start (race #1)
* mpipe.c: changed initialization of pp->sequence in pipe_init_private
(race #2)
* iwh.h: added PID to DPRINTF to distinguish parent/child output
* backend.c: fixed bytes==0 behavior in fs_put_child (unrelated)
* rest.c: fixed pipe_prod_wait_init check in proxy_put_data (unrelated)
commit e8e1e84c9af5996630f65b9bf0b9dbe1108092e9
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 19 20:59:33 2010 -0400
Undo accidental reversal of wait_for_repl return values.
commit 8e27a1a5623d6fe674ebcaefa5ae43cae372873f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 7 13:44:31 2010 +0200
add iwhd.spec.in and Makefile rules
With these, you can now run "make rpm" to invoke rpmbuild
on a just-created tarball.
commit 0317fd51b2994b9c512122ce97092fd3d0252ffc
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 16:36:45 2010 +0200
avoid NULL deref on failed strdup
* qlexer.l: Don't dereference NULL on failed strdup.
commit d28494103e69b75ca5a242b09eb761570d15f930
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 16:24:39 2010 +0200
xrealloc was not used; #if-0 it out
commit 2d0b629e6cf361f18e660df7419c66ae61c56355
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 14:02:33 2010 +0200
now that lexer calls strdup, free those strings
commit 136c20fc5c20bc053f86c643d2863d8ceb116dac
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 12:37:57 2010 +0200
qlexer.l: rewrite not to need static var, at_eof.
commit 7a1642252d00263da1b36237661fb310f9d9cee7
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 12:31:33 2010 +0200
handle yylex_init failure
commit 8da2bda33280f84f2a209be74f3cb4abd9798ef9
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 12:20:45 2010 +0200
pure, almost
commit 61be1a9c7d0e2756613470fb82e285c980635950
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 11:16:05 2010 +0200
add beginnings of %union support
commit 319d8c1f35ff61956c5c319976b81fed70b24267
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 11:01:54 2010 +0200
query.h: use struct value_t (same name as typedef)
This is required to allow bison to parse the incoming %union decl.
commit cc926cc887f194d8ec7c97e7905baa669fff185e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 10:55:19 2010 +0200
add missing #define part of double-inclusion guard
commit c366f6de5cb002e3306197e8ef2249b3120c635b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 10:16:44 2010 +0200
lex && and || as tokens, not "&" and "|"
There is no point in allowing spaces between the bytes
of these operators.
commit 8d267f6f44e8c64623f66dfbb73673297c2514d5
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Oct 15 12:01:20 2010 -0400
Add query.h comments back in.
commit b3302bb0a5b82689d8c890cc703fd0efc55512b8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 15 15:07:19 2010 +0200
build: ensure we use only Flex
commit 43e90569e516a77ed71bc2d568e8a25e2e1bc15d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 15 14:24:21 2010 +0200
qlexer.l: simplify
* qlexer.l: Include <config.h> first.
Use a few %options.
Use <<EOF>> rather than yywrap.
commit 172ce552307317456a644868ad02aba5b3528ba8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 15 13:05:44 2010 +0200
avoid undefined-yydecl warning
commit 66864937990bf7de81e6e5e2fba6ebf521f109c3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 18:00:11 2010 +0200
use literals in bison
commit e1f97c75f4afa180643691324fa0552877fd8454
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 17:01:43 2010 +0200
static and const
commit cddd16da84f4beb975f9e96546ea7ad1a6557fb8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:43:06 2010 +0200
cast away "const" on free argument
commit 31e05f1f8045bd38d47eeacfd09a4c27895b10db
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:41:36 2010 +0200
qparser.y: Include <ctype.h> for use of isdigit
commit 90c6359047186d03e04a4e73e99d0c935e40be1a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:16:24 2010 +0200
apply Jeff's link-following change
commit 41f6621794a607e2a8a5940d6fb2be22746d2cf1
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:07:33 2010 +0200
apply Jeff's leak-fixing patch from master (manually merge 3 failed hunks)
commit 9d0990a469ada32886e0c09e98da498a5106b2fd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:02:08 2010 +0200
parse-test now passes
commit 19f9c826b6a2e6ad5dc51f1e1c4030f1eac82553
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 15 14:50:59 2010 +0200
build: rearrange things to use Bison/Flex; adapt; clean up
commit 5ed3c7cb77b0d921759f9407dd17dbb9b2b54ba3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 14 16:54:58 2010 +0200
remove query.leg and query.c.diff
* query.c.diff: Remove file.
* query.leg: Likewise.
commit fd0cbe4410ec2b1ad831e418b472cc46fd4cacaf
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 12 17:18:16 2010 +0200
rewrite query parser using Bison, not peg/leg
* qparser.y: New file.
* qparser.l: New file.
commit 9ca9c3d6bdd3d210b5c58fca4bc9c63aacbea78d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 19:24:08 2010 +0200
tests: don't clutter regular output with expected diagnostics
* t/init.cfg (wait_for): Emit common diagnostic to log, not tty.
commit d003a5d273b985ea3135cbd34c139191d394d8ac
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 19:02:08 2010 +0200
clean-up: move file-scoped global into "main"
* rest.c (main): Move "autostart" into main.
commit 5a88451cf5e0e69635a9dc4eacbb7f2606256fcc
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 16:52:17 2010 +0200
tests: avoid using a temp file in wait_for_repl utility
* t/replication (wait_for_repl): Use a variable rather than a
temporary file.
commit 32a041e25d2c1c9d6e4c353ce8db6649aed62954
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 16:50:57 2010 +0200
tests: wait_for: report how long we waited, or that timeout expired
* t/init.cfg (wait_for): Tell how long we wait.
commit 5372c8c2f472846e480b44c498aae3177038949a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 16:17:13 2010 +0200
fflush debugging output when writing to stdout
* iwh.h (DPRINTF): Call fflush.
commit a10711d0124f8130ea0339dbc34d0bfd06cbb815
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 19 16:11:45 2010 +0200
replica: don't say we're "deleting" when creating a bucket
* replica.c: Include "replica.h".
Correct a misleading debug print statement.
commit 5f3562c3efe50314a708094b19641ba282b35b6a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 18 20:23:29 2010 +0200
init.sh: don't comment out cleanup-handling rm -rf
commit fc5ba187804f575cd4272d77ec54d0814c9fa6a9
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Oct 18 13:45:48 2010 -0400
Unify backend/replica modules, support dynamic config.
Now rest.c and replica.c use only backend.c functions to get/put/delete
files and create buckets, without knowledge of or regard for what types
of back ends are involved. In addition:
unused fields/globals (e.g. s3mode, proxy_*) have been removed;
headers have been tweaked a bit;
reference counting on my_state structures;
fixed a bug when re-replicating a file that already exists.
commit cfa0553caaeea30e5b542ca9f91b69244d07d709
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Sat Oct 16 17:17:44 2010 -0600
add static to parse_config_inner
This suppresses a warning about missing prototype.
commit 57bfee81c42c26eac3299404738bdd06bed5f4f8
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Sat Oct 16 16:42:12 2010 -0600
switch to atexit(auto_stop)
The main impetus for this change is that query.peg has an error() that
can exit. However, we cannot add an explicit auto_stop in front of it,
because the parser is also linked into tests, which do not have autostart
code and thus throw a link error.
Secondarily, since atexit() is only called when the start of Mongo is
assured, we do not really need checking for PID being valid or any other
scheme to disable auto_stop in the child, etc. We're not changing this
for now, but maybe later.
commit d82854a5c8cbc35897fe931376cff1e7f446c953
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 7 14:02:08 2010 +0200
avoid warning about signed/unsigned comparison
commit c947ab61da94a8f85844bc85f061105aa1c6e1b4
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Oct 7 14:01:29 2010 +0200
build: avoid warning about unused parameter
commit 7fb8264a528a19fd999dc4626121c2d6427d437d
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 13 16:08:39 2010 -0400
Add link-following implementation (not just syntax).
Also fixed parser main() so that mainline can pass tests modified to suit
an unposted version.
commit 8f3bc25341e2a212e3497028d59ff6a5f3527445
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 13 21:30:12 2010 +0200
tests: add missing "Exit $fail"
commit bb9ae00fcc2c84440108054ceb0ea33bafda5cf4
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 13 21:22:49 2010 +0200
tests: parser-test: don't ignore "compare" failure
commit 436ce690c6dca730f9fdf6e3cd831d53c48ab474
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 13 14:06:00 2010 -0400
New tests for autostart and "headless operation"
t/Makefile.am: added new "auto" test
t/auto: new
t/basic: added headless test, fixed bug in waiting for mongod to start
t/init.cfg: changed ports we use to avoid the mongo/auto defaults
auto.c: added --pidfilepath so tests can get mongod PID
commit 1b561174b1eca8fe2fca2586313d324d19e08dd9
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 13 15:12:27 2010 -0400
Fix rollup which didn't include renamed/added files.
commit 0f82ac4e7c2cfbfe18a58f89b5543ca8e64290ba
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 13 10:16:03 2010 -0400
Dynamic config, steps 1-3
(rollup of previous patches to keep commits buildable)
Generate provider_t structures at init instead of on demand
Change non-setup code to use provider_t instead of JSON calls
get_provider in rest.c (provider list/update)
get_provider_value in backend.c (EC2 bundling)
get_provider_value in query.leg (policy eval)
many in proxy.c (replication)
Split proxy.c into setup.c and replica.c
Propagate const changes where needed
Reconcile with autostart changes
commit c63dfdc7d21bc0bd221777621a7649552a8aa40f
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 12 12:34:27 2010 -0400
Fix a memory leak in object-attribute evaluation (found by Jim).
commit 491344e55d77c754a8adb70a178bdbef4578675b
Author: Pete Zaitcev <zaitcev(a)yahoo.com>
Date: Tue Oct 12 13:14:51 2010 -0600
[patch repod] add autostart of Mongo
We would like to start the required database instance automatically,
so that it does not need to be configured. Naturally, doing so requires
certain assumptions, in particular where the database is located
(we assume that it is in a subdirectory _db/ of the current directory).
This version of the patch attempts to stop the database that we launched.
commit 982febea642d47a7286ce0dc4602df629c2d8626
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 12 16:53:06 2010 +0200
tests: use -9 only when killing mongod, not iwhd
Otherwise, a valgrind wrapper around iwhd would not generate much
of its results.
commit de1daf159adbe9475f350d6d0d7a3183864bced8
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 7 17:29:18 2010 -0400
Improve behavior in no-metadata-access cases.
Stage 2: proxy_get_data actually does "the right thing" and we now have
a producer/consumer handshake for the GET direction as well as PUT.
commit 39621d061ca5f31f0b2cfdf4acfa124119f4759d
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 7 16:10:48 2010 -0400
Allow simple GETs without metadata access.
First stage: meta.cpp functions handle lack of connection better.
commit e085aab9a4664efad596e4177443c94125fd0aac
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 11 22:50:57 2010 +0200
template: const correctness
* template.c (xml_format, json_format): Declare const.
(xml_obj_header, xml_obj_entry, xml_obj_footer): Likewise.
Adjust all uses of ctx->format accordingly.
* template.h [struct tmpl_format_t] (format): Declare const.
commit c9e88edd5d31c8b3465af9558d424b329d483812
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Oct 11 22:47:39 2010 +0200
query.c.diff: regenerate to avoid offsets
commit f1159957b064f94c07f7c7feca363172f2834e19
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Oct 8 14:36:37 2010 -0400
Fix BsonObject invalid-ref in attribute-listing code.
commit 4d36d7cb21c43eab39f90ac1b949a84abf1a9cea
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Oct 7 13:43:02 2010 -0400
Add comments to parsing/evaluation code.
commit 10c20c718924cdc340cd77f8134a39c27c0027bd
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 6 17:05:12 2010 -0400
Add op=parts API.
template.*: renamed _obj stuff to _list for bucket listings
added new _obj stuff for listings of object parts
state_defs.h: added aquery field for attribute queries
meta.*: added attribute-query functions
rest.c: added code and dispatch for new API
commit af9442a8cc25dd8b0c4df858a9d3543ce3b667e3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 6 10:15:54 2010 +0200
tests: remove seemingly unnecessary "sleep 3"
* t/replication (cleanup_): Don't sleep for 3 seconds.
* t/replication (cleanup_): Kill with -9, to avoid diagnostics
from init.sh's cleanup (rm -rf) process when it takes too long
for mongod to handle the signal.
* t/basic (cleanup_): Likewise.
commit 9d18678eedbf2fd1f95f16a4756654ae6dea86bd
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 6 10:14:31 2010 +0200
tests: factor out range of ports we'll use for mongod instances
* t/init.cfg (mongo_base_port): Define.
* t/replication: Use $mongo_base_port.
* t/basic (m_port): Define/use, as in t/replication.
Remove "FIXME" comments, now that each test starts its own,
independent mongod instance.
commit bcd8e34f8c259e7f32f85ad96cfbc147c1d0697e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 6 11:28:11 2010 -0400
Fix replication test when *not* running in parallel.
commit 764a77b949627d4466a762e307a2fbf8ff8b577c
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Oct 6 09:59:12 2010 +0200
tests: run 2nd test's mongod on a different port for parallel make check
With the addition of this second mongod-running test script,
"make -j2 check" would often fail.
* t/replication: Each test script must run mongod on a different
port so that all may be run in parallel.
commit eca524a8875390971df31e8c6e695638d7feffe6
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 5 13:07:12 2010 -0400
Add replication tests.
commit ed1124e42e3767ea31fc06226ab526e7bf3f4cdc
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 5 13:06:27 2010 -0400
Add rep_count control operation on API root.
commit 6b291d8aaeaac24bf4ac44ef9f83db173ed663fc
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Oct 6 13:17:22 2010 +0200
tests: remove vestigial VERBOSE=yes
* t/Makefile.am: Don't set VERBOSE=yes here.
It's not needed, now that it is set in GNUmakefile.
commit 688f1ee40a27f38bc410f23bfb9e3b2a1feed6a0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 17:07:25 2010 +0200
tests: exercise the parser
* t/Makefile.am: Add rules/code to generate parser.c and to
run the new test.
* t/parser-test: New file. I/O pairs from Jeff Darcy.
commit d64add4185ab48f67c70dff941e0143aa72fe0ae
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Oct 5 14:23:58 2010 -0400
tests: exercise attributes
commit e914cb511e8785deef50a3c711d674ac056a697e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 19:21:46 2010 +0200
fix bug just introduced in repl_worker
* proxy.c (repl_worker): Don't omit "item" parameter in
call to pthread_create. Introduced by me in commit aeb23797.
Spotted by Jeff Darcy.
commit 2f781a55e1966f19e2d8a9ed7fd2958563e3b9d4
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Oct 4 13:30:01 2010 -0400
More basic tests.
Added some through-the-API result checking to cover metadata.
Added a duplicate-bucket test.
commit 5e4e136deec466074117525ba672618d2445dbe1
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Oct 4 11:47:47 2010 -0400
Enhance tests.
Added failure check for "PUT to root" test.
Added "-s -S" to curl commands to kill silly progress bars.
Added rdbuf call in meta.cpp to silence C++ code.
commit 4866f4e6b9f3c966170f349eb6db74d2ff2d1692
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 14:05:56 2010 +0200
build: GNUmakefile: new file, for better output from "make check"
This file is used only via GNU make.
For now, its main purpose is to ensure that VERBOSE=yes
is always set in the environment. That ensures more useful
output from "make check" and that automake-generated code
in Makefile prints more details upon failure.
A tiny bonus: you also get a diagnostic when
running "make" without a configure file.
commit aeb23797cf2e1e8ce6d57ec341235d48abb05e2f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 08:02:00 2010 +0200
don't ignore failed thread creation
* proxy.c (xpthread_create): New macro.
(repl_worker): Use it to diagnose thread creation failure and to
propagate any failure to caller.
commit 64c29da2559a084d3ba74dcdaa03cbd0f4189dee
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Oct 5 07:44:29 2010 +0200
proxy.c: declare functions and file-scoped variables static
commit 6c2fe0153226238cdf342e13333cec65a2b769d0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 16:01:20 2010 +0200
don't ignore write failures
* proxy.c (junk_writer): Don't ignore fwrite failure.
Warn upon fwrite or fflush failure.
* rest.c (main): Don't ignore fflush failure.
Exit upon fflush or preceding printf failure.
commit 9b68844bac11d8d0737c74986ca6add744acd223
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 19:57:52 2010 +0200
declare file-scoped globals "static"; use const, too
commit dded28ea3564271e258d14014dd6089f2bfa7cda
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Oct 1 13:04:44 2010 -0400
Fix various issues in policy syntax.
Made AND/OR properly associative/recursive.
Gave NOT lower precedence than comparisons.
Simplified field/link_field parsing.
Generally improved whitespace handling.
Updated diff to match other changes.
commit b49e121de992338353ccb8427b3e557ef73603cc
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Oct 1 12:11:23 2010 -0400
Fix memory leak, uninit var, syntax
Another ms leak in proxy_get_attr
Uninitialized variable in repl_oget
Whitespace issues in policy syntax
commit 89c0b8cace3fe481792e1380e620ac68b060963a
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Oct 1 12:08:31 2010 -0400
New version reflecting current reality/terminology/etc.
commit 69e5866ad141e535969075fccabcd73f2598a721
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 19:05:25 2010 +0200
maint: adjust copyright on most files: Red Hat, not FSF
commit 08ea30e37e29169d6b0ab9a1395d54262f3319af
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Oct 1 18:51:00 2010 +0200
boiler-plate README files
commit 4b78090d0043367ed7a408aa3f04615c4700f12a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 30 13:58:27 2010 +0200
maint: remove empty/unused ChangeLog file
commit a2bb964aed893e8d4e86fe165244352b2a1746c2
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 29 22:26:22 2010 +0200
tests: add more
commit 8a480375b7d4a48ea3c3d76b6a2b8eb6b202e3ab
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 29 15:13:12 2010 +0200
remove unnecessary casts
commit 94932e139d8d58cab65114d340dcf0c201eab125
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 28 21:03:43 2010 -0400
Fix leak when deleting nonexistent file.
commit 1a876712ef22c1b70dc11401884e2ba30c0c90a9
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 28 14:30:00 2010 -0400
Fix ms leaks when object not found, deleted.
commit 10d05a999d53191bc01e82b6a34840ffefef8d95
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 28 21:31:23 2010 +0200
tests: exercise object deletion
commit 2214d5b9c6adb68baabcaffabc73dd02239096b1
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 28 15:12:33 2010 +0200
plug a nasty leak and 3 others like it
* rest.c (proxy_update_prov): Don't leak the state buffer
allocated in access_handler. I confirmed that this does plug
an actual leak.
(proxy_query, proxy_bucket_post, proxy_object_post):
These are in the same vein, but were fixed by inspection,
since tests don't yet exercise these.
commit 5f1095727aeb9a484c46c3586485e1b89468b06a
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 28 14:07:09 2010 +0200
don't leak json_strings on username/password update
* proxy.c (update_provider): Avoid leaks exposed by new test.
Valgrind reported this:
78 (72 direct, 6 indirect) bytes in 3 blocks are definitely lost in loss record 246 of 317
at 0x4A0515D: malloc (vg_replace_malloc.c:195)
by 0x4C35BD9: json_string_nocheck (in /usr/lib64/libjansson.so.0.2.0)
by 0x420DA7: update_provider (proxy.c:1042)
by 0x425856: proxy_update_prov (rest.c:1461)
by 0x4E3BB38: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3CD27: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3F9F9: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x31BB407760: start_thread (pthread_create.c:301)
by 0x31BA8E151C: clone (clone.S:115)
78 (72 direct, 6 indirect) bytes in 3 blocks are definitely lost in loss record 247 of 317
at 0x4A0515D: malloc (vg_replace_malloc.c:195)
by 0x4C35BD9: json_string_nocheck (in /usr/lib64/libjansson.so.0.2.0)
by 0x420DDA: update_provider (proxy.c:1043)
by 0x425856: proxy_update_prov (rest.c:1461)
by 0x4E3BB38: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3CD27: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3F9F9: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x31BB407760: start_thread (pthread_create.c:301)
by 0x31BA8E151C: clone (clone.S:115)
commit c46547610682271c08d970fb927c83e523491b41
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 28 13:49:32 2010 +0200
tests: exercise basic providers_ functionality
commit bc81e99ff8ea7f910ee48be11c4aca485794ec6f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 28 10:32:45 2010 +0200
don't end "error(..." diagnostic with "\n"
Fix them all by running this command:
git grep -l 'error.*\\n'|xargs perl -pi -e 's/(\berror ?\(.*)\\n"/$1"/'
commit fe98d54da3f5f78667d2ca1a766fd9d8a3f31aa1
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 18:29:35 2010 +0200
tests: don't let ~/.curlrc settings perturb these tests
Quiet the wait_for use of curl.
commit 32c6ca985e228c7db3968a8bdcd4f33006a6f854
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 18:29:35 2010 +0200
tests: check for root xml and json
Adjust test not to "cd FS".
commit ca56bd601ea8e0544c1830ac0ba95c7c76332b05
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 18:24:11 2010 +0200
tests: remove unnecessary "kill..." stmt, now it's done via trap
commit 9724d076a990c61f39578f1ff828507e0ba195ad
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 16:57:57 2010 +0200
don't deref NULL on fdopen failure; plug a FILE-sized leak
* proxy.c (proxy_repl_cons, proxy_repl_prod): Handle fdopen failure
and don't leak a FILE buffer.
commit a65eb158f5ade6b5e1dda87ba0495612a13d5f36
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 18:24:11 2010 +0200
tests: specify "path", now that it's required
Adjust test, now that buckets are created under ./FS
commit ab3e12cc1a3946af2cc1e0f9bde39d73f01c4c24
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Sep 27 20:35:03 2010 -0400
Just use chdir instead of chroot+chdir, per email convo.
commit 2b2804d90e7837718303b3e119f00d5e00eb9217
Merge: 53982480742f385905cb80eaccd06722d0f202c4 00fb9bc78433a89a3869c274e959d8728c6e1b37
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Sep 27 20:24:23 2010 -0400
Merge branch 'master' of ssh://orcz/srv/git/iwhd
commit 00fb9bc78433a89a3869c274e959d8728c6e1b37
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 16:12:27 2010 +0200
rest.c: plug a leak
* rest.c (proxy_create_bucket): Free "ms" here, too, so as not to
leak the state buffer allocated and dispatched in access_handler.
496 bytes in 1 blocks are definitely lost in loss record 270 of 288
at 0x4A0515D: malloc (vg_replace_malloc.c:195)
by 0x42700C: access_handler (rest.c:1603)
by 0x4E3BB38: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3D087: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x4E3F9F9: ??? (in /usr/lib64/libmicrohttpd.so.5.2.1)
by 0x31BB407760: start_thread (pthread_create.c:301)
by 0x31BA8E151C: clone (clone.S:115)
commit 53982480742f385905cb80eaccd06722d0f202c4
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Sep 27 11:17:03 2010 -0400
Require a "path" parameter for filesystem back ends.
This includes checking for existence of the parameter, doing a chroot/chdir
in fs_init, and disallowing sneaky "../" escapes. The path can be relative
(handy for development) but you shouldn't be able to traverse out of it.
commit bba237a9d3d15caf36736a28ec0669329bace040
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 15:52:06 2010 +0200
use calloc in place of malloc+memset-0
commit 349f72d5faab412250d83d8c0bca5e7d620c46f3
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 21 20:57:36 2010 +0200
tests: add test framework and first test
commit 94430296b2b7a2c5722e1290fd05e7e494d63baf
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 22 23:19:58 2010 +0200
hoist definition of cmd to remove one more hard-coded "dc-register-image"
commit 4e714bbe7007b5bacecec4dca34dde36fe9c73fc
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Sep 27 11:28:25 2010 +0200
maint: use new iwhd-devel address as bug-reporting addr
* configure.ac (AC_INIT): Now that we have an email address,
use it as the package bug-reporting address.
commit bedf9e129597380eb865de17abe3245d90076158
Author: Jim Meyering <meyering(a)redhat.com>
Date: Sat Sep 25 09:27:44 2010 +0200
avoid printf format abuse; use -Wformat-security; report errno more
A use like "error(0,errno,cfg_file)" would malfunction for a file
name containing a printf %-directive. Using -Wformat-security
will help prevent this.
Upon OOM, always use errno in the diagnostic.
commit d40ae99163994b6995a16505ae590399fa416b1e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 24 11:58:56 2010 -0400
Fix string handling in proxy (replication) module.
Mostly sprintf->snprintf, with checks. Also using constants now for
buffer lengths.
commit cf6f65ebfac1a6c59aae0251896a067bac1220ef
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 24 10:43:54 2010 -0400
Replace fprintf/perror calls with officially mandated error().
commit ba702e788612453e387acd30efedf4ead738eb65
Author: Jim Meyering <jim(a)meyering.net>
Date: Fri Sep 24 15:53:43 2010 +0200
More const changes.
commit 27c25ba8d9cff693b28a67ecdd3862fef5d4e3b6
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 23 14:02:40 2010 -0400
Add replication for bucket-create requests.
Split replicate_namespace_action from replicate_delete.
Modified replicate_delete and (new) replicate_bcreate to use
replicate_namespace_action.
Added repl_worker_bcreate, REPL_BCREATE, plumbing to hook it up.
Fixed uninitialized-URL bug in repl_worker_del.
Fixed memory leak in repl_worker_del.
Added meta_got_copy call in proxy_repl_cons.
commit b6702e9012c1f0d74bf09d9479bc2e4d07235017
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 24 10:25:15 2010 +0200
build: m4-quote use of AC_LANG_PROGRAM to avoid warning from new autoconf
Using autoconf-2.68 would evoke many new warnings like this:
configure.ac:78: warning: AC_LANG_CONFTEST: no AC_LANG_SOURCE call detected in body
../../lib/autoconf/lang.m4:194: AC_LANG_CONFTEST is expanded from...
../../lib/autoconf/general.m4:2591: _AC_COMPILE_IFELSE is expanded from...
../../lib/autoconf/general.m4:2607: AC_COMPILE_IFELSE is expanded from...
../../lib/m4sugar/m4sh.m4:606: AS_IF is expanded from...
../../lib/autoconf/general.m4:2032: AC_CACHE_VAL is expanded from...
../../lib/autoconf/general.m4:2053: AC_CACHE_CHECK is expanded from...
ax/ax_boost_thread.m4:35: AX_BOOST_THREAD is expanded from...
configure.ac:78: the top level
Autoconf was unable to detect the existing use of AC_LANG_SOURCE
because it was underquoted. Fix that.
* ax/ax_boost_system.m4: Quote use of AC_LANG_PROGRAM.
* ax/ax_boost_thread.m4: Likewise.
commit a37a958aa1901ff3cc7d0b79fbe0d8058c4930db
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 24 09:38:05 2010 +0200
build: boost support: don't depend on C++-mangled names
The mangled names were missing from rawhide's libraries,
so all library existence tests would fail.
* configure.ac: Use AX_BOOST_ macros instead.
Don't check for boost's filesystem or program_options libraries.
They were not being used.
* ax/ax_boost_base.m4: New file.
* ax/ax_boost_system.m4: New file.
* ax/ax_boost_thread.m4: New file.
* Makefile.am (ACLOCAL_AMFLAGS): Point aclocal at the new dir, ax/
(iwhd_LDADD): Adjust substituted names to match those defined
by the new AX_ macros.
commit d7ccb4fda632e2707c21198255e08e3369d9bc88
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 23 18:50:22 2010 +0200
configure.ac (AC_INIT): Use iwhd as package name, not image-warehouse.
commit e7293008351aa4018aa565f865d536b133e930e6
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 23 16:24:53 2010 +0200
remove trailing blanks
commit 1ddb0a9455890c6e66e27292d16c8839368018f7
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 23 16:19:13 2010 +0200
Add Copyright comments, and regenerate query.c.diff
commit 713eef0ae9e22c73b02ec33dfd88126f9fe78389
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 22 21:47:06 2010 +0200
FS backend: print better diagnostics
I.e., print program_name: as well as the offending FS object
when possible, not just a syscall name and strerror.
Before: mkdir: File exists
After: iwhd: b4: failed to create directory: File exists
* backend.c (s3_put_child, s3_init_tmpfile, s3_register)
(curl_put_child, fs_get_child, fs_put_child, fs_delete)
(fs_bcreate): Use error() in place of perror/f?printf.
commit df7d24b7af937f09ae12c315c933f5073c8206a4
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 22 15:47:12 2010 -0400
Added _providers to API-root listing. Fixed bug in form-data handling,
which affected calls to attribute-put and query functions. Added debug
output when we can't parse a query string.
commit e45376ee92057a8db6cdc364e2981240d6817097
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 22 20:08:00 2010 +0200
don't declare free'd variables/params to be const
commit c50990b6720b80e31ae13461a2959502d5cb2650
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 22 11:58:45 2010 -0400
More changes suggested by Jim, made by me - const pointers, unsigned ints,
and so on.
commit 0ee197d7faf98e2bdb038cb3aae51bbced7992ad
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 22 08:07:12 2010 +0200
build: drop -Wshadow(for now) and turn off -Wunused
commit 013d172a863ac1d21cd3d48cd5cbf146e0052362
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 21 16:59:56 2010 -0400
More warning suppression, regenerated query.c.diff
commit c2b25713450724aa667de079b3a4ba0ca4c6013c
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 21 14:22:44 2010 -0400
The long-promised type cleanup. Eval always returns 0 or 1. String_value
always returns a string. Other ints are handled mostly in compare (which
is a bit ugly). The unit-test code has been updated to allow testing the
link syntax.
commit 53480dba464f315dba68b682948a086a87896d37
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 16 14:39:14 2010 -0400
First piece of link-following syntax. Still needs type clean-up.
commit f66a5492fb79b975a3c48d172fd209e9d972cbfc
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 21 12:42:30 2010 -0400
Changed previous API-change patch so that old and new bucket-creation
methods use common code, and so that the new (POST to _new) method doesn't
kick in for a regular bucket.
commit 7ce5f783e5249c151fc62d073fc081410db4ee0a
Author: Pete Zaitcev <zaitcev(a)redhat.com>
Date: Tue Sep 21 09:53:03 2010 -0600
Cleanups
I make printouts slightly differ, so that the exact place can be
found easier.
The in_init is not likely to be dropped, so kill TBD.
The TBD for pipe_cons_signal became incorrect since fs backend
reports errors through it (in case volume is full or disc error).
commit 5f1211b28cc8985c4a610269b3a23d3dc57f45c6
Author: Pete Zaitcev <zaitcev(a)redhat.com>
Date: Tue Sep 21 09:52:56 2010 -0600
Implement documented API for _new
This patch does two things:
1. implements the documented way to create buckets with POST
and "name=bucket" in data. The legacy way with PUT and path
is kept for now.
2. checks against creating buckets with reserved names, e.g.
if someone does POST and name=_default. Letting people do that
leads to some fun mischief, so don't.
commit 6826429634d35af3f8c627548edcf5f0152a76e9
Author: Pete Zaitcev <zaitcev(a)redhat.com>
Date: Tue Sep 21 09:52:44 2010 -0600
cure a hang on S3 error
If tabled throws an error when iwhd tries to store an object, iwhd
hangs forever. There was even a "TBD" marker about it.
An easy way to reproduce this is to store the object into a bucket
that does not exist, which is wrong but should not hang iwhd, like so:
curl -T moo.data http://localhost:9090/nobucket/object2
Fix is to introduce a concept of consumer that failed. The number of
failed consumers is accounted as a "floor" in cons_error, so that the
producer and remaining consumers continue to function normally.
This, however, makes a transfer succeed even if all replicas failed
to store the data. So, once transfer is complete, we look not just
at the error code, but also at the error count.
That's the core patch. However, if we just use pipe_cons_signal with
an error flag, the Microhttpd first returns "100 Continue" response,
and then "500 Internal Server Error". The discussion for the
100-continue suggests that it's a bad idea. We should return
any kind of error instead of "100 Continue", or otherwise
accept the data. To change that, I added one more handshake
with pipe_cons_siginit, that allows us to do this.
This was tested with basic bucket creation and object put/get
operations with s3 and fs backends.
commit 05e71fc613490c9a6668b4dfd592cc4192cbe015
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 21 10:57:04 2010 -0400
Make the code buildable again after the last salvo.
commit 1768b3e6a8548f3bea07d40db42385eda567e2ee
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 21 13:33:19 2010 +0200
remove useless 'if'-tests-before-free
* rest.c (proxy_put_data): Remove useless if-before-free.
* query.leg (free_value): Likewise.
* proxy.c (replicate): Likewise.
commit ee5487ed4c8a143b673ef0579d6ff19344a13cdf
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 21 10:53:03 2010 +0200
more const-correctness; and remove an unused decl
Also, remove unused decl, meta_query, and its associated typedef.
commit 1e4e5edaec284bef93a344c17003eca010164b09
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 21 10:36:34 2010 +0200
more const-correctness changes
commit 2bc1f9a56405a04ccffc1969a720eaa9bf64a102
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Sep 16 14:26:00 2010 +0200
remove vestige of -f option
* rest.c (main): Remove vestigial "f:" from getopt format string.
commit 6b2df0aafbf9ef39a994a82fa0d70a83acff5340
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Sep 20 15:47:40 2010 -0400
More verbose output for debugging.
commit f5f8c524dffc7a8ac60bfba0bce2201db07179c7
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Sep 20 15:04:50 2010 -0400
Added more detailed tracking of status via ami-id, switched result parsing
to use regexes, other changes to facilitate those two.
commit ad3a9a12682fed3a8d41775cf3b04889f0513f97
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 17 14:52:32 2010 -0400
Added ami-bkt argument, fixed output-parsing bug.
commit 24742c32a3ba3c84cdeec160d9f8cd7283fd1b98
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 17 10:50:35 2010 -0400
Fix some memory leaks in the new cert/key temp-file code.
commit 425adbf8a6a5a387bd0ed228e6aeb21225cde7cd
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 16 17:31:15 2010 -0400
Take registration information from the request, if available.
commit eba666795095a9ba2aa6debfb1ec3b2215c9a240
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 16 15:30:26 2010 -0400
Set "ami-id" property after registration. Also fixed bug with fetching
non-existent attributes.
commit 5b9a52cb44d32a602fc8498e4aaa3f25ea9523e9
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 14 12:14:41 2010 -0400
Last bits to do AMI bundling/upload/registration entirely through the
warehouse. Now to clean up all those loose ends.
commit 5d6ba6e4dc02acadd6489678e5d273e078edb79b
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Sep 14 10:10:47 2010 -0400
Teach replication module ("proxy.c") how to use a filesystem-backed primary
store. Needed for EC2 bundling.
commit 7bf3497d15f7452c6dabcfa198f9994583e96051
Author: Jim Meyering <jim(a)meyering.net>
Date: Mon Sep 13 14:33:07 2010 +0200
split inadvertently joined declarations; remove trailing spaces
commit 1a0c1c3f97edabde186e08658080de27eb7c9df7
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 16:43:06 2010 -0400
Added code to pass registration requests through one or more CURL/HTTP
proxies to an S3 back end where registration will actually occur. Bucket,
key and next provider name are always provided; kernel and ramdisk IDs will
be propagated if present.
commit 88509a97337156470dafd636ccfbf14fc8ca44fa
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 16:14:49 2010 -0400
More AMI-registration plumbing.
commit 8970f95ffa546f184d392239cd7bcde6770c524f
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 15:08:36 2010 -0400
Specify filesystem mode in config file instead of command line (needed for
AMI-registration work flow).
commit 19729bf5e30f0798399f029f54d489f3497eaca9
Merge: 34553ec469d25eb8b7fa647807090ff5513c028c f15f38c4f971a55b092bc17d0f256deabe9194b0
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 14:28:26 2010 -0400
Merge branch 'ec2' into ec2-2
commit f15f38c4f971a55b092bc17d0f256deabe9194b0
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 13:59:24 2010 -0400
More EC2-registration changes, including back-end register method and
auto-includes.
commit 34553ec469d25eb8b7fa647807090ff5513c028c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 10 19:47:34 2010 +0200
.gitignore: ignore a few more
commit a49b143c4b36a2df2fe0b1be90c7aebc2f4d9e84
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 10 19:30:01 2010 +0200
correct a bug: add ":" after "f" in getopt option spec
Change --fsmode to ---fs-mode (note the three leading hyphens).
That is consistent with the fact that this option is now
internal-only and not documented.
Remove LOC_ID from Usage line, now that that is no longer
supported.
This also corrects a problem (spotted by Jeff Darcy) with my
previous change: it had made it so -f FILE no longer worked.
commit aada69bfcb2f56553ac86a43b9e4ba95bd7c8cff
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 10 18:21:43 2010 +0200
require --config=FILE as a command line option
This eliminates two more stray uses of "repo".
commit 4e72dc54a4b360bba6ca0b964283a31be84e1d2d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Fri Sep 10 17:58:02 2010 +0200
rename tool: repod->iwhd, header: repo.h->iwh.h
* Makefile.am: Adjust accordingly.
* Makefile.old: Remove.
* *.c: Change "repo.h" to "iwh.h".
* .gitignore: Adjust.
commit 44256e8a87d50435fe8335d9b02072060a193b93
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Fri Sep 10 09:40:24 2010 -0400
Added serialization on MongoDB client. Test is to transfer 50 1MB files
simultaneously, would previously fail ~2/3 of the time, can now get through
ten successive runs without error.
commit c1b1926543edc78434c3abc89a688b45e34b2cdf
Merge: 36b576b15251123c0d10f1137dab2978abff5cc1 83de5edc46744db2188e54685d6cda0d8e60168d
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 9 17:02:45 2010 -0400
Merge branch 'master' into ec2
commit 83de5edc46744db2188e54685d6cda0d8e60168d
Author: Jim Meyering <jim(a)meyering.net>
Date: Thu Sep 9 21:48:51 2010 +0200
configure.ac: refer to a better URL for peg
A nit (still on same branch: help-and-name-change):
From f0cfa231cc4333d2141933eede06f3ed2d9e0b1f Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Thu, 9 Sep 2010 21:47:50 +0200
Subject: [PATCH] configure.ac: refer to a better URL for peg
commit 4e08979356432a68839ede52b75468ba780fbb3e
Author: Jim Meyering <jim(a)meyering.net>
Date: Thu Sep 9 20:08:33 2010 +0200
no more -D... options on command line
There are way too many -D options on each compilation command line,
e.g.,
gcc -DPACKAGE_NAME=\"repod\" -DPACKAGE_TARNAME=\"repod\" -DPACKAGE_VERSION=\"1.0\
" -DPACKAGE_STRING=\"repod\ 1.0\" -DPACKAGE_BUGREPORT=\"FIXME(a)example.com\" -DPAC
KAGE_URL=\"\" -DPACKAGE=\"repod\" -DVERSION=\"1.0\" -DSTDC_HEADERS=1 -DHAVE_SYS_T
YPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=
1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHA
VE_FCNTL_H=1 -DHAVE_STDINT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_STRINGS
_H=1 -DHAVE_SYS_TIME_H=1 -DHAVE_UNISTD_H=1 -DHAVE__BOOL=1 -DHAVE_STDBOOL_H=1 -DHA
VE_STDLIB_H=1 -DHAVE_MALLOC=1 -DHAVE_STDLIB_H=1 -DHAVE_REALLOC=1 -DHAVE_GETTIMEOF
DAY=1 -DHAVE_MEMMOVE=1 -DHAVE_MEMSET=1 -DHAVE_STRCASECMP=1 -DHAVE_STRDUP=1 -DHAVE
_STRNDUP=1 -DHAVE_STRTOUL=1 -I. -I.. -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -W -Wall -Wshadow -Wextra -g -O2 -MT repod-template.o -MD -MP -MF .deps/repod-template.Tpo -c -o repod-template.o `test -f 'template.c' || echo '../'`template.c
That makes things hard to read and has a few other problems.
Most projects avoid those problems now by using something like
this to cause autotools to put definitions in "config.h":
AC_CONFIG_HEADERS([config.h:config.hin])
The only caveat is that now, each and every source
file must include config.h first thing. I've done that.
One other required change: I tweaked autogen
just to run autoreconf -i, so that it now runs autoheader, too
(that's the thing that creates the config.hin template).
Here's the patch. It's also on the help-and-name-change branch.
Everything still builds fine and passes make distcheck on F13, of
course.
BTW, with that, now I see this when I run "make":
[and the leg/patching stuff will soon be hidden
just like usual rules]
leg < ./query.leg > query-orig.c-t && mv query-orig.c-t query-orig.c
patch --fuzz=0 query-orig.c ./query.c.diff --output=query.c-t
patching file query-orig.c
mv query.c-t query.c
make all-am
make[1]: Entering directory `/home/j/w/co/repo'
CC repod-query.o
CC repod-template.o
CC repod-backend.o
CC repod-mpipe.o
CC repod-proxy.o
CXXLD repod
make[1]: Leaving directory `/home/j/w/co/repo'
IMHO, the above is a lot more readable and lends itself much
more to spotting new warnings than the overblown -D clutter.
If you need to diagnose something and *want* to see the
actual compiler/linker invocations, run "make V=1".
From 388c8f8a9df800f2a18ec7f86d28e5e01f5853d7 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Thu, 9 Sep 2010 20:04:31 +0200
Subject: [PATCH] build: use config.h; enable some automake features
* autogen.sh: Replace contents: invoke autoreconf -i.
* configure.ac: Use AC_CONFIG_HEADER (aka config.h).
Require automake-1.11.1, since 1.11 had a CVE.
Enable colored test results and parallelized tests.
Make silent rules the default. Use "make V=1" to see
the build commands again.
* backend.c: Include <config.h> first thing.
* meta.cpp: Likewise.
* mpipe.c: Likewise.
* proxy.c: Likewise.
* rest.c: Likewise.
* template.c: Likewise.
commit b533d5007588fc81e3f4f8eaba5b20e126cbfcc0
Author: Jim Meyering <jim(a)meyering.net>
Date: Thu Sep 9 19:47:25 2010 +0200
provide --help, --version, bug-reporting address; normalize
Here are two more patches. Jeff, you can use these, or pull from my
new help-and-name-change branch.
--help and --version are required for just about any package in Fedora.
Exit 0 (not 1) for --help is standard, but diagnosing
bogus options requires a non-zero exit status, hence the
new signature for the usage function.
The next step is to change file names and the few internal uses of "repod",
but obviously best to defer that until you and Pete have no pending
changes to the affected files.
From a21d098e2766438c25d67953a43449cc744fb12d Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Thu, 9 Sep 2010 19:13:39 +0200
Subject: [PATCH 1/2] provide --help, --version, bug-reporting address; normalize
commit 36b576b15251123c0d10f1137dab2978abff5cc1
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 9 15:48:37 2010 -0400
EC2 experiments.
commit e41b1d528f9a717a1238e0ffb6b1585f976f0cc5
Author: Jim Meyering <jim(a)meyering.net>
Date: Thu Sep 9 17:06:51 2010 +0200
build: make ./configure fail if "leg" is not available
* configure.ac: Ensure that the "leg" program is present.
If not, ./configure fails. Suggested by Jeff Darcy.
commit dbbe6ed397f104bd46994a414bbf772482b55e7e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 8 20:12:06 2010 +0200
build: new rules; run leg; apply patch to fix generated code
* Makefile.am (query-orig.c): New rule to run leg.
(query.c): Generate this from query-orig.c and an included DIFF.
Set automake variables so "make distcheck" still works.
* query.c: Remove file, now that we generate it.
* query.c.diff: New file. Fix NULL-deref bugs in generated code.
* .gitignore: Ignore generated files.
commit ffcdb13d7768ec8e7eea805eedf1ff8a3b7952c0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 7 23:58:38 2010 +0200
query: avoid NULL-deref-on-OOM bugs
* query.leg: don't deref NULL on strdup failure
Include <error.h>.
Define new functions: xmalloc, xrealloc, xstrdup, etc.
Use these functions in place of offending malloc, realloc, etc.
* query.c: Regenerate.
commit dc8f752f2a0221aebf5a34382e54b8b5f678f429
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 8 12:51:15 2010 -0400
Up-to-date API docs.
commit ae2c9069e7ebb88d319f61e9792a146ecf05ff44
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 8 12:14:17 2010 -0400
Replace assert (now undefined) with abort. Actually Jim's fix.
commit e9aa5e61be753e98dba5ae9d7345fee2120598c0
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 7 16:09:16 2010 +0200
Makefile.am: use $(...) rather than @...@
They're technically equivalent, but the latter notation
has been deprecated for several years.
commit 08850d117149820216041151d58469d0811d268c
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 7 15:38:38 2010 +0200
configure.ac: avoid m4 under-quoting errors
commit 64a2afa08d17ca3eb0a9e77b289be215ac26b82e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Sep 7 15:16:55 2010 +0200
configure.ac: remove obsolete comment
commit 5349f61b1ff3c813cc6990c3a2c855aafefa0ece
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 2 11:51:56 2010 -0400
Deleted old (higher level) local-filesystem interface.
commit b23faa4d19347d296e17ca776760b11132a473b4
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 2 11:48:03 2010 -0400
Added low-level filesystem back end.
commit 72de81ed949da99aa7f70465d74d0e0b91a5f570
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 2 10:52:52 2010 -0400
Moved back-end-specific init code into back-end module.
commit e6bd1cdf072fed72454ec8d362ae1719c36560e7
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 2 10:41:21 2010 -0400
Moved bucket-create into backend module, fixed a couple of other bugs I
found in the process.
commit 6c96bac27eea4f3e629558853520649a6477db93
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Thu Sep 2 10:11:03 2010 -0400
Moved delete into backend module.
commit c013bf58efaf8b197f7bec89925cfc1ce3aade3b
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 1 17:16:35 2010 -0400
First step of making back ends fully modular and interchangeable.
commit 2151274bb62d0dbf8ca706749b353b5803084e3d
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 1 15:53:35 2010 -0400
Lost mpipe.* in the last merge.
commit a5de87504edf5ab9114b7e317124eb9734a89ab6
Merge: 252715774eeec18b75738d44f7dbb67d3e023511 87eb13438fd9130aca66c78b431e2cfa3a56364e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 1 15:51:50 2010 -0400
Merge branch 'merge'
Conflicts:
Makefile.am
repo.h
rest.c
commit 252715774eeec18b75738d44f7dbb67d3e023511
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 1 15:39:09 2010 -0400
Jim's latest fixes (DPRINTF/realloc/strtok_r) merged by hand because I
forgot to do it before starting on the massive mpipe rewrite and now they
don't apply automatically.
commit 22a86eca56dbf242b70ae50bcfe3662b0e9dca3c
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Wed Sep 1 14:33:35 2010 -0400
Major rework to make all of the producer/consumer stuff more modular.
commit 87eb13438fd9130aca66c78b431e2cfa3a56364e
Author: Jim Meyering <meyering(a)redhat.com>
Date: Wed Sep 1 10:20:42 2010 +0200
build: make "make distcheck" work
* Makefile.am (repod_SOURCES): Add the 5 .h files.
commit 81a71b33f95f20261804be5f71b922ccdd4c964b
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Aug 31 22:19:06 2010 +0200
don't dereference NULL upon OOM
* rest.c (proxy_query): Don't deref NULL upon OOM.
Avoid leak-upon-OOM.
(proxy_put_attr): Likewise.
commit 408671c995aa5535b7019e67917682dddb09e1a1
Author: Jim Meyering <meyering(a)redhat.com>
Date: Tue Aug 31 19:18:35 2010 +0200
maint: tweak DPRINTF definition
* repo.h (DPRINTF): Wrap in "do {...} while (0)", not "{...}"
so the statement-ending semicolon is required.
commit 09bdfd55576e46cee1e339f742dd5f429af3d9ee
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 20:13:28 2010 +0200
promise gcc -W that we won't abuse strtok_r (rest.c, this time)
commit 0d2dec0a10cce0eb9c0a91ad0aaf4ea52569231e
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Tue Aug 31 14:16:03 2010 -0400
Fixed immediate-disconnection issue by adding a size_t cast for the MHD
buffer size (found on EC2). Also, rearranged bucket-creation code so it
does the necessary metadata things for non-S3 primary stores even though it
can't do the data part (which will have to be done by hand in those cases).
commit 32be1e5c390bc6c2998511dd31174779d41e44d7
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Aug 30 15:36:59 2010 -0400
Fixed a bunch of warnings...
unused variable
computed value not used
shadowed declaration
missing initializer
int/size_t mismatch
There are still some left in query.* but this should make real issues
elsewhere easier to spot.
commit 1e0d0594953dd70bddd1184a6f8df392de893022
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 20:25:37 2010 +0200
avoid a "comparison between signed and unsigned..." warning
* template.h (struct tmpl_ctx_t) [index]: Change type from int
to "unsigned int". Conceptually, it's unsigned, so the type should
reflect that.
commit 688a7fcb2ce67f9959bfeb109315c20af1951727
Author: Jeff Darcy <jdarcy(a)redhat.com>
Date: Mon Aug 30 14:36:32 2010 -0400
More fixes from Jim.
commit af07e3461cbb1e9a479ddb20be6d06e8ffa467ff
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 15:39:53 2010 +0200
tmpl_get_ctx: return something
* template.c (tmpl_get_ctx): Provide required return statement.
Remove unnecessary cast.
commit 9c9c1a144ba87bf0d103b8b1e76099e28122f029
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 15:31:44 2010 +0200
promise gcc -W that we won't abuse strtok_r
gcc -W is worried that we might call strtok_r(s, delim, &var) with
uninitialized "var", because it sees that "S" may be NULL.
If that were ever to happen, then strtok_r would write through
the arbitrary pointer. This is just stopgap in 2 of the 3 cases.
In those, we should handle strdup failure.
* proxy.c (proxy_repl_prod): Assert that strtok_r input pointer is
non-NULL.
* (proxy_repl_cons, repl_worker_del): Likewise.
commit 2d3511e28322970d0d8c6fe74c402db9d02aefdf
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:52:03 2010 +0200
maint: placate gcc
* proxy.c (proxy_repl_cons): Initialize local to avoid
spurious "may be used uninitialized" warning.
commit 48c15454ed05224133df56a9a40ae53c8c054917
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:39:43 2010 +0200
maint: avoid warnings about implicitly declared functions
* proxy.c: Include "meta.h" for decl of meta_get_value, etc.
commit 9455151cdcac765eb84f037e6830f2a7d1dfe737
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:38:17 2010 +0200
maint: avoid warnings about unused parameters
* proxy.c: Mark unused parameters as such.
Use %u, not %lu for unsigned short port number.
commit 8be7a4155657dc35bc2e51c70d1ef0d2aa809827
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 17:00:55 2010 +0200
maint: define macros used to teach compiler about semantics
* repo.h (__attribute__, ATTRIBUTE_UNUSED): Define.
commit 27b62864cb02331ed7c518228c1385b7312a2d81
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:33:16 2010 +0200
proxy.c: more %llu -> %zu
commit 46ef84beb97bd472fb498b7f506f080a214f5271
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:31:27 2010 +0200
maint: avoid format mismatch warnings
* rest.c (post_find): Cast gpointer to char* in DPRINTF.
rest.c (post_foreach): Likewise.
(main): Use %u, not %lu, for an argument of type unsigned short.
commit d03db9f49bcd7c84a94f43919a7e086fd0ee6680
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:26:13 2010 +0200
maint: avoid warnings about printf format/type mismatch
* rest.c (main): Use %zu, not %llu for values of type size_t.
commit f9ac4b4b02eacab9d717f55c103a933c2cad3360
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 14:22:32 2010 +0200
maint: avoid warning about undeclared function
* template.h (tmpl_prov_footer): Declare.
commit ded414a6d00ed4d4937029d1a45ca320a4d75b7d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 13:56:11 2010 +0200
remove trailing spaces
commit 3976fe654feef45daaef9c7ddf396ea14f5b228f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 13:55:24 2010 +0200
maint: enable some gcc compiler warnings
* Makefile.am (AM_CFLAGS): Enable some gcc warnings.
commit efc87e6e031d463c7a701a4c51976e39762cda8f
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Aug 30 13:53:41 2010 +0200
maint: remove declarations of unused local variables
* rest.c (proxy_query_func): Remove unused local, prov.
(proxy_api_root): Remove unused local, host.
(proxy_list_provs): Remove unused local, op.
(proxy_create_bucket): Remove unused local, policy.
commit 5102b0ed023941ff8a823564a065be77d87d7e0a
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Fri Aug 20 09:13:22 2010 -0400
Library fix from Jim.
commit 560331b18580fdafabece362504b5f9ab08ad128
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 20:12:52 2010 -0400
Added back "fake buckets" (i.e. special REST-API stuff) in top-level list.
commit 771176f86f5a13f17cb3e9ac607dedae4eb2556e
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 20:05:23 2010 -0400
Added real bucket listing, reworked request cleanup.
commit 02809d6538a80419fa678d1bb0e2245a3caabbdc
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 15:35:57 2010 -0400
Added bucket creation.
commit b44c2b531e6a887a41d0fea2e5d12cb651377d26
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 13:53:47 2010 -0400
Converted object listing to use templates.
commit 077f53fb99bee8213445849b2ff3b023eab6d63a
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 12:28:23 2010 -0400
Added Host/Accept handling for templates.
commit b5e0b5cfc2d4192d599a598ae554855f8b9a0057
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 12:05:14 2010 -0400
Added JSON templates.
commit 2e5d5ee1815efcb3ba5b945ac76324e730393730
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Aug 19 11:49:18 2010 -0400
Added template module.
commit b07a1ceff70b4097ed6844f38732c367154aa188
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Wed Aug 18 17:00:14 2010 -0400
Added config fetch and credential update.
commit 72a8b98634599eb63f2ae00d8698512406dd8b51
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Wed Aug 18 16:29:11 2010 -0400
Allow users to fetch provider list.
commit c77b945def349b95d42c0cc14b71187eb6da8381
Author: root <root(a)fserver-1.virtual>
Date: Tue Aug 17 15:45:01 2010 -0400
Added replication-complete check, tweaked metadata-DB-update code.
commit 0f2d6bfa7cee6bf356af36dbbc81d415e60f85ce
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Tue Aug 17 14:06:32 2010 -0400
Made query code reentrant and stuff.
commit 84c4af8715d0894b4e14d507c52f24234a49a56f
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Tue Aug 10 14:44:22 2010 -0400
Added re-replication, changed original replication to use same code for
looking up policy etc.
commit d7592254d29bd09b23c7a585435133108534746c
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Wed Aug 4 13:23:26 2010 -0400
Added manual re-replication trigger.
commit 80a1172da7c31d522098e3a5b5bc195579a49369
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Tue Aug 3 15:22:39 2010 -0400
Clear locations when file changes at root.
Delete local metadata as well as data.
Re-replicate when (per-object) policy changes.
Get site name from config file.
commit 472404054c244850ddebb7c940ff9ede69aeae3b
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Tue Aug 3 15:20:43 2010 -0400
Clear locations when file changes at root.
Delete local metadata as well as data.
Re-replicate when (per-object) policy changes.
Get site name from config file.
commit 22b9dacf1742d26bed46dfef957d239b48fe685b
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Jul 22 11:19:48 2010 -0400
Added docs.
commit 1f9fb7f90f7d2aaa2c9a27072bd610b447ba79f5
Author: Jeff Darcy <jdarcy(a)jdarcy-dt.usersys.redhat.com>
Date: Thu Jul 22 10:47:57 2010 -0400
Initial version (code only so far) for internal git server.
13 years, 2 months
[repo.or.cz] iwhd.git branch master updated: v0.91-4-g4cb41b4
by Jim Meyering
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project iwhd.git.
The branch, master has been updated
via 4cb41b49a2b69509658d4fb90c21efde29476d9d (commit)
via 25a832f79a7685cecad169206df7a235547b81f8 (commit)
from 9e39dc2a1855c7c313e39f643c1681f1bd32f141 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
http://repo.or.cz/w/iwhd.git/commit/4cb41b49a2b69509658d4fb90c21efde29476d9d
commit 4cb41b49a2b69509658d4fb90c21efde29476d9d
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Feb 14 14:14:05 2011 +0100
avoid file descriptor leak in replication
The "pipes[2]" member was set by calling pipe, but those
two descriptors were never closed or even used.
* replica.c (struct _repl_item) [pipes[2]]: Remove member.
(repl_worker): Don't use ->pipes.
diff --git a/replica.c b/replica.c
index 5540efa..ee1034a 100644
--- a/replica.c
+++ b/replica.c
@@ -49,7 +49,6 @@ typedef struct _repl_item {
char *path;
provider_t *server;
size_t size;
- int pipes[2];
my_state *ms;
} repl_item;
@@ -181,17 +180,12 @@ repl_worker (void *notused ATTRIBUTE_UNUSED)
pipe_init_shared(&ms->pipe,ms,1);
switch (item->type) {
case REPL_PUT:
- if (pipe(item->pipes) >= 0) {
- xpthread_create(&prod,proxy_repl_prod,item,
- "failed to start producer thread");
- xpthread_create(&cons,proxy_repl_cons,item,
- "failed to start consumer thread");
- pthread_join(prod,NULL);
- pthread_join(cons,NULL);
- }
- else {
- error(0, errno, "pipe failed");
- }
+ xpthread_create(&prod,proxy_repl_prod,item,
+ "failed to start producer thread");
+ xpthread_create(&cons,proxy_repl_cons,item,
+ "failed to start consumer thread");
+ pthread_join(prod,NULL);
+ pthread_join(cons,NULL);
break;
case REPL_ODELETE:
repl_worker_del(item);
@@ -201,7 +195,7 @@ repl_worker (void *notused ATTRIBUTE_UNUSED)
break;
default:
error(0,0,"bad repl type %d (url=%s) skipped",
- item->type, item->path);
+ item->type, item->path);
}
/* No atomic dec without test? Lame. */
(void)g_atomic_int_dec_and_test(&rep_count);
http://repo.or.cz/w/iwhd.git/commit/25a832f79a7685cecad169206df7a235547b81f8
commit 25a832f79a7685cecad169206df7a235547b81f8
Author: Jim Meyering <meyering(a)redhat.com>
Date: Mon Feb 14 13:44:23 2011 +0100
tweak diagnostics
diff --git a/backend.c b/backend.c
index bd78ad5..003406f 100644
--- a/backend.c
+++ b/backend.c
@@ -512,7 +512,7 @@ s3_register (my_state *ms, const provider_t *prov, const char *next,
(void)dup2(organ[1],STDOUT_FILENO);
(void)dup2(organ[1],STDERR_FILENO);
execvp(cmd, (char* const*)argv);
- error (EXIT_FAILURE, errno, "failed run command %s", cmd);
+ error (EXIT_FAILURE, errno, "failed to run command %s", cmd);
}
DPRINTF("waiting for child...\n");
diff --git a/replica.c b/replica.c
index a813d82..5540efa 100644
--- a/replica.c
+++ b/replica.c
@@ -190,7 +190,7 @@ repl_worker (void *notused ATTRIBUTE_UNUSED)
pthread_join(cons,NULL);
}
else {
- error(0,errno,"pipe");
+ error(0, errno, "pipe failed");
}
break;
case REPL_ODELETE:
-----------------------------------------------------------------------
Summary of changes:
backend.c | 2 +-
replica.c | 20 +++++++-------------
2 files changed, 8 insertions(+), 14 deletions(-)
repo.or.cz automatic notification. Contact project admin jim(a)meyering.net
if you want to unsubscribe, or site admin admin(a)repo.or.cz if you receive
no reply.
--
iwhd.git ("image warehouse daemon")
13 years, 2 months
[repo.or.cz] iwhd.git annotated tag v0.91 created: v0.91
by Jim Meyering
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project iwhd.git.
The annotated tag, v0.91 has been created
at 9300bdaa8b6a3c9474c815ce632a7588926b38e3 (tag)
tagging 8f68e88ae81db999209f0cf16bba47bd9df9cd14 (commit)
replaces v0.0
tagged by Jim Meyering
on Thu Feb 10 14:56:52 2011 +0100
- Log -----------------------------------------------------------------
iwhd 0.91
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)
iQIcBAABCAAGBQJNU+6kAAoJEH/Z/MsAC+7u0ykQAJGAVSHUGMVjZWvS+OAOmoQQ
YCkkexwNbOcaq5cMVfXmE8ICR2cv0bSzm3zwtVVfZOokenKG/X/IAsqz3aJRUx3H
fyvrHW2mxzgjBgGE3JHW26KB2Cp9NyaJZDljZvPAuQ3K6XVYaF0WOareM91BR/No
rs8M8ppMTpvN87gbJk7eyTWh9AzPlX+GLhK6sgydDgGFQpOzmeteKTTxjwgjU2Pu
ds5GVtlnetYdrmNsoDnFbZJTYR2LVB7U1ZIN3bfVxdXhHpjGkPR8wtVW5rixrJyq
5iFT7yJKuRHcJsf5T1pYHdYJxUhp/zqYXtBzpC5vKgwDg6hvjCkha3O/dZ+Zxp5S
Hh1fjTcqk4qQhNUBCIZgQXHgdZnr2sTdtbmgfXYo8IM/RMjg7Fzm9jUelMxFYLMu
ZgsHCRH4t3fGxsoVcnHyIOUU8SAkytR6r54CPlWBwpdeSGg6geQQnPeOYWeZ6Xjb
BkHAXeuKcaRtrMkuEXQOtmOBYvY0JythKqMMivAC5/YkZJ04OrKgnafKT7kG1am+
xND589KuGTzd1P6xUqVW/JRaYYnJWbm3fWGIU/uwtduM6qXe78MppTOf+oOw4pAr
Mompi17oOz4nItIdYAI2WIHZ0HgDjp1lQv4PVweFP1noGSK6A5Xb/4yts8X0Goq2
v//DKpsJOeDSxdDQZoIM
=SRJk
-----END PGP SIGNATURE-----
Chris Lalancette (1):
maint: add a comment: BuildRequires vs. mongodb-server
Jeff Darcy (96):
Added docs.
Clear locations when file changes at root.
Clear locations when file changes at root.
Added manual re-replication trigger.
Added re-replication, changed original replication to use same code for
Made query code reentrant and stuff.
Allow users to fetch provider list.
Added config fetch and credential update.
Added template module.
Added JSON templates.
Added Host/Accept handling for templates.
Converted object listing to use templates.
Added bucket creation.
Added real bucket listing, reworked request cleanup.
Added back "fake buckets" (i.e. special REST-API stuff) in top-level list.
Library fix from Jim.
More fixes from Jim.
Fixed a bunch of warnings...
Fixed immediate-disconnection issue by adding a size_t cast for the MHD
Major rework to make all of the producer/consumer stuff more modular.
Jim's latest fixes (DPRINTF/realloc/strtok_r) merged by hand because I
Merge branch 'merge'
Lost mpipe.* in the last merge.
First step of making back ends fully modular and interchangeable.
Moved delete into backend module.
Moved bucket-create into backend module, fixed a couple of other bugs I
Moved back-end-specific init code into back-end module.
Added low-level filesystem back end.
Deleted old (higher level) local-filesystem interface.
Replace assert (now undefined) with abort. Actually Jim's fix.
Up-to-date API docs.
EC2 experiments.
Merge branch 'master' into ec2
Added serialization on MongoDB client. Test is to transfer 50 1MB files
More EC2-registration changes, including back-end register method and
Merge branch 'ec2' into ec2-2
Specify filesystem mode in config file instead of command line (needed for
More AMI-registration plumbing.
Added code to pass registration requests through one or more CURL/HTTP
Teach replication module ("proxy.c") how to use a filesystem-backed primary
Last bits to do AMI bundling/upload/registration entirely through the
Set "ami-id" property after registration. Also fixed bug with fetching
Take registration information from the request, if available.
Fix some memory leaks in the new cert/key temp-file code.
Added ami-bkt argument, fixed output-parsing bug.
Added more detailed tracking of status via ami-id, switched result parsing
More verbose output for debugging.
Make the code buildable again after the last salvo.
Changed previous API-change patch so that old and new bucket-creation
First piece of link-following syntax. Still needs type clean-up.
The long-promised type cleanup. Eval always returns 0 or 1. String_value
More warning suppression, regenerated query.c.diff
More changes suggested by Jim, made by me - const pointers, unsigned ints,
Added _providers to API-root listing. Fixed bug in form-data handling,
Add replication for bucket-create requests.
Replace fprintf/perror calls with officially mandated error().
Fix string handling in proxy (replication) module.
Require a "path" parameter for filesystem back ends.
Merge branch 'master' of ssh://orcz/srv/git/iwhd
Just use chdir instead of chroot+chdir, per email convo.
Fix ms leaks when object not found, deleted.
Fix leak when deleting nonexistent file.
New version reflecting current reality/terminology/etc.
Fix memory leak, uninit var, syntax
Fix various issues in policy syntax.
Enhance tests.
More basic tests.
tests: exercise attributes
Add rep_count control operation on API root.
Add replication tests.
tests: run 2nd test's mongod on a different port for parallel make check
Fix replication test when *not* running in parallel.
Add op=parts API.
Add comments to parsing/evaluation code.
Fix BsonObject invalid-ref in attribute-listing code.
Allow simple GETs without metadata access.
Improve behavior in no-metadata-access cases.
Fix a memory leak in object-attribute evaluation (found by Jim).
Dynamic config, steps 1-3
Fix rollup which didn't include renamed/added files.
New tests for autostart and "headless operation"
Add link-following implementation (not just syntax).
Unify backend/replica modules, support dynamic config.
rewrite query parser using Bison, not peg/leg
Add query.h comments back in.
Undo accidental reversal of wait_for_repl return values.
Fix two pipe-initialization races that occur in replication.
Fix re-replication on attribute change.
Sprintf/snprintf cleanup.
Add X-redhat-role header when replicating to ourselves.
Add unlink/O_EXCL in case new file is shorter than what's there.
Fixed X-redhat-role check.
Add test for re-replication when an attribute changes.
Add test for truncation when overwriting an object.
Change test-file names to be descriptive, avoid dups.
don't segfault on a simple query
Jim Meyering (223):
maint: remove declarations of unused local variables
maint: enable some gcc compiler warnings
remove trailing spaces
maint: avoid warning about undeclared function
maint: avoid warnings about printf format/type mismatch
maint: avoid format mismatch warnings
proxy.c: more %llu -> %zu
maint: define macros used to teach compiler about semantics
maint: avoid warnings about unused parameters
maint: avoid warnings about implicitly declared functions
maint: placate gcc
promise gcc -W that we won't abuse strtok_r
tmpl_get_ctx: return something
avoid a "comparison between signed and unsigned..." warning
promise gcc -W that we won't abuse strtok_r (rest.c, this time)
maint: tweak DPRINTF definition
don't dereference NULL upon OOM
build: make "make distcheck" work
configure.ac: remove obsolete comment
configure.ac: avoid m4 under-quoting errors
Makefile.am: use $(...) rather than @...@
query: avoid NULL-deref-on-OOM bugs
build: new rules; run leg; apply patch to fix generated code
build: make ./configure fail if "leg" is not available
provide --help, --version, bug-reporting address; normalize
no more -D... options on command line
configure.ac: refer to a better URL for peg
rename tool: repod->iwhd, header: repo.h->iwh.h
require --config=FILE as a command line option
correct a bug: add ":" after "f" in getopt option spec
.gitignore: ignore a few more
split inadvertently joined declarations; remove trailing spaces
remove vestige of -f option
more const-correctness changes
more const-correctness; and remove an unused decl
remove useless 'if'-tests-before-free
build: drop -Wshadow(for now) and turn off -Wunused
don't declare free'd variables/params to be const
FS backend: print better diagnostics
Add Copyright comments, and regenerate query.c.diff
remove trailing blanks
configure.ac (AC_INIT): Use iwhd as package name, not image-warehouse.
build: boost support: don't depend on C++-mangled names
build: m4-quote use of AC_LANG_PROGRAM to avoid warning from new autoconf
More const changes.
avoid printf format abuse; use -Wformat-security; report errno more
maint: use new iwhd-devel address as bug-reporting addr
hoist definition of cmd to remove one more hard-coded "dc-register-image"
tests: add test framework and first test
use calloc in place of malloc+memset-0
rest.c: plug a leak
tests: specify "path", now that it's required
don't deref NULL on fdopen failure; plug a FILE-sized leak
tests: remove unnecessary "kill..." stmt, now it's done via trap
tests: check for root xml and json
tests: don't let ~/.curlrc settings perturb these tests
don't end "error(..." diagnostic with "\n"
tests: exercise basic providers_ functionality
don't leak json_strings on username/password update
plug a nasty leak and 3 others like it
tests: exercise object deletion
remove unnecessary casts
tests: add more
maint: remove empty/unused ChangeLog file
boiler-plate README files
maint: adjust copyright on most files: Red Hat, not FSF
declare file-scoped globals "static"; use const, too
don't ignore write failures
proxy.c: declare functions and file-scoped variables static
don't ignore failed thread creation
build: GNUmakefile: new file, for better output from "make check"
fix bug just introduced in repl_worker
tests: exercise the parser
tests: remove vestigial VERBOSE=yes
tests: factor out range of ports we'll use for mongod instances
tests: remove seemingly unnecessary "sleep 3"
query.c.diff: regenerate to avoid offsets
template: const correctness
tests: use -9 only when killing mongod, not iwhd
tests: parser-test: don't ignore "compare" failure
tests: add missing "Exit $fail"
build: avoid warning about unused parameter
avoid warning about signed/unsigned comparison
init.sh: don't comment out cleanup-handling rm -rf
replica: don't say we're "deleting" when creating a bucket
fflush debugging output when writing to stdout
tests: wait_for: report how long we waited, or that timeout expired
tests: avoid using a temp file in wait_for_repl utility
clean-up: move file-scoped global into "main"
tests: don't clutter regular output with expected diagnostics
remove query.leg and query.c.diff
build: rearrange things to use Bison/Flex; adapt; clean up
parse-test now passes
apply jeffs leak-fixing patch from master (manually merge 3 failed hunks)
apply Jeff's link-following change
qparser.y: Include <ctype.h> for use of isdigit
cast away "const" on free argument
static and const
use literals in bison
avoid undefined-yydecl warning
qlexer.l: simplify
build: ensure we use only Flex
lex && and || as tokens, not "&" and "|"
add missing #define part of double-inclusion guard
query.h: use struct value_t (same name as typedef)
add beginnings of %union support
pure, almost
handle yylex_init failure
qlexer.l: rewrite not to need static var, at_eof.
now that lexer calls strdup, free those strings
xrealloc was not used; #if-0 it out
avoid NULL deref on failed strdup
add iwhd.spec.in and Makefile rules
automatically generate man page
remove decls of unused variables
mark unused parameters as such
remove final "static" state variable from the parser
mark more unused parameters
simplify parser: T_NE
simplify parser: T_EQ
simplify parser: <, >, >=, <=
query parser: avoid 25 reduce/reduce conflicts
parser: handle OOM gracefully
parser: remove dead code
parser: move x*alloc functions into #if-unit-test block where used
reduce scope of global to be file-only
plug error path leak
parser: plug more leaks
add a reminder not to hard-code /tmp/iwtmp.XXXXXX
s3_register: correct a diagnostic: s/key/secret/
_policy is a reserved object name; reject it, just like the others
prohibit creation of a bucket with one of the reserved names
convert reserved attribute names to have a leading underscore prefix
GNUmakefile: replace leading spaces with TABs
build: don't check for or use -lcrypto; it was not used
avoid warnings from new/better microhttpd.h signature for...
plug leaks-after-OOM-failure
plug a leak
fix trivial comment typo
use strchr and strrchr, not index or rindex
tests: wait for up to 5 seconds for start-up, not just 3s
Use gnulib
maint: enable no-trailing blanks prohibition
maint: remove unused #include directives spotted by "make syntax-check"
enable m4 quoting check
enable "echo -n/-e" prohibition
enable "test -a/-o" prohibition
enable no-blank-lines-at-EOF rule
use first gnulib module: progname
use gnulib's closeout module
remove GNUmakefile -- now it is pulled from gnulib
disable sc_cast_of_argument_to_free check
avoid redundant const -- move it to the right of "*"
skip the malloca-tests module; too slow
bootstrap.conf: mention bison
clean-up: don't define xmalloc, xstrdup, etc. -- use gnulib's definitions
build: support configure-time --enable-gcc-warnings option
do not perform arithmetic on void* pointers
maint: remove unused definitions
maint: declare cf_put_child to be static
maint: declare follow_link in replica.h
maint: accommodate new, stricter warnings
maint: avoid theoretical risk of signed overflow
maint: avoid warning about "noreturn" function
maint: avoid const-related warnings
maint: make autogen.sh invoke bootstrap
build: make the "rpm" rule work once again
build: iwhd "Requires" mongodb-server (rather than BuildRequires)
revert "build: iwhd "Requires" mongodb-server (rather than BuildRequires)"
maint: update copyright year ranges to include 2011
build: update gnulib submodule to latest
maint: update files copied from gnulib
build: update gnulib submodule to latest
tests: reenable excluded gnulib test; run gnulib-tests first
build: update gnulib submodule to latest
fix an unchecked strdup
allow dynamic addition/deletion of providers
don't use xstrndup via base_name
add provider ref-counting; FIXME: partial impl. (i.e., no incr)
reject an attempt to add a provider with "name" parameter
use new function, get_main_provider, rather than global "main_prov"
tests: clean up provider-deletion test
get primary provider name via http://host:$port/_providers/_primary
new interface: curl -X PUT http://_providers/PROVIDER/_set_primary
rename s/_set_primary/_primary/: more RESTful
maint: rename file-scoped global s/main_prov/g_main_prov/, and...
use garbage collection
garbage-collection fix-up
remove functions and struct members that are no longer needed
tests: add dynamic-provider test
guard provider-addition with a mutex; tighten provider test
do not allow "updating" a provider in place -- now, you must remove and then re-add
remove more tests of in-place provider changing
begin converting hash tables from glib to gnulib
convert remaining g_hash_table_lookup functions to kv_hash_lookup
convert all remaining uses of g_hash_* functions
insinuate GC into gnulib's hash-related code
tell GC about the thread spawned by MHD_start_daemon
t/provider: warn-then-sleep on failure -- eases debugging
avoid a leak via ms->post = MHD_create_post_processor(...
handle hash_initialize and MHD_create_post_processor failure
avoid unnecessary MHD_lookup_connection_value calls
rewrite provider-listing code so we can protect it with a mutex:
also mutex-protect the provider-iterator used in listing
microhttpd may also spawn threads to call prov_list_generator; tell GC
tests: prepare for improved provider checks in t/provider
sort provider list on "name"
list providers: avoid syntax error in JSON output
tests: also check JSON provider lists
remove gnulib hash.c diff hack
plug a potential leak
build: make configure fail if gc-devel (aka libgc-dev) is not installed
use SMALL_PRIME in place of literal 13 (initial hash table size)
use symbolic names in place of more hard-coded constants
remove dead code
protect remaining uses of prov_hash against concurrent access
don't pass NULL buffer to formatter in provider list generation
tests: reenable excluded gnulib test; run gnulib-tests first
maint: build via make CFLAGS='-DGNULIB_POSIXCHECK=1'; address warnings
maint: speed up configure
doc: add to NEWS
maint: record previous release tag name
version 0.91
Pete Zaitcev (12):
cure a hang on S3 error
Implement documented API for _new
Cleanups
[patch repod] add autostart of Mongo
switch to atexit(auto_stop)
add static to parse_config_inner
Lock auto.c to IPv4
Bump testing timeouts
exit if mkdir fails
error to stderr
switch to using installed ec2 tools
avoid hang when creating an object in non-existing bucket
root (1):
Added replication-complete check, tweaked metadata-DB-update code.
-----------------------------------------------------------------------
repo.or.cz automatic notification. Contact project admin jim(a)meyering.net
if you want to unsubscribe, or site admin admin(a)repo.or.cz if you receive
no reply.
--
iwhd.git ("image warehouse daemon")
13 years, 2 months
[repo.or.cz] iwhd.git branch master updated: v0.0-335-g9e39dc2
by Jim Meyering
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project iwhd.git.
The branch, master has been updated
via 9e39dc2a1855c7c313e39f643c1681f1bd32f141 (commit)
via 684918a88245adbbb80cd3b317d80877d1f9a861 (commit)
via 8f68e88ae81db999209f0cf16bba47bd9df9cd14 (commit)
via 600ca43a8cd39dae9aa1256a670da9a243678207 (commit)
from 8c22e73ad9f889f528a0acb7d761bf15d3d91c95 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
http://repo.or.cz/w/iwhd.git/commit/9e39dc2a1855c7c313e39f643c1681f1bd32f141
commit 9e39dc2a1855c7c313e39f643c1681f1bd32f141
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 15:31:35 2011 +0100
maint: also create xz-compressed tarballs
* configure.ac (AM_INIT_AUTOMAKE): Also make xz-compressed tarballs.
They are more than 30% smaller.
diff --git a/configure.ac b/configure.ac
index 1d59e3f..e874281 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@ AC_INIT([iwhd],
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADERS([config.h:config.hin])
-AM_INIT_AUTOMAKE([1.11.1 color-tests parallel-tests])
+AM_INIT_AUTOMAKE([1.11.1 dist-xz color-tests parallel-tests])
AM_SILENT_RULES([yes]) # make --enable-silent-rules the default.
# Checks for programs.
http://repo.or.cz/w/iwhd.git/commit/684918a88245adbbb80cd3b317d80877d1f9a861
commit 684918a88245adbbb80cd3b317d80877d1f9a861
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:58:50 2011 +0100
post-release administrivia
* NEWS: Add header line for next release.
* .prev-version: Record previous version.
* cfg.mk (old_NEWS_hash): Auto-update.
diff --git a/.prev-version b/.prev-version
index ba66466..0ac647c 100644
--- a/.prev-version
+++ b/.prev-version
@@ -1 +1 @@
-0.0
+0.91
diff --git a/NEWS b/NEWS
index 9cc78f7..9a380f9 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,8 @@
iwhd NEWS -*- outline -*-
+* Noteworthy changes in release ?.? (????-??-??) [?]
+
+
* Noteworthy changes in release 0.91 (2011-02-10) [stable]
** Bug fixes
diff --git a/cfg.mk b/cfg.mk
index 7fdbf36..84fce00 100644
--- a/cfg.mk
+++ b/cfg.mk
@@ -42,7 +42,7 @@ bootstrap-tools = autoconf,automake,gnulib
# Now that we have better tests, make this the default.
export VERBOSE = yes
-old_NEWS_hash = d41d8cd98f00b204e9800998ecf8427e
+old_NEWS_hash = dba674b8d2d0a340da7654d16cced91e
sc_prohibit_echo_minus_en:
@prohibit='<echo -[en]'
http://repo.or.cz/w/iwhd.git/commit/8f68e88ae81db999209f0cf16bba47bd9df9cd14
commit 8f68e88ae81db999209f0cf16bba47bd9df9cd14
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:56:52 2011 +0100
version 0.91
* NEWS: Record release date.
diff --git a/NEWS b/NEWS
index c72f7d9..9cc78f7 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,6 @@
iwhd NEWS -*- outline -*-
-* Noteworthy changes in release ?.? (????-??-??) [?]
+* Noteworthy changes in release 0.91 (2011-02-10) [stable]
** Bug fixes
http://repo.or.cz/w/iwhd.git/commit/600ca43a8cd39dae9aa1256a670da9a243678207
commit 600ca43a8cd39dae9aa1256a670da9a243678207
Author: Jim Meyering <meyering(a)redhat.com>
Date: Thu Feb 10 14:52:35 2011 +0100
maint: record previous release tag name
* .prev-version: Record tag name of previous release.
This is used (and automatically advanced) when making a release
via e.g., "build-aux/do-release-commit-and-tag 0.91 stable".
diff --git a/.prev-version b/.prev-version
new file mode 100644
index 0000000..ba66466
--- /dev/null
+++ b/.prev-version
@@ -0,0 +1 @@
+0.0
-----------------------------------------------------------------------
Summary of changes:
.prev-version | 1 +
NEWS | 3 +++
cfg.mk | 2 +-
configure.ac | 2 +-
4 files changed, 6 insertions(+), 2 deletions(-)
create mode 100644 .prev-version
repo.or.cz automatic notification. Contact project admin jim(a)meyering.net
if you want to unsubscribe, or site admin admin(a)repo.or.cz if you receive
no reply.
--
iwhd.git ("image warehouse daemon")
13 years, 2 months