[hail/f16] Byte-to-byte identical to Rawhide hail-0.8-0.7.gf9c5b967.fc17, for f16.
Pete Zaitcev
zaitcev at fedoraproject.org
Sat Jan 28 05:45:39 UTC 2012
commit bd4f48d223caf66a103ff22226c25e90464a648a
Author: Pete Zaitcev <zaitcev at kotori.zaitcev.us>
Date: Fri Jan 27 22:27:53 2012 -0700
Byte-to-byte identical to Rawhide hail-0.8-0.7.gf9c5b967.fc17, for f16.
hail-0.8-fixes.patch | 547 ++++++++++++++++++++++++++++++++++++++++++++++++++
hail-0.8-libdb.patch | 30 +++
hail.spec | 21 ++-
3 files changed, 596 insertions(+), 2 deletions(-)
---
diff --git a/hail-0.8-fixes.patch b/hail-0.8-fixes.patch
new file mode 100644
index 0000000..d61a0be
--- /dev/null
+++ b/hail-0.8-fixes.patch
@@ -0,0 +1,547 @@
+Pick essential crash and leak fixes. Add subdomain support (compatible).
+
+38f99270d278b6d13dfc06ac6bdac59e00c338c1 -- good leak fix
+7b6c6c4387a5d9ff8f4fd8d4416721146d22cd74 -- crash fix
+82e01e95c981e00efd905f9f386f139336dd913b -- subdomain
+
+Also, the const fixes around huri_escape.
+
+diff -urp -X dontdiff.hail hail-0.8git/include/hstor.h hail-0.8git-p3/include/hstor.h
+--- hail-0.8git/include/hstor.h 2010-07-07 18:18:05.000000000 -0600
++++ hail-0.8git-p3/include/hstor.h 2012-01-25 22:48:20.560907723 -0700
+@@ -25,6 +25,8 @@
+ #include <curl/curl.h>
+ #include <glib.h>
+
++enum hstor_calling_format { HFMT_ORDINARY, HFMT_SUBDOMAIN };
++
+ struct hstor_client {
+ CURL *curl;
+ char *acc;
+@@ -32,6 +34,7 @@ struct hstor_client {
+ char *user;
+ char *key;
+ bool verbose;
++ bool subdomain;
+ };
+
+ struct hstor_bucket {
+@@ -144,7 +147,7 @@ extern int hreq_acl_canned(struct http_r
+ /* uri.c */
+ extern struct http_uri *huri_parse(struct http_uri *uri_dest, char *uri_src_text);
+ extern int huri_field_unescape(char *s, int s_len);
+-extern char* huri_field_escape (char *signed_str, unsigned char mask);
++extern char* huri_field_escape (const char *signed_str, unsigned char mask);
+
+ static inline bool hreq_http11(struct http_req *req)
+ {
+@@ -165,6 +168,9 @@ extern void hstor_free_keylist(struct hs
+ extern struct hstor_client *hstor_new(const char *service_acc,
+ const char *service_host, const char *user, const char *secret_key);
+
++extern bool hstor_set_format(struct hstor_client *hstor,
++ enum hstor_calling_format f);
++
+ extern bool hstor_add_bucket(struct hstor_client *hstor, const char *name);
+ extern bool hstor_del_bucket(struct hstor_client *hstor, const char *name);
+
+diff -urp -X dontdiff.hail hail-0.8git/lib/hstor.c hail-0.8git-p3/lib/hstor.c
+--- hail-0.8git/lib/hstor.c 2010-07-14 01:11:08.000000000 -0600
++++ hail-0.8git-p3/lib/hstor.c 2012-01-25 22:45:52.957753017 -0700
+@@ -86,6 +86,21 @@ err_out:
+ return NULL;
+ }
+
++bool hstor_set_format(struct hstor_client *hstor, enum hstor_calling_format f)
++{
++ switch (f) {
++ case HFMT_ORDINARY:
++ hstor->subdomain = false;
++ break;
++ case HFMT_SUBDOMAIN:
++ hstor->subdomain = true;
++ break;
++ default:
++ return false;
++ }
++ return true;
++}
++
+ static size_t all_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data)
+ {
+ GByteArray *all_data = user_data;
+@@ -176,10 +191,56 @@ next:
+ }
+ }
+
++static bool hstor_resplit(const struct hstor_client *hstor,
++ const char *bucket, const char *key,
++ char **url, char **hosthdr, char **path)
++{
++ char *unesc_path;
++ int rc;
++
++ if (hstor->subdomain)
++ rc = asprintf(&unesc_path, "/%s", key);
++ else
++ rc = asprintf(&unesc_path, "/%s/%s", bucket, key);
++ if (rc < 0)
++ goto err_spath;
++ *path = huri_field_escape(unesc_path, PATH_ESCAPE_MASK);
++ if (!*path)
++ goto err_epath;
++
++ if (hstor->subdomain)
++ rc = asprintf(hosthdr, "Host: %s.%s", bucket, hstor->host);
++ else
++ rc = asprintf(hosthdr, "Host: %s", hstor->host);
++ if (rc < 0)
++ goto err_host;
++
++ if (hstor->subdomain)
++ rc = asprintf(url, "http://%s.%s%s", bucket, hstor->acc, *path);
++ else
++ rc = asprintf(url, "http://%s%s", hstor->acc, *path);
++ if (rc < 0)
++ goto err_url;
++
++ free(unesc_path);
++ return true;
++
++ /* free(*url); */
++ err_url:
++ free(*hosthdr);
++ err_host:
++ free(*path);
++ err_epath:
++ free(unesc_path);
++ err_spath:
++ return false;
++}
++
+ struct hstor_blist *hstor_list_buckets(struct hstor_client *hstor)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80], url[80];
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *host, *url;
+ struct curl_slist *headers = NULL;
+ struct hstor_blist *blist;
+ xmlDocPtr doc;
+@@ -190,7 +251,7 @@ struct hstor_blist *hstor_list_buckets(s
+
+ all_data = g_byte_array_new();
+ if (!all_data)
+- return NULL;
++ goto err_data;
+
+ memset(&req, 0, sizeof(req));
+ req.method = "GET";
+@@ -204,14 +265,15 @@ struct hstor_blist *hstor_list_buckets(s
+ hreq_sign(&req, NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
++ if (asprintf(&host, "Host: %s", hstor->host) < 0)
++ goto err_host;
++ if (asprintf(&url, "http://%s/", hstor->acc) < 0)
++ goto err_url;
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+ headers = curl_slist_append(headers, auth);
+
+- snprintf(url, sizeof(url), "http://%s/", hstor->acc);
+-
+ curl_easy_reset(hstor->curl);
+ if (hstor->verbose)
+ curl_easy_setopt(hstor->curl, CURLOPT_VERBOSE, 1);
+@@ -289,15 +351,20 @@ struct hstor_blist *hstor_list_buckets(s
+
+ xmlFreeDoc(doc);
+ g_byte_array_free(all_data, TRUE);
+- all_data = NULL;
++ free(url);
++ free(host);
+
+ return blist;
+
+ err_out_doc:
+ xmlFreeDoc(doc);
+ err_out:
++ free(url);
++err_url:
++ free(host);
++err_host:
+ g_byte_array_free(all_data, TRUE);
+- all_data = NULL;
++err_data:
+ return NULL;
+ }
+
+@@ -305,12 +372,13 @@ static bool __hstor_ad_bucket(struct hst
+ bool delete)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80],
+- url[80], orig_path[80];
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *host, *url, *orig_path;
+ struct curl_slist *headers = NULL;
+ int rc;
+
+- sprintf(orig_path, "/%s/", name);
++ if (!hstor_resplit(hstor, name, "", &url, &host, &orig_path))
++ goto err_split;
+
+ memset(&req, 0, sizeof(req));
+ req.method = delete ? "DELETE" : "PUT";
+@@ -321,11 +389,9 @@ static bool __hstor_ad_bucket(struct hst
+
+ hreq_hdr_push(&req, "Date", timestr);
+
+- hreq_sign(&req, NULL, hstor->key, hmac);
++ hreq_sign(&req, hstor->subdomain ? name : NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
+- snprintf(url, sizeof(url), "http://%s/%s/", hstor->acc, name);
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+@@ -344,7 +410,13 @@ static bool __hstor_ad_bucket(struct hst
+
+ curl_slist_free_all(headers);
+
++ free(url);
++ free(host);
++ free(orig_path);
+ return (rc == 0);
++
++err_split:
++ return false;
+ }
+
+ bool hstor_add_bucket(struct hstor_client *hstor, const char *name)
+@@ -362,15 +434,13 @@ bool hstor_get(struct hstor_client *hsto
+ void *user_data, bool want_headers)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80],
+- url[80], *orig_path, *stmp;
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *url, *host, *orig_path;
+ struct curl_slist *headers = NULL;
+ int rc;
+
+- if (asprintf(&stmp, "/%s/%s", bucket, key) < 0)
+- return false;
+-
+- orig_path = huri_field_escape(stmp, PATH_ESCAPE_MASK);
++ if (!hstor_resplit(hstor, bucket, key, &url, &host, &orig_path))
++ goto err_split;
+
+ memset(&req, 0, sizeof(req));
+ req.method = "GET";
+@@ -381,11 +451,9 @@ bool hstor_get(struct hstor_client *hsto
+
+ hreq_hdr_push(&req, "Date", timestr);
+
+- hreq_sign(&req, NULL, hstor->key, hmac);
++ hreq_sign(&req, hstor->subdomain ? bucket : NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
+- snprintf(url, sizeof(url), "http://%s%s", hstor->acc, orig_path);
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+@@ -406,9 +474,13 @@ bool hstor_get(struct hstor_client *hsto
+ rc = curl_easy_perform(hstor->curl);
+
+ curl_slist_free_all(headers);
++ free(url);
++ free(host);
+ free(orig_path);
+-
+ return (rc == 0);
++
++err_split:
++ return false;
+ }
+
+ void *hstor_get_inline(struct hstor_client *hstor, const char *bucket, const char *key,
+@@ -442,15 +514,14 @@ bool hstor_put(struct hstor_client *hsto
+ uint64_t len, void *user_data, char **user_hdrs)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80],
+- url[80], *orig_path, *stmp, *uhdr_buf = NULL;
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *host, *url, *orig_path;
++ char *uhdr_buf = NULL;
+ struct curl_slist *headers = NULL;
+ int rc = -1;
+
+- if (asprintf(&stmp, "/%s/%s", bucket, key) < 0)
+- return false;
+-
+- orig_path = huri_field_escape(stmp, PATH_ESCAPE_MASK);
++ if (!hstor_resplit(hstor, bucket, key, &url, &host, &orig_path))
++ goto err_split;
+
+ memset(&req, 0, sizeof(req));
+ req.method = "PUT";
+@@ -477,7 +548,7 @@ bool hstor_put(struct hstor_client *hsto
+ /* alloc buf to hold all hdr strings */
+ uhdr_buf = calloc(1, uhdr_len);
+ if (!uhdr_buf)
+- goto out;
++ goto err_ubuf;
+
+ /* copy and nul-terminate hdr keys and values for signing */
+ idx = 0;
+@@ -506,11 +577,9 @@ bool hstor_put(struct hstor_client *hsto
+ }
+ }
+
+- hreq_sign(&req, NULL, hstor->key, hmac);
++ hreq_sign(&req, hstor->subdomain ? bucket : NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
+- snprintf(url, sizeof(url), "http://%s%s", hstor->acc, orig_path);
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+@@ -533,11 +602,19 @@ bool hstor_put(struct hstor_client *hsto
+ rc = curl_easy_perform(hstor->curl);
+
+ curl_slist_free_all(headers);
++ free(url);
++ free(host);
+ free(orig_path);
+-
+-out:
+ free(uhdr_buf);
+ return (rc == 0);
++
++ /* free(uhdr_buf); */
++err_ubuf:
++ free(url);
++ free(host);
++ free(orig_path);
++err_split:
++ return false;
+ }
+
+ struct hstor_put_info {
+@@ -572,15 +649,13 @@ bool hstor_put_inline(struct hstor_clien
+ bool hstor_del(struct hstor_client *hstor, const char *bucket, const char *key)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80],
+- url[80], *orig_path, *stmp;
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *host, *url, *orig_path;
+ struct curl_slist *headers = NULL;
+ int rc;
+
+- if (asprintf(&stmp, "/%s/%s", bucket, key) < 0)
+- return false;
+-
+- orig_path = huri_field_escape(stmp, PATH_ESCAPE_MASK);
++ if (!hstor_resplit(hstor, bucket, key, &url, &host, &orig_path))
++ goto err_split;
+
+ memset(&req, 0, sizeof(req));
+ req.method = "DELETE";
+@@ -594,8 +669,6 @@ bool hstor_del(struct hstor_client *hsto
+ hreq_sign(&req, NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
+- snprintf(url, sizeof(url), "http://%s%s", hstor->acc, orig_path);
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+@@ -613,9 +686,13 @@ bool hstor_del(struct hstor_client *hsto
+ rc = curl_easy_perform(hstor->curl);
+
+ curl_slist_free_all(headers);
++ free(url);
++ free(host);
+ free(orig_path);
+-
+ return (rc == 0);
++
++err_split:
++ return false;
+ }
+
+ static GString *append_qparam(GString *str, const char *key, const char *val,
+@@ -629,9 +706,11 @@ static GString *append_qparam(GString *s
+ str = g_string_append(str, key);
+ str = g_string_append(str, "=");
+
+- stmp = huri_field_escape(strdup(val), QUERY_ESCAPE_MASK);
+- str = g_string_append(str, stmp);
+- free(stmp);
++ stmp = huri_field_escape(val, QUERY_ESCAPE_MASK);
++ if (stmp) {
++ str = g_string_append(str, stmp);
++ free(stmp);
++ }
+
+ return str;
+ }
+@@ -681,7 +760,7 @@ void hstor_free_keylist(struct hstor_key
+ static void hstor_parse_key(xmlDocPtr doc, xmlNode *node,
+ struct hstor_keylist *keylist)
+ {
+- struct hstor_object *obj = calloc(1, sizeof(*obj));
++ struct hstor_object *obj;
+ xmlChar *xs;
+
+ obj = calloc(1, sizeof(*obj));
+@@ -761,8 +840,8 @@ struct hstor_keylist *hstor_keys(struct
+ const char *delim, unsigned int max_keys)
+ {
+ struct http_req req;
+- char datestr[80], timestr[64], hmac[64], auth[128], host[80];
+- char orig_path[strlen(bucket) + 8];
++ char datestr[80], timestr[64], hmac[64], auth[128];
++ char *host, *orig_path;
+ struct curl_slist *headers = NULL;
+ struct hstor_keylist *keylist;
+ xmlDocPtr doc;
+@@ -775,9 +854,15 @@ struct hstor_keylist *hstor_keys(struct
+
+ all_data = g_byte_array_new();
+ if (!all_data)
+- return NULL;
++ goto err_data;
+
+- sprintf(orig_path, "/%s/", bucket);
++ if (hstor->subdomain) {
++ if (asprintf(&orig_path, "/") < 0)
++ goto err_spath;
++ } else {
++ if (asprintf(&orig_path, "/%s/", bucket) < 0)
++ goto err_spath;
++ }
+
+ memset(&req, 0, sizeof(req));
+ req.method = "GET";
+@@ -788,10 +873,16 @@ struct hstor_keylist *hstor_keys(struct
+
+ hreq_hdr_push(&req, "Date", timestr);
+
+- hreq_sign(&req, NULL, hstor->key, hmac);
++ hreq_sign(&req, hstor->subdomain? bucket: NULL, hstor->key, hmac);
+
+ sprintf(auth, "Authorization: AWS %s:%s", hstor->user, hmac);
+- sprintf(host, "Host: %s", hstor->host);
++ if (hstor->subdomain) {
++ if (asprintf(&host, "Host: %s.%s", bucket, hstor->host) < 0)
++ goto err_host;
++ } else {
++ if (asprintf(&host, "Host: %s", hstor->host) < 0)
++ goto err_host;
++ }
+
+ headers = curl_slist_append(headers, host);
+ headers = curl_slist_append(headers, datestr);
+@@ -804,6 +895,10 @@ struct hstor_keylist *hstor_keys(struct
+ }
+
+ url = g_string_append(url, "http://");
++ if (hstor->subdomain) {
++ url = g_string_append(url, bucket);
++ url = g_string_append(url, ".");
++ }
+ url = g_string_append(url, hstor->acc);
+ url = g_string_append(url, orig_path);
+
+@@ -873,12 +968,12 @@ struct hstor_keylist *hstor_keys(struct
+ }
+ else if (!_strcmp(node->name, "Marker")) {
+ xs = xmlNodeListGetString(doc, node->children, 1);
+- keylist->marker = strdup((char *)xs);
++ keylist->marker = strdup(xs ? (char *)xs : "");
+ xmlFree(xs);
+ }
+ else if (!_strcmp(node->name, "Delimiter")) {
+ xs = xmlNodeListGetString(doc, node->children, 1);
+- keylist->delim = strdup((char *)xs);
++ keylist->delim = strdup(xs ? (char *)xs : "");
+ xmlFree(xs);
+ }
+ else if (!_strcmp(node->name, "MaxKeys")) {
+@@ -928,16 +1023,21 @@ struct hstor_keylist *hstor_keys(struct
+ }
+
+ xmlFreeDoc(doc);
++ free(host);
++ free(orig_path);
+ g_byte_array_free(all_data, TRUE);
+- all_data = NULL;
+
+ return keylist;
+
+ err_out_doc:
+ xmlFreeDoc(doc);
+ err_out:
++ free(host);
++err_host:
++ free(orig_path);
++err_spath:
+ g_byte_array_free(all_data, TRUE);
+- all_data = NULL;
++err_data:
+ return NULL;
+ }
+
+diff -urp -X dontdiff.hail hail-0.8git/lib/huri.c hail-0.8git-p3/lib/huri.c
+--- hail-0.8git/lib/huri.c 2010-07-07 18:18:05.000000000 -0600
++++ hail-0.8git-p3/lib/huri.c 2012-01-25 22:48:03.765117698 -0700
+@@ -25,7 +25,6 @@
+ #include <string.h>
+ #include <ctype.h>
+ #include <stdbool.h>
+-#include <glib.h>
+ #include <hstor.h>
+
+ /* our own ISSPACE. ANSI isspace is locale dependent */
+@@ -221,7 +220,7 @@ static const guchar neednt_escape_table[
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+-char* huri_field_escape (char *signed_str, unsigned char mask)
++char* huri_field_escape (const char *signed_str, unsigned char mask)
+ {
+ int len;
+ int i;
+@@ -250,10 +249,12 @@ char* huri_field_escape (char *signed_st
+
+ /* Don't escape if unnecessary */
+ if (must_escape == FALSE)
+- return signed_str;
++ return strdup(signed_str);
+
+ /* Allocate buffer */
+- dst = (gchar*) g_malloc(len + 1);
++ dst = malloc(len + 1);
++ if (!dst)
++ return NULL;
+
+ /* Copy */
+ for (i = j = 0; str[i]; i++, j++)
+@@ -284,7 +285,6 @@ char* huri_field_escape (char *signed_st
+ }
+ dst[j] = '\0';
+
+- g_free (signed_str);
+ return dst;
+ }
+
diff --git a/hail-0.8-libdb.patch b/hail-0.8-libdb.patch
new file mode 100644
index 0000000..3bc97c4
--- /dev/null
+++ b/hail-0.8-libdb.patch
@@ -0,0 +1,30 @@
+Make sure we do not link to db4 by accident (and then CLD crashes
+immediately). This happens on F17 which ships with db-5.2. If buildroot
+has db4, we build with libdb headers and db4 library, then crash.
+
+Upstream also renames DB4_LIBS.
+commit 09f89b0b961c04b601bddd4202a59e285494eca5
+
+diff -urp -X dontdiff.hail hail-0.8git/configure.ac hail-0.8git-p3/configure.ac
+--- hail-0.8git/configure.ac 2010-07-07 00:02:26.000000000 -0600
++++ hail-0.8git-p3/configure.ac 2012-01-25 22:27:46.542335077 -0700
+@@ -75,15 +75,10 @@ dnl AC_TYPE_PID_T
+ dnl -----------------------------
+ dnl Checks for required libraries
+ dnl -----------------------------
+-AC_CHECK_LIB(db-5.0, db_create, DB4_LIBS=-ldb-5.0,
+- AC_CHECK_LIB(db-4.9, db_create, DB4_LIBS=-ldb-4.9,
+- AC_CHECK_LIB(db-4.8, db_create, DB4_LIBS=-ldb-4.8,
+- AC_CHECK_LIB(db-4.7, db_create, DB4_LIBS=-ldb-4.7,
+- AC_CHECK_LIB(db-4.6, db_create, DB4_LIBS=-ldb-4.6,
+- AC_CHECK_LIB(db-4.5, db_create, DB4_LIBS=-ldb-4.5,
+- AC_CHECK_LIB(db-4.4, db_create, DB4_LIBS=-ldb-4.4,
+- AC_CHECK_LIB(db-4.3, db_create, DB4_LIBS=-ldb-4.3,
+- [AC_MSG_ERROR([Missing required libdb 4.x])]))))))))
++AC_CHECK_LIB(db-5.2, db_create, DB4_LIBS=-ldb-5.2,
++ AC_CHECK_LIB(db-5.1, db_create, DB4_LIBS=-ldb-5.1,
++ AC_CHECK_LIB(db-5.0, db_create, DB4_LIBS=-ldb-5.0,
++ [AC_MSG_ERROR([Missing required libdb 5.x])])))
+ AC_CHECK_LIB(crypto, MD5_Init, CRYPTO_LIBS=-lcrypto)
+ AC_CHECK_LIB(ssl, SSL_new, SSL_LIBS=-lssl)
+ AC_SEARCH_LIBS(argp_parse, argp)
diff --git a/hail.spec b/hail.spec
index c20125a..f2fd056 100644
--- a/hail.spec
+++ b/hail.spec
@@ -1,6 +1,6 @@
Name: hail
Version: 0.8
-Release: 0.5.gf9c5b967%{?dist}
+Release: 0.7.gf9c5b967%{?dist}
Summary: Project Hail core cloud services
Group: System Environment/Libraries
@@ -18,11 +18,17 @@ Source3: cld.sysconf
Source4: chunkd.service
Source5: chunkd.sysconf
-BuildRequires: db4-devel glib2-devel doxygen openssl-devel
+Patch1: hail-0.8-libdb.patch
+Patch2: hail-0.8-fixes.patch
+
+BuildRequires: libdb-devel glib2-devel doxygen openssl-devel
BuildRequires: texlive-latex fuse-devel libcurl-devel
BuildRequires: libxml2-devel procps tokyocabinet-devel
BuildRequires: systemd-units
+# These are only needed because we patch configure.ac, to accommodate libdb.
+BuildRequires: autoconf automake libtool
+
%description
Core libraries and document associated with cloud computing related
Project Hail.
@@ -75,8 +81,11 @@ developing applications that use %{name}.
%prep
%setup -q -n hail-0.8git
+%patch1 -p1
+%patch2 -p1
%build
+sh autogen.sh
%configure --disable-static
make %{?_smp_mflags}
rm -rf gendoc && mkdir gendoc && doxygen
@@ -198,6 +207,14 @@ fi
%{_includedir}/*
%changelog
+* Wed Jan 24 2012 Pete Zaitcev <zaitcev at redhat.com> - 0.8-0.7.gf9c5b967
+- Fix leaks and crashes in hstor library
+- Add support for subdomain addressing of buckets, bz#784937
+- Force a build against libdb instead of db4
+
+* Fri Jan 13 2012 Fedora Release Engineering <rel-eng at lists.fedoraproject.org> - 0.8-0.6.gf9c5b967
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
* Fri Sep 09 2011 Tom Callaway <spot at fedoraproject.org> - 0.8-0.5.gf9c5b967
- add missing systemd scriptlets
More information about the scm-commits
mailing list