[openstack-nova/f19] Fix 2 CVEs that did not make it into 2013.1.3
Nikola Dipanov
ndipanov at fedoraproject.org
Tue Aug 27 13:56:12 UTC 2013
commit 6d611a334489aa85e46775ca6fa23d4e3d8b17a2
Author: Nikola Dipanov <ndipanov at redhat.com>
Date: Tue Aug 27 16:05:30 2013 +0200
Fix 2 CVEs that did not make it into 2013.1.3
...nforce-flavor-access-during-instance-boot.patch | 85 ++++++
0004-Update-rpc-impl_qpid.py-from-oslo.patch | 299 ++++++++++++++++++++
openstack-nova.spec | 10 +-
3 files changed, 393 insertions(+), 1 deletions(-)
---
diff --git a/0003-Enforce-flavor-access-during-instance-boot.patch b/0003-Enforce-flavor-access-during-instance-boot.patch
new file mode 100644
index 0000000..1d6e9b4
--- /dev/null
+++ b/0003-Enforce-flavor-access-during-instance-boot.patch
@@ -0,0 +1,85 @@
+From b5a3324a793ccef1c7004d72b1236d65b3655a4a Mon Sep 17 00:00:00 2001
+From: Russell Bryant <rbryant at redhat.com>
+Date: Tue, 20 Aug 2013 11:06:12 -0400
+Subject: [PATCH] Enforce flavor access during instance boot
+
+The code in the servers API did not pass the context when retrieving
+flavor details. That means it would use an admin context instead,
+bypassing all flavor access control checks.
+
+This patch includes the fix, and the corresponding unit test for the v2
+API.
+
+Closes-bug: #1212179
+
+(cherry picked from commit 4054cc4a22a1fea997dec76afb5646fd6c6ea6b9)
+
+Conflicts:
+ nova/api/openstack/compute/plugins/v3/servers.py
+ nova/api/openstack/compute/servers.py
+ nova/tests/api/openstack/compute/plugins/v3/test_servers.py
+ nova/tests/api/openstack/compute/test_servers.py
+
+Change-Id: I681ae9965e19767df22fa74c3315e4e03a459d3b
+(cherry picked from commit 8b686195afe7e6dfb46c56c1ef2fe9c993d8e495)
+---
+ nova/api/openstack/compute/servers.py | 3 ++-
+ nova/tests/api/openstack/compute/test_servers.py | 22 ++++++++++++++++++++--
+ 2 files changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
+index 85ef080..6c38219 100644
+--- a/nova/api/openstack/compute/servers.py
++++ b/nova/api/openstack/compute/servers.py
+@@ -873,7 +873,8 @@ class Controller(wsgi.Controller):
+
+ try:
+ _get_inst_type = instance_types.get_instance_type_by_flavor_id
+- inst_type = _get_inst_type(flavor_id, read_deleted="no")
++ inst_type = _get_inst_type(flavor_id, ctxt=context,
++ read_deleted="no")
+
+ (instances, resv_id) = self.compute_api.create(context,
+ inst_type,
+diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
+index 7748c2e..89d0f8a 100644
+--- a/nova/tests/api/openstack/compute/test_servers.py
++++ b/nova/tests/api/openstack/compute/test_servers.py
+@@ -1822,10 +1822,10 @@ class ServersControllerCreateTest(test.TestCase):
+ """utility function - check server_dict for absence of adminPass."""
+ self.assertTrue("adminPass" not in server_dict)
+
+- def _test_create_instance(self):
++ def _test_create_instance(self, flavor=2):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ body = dict(server=dict(
+- name='server_test', imageRef=image_uuid, flavorRef=2,
++ name='server_test', imageRef=image_uuid, flavorRef=flavor,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+@@ -1837,6 +1837,24 @@ class ServersControllerCreateTest(test.TestCase):
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
++ def test_create_instance_private_flavor(self):
++ values = {
++ 'name': 'fake_name',
++ 'memory_mb': 512,
++ 'vcpus': 1,
++ 'root_gb': 10,
++ 'ephemeral_gb': 10,
++ 'flavorid': '1324',
++ 'swap': 0,
++ 'rxtx_factor': 0.5,
++ 'vcpu_weight': 1,
++ 'disabled': False,
++ 'is_public': False,
++ }
++ db.instance_type_create(context.get_admin_context(), values)
++ self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
++ flavor=1324)
++
+ def test_create_server_bad_image_href(self):
+ image_href = 1
+ flavor_ref = 'http://localhost/123/flavors/3'
diff --git a/0004-Update-rpc-impl_qpid.py-from-oslo.patch b/0004-Update-rpc-impl_qpid.py-from-oslo.patch
new file mode 100644
index 0000000..e6af161
--- /dev/null
+++ b/0004-Update-rpc-impl_qpid.py-from-oslo.patch
@@ -0,0 +1,299 @@
+From f7b1c8ea82001d70ec6fb3823ff6313911f45abf Mon Sep 17 00:00:00 2001
+From: Andrew Laski <andrew.laski at rackspace.com>
+Date: Wed, 5 Jun 2013 10:02:07 -0400
+Subject: [PATCH] Update rpc/impl_qpid.py from oslo
+
+The current qpid driver cannot serialize objects containing strings
+longer than 65535 characters. This just became a breaking issue when
+the message to scheduler_run_instance went over that limit. The fix has
+been committed to oslo, so this just syncs it over to Nova.
+
+The first part of this fix was already ported to Grizzly in
+Ib52e9458a9db01e2c8fd2a6d84010733f85980f8 to allow receiving messages
+from Havana in the new format. Now we need the rest of it to be able to
+send messages bigger than 65K which can happen when fetching the
+console log from an instance.
+
+This crash in the messaging layer for big messages caused the connection
+to be left out of the connection pool, which means that after several
+crashes the compute node would be left without remaining connections and
+completely isolated from the other components. This could be easily
+exploited by flooding the console from inside an instance and afterwards
+requesting the console-log from the dashboard or the API until the
+compute node couldn't answer any more.
+
+Fixes bug: #1215091 (CVE-2013-4261)
+
+Change-Id: I505b648c3d0e1176ec7a3fc7d1646fa5a5232261
+---
+ nova/openstack/common/rpc/impl_qpid.py | 84 ++++++++++++++++++++++------------
+ 1 file changed, 56 insertions(+), 28 deletions(-)
+
+diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
+index 0044088..59f55c4 100644
+--- a/nova/openstack/common/rpc/impl_qpid.py
++++ b/nova/openstack/common/rpc/impl_qpid.py
+@@ -31,6 +31,7 @@ from nova.openstack.common import log as logging
+ from nova.openstack.common.rpc import amqp as rpc_amqp
+ from nova.openstack.common.rpc import common as rpc_common
+
++qpid_codec = importutils.try_import("qpid.codec010")
+ qpid_messaging = importutils.try_import("qpid.messaging")
+ qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
+
+@@ -120,7 +121,7 @@ class ConsumerBase(object):
+ self.reconnect(session)
+
+ def reconnect(self, session):
+- """Re-declare the receiver after a qpid reconnect"""
++ """Re-declare the receiver after a qpid reconnect."""
+ self.session = session
+ self.receiver = session.receiver(self.address)
+ self.receiver.capacity = 1
+@@ -142,7 +143,7 @@ class ConsumerBase(object):
+ msg.content_type = 'amqp/map'
+
+ def consume(self):
+- """Fetch the message and pass it to the callback object"""
++ """Fetch the message and pass it to the callback object."""
+ message = self.receiver.fetch()
+ try:
+ self._unpack_json_msg(message)
+@@ -158,7 +159,7 @@ class ConsumerBase(object):
+
+
+ class DirectConsumer(ConsumerBase):
+- """Queue/consumer class for 'direct'"""
++ """Queue/consumer class for 'direct'."""
+
+ def __init__(self, conf, session, msg_id, callback):
+ """Init a 'direct' queue.
+@@ -176,7 +177,7 @@ class DirectConsumer(ConsumerBase):
+
+
+ class TopicConsumer(ConsumerBase):
+- """Consumer class for 'topic'"""
++ """Consumer class for 'topic'."""
+
+ def __init__(self, conf, session, topic, callback, name=None,
+ exchange_name=None):
+@@ -196,7 +197,7 @@ class TopicConsumer(ConsumerBase):
+
+
+ class FanoutConsumer(ConsumerBase):
+- """Consumer class for 'fanout'"""
++ """Consumer class for 'fanout'."""
+
+ def __init__(self, conf, session, topic, callback):
+ """Init a 'fanout' queue.
+@@ -215,7 +216,7 @@ class FanoutConsumer(ConsumerBase):
+
+
+ class Publisher(object):
+- """Base Publisher class"""
++ """Base Publisher class."""
+
+ def __init__(self, session, node_name, node_opts=None):
+ """Init the Publisher class with the exchange_name, routing_key,
+@@ -244,16 +245,43 @@ class Publisher(object):
+ self.reconnect(session)
+
+ def reconnect(self, session):
+- """Re-establish the Sender after a reconnection"""
++ """Re-establish the Sender after a reconnection."""
+ self.sender = session.sender(self.address)
+
++ def _pack_json_msg(self, msg):
++ """Qpid cannot serialize dicts containing strings longer than 65535
++ characters. This function dumps the message content to a JSON
++ string, which Qpid is able to handle.
++
++ :param msg: May be either a Qpid Message object or a bare dict.
++ :returns: A Qpid Message with its content field JSON encoded.
++ """
++ try:
++ msg.content = jsonutils.dumps(msg.content)
++ except AttributeError:
++ # Need to have a Qpid message so we can set the content_type.
++ msg = qpid_messaging.Message(jsonutils.dumps(msg))
++ msg.content_type = JSON_CONTENT_TYPE
++ return msg
++
+ def send(self, msg):
+- """Send a message"""
++ """Send a message."""
++ try:
++ # Check if Qpid can encode the message
++ check_msg = msg
++ if not hasattr(check_msg, 'content_type'):
++ check_msg = qpid_messaging.Message(msg)
++ content_type = check_msg.content_type
++ enc, dec = qpid_messaging.message.get_codec(content_type)
++ enc(check_msg.content)
++ except qpid_codec.CodecException:
++ # This means the message couldn't be serialized as a dict.
++ msg = self._pack_json_msg(msg)
+ self.sender.send(msg)
+
+
+ class DirectPublisher(Publisher):
+- """Publisher class for 'direct'"""
++ """Publisher class for 'direct'."""
+ def __init__(self, conf, session, msg_id):
+ """Init a 'direct' publisher."""
+ super(DirectPublisher, self).__init__(session, msg_id,
+@@ -261,7 +289,7 @@ class DirectPublisher(Publisher):
+
+
+ class TopicPublisher(Publisher):
+- """Publisher class for 'topic'"""
++ """Publisher class for 'topic'."""
+ def __init__(self, conf, session, topic):
+ """init a 'topic' publisher.
+ """
+@@ -271,7 +299,7 @@ class TopicPublisher(Publisher):
+
+
+ class FanoutPublisher(Publisher):
+- """Publisher class for 'fanout'"""
++ """Publisher class for 'fanout'."""
+ def __init__(self, conf, session, topic):
+ """init a 'fanout' publisher.
+ """
+@@ -281,7 +309,7 @@ class FanoutPublisher(Publisher):
+
+
+ class NotifyPublisher(Publisher):
+- """Publisher class for notifications"""
++ """Publisher class for notifications."""
+ def __init__(self, conf, session, topic):
+ """init a 'topic' publisher.
+ """
+@@ -349,7 +377,7 @@ class Connection(object):
+ return self.consumers[str(receiver)]
+
+ def reconnect(self):
+- """Handles reconnecting and re-establishing sessions and queues"""
++ """Handles reconnecting and re-establishing sessions and queues."""
+ attempt = 0
+ delay = 1
+ while True:
+@@ -400,7 +428,7 @@ class Connection(object):
+ self.reconnect()
+
+ def close(self):
+- """Close/release this connection"""
++ """Close/release this connection."""
+ self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
+ try:
+@@ -413,7 +441,7 @@ class Connection(object):
+ self.connection = None
+
+ def reset(self):
+- """Reset a connection so it can be used again"""
++ """Reset a connection so it can be used again."""
+ self.cancel_consumer_thread()
+ self.wait_on_proxy_callbacks()
+ self.session.close()
+@@ -437,7 +465,7 @@ class Connection(object):
+ return self.ensure(_connect_error, _declare_consumer)
+
+ def iterconsume(self, limit=None, timeout=None):
+- """Return an iterator that will consume from all queues/consumers"""
++ """Return an iterator that will consume from all queues/consumers."""
+
+ def _error_callback(exc):
+ if isinstance(exc, qpid_exceptions.Empty):
+@@ -461,7 +489,7 @@ class Connection(object):
+ yield self.ensure(_error_callback, _consume)
+
+ def cancel_consumer_thread(self):
+- """Cancel a consumer thread"""
++ """Cancel a consumer thread."""
+ if self.consumer_thread is not None:
+ self.consumer_thread.kill()
+ try:
+@@ -476,7 +504,7 @@ class Connection(object):
+ proxy_cb.wait()
+
+ def publisher_send(self, cls, topic, msg):
+- """Send to a publisher based on the publisher class"""
++ """Send to a publisher based on the publisher class."""
+
+ def _connect_error(exc):
+ log_info = {'topic': topic, 'err_str': str(exc)}
+@@ -506,15 +534,15 @@ class Connection(object):
+ topic, callback)
+
+ def declare_fanout_consumer(self, topic, callback):
+- """Create a 'fanout' consumer"""
++ """Create a 'fanout' consumer."""
+ self.declare_consumer(FanoutConsumer, topic, callback)
+
+ def direct_send(self, msg_id, msg):
+- """Send a 'direct' message"""
++ """Send a 'direct' message."""
+ self.publisher_send(DirectPublisher, msg_id, msg)
+
+ def topic_send(self, topic, msg, timeout=None):
+- """Send a 'topic' message"""
++ """Send a 'topic' message."""
+ #
+ # We want to create a message with attributes, e.g. a TTL. We
+ # don't really need to keep 'msg' in its JSON format any longer
+@@ -529,15 +557,15 @@ class Connection(object):
+ self.publisher_send(TopicPublisher, topic, qpid_message)
+
+ def fanout_send(self, topic, msg):
+- """Send a 'fanout' message"""
++ """Send a 'fanout' message."""
+ self.publisher_send(FanoutPublisher, topic, msg)
+
+ def notify_send(self, topic, msg, **kwargs):
+- """Send a notify message on a topic"""
++ """Send a notify message on a topic."""
+ self.publisher_send(NotifyPublisher, topic, msg)
+
+ def consume(self, limit=None):
+- """Consume from all queues/consumers"""
++ """Consume from all queues/consumers."""
+ it = self.iterconsume(limit=limit)
+ while True:
+ try:
+@@ -546,7 +574,7 @@ class Connection(object):
+ return
+
+ def consume_in_thread(self):
+- """Consumer from all queues/consumers in a greenthread"""
++ """Consumer from all queues/consumers in a greenthread."""
+ def _consumer_thread():
+ try:
+ self.consume()
+@@ -557,7 +585,7 @@ class Connection(object):
+ return self.consumer_thread
+
+ def create_consumer(self, topic, proxy, fanout=False):
+- """Create a consumer that calls a method in a proxy object"""
++ """Create a consumer that calls a method in a proxy object."""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+@@ -573,7 +601,7 @@ class Connection(object):
+ return consumer
+
+ def create_worker(self, topic, proxy, pool_name):
+- """Create a worker that calls a method in a proxy object"""
++ """Create a worker that calls a method in a proxy object."""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+@@ -616,7 +644,7 @@ class Connection(object):
+
+
+ def create_connection(conf, new=True):
+- """Create a connection"""
++ """Create a connection."""
+ return rpc_amqp.create_connection(
+ conf, new,
+ rpc_amqp.get_connection_pool(conf, Connection))
diff --git a/openstack-nova.spec b/openstack-nova.spec
index 936a160..645d8a0 100644
--- a/openstack-nova.spec
+++ b/openstack-nova.spec
@@ -2,7 +2,7 @@
Name: openstack-nova
Version: 2013.1.3
-Release: 1%{?dist}
+Release: 2%{?dist}
Summary: OpenStack Compute (nova)
Group: Applications/System
@@ -39,6 +39,8 @@ Source30: openstack-nova-novncproxy.sysconfig
#
Patch0001: 0001-Ensure-we-don-t-access-the-net-when-building-docs.patch
Patch0002: 0002-avoid-code-path-causing-qpid-exchange-leaks.patch
+Patch0003: 0003-Enforce-flavor-access-during-instance-boot.patch
+Patch0004: 0004-Update-rpc-impl_qpid.py-from-oslo.patch
BuildArch: noarch
BuildRequires: intltool
@@ -386,6 +388,8 @@ This package contains documentation files for nova.
%patch0001 -p1
%patch0002 -p1
+%patch0003 -p1
+%patch0004 -p1
find . \( -name .gitignore -o -name .placeholder \) -delete
@@ -840,6 +844,10 @@ fi
%endif
%changelog
+* Tue Aug 27 2013 Nikola Đipanov <ndipanov at redhat.com> - 2013.1.3-2
+- Qpid handles strings longer than 65535 chars (CVE-2013-4261)
+- Enforce flavor access during instance boot (CVE-2013-4278)
+
* Fri Aug 09 2013 Nikola Đipanov <ndipanov at redhat.com> - 2013.1.3-1
- Update to stable/grizzly 2013.1.3 release
More information about the scm-commits
mailing list